8c516a24
编写于
1月 15, 2019
作者:
Q
Qiao Longfei
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
remote min_row_size_to_use_multithread in adam interface test=develop
上级
7fd15ce5
Showing 4 changed files with 9 additions and 15 deletions (+9 -15)
paddle/fluid/API.spec                          +1 -1
paddle/fluid/operators/optimizers/adam_op.cc   +1 -1
paddle/fluid/operators/optimizers/adam_op.h    +5 -5
python/paddle/fluid/optimizer.py               +2 -8
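Taken together, the four files tell one story: the min_row_size_to_use_multithread knob is removed from the public AdamOptimizer constructor and survives only as an internal op attribute with a larger default. A minimal usage sketch of the trimmed interface (fluid 1.x-era API; the call below is illustrative, not taken from this commit):

    import paddle.fluid as fluid

    # After this commit the constructor accepts only these keywords
    # (see the API.spec hunk below); passing
    # min_row_size_to_use_multithread=... would raise a TypeError.
    adam = fluid.optimizer.AdamOptimizer(
        learning_rate=0.001,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-8,
        regularization=None,
        lazy_mode=False)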
paddle/fluid/API.spec
@@ -418,7 +418,7 @@ paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning
 paddle.fluid.optimizer.AdagradOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdagradOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
-paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode', 'min_row_size_to_use_multithread'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False, 0))
+paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon', 'regularization', 'name', 'lazy_mode'], varargs=None, keywords=None, defaults=(0.001, 0.9, 0.999, 1e-08, None, None, False))
 paddle.fluid.optimizer.AdamOptimizer.apply_gradients ArgSpec(args=['self', 'params_grads'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.optimizer.AdamOptimizer.backward ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.optimizer.AdamOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None))
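Each line in API.spec is a serialized ArgSpec for one public symbol, so the single-line change above maps one-to-one onto the constructor signature. Reconstructed from the ArgSpec (a sketch of the recorded signature, not the literal source file):

    # Before: AdamOptimizer.__init__ also accepted the threshold keyword.
    # def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,
    #              epsilon=1e-08, regularization=None, name=None,
    #              lazy_mode=False, min_row_size_to_use_multithread=0): ...

    # After: the keyword is gone.
    def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999,
                 epsilon=1e-08, regularization=None, name=None,
                 lazy_mode=False):
        ...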
paddle/fluid/operators/optimizers/adam_op.cc
@@ -120,7 +120,7 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker {
                  "min_row_size_to_use_multithread and "
                  "inner_op_parallelism is larger then 0, sparse update "
                  "will run in multithread mode")
-        .SetDefault(0);
+        .SetDefault(1000);
     AddComment(R"DOC(
 Adam Optimizer.
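With the keyword gone from Python, the threshold now comes entirely from this op attribute, whose default rises from 0 to 1000; the multithreaded sparse update path additionally requires FLAGS_inner_op_parallelism > 0. A hedged sketch of how a user would enable it after this change, assuming the common pattern of setting Paddle GFlags through environment variables before import:

    import os
    # Assumption: FLAGS_* environment variables are picked up at framework
    # initialization, so set them before importing paddle.fluid.
    os.environ['FLAGS_inner_op_parallelism'] = '4'

    import paddle.fluid as fluid

    # Multithreaded sparse update now triggers only for parameters with more
    # than 1000 rows (the new SetDefault above); there is no per-optimizer knob.
    adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001, lazy_mode=True)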
paddle/fluid/operators/optimizers/adam_op.h
@@ -494,14 +494,14 @@ class AdamOpKernel : public framework::OpKernel<T> {
                 << " min_row_size_to_use_multithread="
                 << min_row_size_to_use_multithread;
         if (FLAGS_inner_op_parallelism > 10) {
-          LOG(WARNING) << "FLAGS_inner_op_parallelism "
-                       << FLAGS_inner_op_parallelism << " is two large!";
+          VLOG(1) << "FLAGS_inner_op_parallelism "
+                  << FLAGS_inner_op_parallelism << " is two large!";
         }
         auto& grad_rows = grad_merge.rows();
         std::unordered_map<size_t, int> row_id_to_grad_row_offset;
         size_t param_row_count = param.numel() / row_numel;
         if (param_row_count < 1000) {
-          LOG(WARNING) << "param_row_count should be larger then 1000 to use "
-                          "multi thread, currently "
-                       << param_row_count;
+          VLOG(1) << "param_row_count should be larger then 1000 to use "
+                     "multi thread, currently "
+                  << param_row_count;
         }
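Both messages are demoted from LOG(WARNING) to VLOG(1), so they no longer appear in default output. To still see them, raise the glog verbosity; a sketch, assuming the standard glog environment switch that Paddle's logging inherits:

    import os
    # Assumption: glog reads GLOG_v at startup, so set it before importing
    # paddle.fluid in this process (or export GLOG_v=1 in the shell instead).
    os.environ['GLOG_v'] = '1'
    import paddle.fluid as fluid  # VLOG(1) messages now show up in the logs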
python/paddle/fluid/optimizer.py
@@ -734,8 +734,6 @@ class AdamOptimizer(Optimizer):
         may be very slow. The lazy mode only update the element that has gradient is the current
         mini-batch, so it will be much more faster. But this mode has different semantics with the
         original Adam algorithm and may lead to different result.
-        min_row_size_to_use_multithread: if adam use sparse update and the param rows is very large,
-            you can use FLAGS_inner_op_parallelism and this flag to enable multi thread optimize.

     Examples:
         .. code-block:: python
@@ -756,8 +754,7 @@ class AdamOptimizer(Optimizer):
                  epsilon=1e-8,
                  regularization=None,
                  name=None,
-                 lazy_mode=False,
-                 min_row_size_to_use_multithread=0):
+                 lazy_mode=False):
         assert learning_rate is not None
         assert beta1 is not None
         assert beta2 is not None
@@ -771,7 +768,6 @@ class AdamOptimizer(Optimizer):
         self._beta2 = beta2
         self._epsilon = epsilon
         self._lazy_mode = lazy_mode
-        self._min_row_size_to_use_multithread = min_row_size_to_use_multithread

     def _create_accumulators(self, block, parameters):
         assert isinstance(block, framework.Block)
@@ -826,9 +822,7 @@ class AdamOptimizer(Optimizer):
                 "beta1": self._beta1,
                 "beta2": self._beta2,
                 "epsilon": self._epsilon,
-                "lazy_mode": self._lazy_mode,
-                "min_row_size_to_use_multithread":
-                self._min_row_size_to_use_multithread
+                "lazy_mode": self._lazy_mode
             },
             stop_gradient=True)
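The net effect on what Python hands to the C++ adam op: the min_row_size_to_use_multithread key is simply absent from the attrs, so the kernel falls back to the op-level default. A sketch of the resulting attrs dict, transcribed from the last hunk above (fragment only; self refers to the optimizer instance):

    attrs = {
        "beta1": self._beta1,
        "beta2": self._beta2,
        "epsilon": self._epsilon,
        "lazy_mode": self._lazy_mode,
    }
    # "min_row_size_to_use_multithread" is no longer set here, so the
    # SetDefault(1000) from adam_op.cc applies.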