Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
机器未来
Paddle
提交
8e4ed662
P
Paddle
项目概览
机器未来
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
8e4ed662
编写于
8月 21, 2020
作者:
T
tangwei12
提交者:
GitHub
8月 21, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix decay global counter (#26387)
* fix decay global counter * remove unused print, test=distp0
上级
ce7d5263
变更
2
隐藏空白更改
内联
并排
Showing
2 changed files
with
16 additions
and
10 deletions
+16
-10
python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py
.../fleet/parameter_server/distribute_transpiler/__init__.py
+3
-10
python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py
...paddle/fluid/incubate/fleet/parameter_server/ir/public.py
+13
-0
未找到文件。
python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py
浏览文件 @
8e4ed662
...
@@ -38,6 +38,7 @@ from paddle.fluid.incubate.fleet.base.role_maker import MPISymetricRoleMaker
...
@@ -38,6 +38,7 @@ from paddle.fluid.incubate.fleet.base.role_maker import MPISymetricRoleMaker
from
paddle.fluid.incubate.fleet.parameter_server
import
version
from
paddle.fluid.incubate.fleet.parameter_server
import
version
from
paddle.fluid.incubate.fleet.parameter_server.ir.public
import
get_sparse_tablenames
from
paddle.fluid.incubate.fleet.parameter_server.ir.public
import
get_sparse_tablenames
from
paddle.fluid.incubate.fleet.parameter_server.ir.public
import
_get_lr_ops
from
paddle.fluid.incubate.fleet.parameter_server.ir.public
import
_get_lr_ops
from
paddle.fluid.incubate.fleet.parameter_server.ir.public
import
_has_global_step
from
paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy
import
TrainerRuntimeConfig
,
DistributedStrategy
,
\
from
paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy
import
TrainerRuntimeConfig
,
DistributedStrategy
,
\
SyncStrategy
,
AsyncStrategy
,
HalfAsyncStrategy
,
GeoStrategy
,
StrategyFactory
SyncStrategy
,
AsyncStrategy
,
HalfAsyncStrategy
,
GeoStrategy
,
StrategyFactory
...
@@ -161,9 +162,9 @@ class FleetTranspiler(Fleet):
...
@@ -161,9 +162,9 @@ class FleetTranspiler(Fleet):
print
(
trainer_config
)
print
(
trainer_config
)
lrs
=
_
get_lr_ops
(
self
.
_origin_main_program
)
lrs
=
_
has_global_step
(
_get_lr_ops
(
self
.
_origin_main_program
)
)
if
l
en
(
lrs
)
>
0
:
if
l
rs
>
0
:
kwargs
=
{
"need_global_step"
:
"1"
}
kwargs
=
{
"need_global_step"
:
"1"
}
else
:
else
:
kwargs
=
{
"need_global_step"
:
"0"
}
kwargs
=
{
"need_global_step"
:
"0"
}
...
@@ -186,14 +187,6 @@ class FleetTranspiler(Fleet):
...
@@ -186,14 +187,6 @@ class FleetTranspiler(Fleet):
recv_ctx
=
fleet
.
compiled_config
.
get_communicator_recv_context
(
recv_ctx
=
fleet
.
compiled_config
.
get_communicator_recv_context
(
recv_type
=
1
)
recv_type
=
1
)
for
name
,
ctx
in
send_ctx
.
items
():
print
(
"name: {}, ctx: {}"
.
format
(
name
,
ctx
))
print
(
"==== = ==== =============== ===="
)
for
name
,
ctx
in
recv_ctx
.
items
():
print
(
"name: {}, ctx: {}"
.
format
(
name
,
ctx
))
from
paddle.fluid.communicator
import
Communicator
from
paddle.fluid.communicator
import
Communicator
self
.
_communicator
=
Communicator
(
self
.
_communicator
=
Communicator
(
trainer_config
.
mode
,
kwargs
,
trainer_config
.
mode
,
kwargs
,
...
...
python/paddle/fluid/incubate/fleet/parameter_server/ir/public.py
浏览文件 @
8e4ed662
...
@@ -43,6 +43,8 @@ from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundR
...
@@ -43,6 +43,8 @@ from paddle.fluid.incubate.fleet.parameter_server.ir.ps_dispatcher import RoundR
OP_NAME_SCOPE
=
"op_namescope"
OP_NAME_SCOPE
=
"op_namescope"
CLIP_OP_NAME_SCOPE
=
"@CLIP"
CLIP_OP_NAME_SCOPE
=
"@CLIP"
STEP_COUNTER
=
"@PS_STEP_COUNTER@"
STEP_COUNTER
=
"@PS_STEP_COUNTER@"
LEARNING_RATE_DECAY_COUNTER
=
"@LR_DECAY_COUNTER@"
OP_ROLE_VAR_ATTR_NAME
=
core
.
op_proto_and_checker_maker
.
kOpRoleVarAttrName
()
OP_ROLE_VAR_ATTR_NAME
=
core
.
op_proto_and_checker_maker
.
kOpRoleVarAttrName
()
RPC_OP_ROLE_ATTR_NAME
=
core
.
op_proto_and_checker_maker
.
kOpRoleAttrName
()
RPC_OP_ROLE_ATTR_NAME
=
core
.
op_proto_and_checker_maker
.
kOpRoleAttrName
()
RPC_OP_ROLE_ATTR_VALUE
=
core
.
op_proto_and_checker_maker
.
OpRole
.
RPC
RPC_OP_ROLE_ATTR_VALUE
=
core
.
op_proto_and_checker_maker
.
OpRole
.
RPC
...
@@ -62,6 +64,17 @@ def _get_lr_ops(program):
...
@@ -62,6 +64,17 @@ def _get_lr_ops(program):
return
lr_ops
return
lr_ops
def _has_global_step(lr_ops):
    """Return True if the LR-decay ops maintain a global step counter.

    Scans *lr_ops* for an ``increment`` operator whose first "X" input is
    the ``@LR_DECAY_COUNTER@`` variable (``LEARNING_RATE_DECAY_COUNTER``),
    which indicates the program tracks a global step for LR decay.

    Args:
        lr_ops: iterable of operators collected from the learning-rate
            sub-program; each op exposes ``.type`` and ``.input(name)``.

    Returns:
        bool: True when a decay-counter increment op is present.
    """
    # An empty op list trivially has no counter; iterating it is a no-op,
    # so no explicit length guard is needed.
    for op in lr_ops:
        # Only `increment` ops can advance the decay counter.
        if op.type != 'increment':
            continue
        # op.input("X") returns the list of input variable names for
        # slot "X"; the counter is always the first (only) input.
        if op.input("X")[0] == LEARNING_RATE_DECAY_COUNTER:
            return True
    return False
def
is_sparse_op
(
op
):
def
is_sparse_op
(
op
):
if
op
.
type
==
"lookup_table"
and
op
.
attr
(
'is_sparse'
)
is
True
and
op
.
attr
(
if
op
.
type
==
"lookup_table"
and
op
.
attr
(
'is_sparse'
)
is
True
and
op
.
attr
(
'is_distributed'
)
is
False
:
'is_distributed'
)
is
False
:
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录