BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 32d81909
Authored on Jul 17, 2018 by typhoonzero
fix pserver with condition block
Parent: 0b9abcbe
Showing 3 changed files with 56 additions and 9 deletions (+56, -9):
paddle/fluid/operators/listen_and_serv_op.cc                  +9  -5
python/paddle/fluid/tests/unittests/test_dist_transpiler.py   +44 -4
python/paddle/fluid/transpiler/distribute_transpiler.py       +3  -0
paddle/fluid/operators/listen_and_serv_op.cc

```diff
@@ -61,6 +61,8 @@ static void ParallelExecuteBlocks(
     framework::Async([&executor, &prepared, &program, &scope, idx]() {
       int run_block = idx;  // thread local
       try {
+        VLOG(3) << "running server block: " << run_block
+                << "pointer: " << prepared[run_block].get();
         executor->RunPreparedContext(prepared[run_block].get(), scope);
       } catch (const std::exception &e) {
         LOG(ERROR) << "run sub program error " << e.what();
@@ -107,12 +109,14 @@ void ListenAndServOp::RunSyncLoop(
   PADDLE_ENFORCE_GE(num_blocks, 2,
                     "server program should have at least 2 blocks");
 
-  // Prepare all the server block
-  std::vector<int> optimize_blocks_list;
-  for (size_t i = 1; i < program->Size(); ++i) {
-    optimize_blocks_list.push_back(i);
+  std::vector<int> optimize_blocks_idx;
+  for (auto blk : optimize_blocks) {
+    optimize_blocks_idx.push_back(blk->ID());
   }
-  auto optimize_prepared = executor->Prepare(*program, optimize_blocks_list);
-  // Insert placeholder for block0 which holds current op itself.
+  auto optimize_prepared = executor->Prepare(*program, optimize_blocks_idx);
+  // Insert placeholder for block0 which holds current op itself,
+  // NOTE the first block in `optimize_prepared` should never be ran.
   optimize_prepared.insert(
       optimize_prepared.begin(),
       std::shared_ptr<framework::ExecutorPrepareContext>(nullptr));
```
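The substance of the fix is the second hunk: the sync loop now prepares only the block IDs listed in `optimize_blocks`, rather than every block in the server program, because a program containing `conditional_block` ops also holds sub-blocks that must only run when their parent conditional fires. A minimal Python sketch of that selection logic (the block layout and helper name here are hypothetical, not Paddle's API):

```python
# Hypothetical sketch, not Paddle API: prepare execution contexts only for
# blocks explicitly listed as optimize blocks, keeping a placeholder at
# index 0 for block 0, which holds the listen_and_serv op itself.

def prepare_optimize_blocks(num_blocks, optimize_block_ids):
    # The old behavior prepared every block 1..num_blocks-1, which wrongly
    # launched conditional_block sub-blocks as top-level server blocks.
    prepared = [None]  # placeholder for block 0; must never be run
    for block_id in range(1, num_blocks):
        if block_id in optimize_block_ids:
            prepared.append(("prepared_ctx", block_id))
    return prepared

# Suppose blocks 3 and 4 are sub-blocks of a conditional_block op: skipped.
print(prepare_optimize_blocks(5, {1, 2}))
# -> [None, ('prepared_ctx', 1), ('prepared_ctx', 2)]
```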
python/paddle/fluid/tests/unittests/test_dist_transpiler.py

```diff
@@ -304,10 +304,50 @@ class TestL2Decay(TranspilerTest):
     # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer
 
 
 # FIXME(typhoonzero): need to add test for async case:
 # see https://github.com/PaddlePaddle/Paddle/issues/11691
 class TestAsyncSGD(TranspilerTest):
     pass
 
 
+class TestL2DecayWithPiecewise(TranspilerTest):
+    def net_conf(self):
+        x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
+        y_predict = fluid.layers.fc(input=x,
+                                    size=1000,
+                                    act=None,
+                                    param_attr=fluid.ParamAttr(name='fc_w'),
+                                    bias_attr=fluid.ParamAttr(name='fc_b'))
+        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+        avg_cost = fluid.layers.mean(cost)
+        base_lr = 1.0
+        bd = [1, 10, 20, 30]
+        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
+        sgd_optimizer = fluid.optimizer.Momentum(
+            learning_rate=fluid.layers.piecewise_decay(
+                boundaries=bd, values=lr),
+            momentum=0.9,
+            regularization=fluid.regularizer.L2Decay(1e-4))
+        sgd_optimizer.minimize(avg_cost)
+        return
+
+    def test_transpiler(self):
+        pserver, startup = self.get_pserver(self.pserver1_ep)
+        trainer = self.get_trainer()
+
+        self.assertEqual(len(pserver.blocks), 9)
+        self.assertEqual([op.type for op in pserver.blocks[1].ops], [
+            "increment", "cast", "fill_constant", "fill_constant", "less_than",
+            "logical_not", "conditional_block", "fill_constant",
+            "fill_constant", "less_than", "logical_not", "logical_and",
+            "logical_and", "conditional_block", "fill_constant",
+            "fill_constant", "less_than", "logical_not", "logical_and",
+            "logical_and", "conditional_block", "fill_constant",
+            "fill_constant", "less_than", "logical_not", "logical_and",
+            "logical_and", "conditional_block", "fill_constant",
+            "conditional_block"
+        ])
+        self.assertEqual([op.type for op in pserver.blocks[7].ops],
+                         ["sum", "scale", "scale", "elementwise_add",
+                          "momentum"])
+        self.assertEqual([op.type for op in pserver.blocks[8].ops],
+                         ["sum", "scale", "scale", "elementwise_add",
+                          "momentum"])
+
+
 if __name__ == "__main__":
```
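As an aside on the schedule under test: `piecewise_decay` takes `len(bd) + 1` values, one per interval, so `bd = [1, 10, 20, 30]` yields the five rates `[1.0, 0.1, 0.01, 0.001, 0.0001]`. Each boundary comparison is what the transpiler turns into the `less_than` / `logical_and` / `conditional_block` chain asserted on pserver block 1 above. A plain-Python sketch of the schedule itself (no Paddle involved, function name is illustrative):

```python
# Sketch of the piecewise schedule the test builds: values[i] applies while
# step < boundaries[i]; the final value applies after the last boundary.

def piecewise_lr(step, boundaries, values):
    for b, v in zip(boundaries, values):
        if step < b:
            return v
    return values[-1]  # past the last boundary

bd = [1, 10, 20, 30]
lr = [1.0 * (0.1 ** i) for i in range(len(bd) + 1)]  # [1.0, 0.1, 0.01, 0.001, 0.0001]
print([piecewise_lr(s, bd, lr) for s in (0, 5, 15, 25, 40)])
# -> [1.0, 0.1, 0.01, 0.001, 0.0001], one rate per interval
```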
python/paddle/fluid/transpiler/distribute_transpiler.py

```diff
@@ -461,6 +461,8 @@ class DistributeTranspiler(object):
             per_opt_block = pserver_program.create_block(pre_block_idx)
             optimize_blocks.append(per_opt_block)
             # append grad merging ops before clip and weight decay
+            # cases may like:
+            # L2Decay op -> clip op -> optimize
             for _, op in enumerate(self.optimize_ops):
                 # find the origin @GRAD var before clipping
                 grad_varname_for_block = __op_have_grad_input__(op)
@@ -468,6 +470,7 @@ class DistributeTranspiler(object):
                     merged_var = self._append_pserver_grad_merge_ops(
                         per_opt_block, grad_varname_for_block, endpoint,
                         grad_to_block_id, self.origin_program)
+                    break
             # append optimize op once then append other ops.
             for _, op in enumerate(self.optimize_ops):
                 # optimizer is connected to itself
                 if ufind.is_connected(op, opt_op) and op not in global_ops:
```
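The added `break` matters because, per the new comment, a chain like L2Decay op -> clip op -> optimize means several ops consume the same `@GRAD` variable; without it, the gradient-merge ops would be appended once per such op instead of once per parameter block. A sketch of that loop shape (hypothetical names, not the transpiler's real data structures):

```python
# Illustrative only: append grad-merge ops for the first op that has a
# @GRAD input, then stop scanning the remaining optimize ops.

def append_grad_merge_once(optimize_ops, has_grad_input, append_merge):
    for op in optimize_ops:
        if has_grad_input(op):
            append_merge(op)  # emit the sum/scale ops that merge trainer grads
            break             # only the first @GRAD consumer triggers a merge

appended = []
ops = ["l2_decay", "clip", "momentum"]  # all consume fc_w@GRAD in the example
append_grad_merge_once(ops, lambda op: True, appended.append)
print(appended)  # -> ['l2_decay']  (one merge appended, not three)
```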