Unverified commit c62ef22d, authored by Yang Yang(Tony), committed by GitHub

Update parallel_do.md

Parent: ad2dfef4
@@ -72,10 +72,12 @@ block0 {
       sgd(w1, w1_grad)
 }
 block1 {
+  parent_block: 0
   vars: data, h1, h2, loss
   ops: fc, fc, softmax
 }
 block2 {
+  parent_block: 1
   vars: data_grad, h1_grad, h2_grad, loss_grad, w1_grad, w2_grad
   ops: softmax_grad,
        fc_grad
@@ -122,6 +124,7 @@ block0 {
   parallel_do(block1)
 }
 block1 {
+  parent_block: 0
   vars: w1, w2
   ops: init(w1), init(w2)
 }
@@ -137,16 +140,19 @@ block0 {
 }
 block1 {
+  parent_block: 0
   vars: data, h1, h2, loss
   ops: fc, fc, softmax
 }
 block2 {
+  parent_block: 1
   vars: data_grad, h1_grad, h2_grad, loss_grad, w1_grad, w2_grad
   ops: softmax_grad,
        fc_grad, allreduce(places, scopes, w1_grad),
        fc_grad, allreduce(places, scopes, w2_grad)
 }
 block3 {
+  parent_block: 0
   vars: lr
   ops: sgd(w2, w2_grad),
        sgd(w1, w1_grad)
...
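The `parent_block` fields added in this commit make each block's scope chain explicit: an op in a child block may reference a variable defined in any ancestor block. Below is a minimal Python sketch of that lookup rule; the `Block` class and `find_var` helper are illustrative only, not PaddlePaddle's actual implementation.

```python
# Illustrative sketch only -- not PaddlePaddle's real Block/Scope classes.
# Shows how a parent_block pointer lets a child block resolve names defined
# in an ancestor block, mirroring the hierarchy in the diff:
# block0 <- block1 (parent_block: 0) <- block2 (parent_block: 1).

class Block:
    def __init__(self, idx, parent=None):
        self.idx = idx          # block index, e.g. 0, 1, 2
        self.parent = parent    # parent_block; None for the root block0
        self.vars = {}          # name -> variable payload

    def find_var(self, name):
        # Walk up the parent_block chain until the name is found.
        block = self
        while block is not None:
            if name in block.vars:
                return block.vars[name]
            block = block.parent
        raise KeyError(f"{name!r} not visible from block {self.idx}")

block0 = Block(0)
block0.vars["w1"] = "w1 parameter tensor"
block1 = Block(1, parent=block0)   # forward pass, parent_block: 0
block2 = Block(2, parent=block1)   # backward pass, parent_block: 1

# fc_grad in block2 can read w1 from block0 through the chain:
print(block2.find_var("w1"))
```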
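The third hunk also shows `allreduce(places, scopes, w1_grad)` interleaved with the `fc_grad` ops, so each parameter's gradient is synchronized across the per-device scopes as soon as it is computed, presumably so that communication for one layer can overlap with gradient computation for the next. A hypothetical sketch of the summing semantics, modeling each scope as a plain dict (names here are assumptions, not the real op's signature):

```python
# Hypothetical sketch of allreduce over per-device scopes. PaddlePaddle's
# real op works on tensors and devices; this shows only the semantics:
# after the call, every scope holds the sum of all local gradients.

def allreduce(scopes, grad_name):
    total = sum(scope[grad_name] for scope in scopes)
    for scope in scopes:
        scope[grad_name] = total

# One scope per place (device); each holds a locally computed w1_grad.
scopes = [{"w1_grad": 1.0}, {"w1_grad": 3.0}, {"w1_grad": 2.0}]
allreduce(scopes, "w1_grad")
print(scopes)  # every scope now holds w1_grad == 6.0
```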