Commit cc18fffb authored by chengduoZH

add nest while_op

Parent: f76f42c2
@@ -63,7 +63,7 @@ class WhileOp : public framework::OperatorBase {
     while (cond.data<bool>()[0]) {
       auto &current_scope = scope.NewScope();
       step_scopes->push_back(&current_scope);
-      executor.RunPreparedContext(ctx.get(), &current_scope, false);
+      executor.RunPreparedContext(ctx.get(), &current_scope, false, true, true);
       if (is_test) {
         scope.DeleteScope(&current_scope);
       }
@@ -169,7 +169,8 @@ class WhileGradOp : public framework::OperatorBase {
         }
       }
     }
-    executor.RunPreparedContext(ctx.get(), *cur_scope_iter, false);
+    executor.RunPreparedContext(ctx.get(), *cur_scope_iter, false, true,
+                                true);

     auto &pg_names = Outputs(kXGRAD);
     auto &p_names = Inputs(kX);
......
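For context on the two call sites above: in the Paddle executor of this era, Executor::RunPreparedContext takes three trailing flags, create_local_scope, create_vars, and keep_kids (treat these parameter names as an assumption read from the surrounding codebase, not from this diff). Both calls keep create_local_scope == false, and the newly passed "true, true" enables create_vars and, crucially, keep_kids, so the child scopes created while running the block body are no longer dropped after each call. Keeping those scopes alive is plausibly what allows one while_op and its gradient op to execute alongside or inside another without losing intermediate step scopes, which is the nesting case this commit targets.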
@@ -30,8 +30,10 @@ class TestWhileOp(unittest.TestCase):
             "d1", shape=[10], append_batch_size=False, dtype='float32')
         d2 = layers.data(
             "d2", shape=[10], append_batch_size=False, dtype='float32')
+
         i = layers.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
+
         init = layers.zeros(shape=[10], dtype='float32')
         mem_array = layers.array_write(x=init, i=i)
         data_array = layers.array_write(x=d0, i=i)
@@ -45,11 +47,19 @@ class TestWhileOp(unittest.TestCase):

         i = layers.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
-        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
+        array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
         array_len.stop_gradient = True
         cond = layers.less_than(x=i, y=array_len)
+
+        j = layers.fill_constant(shape=[1], dtype='int64', value=1)
+        j.stop_gradient = True
+        array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
+        array_len2.stop_gradient = True
+        cond2 = layers.less_than(x=j, y=array_len2)
+
         while_op = layers.While(cond=cond)
+        while_op2 = layers.While(cond=cond2)

         with while_op.block():
             d = layers.array_read(array=data_array, i=i)
             prev = layers.array_read(array=mem_array, i=i)
@@ -59,7 +69,16 @@ class TestWhileOp(unittest.TestCase):
             layers.array_write(result, i=i, array=mem_array)
             layers.less_than(x=i, y=array_len, cond=cond)

-        sum_result = layers.array_read(array=mem_array, i=i)
+        with while_op2.block():
+            d2 = layers.array_read(array=data_array, i=j)
+            prev2 = layers.array_read(array=mem_array, i=j)
+            result2 = layers.sums(input=[d2, prev2])
+
+            j = layers.increment(x=j, in_place=True)
+            layers.array_write(result2, i=j, array=mem_array)
+            layers.less_than(x=j, y=array_len2, cond=cond2)
+
+        sum_result = layers.array_read(array=mem_array, i=j)
         loss = layers.mean(sum_result)

         append_backward(loss)
......
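The updated test chains a second While over the same mem_array. Since the commit is titled "add nest while_op", a minimal sketch of an actually nested loop is shown below, written against the same pre-2.0 fluid.layers API as the test; the variable names and loop bounds are illustrative, not code from this commit.

import paddle.fluid.layers as layers

# Hypothetical sketch: a While constructed inside another While's block,
# the nesting case that keep_kids == true is meant to support.
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
outer_len = layers.fill_constant(shape=[1], dtype='int64', value=2)
outer_len.stop_gradient = True
cond = layers.less_than(x=i, y=outer_len)

outer = layers.While(cond=cond)
with outer.block():
    j = layers.zeros(shape=[1], dtype='int64')
    j.stop_gradient = True
    inner_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
    inner_len.stop_gradient = True
    cond2 = layers.less_than(x=j, y=inner_len)

    inner = layers.While(cond=cond2)
    with inner.block():
        # inner body: advance j and refresh the inner condition
        j = layers.increment(x=j, in_place=True)
        layers.less_than(x=j, y=inner_len, cond=cond2)

    # advance i and refresh the outer condition
    i = layers.increment(x=i, in_place=True)
    layers.less_than(x=i, y=outer_len, cond=cond)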
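The remainder of the test is collapsed above. A typical driver for a program like this, assuming the standard fluid Executor workflow of the same era, feeds the three d* placeholders and fetches sum_result; the collapsed portion of the test presumably does something similar.

import numpy as np
import paddle.fluid as fluid

# Hypothetical driver, not from this commit: run the program built
# above on CPU and fetch the accumulated result.
exe = fluid.Executor(fluid.CPUPlace())
d = [np.random.random(size=[10]).astype('float32') for _ in range(3)]
outs = exe.run(fluid.default_main_program(),
               feed={'d0': d[0], 'd1': d[1], 'd2': d[2]},
               fetch_list=[sum_result])
# outs[0] can then be checked against a NumPy reference sum.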