Unverified commit 3a59a7a1, authored by guofei, committed by GitHub

Make assign op support LoDTensorArray and modify while_loop API (#22309)

This PR makes the assign op support LoDTensorArray and enables the loop_vars argument of while_loop to be a tuple or a list.
Parent 54a325a5
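Taken together, the two changes mean loop_vars can be a list or a tuple, including entries that are LoDTensorArrays (the new test below feeds a mem_array through the loop), with assign copying each result back onto its loop variable. A minimal usage sketch, assuming the fluid 1.x API of this branch:

```python
import paddle.fluid as fluid
import paddle.fluid.layers as layers

def cond(i, ten):
    return layers.less_than(i, ten)

def body(i, ten):
    i = layers.increment(i)
    return [i, ten]  # loop_vars may be a list (or tuple)

main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(main_program, startup_program):
    i = layers.fill_constant(shape=[1], dtype='int64', value=0)
    ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
    # while_loop assigns body's outputs back onto [i, ten] each iteration.
    out = layers.while_loop(cond, body, loop_vars=[i, ten])

exe = fluid.Executor(fluid.CPUPlace())
res = exe.run(main_program, fetch_list=out)
print(res[0])  # [10]
```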
@@ -1234,7 +1234,7 @@ Scope* OperatorWithKernel::PrepareData(
void OperatorWithKernel::ParseInputDataType(
const ExecutionContext& ctx, const std::string& name,
proto::VarType::Type* data_type) const {
-  proto::VarType::Type dafault_data_type =
+  proto::VarType::Type default_data_type =
static_cast<proto::VarType::Type>(-1);
const std::vector<Variable*> vars = ctx.MultiInputVar(name);
for (size_t i = 0; i < vars.size(); ++i) {
@@ -1247,6 +1247,13 @@ void OperatorWithKernel::ParseInputDataType(
t = &var->Get<LoDTensor>();
} else if (var->IsType<SelectedRows>()) {
t = &(var->Get<SelectedRows>().value());
+    } else if (var->IsType<LoDTensorArray>()) {
+      auto t_arr = var->Get<LoDTensorArray>();
+      for (size_t j = 0; j < t_arr.size(); j++) {
+        if (t_arr[j].IsInitialized()) {
+          t = &(t_arr[j]);
+        }
+      }
}
if (t != nullptr) {
PADDLE_ENFORCE_EQ(
@@ -1257,7 +1264,7 @@ void OperatorWithKernel::ParseInputDataType(
Type(), name, ctx.InputNames(name).at(i)));
proto::VarType::Type tmp = t->type();
PADDLE_ENFORCE(
-        tmp == *data_type || *data_type == dafault_data_type,
+        tmp == *data_type || *data_type == default_data_type,
platform::errors::InvalidArgument(
"The DataType of %s Op's duplicable Variable %s must be "
"consistent. The current variable type is (%s), but the "
......
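The new LoDTensorArray branch lets dtype inference handle array inputs: it scans the array and keeps the last initialized slot, so the PADDLE_ENFORCE below still checks that all duplicable inputs agree on one type (default_data_type, i.e. -1, meaning "nothing inferred yet"). One review-style caveat: `auto t_arr` copies the array, so `t` ends up pointing into a block-local copy; `const auto& t_arr` would avoid that. A plain-Python sketch of the inference rule itself (the helper name and the None convention are illustrative, not part of the patch):

```python
def infer_dtype_from_array(slot_dtypes, default_dtype=None):
    # Each entry is the dtype of one LoDTensorArray slot, or None when
    # that slot is uninitialized (IsInitialized() is false in the C++).
    inferred = default_dtype
    for dtype in slot_dtypes:
        if dtype is not None:
            inferred = dtype  # the last initialized slot wins
    return inferred

# An array whose first slot was never written still yields float32:
assert infer_dtype_from_array([None, 'float32', 'float32']) == 'float32'
```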
@@ -1000,15 +1000,12 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
while_loop_block = While(pre_cond, is_test, name)
with while_loop_block.block():
        output_vars = body(*loop_vars)
-        if len(loop_vars) == 1:
-            assign(output_vars, loop_vars[0])
-            now_cond = cond(output_vars)
-        else:
-            for i in range(len(output_vars)):
-                assign(output_vars[i], loop_vars[i])
-            now_cond = cond(*output_vars)
+        map_structure(assign, output_vars, loop_vars)
+        now_cond = cond(*output_vars)
        assign(now_cond, pre_cond)
    return loop_vars
......
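Replacing the length-based branching with map_structure is what lets loop_vars be a (possibly nested) tuple or list: assign is applied leaf by leaf, and since a leaf may now be a LoDTensorArray, assign had to learn to copy arrays, which is exactly the operator change above. A quick sketch of the map_structure semantics with a plain function standing in for assign (the import path assumes the fluid 1.x layout):

```python
from paddle.fluid.layers.utils import map_structure

outputs = ([1, 2], 3)  # what body(...) returned
targets = ([0, 0], 0)  # the loop variables to write results back into
# The function is applied to corresponding leaves of the two structures;
# while_loop uses assign here to copy each output onto its loop variable.
print(map_structure(lambda out, tgt: out + tgt, outputs, targets))
# -> ([1, 2], 3)
```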
@@ -62,11 +62,7 @@ class TestApiWhileLoop(unittest.TestCase):
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
-            mem = layers.data(
-                name='mem',
-                shape=[10],
-                dtype='float32',
-                append_batch_size=False)
+            mem = fluid.data(name='mem', shape=[10], dtype='float32')
one = layers.fill_constant(shape=[10], dtype='float32', value=1)
out = layers.while_loop(cond, body, [i, mem])
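These test updates swap layers.data(..., append_batch_size=False) for fluid.data, which takes the shape verbatim and never prepends an implicit batch dimension, so both forms declare the same [10] placeholder. A minimal equivalence sketch, assuming the fluid 1.x API:

```python
import paddle.fluid as fluid
import paddle.fluid.layers as layers

# Without append_batch_size=False, layers.data would silently turn
# shape=[10] into [-1, 10]; fluid.data never adds a batch dimension.
old_style = layers.data(
    name='a', shape=[10], dtype='float32', append_batch_size=False)
new_style = fluid.data(name='b', shape=[10], dtype='float32')
assert list(old_style.shape) == list(new_style.shape) == [10]
```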
@@ -111,16 +107,8 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
j = layers.zeros(shape=[1], dtype='int64')
-            init = layers.data(
-                name='init',
-                shape=[3, 3],
-                dtype='float32',
-                append_batch_size=False)
-            sums = layers.data(
-                name='sums',
-                shape=[3, 3],
-                dtype='float32',
-                append_batch_size=False)
+            init = fluid.data(name='init', shape=[3, 3], dtype='float32')
+            sums = fluid.data(name='sums', shape=[3, 3], dtype='float32')
loop_len1 = layers.fill_constant(shape=[1], dtype='int64', value=2)
loop_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
ones = layers.fill_constant(shape=[3, 3], dtype='float32', value=1)
@@ -159,13 +147,11 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
main_program = Program()
startup_program = Program()
with fluid.program_guard(main_program, startup_program):
-            i = layers.data(
-                name='i', shape=[1], dtype='float32', append_batch_size=False)
+            i = fluid.data(name='i', shape=[1], dtype='float32')
i.stop_gradient = False
eleven = layers.fill_constant(shape=[1], dtype='float32', value=11)
one = layers.fill_constant(shape=[1], dtype='float32', value=1)
-            x = layers.data(
-                name='x', shape=[1], dtype='float32', append_batch_size=False)
+            x = fluid.data(name='x', shape=[1], dtype='float32')
x.stop_gradient = False
out = layers.while_loop(cond, body, [i, x])
@@ -189,63 +175,84 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
self.assertTrue(np.allclose(np.asarray(res[1]), i_grad))
-class TestApiWhileLoop_NestedWithBackward(unittest.TestCase):
-    def test_nested_net_with_backward(self):
-        def external_cond(i, x, y):
-            return layers.less_than(i, ten)
-
-        def external_body(i, x, y):
-            def internal_cond(i, x, y):
-                return layers.less_than(i, five)
-
-            def internal_body(i, x, y):
-                x = layers.elementwise_add(x=i, y=i)
-                i = layers.increment(i)
-                return [i, x, y]
-
-            temp = layers.while_loop(internal_cond, internal_body, [i, x, y])
-            y = layers.elementwise_add(x=temp[1], y=i)
-            i = layers.increment(i)
-            return [i, x, y]
+class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
+    def test_nested_net_with_backward_and_lodtensor(self):
+        def external_cond(i, j, x, mem_array):
+            return layers.less_than(i, array_len)
+
+        def external_body(i, j, x, mem_array):
+            def internal_cond(j, x, mem_array):
+                return layers.less_than(j, array_len2)
+
+            def internal_body(j, x, mem_array):
+                inner_data = layers.array_read(array=data_array, i=j)
+                inner_prev = layers.array_read(array=mem_array, i=j)
+                inner_sum_0 = layers.elementwise_add(x=inner_data, y=inner_prev)
+                inner_sum_1 = layers.elementwise_add(x=x, y=inner_sum_0)
+                j = layers.increment(x=j, in_place=True)
+                layers.array_write(inner_sum_1, i=j, array=mem_array)
+                return [j, x, mem_array]
+
+            outer_data = layers.array_read(array=data_array, i=i)
+            outer_prev = layers.array_read(array=mem_array, i=i)
+            outer_sum_0 = layers.elementwise_add(x=outer_data, y=outer_prev)
+            outer_sum_1 = layers.elementwise_add(x=x, y=outer_sum_0)
+            i = layers.increment(x=i, in_place=True)
+            layers.array_write(outer_sum_1, i=i, array=mem_array)
+            j, x, mem_array = layers.while_loop(internal_cond, internal_body,
+                                                [j, x, mem_array])
+            return [i, j, x, mem_array]

        main_program = Program()
        startup_program = Program()
        with fluid.program_guard(main_program, startup_program):
-            i = layers.data(
-                name='i', shape=[1], dtype='float32', append_batch_size=False)
-            i.stop_gradient = False
-            ten = layers.fill_constant(shape=[1], dtype='float32', value=10)
-            five = layers.fill_constant(shape=[1], dtype='float32', value=5)
-            x = layers.data(
-                name='x', shape=[1], dtype='float32', append_batch_size=False)
+            d0 = fluid.data(name='d0', shape=[10], dtype='float32')
+            d1 = fluid.data(name='d1', shape=[10], dtype='float32')
+            d2 = fluid.data(name='d2', shape=[10], dtype='float32')
+            x = fluid.data(name='x', shape=[10], dtype='float32')
            x.stop_gradient = False
-            y = layers.data(
-                name='y', shape=[1], dtype='float32', append_batch_size=False)
-            y.stop_gradient = False
-            out = layers.while_loop(external_cond, external_body, [i, x, y])
-            mean = layers.mean(out[2])
-            append_backward(mean)
-
-        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
-        ) else fluid.CPUPlace()
-        exe = fluid.Executor(place)
-
-        data = np.asarray([17]).astype('float32')
-        feed_x = np.zeros(1).astype('float32')
-        feed_i = np.ones(1).astype('float32')
-        feed_y = np.zeros(1).astype('float32')
-        i_grad = np.asarray(13).astype('int32')
-
-        res = exe.run(main_program,
-                      feed={'i': feed_i,
-                            'x': feed_x,
-                            'y': feed_y},
-                      fetch_list=[mean.name, i.grad_name])
-        self.assertTrue(np.allclose(np.asarray(res[0]), data))
-        self.assertTrue(np.allclose(np.asarray(res[1]), i_grad))
+            i = layers.zeros(shape=[1], dtype='int64')
+            i.stop_gradient = True
+            init = layers.zeros(shape=[10], dtype='float32')
+            mem_array = layers.array_write(x=init, i=i)
+            data_array = layers.array_write(x=d0, i=i)
+            i = layers.increment(i)
+            layers.array_write(d1, i, array=data_array)
+            i = layers.increment(i)
+            layers.array_write(d2, i, array=data_array)
+            i = layers.zeros(shape=[1], dtype='int64')
+            i.stop_gradient = True
+            array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            j = layers.fill_constant(shape=[1], dtype='int64', value=1)
+            j.stop_gradient = True
+            array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
+
+            out = layers.while_loop(external_cond, external_body,
+                                    [i, j, x, mem_array])
+
+            sum_result = layers.array_read(array=mem_array, i=j)
+            mean = layers.mean(sum_result)
+            append_backward(mean)
+
+        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        exe = fluid.Executor(place)
+
+        d = []
+        for i in range(3):
+            d.append(np.random.random(size=[10]).astype('float32'))
+        feed_x = np.ones(10).astype('float32')
+        data_sum = d[0] + d[1] + d[2] + 3 * feed_x
+        x_grad = [0.3] * 10
+
+        res = exe.run(
+            main_program,
+            feed={'d0': d[0],
+                  'd1': d[1],
+                  'd2': d[2],
+                  'x': feed_x},
+            fetch_list=[sum_result.name, x.grad_name])
+        self.assertTrue(np.allclose(res[0], data_sum))
+        self.assertTrue(np.allclose(res[1], x_grad))
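As a sanity check on the expected values: array_len is 1, so the outer body runs once, and array_len2 is 3, so the inner body runs twice (j goes from 1 to 3). x is therefore added into the running sum 1 + 2 = 3 times, and the final cell of mem_array holds d0 + d1 + d2 + 3x, which is exactly data_sum. Since mean averages the 10 elements of sum_result, each entry of x's gradient is 3/10 = 0.3, matching x_grad.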
class TestApiWhileLoopWithSwitchCase(unittest.TestCase):
......