未验证 提交 b3ac1470 编写于 作者: L liym27 提交者: GitHub

Support create LoDTensorArray and return LoDTensorArray in control flow (#24609)

* Support creating LoDTensorArray in control flow (cond and while_loop)

* Fix bug: return LoDTensorArray in while_loop

* Change code in list_transformer.py to accommodate the new features. 
上级 1d034696
......@@ -19,35 +19,17 @@ import gast
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import AstNodeWrapper, NodeVarType, StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code, is_control_flow_to_transform
from paddle.fluid.framework import core, default_main_program, Variable
from paddle.fluid.framework import core, Variable
from paddle.fluid.layers import array_length, array_read, array_write, create_array
from paddle.fluid.layers import assign, cast, fill_constant, slice
from paddle.fluid.layers import assign, fill_constant, slice
from paddle.fluid.layers.control_flow import cond, while_loop, less_than, increment
def create_array_in_parent_blcok(null_array, dtype="float32"):
    """
    Mirror `null_array` into every ancestor block of the current block by
    creating a LOD_TENSOR_ARRAY variable with the same name there.

    TODO(liym27): Create a null tensor_array with the same name in parent
    block to avoid a bug in control flow, because in
    `null_array = create_array("float32")`, `null_array` is not an output of
    a real OP. See class ConditionalBlock for details.

    NOTE(review): the function name contains a typo ("blcok"); it is kept
    unchanged for backward compatibility with existing call sites.

    Args:
        null_array(Variable): Tensor array whose name is replicated into the
            ancestor blocks.
        dtype(str): Data type of the variables created in the ancestor
            blocks. Default: "float32" (the original hard-coded value).
    """
    prog = default_main_program()
    parent_idx = prog.current_block().parent_idx
    # Walk up the block hierarchy; the root block has parent_idx == -1.
    while parent_idx != -1:
        parent_block = prog.block(parent_idx)
        parent_block.create_var(
            name=null_array.name,
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=dtype)
        parent_idx = parent_block.parent_idx
# TODO(liym27): A better way to slice tensor array.
# Maybe support start == end for slice op.
def slice_tensor_array(array, start, end):
end = cast(end, "int32")
def true_fn():
null_array = create_array("float32")
create_array_in_parent_blcok(null_array)
return null_array
def false_fn(array, start, end):
......
......@@ -925,6 +925,49 @@ class WhileGuard(BlockGuard):
return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
def get_inputs_outputs_in_block(current_block, inner_inputs, inner_outputs,
                                helper):
    """
    Find inputs and outputs in current control flow block.
    :param current_block: Current control flow block.
    :param inner_inputs: Input var name of ops in current block.
    :param inner_outputs: Output var name of ops in current block.
    :param helper: LayerHelper used to locate the parent block.
    :return: inner_inputs, inner_outputs
    """
    # Step 1: collect variable names flowing into and out of the ops.
    # NOTE: Here assumes that all variables are input or output of Ops,
    # but some variables are created without appending a real op.
    # For example, in `arr = create_array(dtype)`, `arr` is not the output
    # of an op.
    for block_op in current_block.ops:
        assert isinstance(block_op, Operator)
        for input_slot in block_op.input_names:
            for var_name in block_op.input(input_slot):
                if var_name not in inner_outputs:
                    inner_inputs.add(var_name)
        for output_slot in block_op.output_names:
            for var_name in block_op.output(output_slot):
                inner_outputs.add(var_name)

    # Step 2: drop LOD_TENSOR_ARRAY variables that were created inside the
    # current control flow block (they look like inputs but are local).
    parent_block = helper.main_program.block(current_block.parent_idx)

    def _is_local_tensor_array(var_name):
        # Local iff invisible from the parent block yet present in the
        # current block with LOD_TENSOR_ARRAY type.
        if parent_block._find_var_recursive(var_name):
            return False
        if not current_block.has_var(var_name):
            return False
        block_var = current_block.var(var_name)
        return block_var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY

    inner_inputs = {
        var_name
        for var_name in inner_inputs if not _is_local_tensor_array(var_name)
    }

    return inner_inputs, inner_outputs
class While(object):
"""
:api_attr: Static Graph
......@@ -1023,15 +1066,8 @@ class While(object):
inner_outputs = {self.cond_var.name}
x_name_list = set()
for op in while_block.ops:
for iname in op.input_names:
for in_var_name in op.input(iname):
if in_var_name not in inner_outputs:
x_name_list.add(in_var_name)
for oname in op.output_names:
for out_var_name in op.output(oname):
inner_outputs.add(out_var_name)
x_name_list, inner_outputs = get_inputs_outputs_in_block(
while_block, x_name_list, inner_outputs, self.helper)
out_vars = []
for inner_out_name in inner_outputs:
......@@ -1057,12 +1093,18 @@ class While(object):
"is_test": self.is_test})
def assign_skip_lod_tensor_array(input, output):
    """
    Assign input to output, but skip the process of copying LoDTensorArray unless it's created in while_block.

    NOTE(review): the parameter `input` shadows the builtin of the same
    name; kept unchanged to preserve the public signature.
    """
    if input.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        main_program = input.block.program
        parent_block = main_program.block(main_program.current_block()
                                          .parent_idx)
        # Copy only when the array is NOT visible from the parent block,
        # i.e. it was created inside the while block.
        if parent_block and not parent_block._find_var_recursive(input.name):
            assign(input, output)
    else:
        assign(input, output)
def while_loop(cond, body, loop_vars, is_test=False, name=None):
......@@ -2111,21 +2153,8 @@ class ConditionalBlock(object):
intermediate = set()
params = set()
# NOTE: Here assumes that all variables are input or output of Ops,
# but some variables are created without appendding a real op.
# For example, in `arr = create_array(dtype)`, `arr` is not a output of a op.
for each_op in inside_block.ops:
assert isinstance(each_op, Operator)
for iname in each_op.input_names:
for in_var_name in each_op.input(iname):
if in_var_name not in intermediate:
params.add(in_var_name)
for oname in each_op.output_names:
for out_var_name in each_op.output(oname):
intermediate.add(out_var_name)
input_set = set([ipt.name for ipt in self.inputs])
params, intermediate = get_inputs_outputs_in_block(
inside_block, params, intermediate, helper=self.helper)
# Todo(liym27) Here assume that all params are in recursive parent block
# but when minimize() called in control flow, some params may be in
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册