Unverified commit 6c0755d9, authored by feifei-111, committed by GitHub

fluid API migration: Assert, increment, cond (#48885)

Parent c9f4cfad
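The migration replaces three fluid.layers entry points with their paddle 2.x counterparts: layers.increment becomes paddle.increment, fluid.layers.cond becomes paddle.static.nn.cond, and layers.Assert moves to paddle.static.nn.control_flow.Assert. A minimal static-graph sketch of the replacement calls follows; it assumes a Paddle 2.x install, and the tensor names and constants are illustrative only, not taken from the changed files.

import paddle
from paddle.static.nn.control_flow import Assert  # new home of the Assert op

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    # old: fluid.layers.increment(x=i, value=1.0, in_place=True)
    i = paddle.zeros(shape=[1], dtype='float32')
    i = paddle.increment(i, value=1.0)

    # old: fluid.layers.cond(pred, true_fn, false_fn)
    a = paddle.full(shape=[1], fill_value=0.1, dtype='float32')
    b = paddle.full(shape=[1], fill_value=0.23, dtype='float32')
    out = paddle.static.nn.cond(a < b, lambda: a + b, lambda: a - b)

    # old: fluid.layers.Assert(cond, data, summarize)
    Assert(paddle.max(out) < 1.0, [out], summarize=10)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
exe.run(main_prog)

Note that paddle.increment has no in_place flag, so call sites in the hunks below rebind the returned tensor, e.g. i = paddle.increment(i).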
...@@ -64,7 +64,7 @@ class HybridParallelInferenceHelper: ...@@ -64,7 +64,7 @@ class HybridParallelInferenceHelper:
element_in_arr = layers.array_read(array=arr, i=step_idx) element_in_arr = layers.array_read(array=arr, i=step_idx)
# write placehold data to global lod_tensor_array, # write placehold data to global lod_tensor_array,
# it need for send_v2 of lod_tensor_array # it need for send_v2 of lod_tensor_array
layers.increment(x=step_idx, value=1.0, in_place=True) paddle.increment(x=step_idx, value=1.0)
layers.array_write(element_in_arr, i=step_idx, array=arr) layers.array_write(element_in_arr, i=step_idx, array=arr)
with paddle.fluid.device_guard(f'{device}:0'): with paddle.fluid.device_guard(f'{device}:0'):
...@@ -137,7 +137,7 @@ class HybridParallelInferenceHelper: ...@@ -137,7 +137,7 @@ class HybridParallelInferenceHelper:
with while_op.block(): with while_op.block():
with paddle.fluid.device_guard(f'{device}:all'): with paddle.fluid.device_guard(f'{device}:all'):
input = layers.array_read(array=data, i=step_idx) input = layers.array_read(array=data, i=step_idx)
layers.increment(x=step_idx, value=1.0, in_place=True) paddle.increment(x=step_idx, value=1.0)
layers.array_write(input, i=step_idx, array=data) layers.array_write(input, i=step_idx, array=data)
with paddle.fluid.device_guard(f'{device}:0'): with paddle.fluid.device_guard(f'{device}:0'):
......
...@@ -286,7 +286,7 @@ def _create_cond_block_and_update_optimizer( ...@@ -286,7 +286,7 @@ def _create_cond_block_and_update_optimizer(
) )
new_grad.op._set_attr(OP_ROLE_KEY, op_maker.OpRole.Optimize) new_grad.op._set_attr(OP_ROLE_KEY, op_maker.OpRole.Optimize)
layers.cond(cond_var, true_fn=true_apply_gradient, false_fn=None) paddle.static.nn.cond(cond_var, true_fn=true_apply_gradient, false_fn=None)
cond_op = main_program.global_block().ops[-1] cond_op = main_program.global_block().ops[-1]
cond_op._set_attr(OP_ROLE_KEY, OpRole.Optimize) cond_op._set_attr(OP_ROLE_KEY, OpRole.Optimize)
......
...@@ -83,7 +83,7 @@ class AdaRoundLoss: ...@@ -83,7 +83,7 @@ class AdaRoundLoss:
return round_loss return round_loss
round_loss = fluid.layers.cond( round_loss = paddle.static.nn.cond(
warm_start, warm_start,
lambda: fluid.layers.fill_constant( lambda: fluid.layers.fill_constant(
shape=[1], dtype='float32', value=0.0 shape=[1], dtype='float32', value=0.0
......
...@@ -53,13 +53,10 @@ from paddle import _C_ops, _legacy_C_ops ...@@ -53,13 +53,10 @@ from paddle import _C_ops, _legacy_C_ops
__all__ = [ __all__ = [
'Switch', 'Switch',
'increment',
'array_write', 'array_write',
'array_read', 'array_read',
'cond',
'StaticRNN', 'StaticRNN',
'Print', 'Print',
'Assert',
'while_loop', 'while_loop',
] ]
...@@ -100,7 +97,7 @@ def _select_input_infer_shape(first_shape, second_shape): ...@@ -100,7 +97,7 @@ def _select_input_infer_shape(first_shape, second_shape):
2. compare axis one by one: 2. compare axis one by one:
if a == b: we set axis to a if a == b: we set axis to a
if a != b: we set axis to -1 if a != b: we set axis to -1
for compatibilitynon declarative mode, we just return second_shape. for compatibility, non declarative mode, we just return second_shape.
""" """
if len(first_shape) != len(second_shape): if len(first_shape) != len(second_shape):
warnings.warn( warnings.warn(
...@@ -134,6 +131,7 @@ def select_input(inputs, mask): ...@@ -134,6 +131,7 @@ def select_input(inputs, mask):
# Select input should expand the shape. If it is - 1 and valid number, use - 1 first. If the dim is different, an error will be reported directly # Select input should expand the shape. If it is - 1 and valid number, use - 1 first. If the dim is different, an error will be reported directly
# assert inputs[0].dtype == inputs[1].dtype, f"Expect the inputs should have the same dtype, but get {inputs[0].dtype} and {inputs[1].dtype}" # assert inputs[0].dtype == inputs[1].dtype, f"Expect the inputs should have the same dtype, but get {inputs[0].dtype} and {inputs[1].dtype}"
output_shape = _select_input_infer_shape(inputs[0].shape, inputs[1].shape) output_shape = _select_input_infer_shape(inputs[0].shape, inputs[1].shape)
output_dtype = inputs[1].dtype output_dtype = inputs[1].dtype
output_type = inputs[1].type output_type = inputs[1].type
...@@ -149,84 +147,6 @@ def select_input(inputs, mask): ...@@ -149,84 +147,6 @@ def select_input(inputs, mask):
return out return out
def select_input_with_buildin_type(inputs, mask, name):
from paddle.jit.dy2static.variable_trans_func import (
to_static_variable,
)
from paddle.jit.dy2static.utils import UndefinedVar
false_var, true_var = inputs
if isinstance(false_var, UndefinedVar) and isinstance(
true_var, UndefinedVar
):
"""None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None."""
return None
if isinstance(false_var, Variable) and isinstance(true_var, Variable):
try:
return select_input(inputs, mask)
except Exception as e:
raise RuntimeError(
f"Exceptions throwed while doing select_input on {name}:\n{e}"
)
elif isinstance(false_var, support_ret_buildin_type) and isinstance(
false_var, type(true_var)
):
if false_var == true_var:
return false_var
else:
inputs = [
to_static_variable(false_var),
to_static_variable(true_var),
]
# Deal with the situations like this: false_var is int and true_var is Variable
elif (
isinstance(false_var, support_ret_buildin_type)
and isinstance(true_var, Variable)
) or (
isinstance(true_var, support_ret_buildin_type)
and isinstance(false_var, Variable)
):
inputs = [to_static_variable(false_var), to_static_variable(true_var)]
warnings.warn(
"Return results from different branches in cond are not same type: "
"false_var returned by false_fn is '{}' and true_var of true_fn is "
"'{}'".format(type(false_var), type(true_var))
)
elif (
isinstance(false_var, UndefinedVar)
and isinstance(true_var, (Variable,) + support_ret_buildin_type)
) or (
isinstance(true_var, UndefinedVar)
and isinstance(false_var, (Variable,) + support_ret_buildin_type)
):
def create_var_if_not_undefined_var(a):
if isinstance(a, UndefinedVar):
return a
return to_static_variable(a)
true_var, false_var = to_static_variable(true_var), to_static_variable(
false_var
)
inputs = [false_var, true_var]
else:
raise TypeError(
"Unsupported return type of true_fn and false_fn in cond: false_var "
"returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
type(false_var), type(true_var)
)
)
try:
return select_input(inputs, mask)
except Exception as e:
raise RuntimeError(
f"Exceptions throwed while doing select_input on {name}:\n{e}"
)
def split_lod_tensor(input, mask, level=0): def split_lod_tensor(input, mask, level=0):
""" """
This function takes in an input that contains the complete lod information, This function takes in an input that contains the complete lod information,
...@@ -449,78 +369,6 @@ def Print( ...@@ -449,78 +369,6 @@ def Print(
return output return output
def Assert(cond, data=None, summarize=20, name=None):
'''
This API creates an op that asserts the given condition is true. If the
condition is false, prints the tensors in data. ``summarize`` specifies the
number of the elements in the tensors to print.
Args:
cond (Variable): The boolean condition tensor whose numel should be 1.
data (list|tuple, optional): list or tuple of tensors to print when
condition is not true. If it's ``None``, no tensor will be printed.
The default value is ``None``.
summarize (int, optional): Number of elements in the tensor to be
printed. If its value is -1, then all elements in the tensor will
be printed. The default value is 20.
name (str, optional): The default value is ``None`` . Normally users
don't have to set this parameter. For more information, please
refer to :ref:`api_guide_Name` .
Returns:
Operator: the created operation.
Raises:
TypeError: If ``cond`` is not boolean Variable.
TypeError: If ``data`` is not a list or tuple or ``None``.
TypeError: If ``summarize`` is not int.
TypeError: If ``name`` is not a string or ``None`` .
fluid.core.EnforceNotMet: If the condition is False in running time.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
condition = layers.reduce_max(x) < 1.0 # False
layers.Assert(condition, [x], 10, "example_assert_layer")
exe = fluid.Executor()
try:
exe.run(fluid.default_main_program())
# Print x and throws paddle.fluid.core.EnforceNotMet exception
# Example printed message for x:
#
# Variable: fill_constant_0.tmp_0
# - lod: {}
# - place: CPUPlace()
# - shape: [2, 3]
# - layout: NCHW
# - dtype: float
# - data: [2 2 2 2 2 2]
except fluid.core.EnforceNotMet as e:
print("Assert Exception Example")
'''
check_variable_and_dtype(cond, "cond", ["bool"], "fluid.layers.Assert")
check_type(data, "data", (list, tuple, type(None)), "fluid.layers.Assert")
check_type(summarize, "summarize", int, "fluid.layers.Assert")
check_type(name, "name", (str, type(None)), "fluid.layers.Assert")
layer_name = name if name else ('assert_' + cond.name)
helper = LayerHelper(layer_name, **locals())
op = helper.append_op(
type="assert",
inputs={"Cond": cond, "Data": [] if data is None else list(data)},
attrs={"summarize": summarize},
)
return op
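The Assert helper removed above is not dropped; the updated tests later in this diff import it from paddle.static.nn.control_flow. A short sketch of the post-migration call site, assuming the same semantics as the removed docstring (print the data tensors and raise when the condition is false at run time); the values are illustrative:

import paddle
from paddle.static.nn.control_flow import Assert

paddle.enable_static()

main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, paddle.static.Program()):
    x = paddle.full(shape=[2, 3], fill_value=2.0, dtype='float32')
    condition = paddle.max(x) < 1.0  # False, so the op raises at run time
    Assert(condition, [x], summarize=10, name="example_assert")

exe = paddle.static.Executor(paddle.CPUPlace())
try:
    exe.run(main_prog)
except Exception:
    # x is printed and an EnforceNotMet-style error is raised, as the
    # removed docstring describes
    print("Assert Exception Example")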
# (TODO: Mine) There exists dependency. It will be removed later. # (TODO: Mine) There exists dependency. It will be removed later.
class BlockGuard: class BlockGuard:
""" """
...@@ -1215,7 +1063,7 @@ class While: ...@@ -1215,7 +1063,7 @@ class While:
cond = paddle.less_than(x=i, y=loop_len) cond = paddle.less_than(x=i, y=loop_len)
while_op = fluid.layers.While(cond=cond) while_op = fluid.layers.While(cond=cond)
with while_op.block(): with while_op.block():
i = fluid.layers.increment(x=i, value=1, in_place=True) i = paddle.increment(x=i, value=1)
paddle.assign(paddle.less_than(x=i, y=loop_len), cond) paddle.assign(paddle.less_than(x=i, y=loop_len), cond)
exe = fluid.Executor(fluid.CPUPlace()) exe = fluid.Executor(fluid.CPUPlace())
...@@ -1232,6 +1080,7 @@ class While: ...@@ -1232,6 +1080,7 @@ class While:
import paddle.fluid as fluid import paddle.fluid as fluid
import numpy as np import numpy as np
paddle.enable_static()
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1) one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1)
...@@ -1243,7 +1092,7 @@ class While: ...@@ -1243,7 +1092,7 @@ class While:
with while_op.block(): with while_op.block():
sums_tensor = fluid.layers.elementwise_add(x=data, y=data) sums_tensor = fluid.layers.elementwise_add(x=data, y=data)
fluid.layers.assign(sums_tensor, sums) # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign fluid.layers.assign(sums_tensor, sums) # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign
i = fluid.layers.increment(x=i, value=1, in_place=True) i = paddle.increment(x=i, value=1)
data = fluid.layers.elementwise_add(x=data, y=one) data = fluid.layers.elementwise_add(x=data, y=one)
paddle.assign(paddle.less_than(x=i, y=loop_len), cond) paddle.assign(paddle.less_than(x=i, y=loop_len), cond)
...@@ -1513,47 +1362,6 @@ def _deal_with_undefined_var(output_vars, loop_vars): ...@@ -1513,47 +1362,6 @@ def _deal_with_undefined_var(output_vars, loop_vars):
return results return results
def increment(x, value=1.0, in_place=True):
"""
The OP is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.
Notice that the number of elements in :attr:`x` must be equal to 1.
Parameters:
x (Variable): A tensor that must always contain only one element, its data type supports
float32, float64, int32 and int64.
value (float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
in_place (bool, optional): Whether the OP should be performed in-place. Default: True.
Returns:
Variable: The elementwise-incremented tensor with the same shape and data type as :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.]
fluid.layers.increment(counter) # [1.]
"""
if in_dygraph_mode():
return _C_ops.increment_(x, value)
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
)
helper = LayerHelper("increment", **locals())
if not in_place:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = x
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'step': float(value)},
)
return out
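paddle.increment, which every call site in this diff now uses, keeps the one-element restriction on x but drops the in_place flag and simply returns the updated tensor, so callers rebind the result. A minimal sketch, assuming Paddle 2.x static mode; the counter name and values are illustrative:

import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, paddle.static.Program()):
    counter = paddle.zeros(shape=[1], dtype='float32')  # [0.]
    # old: fluid.layers.increment(counter, value=1.0, in_place=True)
    counter = paddle.increment(counter, value=1.0)       # [1.]

exe = paddle.static.Executor(paddle.CPUPlace())
(res,) = exe.run(main_prog, fetch_list=[counter])
print(res)  # [1.]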
def array_write(x, i, array=None): def array_write(x, i, array=None):
""" """
This OP writes the input ``x`` into the i-th position of the ``array`` This OP writes the input ``x`` into the i-th position of the ``array``
...@@ -1936,315 +1744,6 @@ class ConditionalBlock: ...@@ -1936,315 +1744,6 @@ class ConditionalBlock:
self.helper.main_program._sync_with_cpp() self.helper.main_program._sync_with_cpp()
def copy_var_to_parent_block(var, layer_helper):
if not isinstance(var, Variable):
return var
prog = layer_helper.main_program
parent_idx = prog.current_block().parent_idx
assert (
parent_idx >= 0
), "Got wrong parent block index when assigning var to parent scope in control_flow"
parent_block = prog.block(parent_idx)
if (
var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
and parent_block._find_var_recursive(var.name)
):
parent_block_var = var
else:
parent_block_var = parent_block.create_var(
dtype=var.dtype, shape=var.shape, type=var.type
)
assign(var, parent_block_var)
return parent_block_var
def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
"""
This API returns ``true_fn()`` if the predicate ``pred`` is true else
``false_fn()`` . Users could also set ``true_fn`` or ``false_fn`` to
``None`` if do nothing and this API will treat the callable simply returns
``None`` in this case.
``true_fn`` and ``false_fn`` should return same nest structure of tensors
or both return ``None`` if the user doesn't want to return anything. A nest
structure of tensors in PaddlePaddle is tensor(s), or tuple of tensors, or
list of tensors.
Note:
1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must have
the same shape because of dataflow model of PaddlePaddle while the
tensors in the tuples or the lists can have different shapes.
2. This API could be used under both static mode or dygraph mode. If it
is in dygraph mode, the API only runs one branch based on condition.
3. If it is in static mode, any tensors or operations created outside
or inside of ``true_fn`` and ``false_fn`` will be in net building
regardless of which branch is selected at runtime. This has frequently
surprised users who expected a lazy semantics. For example:
.. code-block:: python
import paddle
a = paddle.zeros((1, 1))
b = paddle.zeros((1, 1))
c = a * b
out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
No matter whether ``a < b`` , ``c = a * b`` will be in net building and
run. ``a + c`` and ``b * b`` will be in net building, but only one
branch will be executed during runtime.
Args:
pred(Tensor): A boolean tensor whose numel should be 1. The boolean
value determines whether to return the result of ``true_fn`` or
``false_fn`` .
true_fn(callable, optional): A callable to be performed if ``pred`` is
true. The default value is ``None`` .
false_fn(callable, optional): A callable to be performed if ``pred`` is
false. The default value is ``None`` .
name(str, optional): The default value is ``None`` . Normally users
don't have to set this parameter. For more information, please
refer to :ref:`api_guide_Name` .
return_names(sequence of string, optional): The default value is ``None`` .
Normally users don't have to set this parameters. A sequence of strings
to represents the name of returned vars. The structure of sequence must
be same with return values of true_fn and false_fn.
Returns:
Tensor|list(Tensor)|tuple(Tensor): returns ``true_fn()`` if the
predicate ``pred`` is true else ``false_fn()`` .
Raises:
TypeError: if ``true_fn`` or ``false_fn`` is not callable.
ValueError: if ``true_fn`` and ``false_fn`` don't return the same nest
structure of tensors.
Examples:
.. code-block:: python
import paddle
#
# pseudocode:
# if 0.1 < 0.23:
# return 1, True
# else:
# return 3, 2
#
def true_func():
return paddle.full(shape=[1, 2], dtype='int32',
fill_value=1), paddle.full(shape=[2, 3],
dtype='bool',
fill_value=True)
def false_func():
return paddle.full(shape=[3, 4], dtype='float32',
fill_value=3), paddle.full(shape=[4, 5],
dtype='int64',
fill_value=2)
x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
pred = paddle.less_than(x=x, y=y, name=None)
ret = paddle.static.nn.cond(pred, true_func, false_func)
# ret is a tuple containing 2 tensors
# ret[0] = [[1 1]]
# ret[1] = [[ True True True]
# [ True True True]]
"""
if _non_static_mode():
assert isinstance(pred, Variable), "The pred in cond must be Variable"
assert pred.size == 1, "condition input's numel should be 1"
pred = pred.numpy()[0]
if pred:
if true_fn is not None:
if not callable(true_fn):
raise TypeError(
"The true_fn in cond must be callable, but received {}".format(
type(true_fn).__name__
)
)
return true_fn()
else:
if false_fn is not None:
if not callable(false_fn):
raise TypeError(
"The false_fn in cond must be callable, but received {}".format(
type(false_fn).__name__
)
)
return false_fn()
return None
check_variable_and_dtype(pred, "pred", ['bool'], "fluid.layers.cond")
check_type(name, "name", (str, type(None)), "fluid.layers.cond")
helper = LayerHelper('cond', **locals())
true_output = None
false_output = None
copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
if true_fn is not None:
if not callable(true_fn):
raise TypeError(
"The true_fn in cond must be callable, but received {}".format(
type(true_fn).__name__
)
)
true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
with true_cond_block.block():
origin_true_output = true_fn()
if origin_true_output is not None:
true_output = map_structure(
copy_to_parent_func, origin_true_output
)
if false_fn is not None:
if not callable(false_fn):
raise TypeError(
"The false_fn in cond must be callable, but received {}".format(
type(false_fn).__name__
)
)
false_cond_block = ConditionalBlock(
[paddle.logical_not(pred)], is_scalar_condition=True
)
with false_cond_block.block():
origin_false_output = false_fn()
if origin_false_output is not None:
false_output = map_structure(
copy_to_parent_func, origin_false_output
)
if true_output is None and false_output is None:
return None
if true_output is None:
raise ValueError(
"Incompatible return values of true_fn and false_fn in cond: "
"true_fn returns None while false_fn returns non-None"
)
if false_output is None:
raise ValueError(
"Incompatible return values of true_fn and false_fn in cond: "
"true_fn returns non-None while false_fn returns None"
)
# Merge true and false output if they are not None
if return_names is None:
is_dy2staic = False
return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
else:
"""
dy2static will set the return_names and expand the return values to UndefinedVar.
"""
is_dy2staic = True
# TODO: expand_undefined_var will replace None to Undefinedvar(), to fix cases like:
# a = None
# if condition:
# a = 1
# Because we can not use variable to express 'None'
true_output, false_output = expand_undefined_var(
true_output, false_output, return_names
)
if len(_to_sequence_except_dict(true_output)) != len(
_to_sequence_except_dict(false_output)
):
raise ValueError(
"true fn returns {} vars, but false fn returns {} vars, which is not equals".format(
len(_to_sequence_except_dict(true_output)),
len(_to_sequence_except_dict(false_output)),
)
)
for true_out, false_out, return_name in zip(
_to_sequence_except_dict(true_output),
_to_sequence_except_dict(false_output),
_to_sequence_except_dict(return_names),
):
try:
assert_same_structure(true_out, false_out, check_types=False)
except ValueError as e:
raise ValueError(
"Incompatible return values of `{}` in true_fn and false_fn in cond: {}".format(
return_name, e
)
)
def check_ret_none(seq_true, seq_false, seq_names):
for f_true, f_false, f_name in zip(seq_true, seq_false, seq_names):
f_true = flatten(f_true)
f_false = flatten(f_false)
for idx in range(len(f_true)):
if (
f_true[idx] is None
and f_false[idx] is not None
or f_false[idx] is None
and f_true[idx] is not None
):
warnings.warn(
"In cond : Var '{}' or part of it is set differently in ifelse branchs, "
"<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
"'None' in ifelse block might lead to error.".format(
f_name,
type(f_true[idx]),
f_true[idx],
type(f_false[idx]),
f_false[idx],
)
)
check_ret_none(
_to_sequence_except_dict(true_output),
_to_sequence_except_dict(false_output),
_to_sequence_except_dict(return_names),
)
if is_dy2staic:
true_output, false_output = change_none_to_undefinedvar(
true_output, false_output
)
mask = cast(pred, dtype='int32')
merge_func = (
lambda name, false_var, true_var: select_input_with_buildin_type(
[false_var, true_var], mask, name
)
)
def merge_every_var_list(false_vars, true_vars, name):
return map_structure(partial(merge_func, name), false_vars, true_vars)
merged_output = list(
map(
merge_every_var_list,
_to_sequence_except_dict(false_output),
_to_sequence_except_dict(true_output),
_to_sequence_except_dict(return_names),
)
)
merged_output = pack_sequence_as(false_output, flatten(merged_output))
return merged_output
def change_none_to_undefinedvar(nest1, nest2):
from paddle.jit.dy2static.utils import UndefinedVar
def map_fn(x):
if x is None:
return UndefinedVar("padding")
return x
nest1_out = pack_sequence_as(nest1, list(map(map_fn, flatten(nest1))))
nest2_out = pack_sequence_as(nest2, list(map(map_fn, flatten(nest2))))
return nest1_out, nest2_out
def _to_sequence_except_dict(x): def _to_sequence_except_dict(x):
""" """
In this function, dict is not viewed as sequence. In this function, dict is not viewed as sequence.
......
...@@ -901,7 +901,7 @@ def _dynamic_decode_imperative( ...@@ -901,7 +901,7 @@ def _dynamic_decode_imperative(
next_sequence_lengths, next_sequence_lengths,
) )
control_flow.increment(x=step_idx_tensor, value=1.0, in_place=True) paddle.increment(x=step_idx_tensor, value=1.0)
step_idx += 1 step_idx += 1
cond = paddle.logical_not(paddle.all(finished)) cond = paddle.logical_not(paddle.all(finished))
...@@ -1060,7 +1060,8 @@ def _dynamic_decode_declarative( ...@@ -1060,7 +1060,8 @@ def _dynamic_decode_declarative(
outputs, outputs,
outputs_arrays, outputs_arrays,
) )
control_flow.increment(x=step_idx, value=1.0, in_place=True)
paddle.increment(x=step_idx, value=1.0)
# update the global_finished first, since it might be also in states of # update the global_finished first, since it might be also in states of
# decoder, which otherwise would write a stale finished status to array # decoder, which otherwise would write a stale finished status to array
tensor.assign(next_finished, global_finished) tensor.assign(next_finished, global_finished)
......
...@@ -7300,7 +7300,7 @@ class LookaheadOptimizer: ...@@ -7300,7 +7300,7 @@ class LookaheadOptimizer:
dtype='int32', dtype='int32',
persistable=True, persistable=True,
) )
layers.increment(x=step, value=1.0, in_place=True) paddle.increment(x=step, value=1.0)
# lookahead # lookahead
zero_var = layers.fill_constant( zero_var = layers.fill_constant(
...@@ -7534,7 +7534,7 @@ class GradientMergeOptimizer: ...@@ -7534,7 +7534,7 @@ class GradientMergeOptimizer:
with device_guard("cpu"): with device_guard("cpu"):
# step_var = (step_var + 1) % k_step # step_var = (step_var + 1) % k_step
layers.increment(x=step_var, value=1.0, in_place=True) paddle.increment(x=step_var, value=1.0)
main_block.append_op( main_block.append_op(
type='elementwise_mod', type='elementwise_mod',
inputs={'X': step_var, 'Y': k_step_var}, inputs={'X': step_var, 'Y': k_step_var},
...@@ -7664,7 +7664,7 @@ class GradientMergeOptimizer: ...@@ -7664,7 +7664,7 @@ class GradientMergeOptimizer:
) )
# step3. apply gradient # step3. apply gradient
layers.cond(cond, true_fn=true_apply_gradient, false_fn=None) paddle.static.nn.cond(cond, true_fn=true_apply_gradient, false_fn=None)
return self._optimize_ops return self._optimize_ops
......
...@@ -189,7 +189,7 @@ def get_program(): ...@@ -189,7 +189,7 @@ def get_program():
cur_pred = mlp_while(pre_input) cur_pred = mlp_while(pre_input)
# update the loop condition # update the loop condition
i = fluid.layers.increment(x=i, value=1, in_place=True) i = paddle.increment(x=i, value=1)
fluid.layers.array_write(cur_pred, array=input_array, i=i) fluid.layers.array_write(cur_pred, array=input_array, i=i)
paddle.assign(paddle.less_than(x=i, y=loop_len), cond) paddle.assign(paddle.less_than(x=i, y=loop_len), cond)
......
...@@ -91,7 +91,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase): ...@@ -91,7 +91,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase):
with while_op.block(): with while_op.block():
with paddle.fluid.device_guard(f'{device}:all'): with paddle.fluid.device_guard(f'{device}:all'):
input = layers.array_read(array=data, i=step_idx) input = layers.array_read(array=data, i=step_idx)
layers.increment(x=step_idx, value=1.0, in_place=True) paddle.increment(x=step_idx, value=1.0)
layers.array_write(input, i=step_idx, array=data) layers.array_write(input, i=step_idx, array=data)
with paddle.fluid.device_guard(f'{device}:0'): with paddle.fluid.device_guard(f'{device}:0'):
......
...@@ -89,7 +89,7 @@ def dyfunc_with_if_else3(x): ...@@ -89,7 +89,7 @@ def dyfunc_with_if_else3(x):
m = x + 2 m = x + 2
n = x + 3 n = x + 3
return q, x, y, z return q, x, y, z
q, x, y, z = fluid.layers.cond(paddle.mean(x)[0] < 5, lambda : q, x, y, z = paddle.static.nn.cond(paddle.mean(x)[0] < 5, lambda :
paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y), paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y),
lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q, lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q,
x, y)) x, y))
......
...@@ -97,7 +97,7 @@ class MainNetWithDict(fluid.dygraph.Layer): ...@@ -97,7 +97,7 @@ class MainNetWithDict(fluid.dygraph.Layer):
), ),
} }
# TODO(Aurelius84): The following code will be converted into: # TODO(Aurelius84): The following code will be converted into:
# max_len = layers.cond(paddle.shape(input)[0] != max_len, # max_len = paddle.static.nn.cond(paddle.shape(input)[0] != max_len,
# lambda: paddle.shape(input)[0], lambda: max_len) # lambda: paddle.shape(input)[0], lambda: max_len)
# But max_len should be wrapped into tensor, which is not supported. # But max_len should be wrapped into tensor, which is not supported.
......
...@@ -154,7 +154,7 @@ def dyfunc_ifExp_with_while(x): ...@@ -154,7 +154,7 @@ def dyfunc_ifExp_with_while(x):
def body(i, ten, y): def body(i, ten, y):
# It will be converted into `layers.cond` as followed. # It will be converted into `layers.cond` as followed.
# map_func(lambda x: fluid.layers.cond(i==0, lambda: x, lambda: add_fn(x), y) # map_func(lambda x: paddle.static.nn.cond(i==0, lambda: x, lambda: add_fn(x), y)
y = map_func(lambda x: x if (i == 0) is not None else add_fn(x), y) y = map_func(lambda x: x if (i == 0) is not None else add_fn(x), y)
i += 1 i += 1
return [i, ten, y] return [i, ten, y]
...@@ -183,7 +183,7 @@ def dyfunc_ifExp(x): ...@@ -183,7 +183,7 @@ def dyfunc_ifExp(x):
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
# It will be converted into `layers.cond` as followed. # It will be converted into `layers.cond` as followed.
# map_func(lambda x: fluid.layers.cond(i==1, lambda: x, lambda: add_fn(x), y) # map_func(lambda x: paddle.static.nn.cond(i==1, lambda: x, lambda: add_fn(x), y)
# `if (Tensor) == 1` is supported in dygraph. # `if (Tensor) == 1` is supported in dygraph.
y = map_func(lambda x: x if i == 1 else add_fn(x), y) y = map_func(lambda x: x if i == 1 else add_fn(x), y)
return y[0] return y[0]
......
...@@ -83,7 +83,7 @@ def while_loop_dyfunc_with_none(x): ...@@ -83,7 +83,7 @@ def while_loop_dyfunc_with_none(x):
def for_loop_dyfunc(max_len): def for_loop_dyfunc(max_len):
for i in range(max_len): for i in range(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32') ret = fluid.layers.zeros(shape=[1], dtype='float32')
fluid.layers.increment(ret, value=2.0, in_place=True) paddle.increment(ret, value=2.0)
return ret return ret
...@@ -104,14 +104,14 @@ def for_loop_dyfunc2(max_len): ...@@ -104,14 +104,14 @@ def for_loop_dyfunc2(max_len):
def for_loop_dyfunc3(max_len): def for_loop_dyfunc3(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32') ret = fluid.layers.zeros(shape=[1], dtype='float32')
for i in range(1, 10, 2): for i in range(1, 10, 2):
fluid.layers.increment(ret, value=2.0, in_place=True) paddle.increment(ret, value=2.0)
return ret return ret
def for_loop_dyfunc4(max_len): def for_loop_dyfunc4(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32') ret = fluid.layers.zeros(shape=[1], dtype='float32')
for i in range(10, 1, -2): for i in range(10, 1, -2):
fluid.layers.increment(ret, value=2.0, in_place=True) paddle.increment(ret, value=2.0)
return ret return ret
...@@ -119,7 +119,7 @@ def for_loop_dyfunc_not_support(max_len): ...@@ -119,7 +119,7 @@ def for_loop_dyfunc_not_support(max_len):
ret = fluid.layers.zeros(shape=[1], dtype='float32') ret = fluid.layers.zeros(shape=[1], dtype='float32')
a = -2 a = -2
for i in range(10, 1, a): for i in range(10, 1, a):
fluid.layers.increment(ret, value=2.0, in_place=True) paddle.increment(ret, value=2.0)
return ret return ret
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
import warnings import warnings
import paddle import paddle
from paddle.fluid.layers.control_flow import cond from paddle.static.nn import cond
@paddle.jit.to_static @paddle.jit.to_static
......
...@@ -54,7 +54,7 @@ class TestQuantizationSubGraph(unittest.TestCase): ...@@ -54,7 +54,7 @@ class TestQuantizationSubGraph(unittest.TestCase):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
pred = paddle.less_than(y, x) pred = paddle.less_than(y, x)
out = layers.cond(pred, true_func, false_func) out = paddle.static.nn.cond(pred, true_func, false_func)
core_graph = core.Graph(main_program.desc) core_graph = core.Graph(main_program.desc)
# We should create graph for test, otherwise it will throw a # We should create graph for test, otherwise it will throw a
......
...@@ -121,7 +121,7 @@ class TestIncrementInplace(unittest.TestCase): ...@@ -121,7 +121,7 @@ class TestIncrementInplace(unittest.TestCase):
with paddle.static.program_guard(main_prog, startup_prog): with paddle.static.program_guard(main_prog, startup_prog):
a = paddle.static.data(name="a", shape=[1], dtype='float32') a = paddle.static.data(name="a", shape=[1], dtype='float32')
b = fluid.layers.increment(a) b = paddle.increment(a)
place = paddle.NPUPlace(NPUPlace) place = paddle.NPUPlace(NPUPlace)
......
...@@ -43,9 +43,9 @@ class TestWhileOp(unittest.TestCase): ...@@ -43,9 +43,9 @@ class TestWhileOp(unittest.TestCase):
init = layers.zeros(shape=[10], dtype='float32') init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i) mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i) data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d1, i, array=data_array) layers.array_write(d1, i, array=data_array)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d2, i, array=data_array) layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int32') i = layers.zeros(shape=[1], dtype='int32')
i = layers.cast(i, 'int64') i = layers.cast(i, 'int64')
...@@ -71,7 +71,7 @@ class TestWhileOp(unittest.TestCase): ...@@ -71,7 +71,7 @@ class TestWhileOp(unittest.TestCase):
prev = layers.array_read(array=mem_array, i=i) prev = layers.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev]) result = layers.sums(input=[d, prev])
i = layers.increment(x=i, in_place=True) i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array) layers.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond) paddle.assign(paddle.less_than(x=i, y=array_len), cond)
...@@ -80,7 +80,7 @@ class TestWhileOp(unittest.TestCase): ...@@ -80,7 +80,7 @@ class TestWhileOp(unittest.TestCase):
prev2 = layers.array_read(array=mem_array, i=j) prev2 = layers.array_read(array=mem_array, i=j)
result2 = layers.sums(input=[d2, prev2]) result2 = layers.sums(input=[d2, prev2])
j = layers.increment(x=j, in_place=True) j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array) layers.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
sum_result = layers.array_read(array=mem_array, i=j) sum_result = layers.array_read(array=mem_array, i=j)
......
...@@ -57,7 +57,7 @@ class TestCompatibility(unittest.TestCase): ...@@ -57,7 +57,7 @@ class TestCompatibility(unittest.TestCase):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
pred = paddle.less_than(x, y) pred = paddle.less_than(x, y)
out = layers.cond(pred, true_func, false_func) out = paddle.static.nn.cond(pred, true_func, false_func)
# out is a tuple containing 2 tensors # out is a tuple containing 2 tensors
return main_program, startup_program, out return main_program, startup_program, out
......
...@@ -30,17 +30,17 @@ def _test_read_write(x): ...@@ -30,17 +30,17 @@ def _test_read_write(x):
i = layers.zeros(shape=[1], dtype='int64') i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False i.stop_gradient = False
arr = layers.array_write(x=x[0], i=i) arr = layers.array_write(x=x[0], i=i)
i = layers.increment(x=i) i = paddle.increment(x=i)
arr = layers.array_write(x=x[1], i=i, array=arr) arr = layers.array_write(x=x[1], i=i, array=arr)
i = layers.increment(x=i) i = paddle.increment(x=i)
arr = layers.array_write(x=x[2], i=i, array=arr) arr = layers.array_write(x=x[2], i=i, array=arr)
i = layers.zeros(shape=[1], dtype='int64') i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = False i.stop_gradient = False
a0 = layers.array_read(array=arr, i=i) a0 = layers.array_read(array=arr, i=i)
i = layers.increment(x=i) i = paddle.increment(x=i)
a1 = layers.array_read(array=arr, i=i) a1 = layers.array_read(array=arr, i=i)
i = layers.increment(x=i) i = paddle.increment(x=i)
a2 = layers.array_read(array=arr, i=i) a2 = layers.array_read(array=arr, i=i)
mean_a0 = paddle.mean(a0) mean_a0 = paddle.mean(a0)
......
...@@ -17,6 +17,7 @@ import unittest ...@@ -17,6 +17,7 @@ import unittest
import paddle import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
from paddle.static.nn.control_flow import Assert
class TestAssertOp(unittest.TestCase): class TestAssertOp(unittest.TestCase):
...@@ -33,7 +34,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -33,7 +34,7 @@ class TestAssertOp(unittest.TestCase):
condition = layers.fill_constant( condition = layers.fill_constant(
shape=[1], dtype='bool', value=True shape=[1], dtype='bool', value=True
) )
layers.Assert(condition, []) Assert(condition, [])
self.run_network(net_func) self.run_network(net_func)
...@@ -42,7 +43,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -42,7 +43,7 @@ class TestAssertOp(unittest.TestCase):
condition = layers.fill_constant( condition = layers.fill_constant(
shape=[1], dtype='bool', value=False shape=[1], dtype='bool', value=False
) )
layers.Assert(condition) Assert(condition)
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
self.run_network(net_func) self.run_network(net_func)
...@@ -52,7 +53,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -52,7 +53,7 @@ class TestAssertOp(unittest.TestCase):
condition = layers.fill_constant( condition = layers.fill_constant(
shape=[1, 2], dtype='bool', value=True shape=[1, 2], dtype='bool', value=True
) )
layers.Assert(condition, []) Assert(condition, [])
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
self.run_network(net_func) self.run_network(net_func)
...@@ -62,7 +63,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -62,7 +63,7 @@ class TestAssertOp(unittest.TestCase):
zero = layers.fill_constant(shape=[1], dtype='int64', value=0) zero = layers.fill_constant(shape=[1], dtype='int64', value=0)
one = layers.fill_constant(shape=[1], dtype='int64', value=1) one = layers.fill_constant(shape=[1], dtype='int64', value=1)
condition = paddle.less_than(one, zero) # False condition = paddle.less_than(one, zero) # False
layers.Assert(condition, [zero, one]) Assert(condition, [zero, one])
print("test_assert_print_data") print("test_assert_print_data")
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
...@@ -72,7 +73,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -72,7 +73,7 @@ class TestAssertOp(unittest.TestCase):
def net_func(): def net_func():
x = layers.fill_constant(shape=[10], dtype='float32', value=2.0) x = layers.fill_constant(shape=[10], dtype='float32', value=2.0)
condition = paddle.max(x) < 1.0 condition = paddle.max(x) < 1.0
layers.Assert(condition, (x,), 5) Assert(condition, (x,), 5)
print("test_assert_summary") print("test_assert_summary")
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
...@@ -82,7 +83,7 @@ class TestAssertOp(unittest.TestCase): ...@@ -82,7 +83,7 @@ class TestAssertOp(unittest.TestCase):
def net_func(): def net_func():
x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0) x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
condition = paddle.max(x) < 1.0 condition = paddle.max(x) < 1.0
layers.Assert(condition, [x], 10, name="test") Assert(condition, [x], 10, name="test")
print("test_assert_summary_greater_than_size") print("test_assert_summary_greater_than_size")
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
......
...@@ -64,7 +64,7 @@ def convolutional_neural_network(use_py_reader): ...@@ -64,7 +64,7 @@ def convolutional_neural_network(use_py_reader):
acc = paddle.static.accuracy(input=prediction, label=label) acc = paddle.static.accuracy(input=prediction, label=label)
i = fluid.layers.zeros(shape=[1], dtype='int64') i = fluid.layers.zeros(shape=[1], dtype='int64')
array = fluid.layers.array_write(x=prediction, i=i) array = fluid.layers.array_write(x=prediction, i=i)
fluid.layers.increment(i) paddle.increment(i)
fluid.layers.array_write(x=acc, i=i, array=array) fluid.layers.array_write(x=acc, i=i, array=array)
return array, img, label, prediction, avg_loss, acc, py_reader return array, img, label, prediction, avg_loss, acc, py_reader
......
...@@ -54,7 +54,7 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -54,7 +54,7 @@ class TestCondInputOutput(unittest.TestCase):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
pred = paddle.less_than(y, x) pred = paddle.less_than(y, x)
out = layers.cond(pred, true_func, false_func) out = paddle.static.nn.cond(pred, true_func, false_func)
# out is one tensor # out is one tensor
place = ( place = (
...@@ -94,7 +94,7 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -94,7 +94,7 @@ class TestCondInputOutput(unittest.TestCase):
startup_program = Program() startup_program = Program()
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
pred = layers.fill_constant(shape=[1], dtype='bool', value=True) pred = layers.fill_constant(shape=[1], dtype='bool', value=True)
out = layers.cond(pred, true_func, false_func) out = paddle.static.nn.cond(pred, true_func, false_func)
# out is a tuple containing 2 tensors # out is a tuple containing 2 tensors
place = ( place = (
...@@ -138,7 +138,7 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -138,7 +138,7 @@ class TestCondInputOutput(unittest.TestCase):
a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7) a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7)
i = fluid.data(name="i", shape=[1], dtype='int32') i = fluid.data(name="i", shape=[1], dtype='int32')
pred = (i % 2) == 0 pred = (i % 2) == 0
a = layers.cond( a = paddle.static.nn.cond(
pred, lambda: true_func(a, i), lambda: false_func(a, i) pred, lambda: true_func(a, i), lambda: false_func(a, i)
) )
place = ( place = (
...@@ -183,9 +183,9 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -183,9 +183,9 @@ class TestCondInputOutput(unittest.TestCase):
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
i = fluid.data(name="i", shape=[1], dtype='int32') i = fluid.data(name="i", shape=[1], dtype='int32')
pred = (i % 2) == 0 pred = (i % 2) == 0
out1 = layers.cond(pred, true_func, false_func) out1 = paddle.static.nn.cond(pred, true_func, false_func)
out2 = layers.cond(pred, None, false_func) out2 = paddle.static.nn.cond(pred, None, false_func)
out3 = layers.cond(pred, true_func, None) out3 = paddle.static.nn.cond(pred, true_func, None)
place = ( place = (
fluid.CUDAPlace(0) fluid.CUDAPlace(0)
if core.is_compiled_with_cuda() if core.is_compiled_with_cuda()
...@@ -223,13 +223,15 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -223,13 +223,15 @@ class TestCondInputOutput(unittest.TestCase):
i = fluid.data(name="i", shape=[1], dtype='int32') i = fluid.data(name="i", shape=[1], dtype='int32')
pred = (i % 2) == 0 pred = (i % 2) == 0
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
out = layers.cond(pred, i, func_return_one_tensor) out = paddle.static.nn.cond(pred, i, func_return_one_tensor)
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
out = layers.cond(pred, func_return_one_tensor, np.asarray([3])) out = paddle.static.nn.cond(
pred, func_return_one_tensor, np.asarray([3])
)
with self.assertRaises(Exception) as e: with self.assertRaises(Exception) as e:
out = layers.cond( out = paddle.static.nn.cond(
pred, func_return_none, func_return_one_tensor pred, func_return_none, func_return_one_tensor
) )
self.assertTrue( self.assertTrue(
...@@ -238,7 +240,7 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -238,7 +240,7 @@ class TestCondInputOutput(unittest.TestCase):
) )
with self.assertRaises(Exception) as e: with self.assertRaises(Exception) as e:
out = layers.cond( out = paddle.static.nn.cond(
pred, func_return_two_tensors, func_return_none pred, func_return_two_tensors, func_return_none
) )
self.assertTrue( self.assertTrue(
...@@ -247,7 +249,7 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -247,7 +249,7 @@ class TestCondInputOutput(unittest.TestCase):
) )
with self.assertRaises(Exception) as e: with self.assertRaises(Exception) as e:
out = layers.cond( out = paddle.static.nn.cond(
pred, func_return_one_tensor, func_return_two_tensors pred, func_return_one_tensor, func_return_two_tensors
) )
self.assertTrue( self.assertTrue(
...@@ -268,7 +270,7 @@ class TestCondInputOutput(unittest.TestCase): ...@@ -268,7 +270,7 @@ class TestCondInputOutput(unittest.TestCase):
shape=[1], dtype='float32', value=1.25 shape=[1], dtype='float32', value=1.25
) )
b.stop_gradient = False b.stop_gradient = False
out = layers.cond(a - b < -1.0, lambda: a, lambda: b) out = paddle.static.nn.cond(a - b < -1.0, lambda: a, lambda: b)
append_backward(out) append_backward(out)
place = ( place = (
...@@ -308,14 +310,14 @@ class TestCondNestedControlFlow(unittest.TestCase): ...@@ -308,14 +310,14 @@ class TestCondNestedControlFlow(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
def less_than_branch(i, a): def less_than_branch(i, a):
return layers.cond( return paddle.static.nn.cond(
i >= 3.0, i >= 3.0,
lambda: paddle.add(a, a), lambda: paddle.add(a, a),
lambda: paddle.subtract(a, a), lambda: paddle.subtract(a, a),
) )
def greater_equal_branch(i, a): def greater_equal_branch(i, a):
return layers.cond( return paddle.static.nn.cond(
i < 8.0, i < 8.0,
lambda: paddle.multiply(a, a), lambda: paddle.multiply(a, a),
lambda: paddle.divide(a, a), lambda: paddle.divide(a, a),
...@@ -326,7 +328,7 @@ class TestCondNestedControlFlow(unittest.TestCase): ...@@ -326,7 +328,7 @@ class TestCondNestedControlFlow(unittest.TestCase):
with program_guard(main_program, startup_program): with program_guard(main_program, startup_program):
i = fluid.data(name="i", shape=[1], dtype='float32') i = fluid.data(name="i", shape=[1], dtype='float32')
a = 2.0 * i a = 2.0 * i
out = layers.cond( out = paddle.static.nn.cond(
i < 5.0, i < 5.0,
lambda: less_than_branch(i, a), lambda: less_than_branch(i, a),
lambda: greater_equal_branch(i, a), lambda: greater_equal_branch(i, a),
...@@ -370,14 +372,14 @@ class TestCondNestedControlFlow(unittest.TestCase): ...@@ -370,14 +372,14 @@ class TestCondNestedControlFlow(unittest.TestCase):
shape=[1], dtype='float32', value=1.24 shape=[1], dtype='float32', value=1.24
) )
b.stop_gradient = False b.stop_gradient = False
out = fluid.layers.cond( out = paddle.static.nn.cond(
a < b, a < b,
lambda: fluid.layers.cond( lambda: paddle.static.nn.cond(
a - b < -1.0, a - b < -1.0,
lambda: paddle.add(a, b), lambda: paddle.add(a, b),
lambda: paddle.multiply(a, b), lambda: paddle.multiply(a, b),
), ),
lambda: fluid.layers.cond( lambda: paddle.static.nn.cond(
a == b, a == b,
lambda: paddle.subtract(a, b), lambda: paddle.subtract(a, b),
lambda: paddle.pow(a, b), lambda: paddle.pow(a, b),
...@@ -550,7 +552,7 @@ class TestCondBackward(unittest.TestCase): ...@@ -550,7 +552,7 @@ class TestCondBackward(unittest.TestCase):
def cond_func(i, img, label): def cond_func(i, img, label):
predicate = (i % 2) == 0 predicate = (i % 2) == 0
return layers.cond( return paddle.static.nn.cond(
predicate, predicate,
lambda: simple_fc_net_with_inputs(img, label, class_num=10), lambda: simple_fc_net_with_inputs(img, label, class_num=10),
lambda: batchnorm_fc_with_inputs(img, label, class_num=10), lambda: batchnorm_fc_with_inputs(img, label, class_num=10),
...@@ -574,19 +576,19 @@ class TestCondBackward(unittest.TestCase): ...@@ -574,19 +576,19 @@ class TestCondBackward(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
def branch(i, img, label): def branch(i, img, label):
return layers.cond( return paddle.static.nn.cond(
(i % 2) == 0, (i % 2) == 0,
lambda: simple_fc_net_with_inputs(img, label, class_num=10), lambda: simple_fc_net_with_inputs(img, label, class_num=10),
lambda: batchnorm_fc_with_inputs(img, label, class_num=10), lambda: batchnorm_fc_with_inputs(img, label, class_num=10),
) )
def cond_func_simple_net_at_true(i, img, label): def cond_func_simple_net_at_true(i, img, label):
return layers.cond( return paddle.static.nn.cond(
i < 5, lambda: branch(i, img, label), lambda: paddle.mean(img) i < 5, lambda: branch(i, img, label), lambda: paddle.mean(img)
) )
def cond_func_simple_net_at_false(i, img, label): def cond_func_simple_net_at_false(i, img, label):
return layers.cond( return paddle.static.nn.cond(
i < 5, lambda: paddle.mean(img), lambda: branch(i, img, label) i < 5, lambda: paddle.mean(img), lambda: branch(i, img, label)
) )
...@@ -626,14 +628,14 @@ class TestCondBackward(unittest.TestCase): ...@@ -626,14 +628,14 @@ class TestCondBackward(unittest.TestCase):
predicate = (i % 2) == 0 predicate = (i % 2) == 0
else: else:
predicate = (i % 2) != 0 predicate = (i % 2) != 0
return layers.cond( return paddle.static.nn.cond(
predicate, predicate,
lambda: simple_fc_net_with_inputs(img, label, class_num=10), lambda: simple_fc_net_with_inputs(img, label, class_num=10),
lambda: batchnorm_fc_with_inputs(img, label, class_num=10), lambda: batchnorm_fc_with_inputs(img, label, class_num=10),
) )
def cond_func(i, img, label): def cond_func(i, img, label):
return layers.cond( return paddle.static.nn.cond(
i < 5, i < 5,
lambda: branch(i, img, label, True), lambda: branch(i, img, label, True),
lambda: branch(i, img, label, False), lambda: branch(i, img, label, False),
...@@ -665,16 +667,16 @@ class TestCondWithError(unittest.TestCase): ...@@ -665,16 +667,16 @@ class TestCondWithError(unittest.TestCase):
return pred return pred
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(None, func, func) paddle.static.nn.cond(None, func, func)
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(pred, func, set()) paddle.static.nn.cond(pred, func, set())
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(pred, set(), func) paddle.static.nn.cond(pred, set(), func)
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(pred, func, func, set()) paddle.static.nn.cond(pred, func, func, set())
class TestCondWithDict(unittest.TestCase): class TestCondWithDict(unittest.TestCase):
......
...@@ -228,7 +228,7 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase): ...@@ -228,7 +228,7 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase):
hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6) hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6)
return hidden2 return hidden2
hidden2 = fluid.layers.cond(cond, true_fn, false_fn) hidden2 = paddle.static.nn.cond(cond, true_fn, false_fn)
loss = paddle.nn.functional.cross_entropy( loss = paddle.nn.functional.cross_entropy(
input=fluid.layers.fc(hidden2, size=10, act='softmax'), input=fluid.layers.fc(hidden2, size=10, act='softmax'),
...@@ -271,7 +271,7 @@ class TestCloneWithRaise(unittest.TestCase): ...@@ -271,7 +271,7 @@ class TestCloneWithRaise(unittest.TestCase):
hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6) hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6)
return hidden2 return hidden2
hidden2 = fluid.layers.cond(cond, true_fn, false_fn) hidden2 = paddle.static.nn.cond(cond, true_fn, false_fn)
loss = paddle.nn.functional.cross_entropy( loss = paddle.nn.functional.cross_entropy(
input=fluid.layers.fc(hidden2, size=10, act='softmax'), input=fluid.layers.fc(hidden2, size=10, act='softmax'),
label=fluid.layers.data(name='label', shape=[1], dtype='int64'), label=fluid.layers.data(name='label', shape=[1], dtype='int64'),
......
...@@ -53,7 +53,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): ...@@ -53,7 +53,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
topk_coordinates = paddle.stack([batch_pos, indices], axis=2) topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
topk_coordinates.stop_gradient = stop_gradient topk_coordinates.stop_gradient = stop_gradient
score = paddle.gather_nd(x, topk_coordinates) score = paddle.gather_nd(x, topk_coordinates)
layers.increment(x=step_idx, value=1.0, in_place=True) paddle.increment(x=step_idx, value=1.0)
layers.array_write(score, i=step_idx, array=scores) layers.array_write(score, i=step_idx, array=scores)
length_cond = paddle.less_than(x=step_idx, y=max_len) length_cond = paddle.less_than(x=step_idx, y=max_len)
layers.assign(length_cond, cond) layers.assign(length_cond, cond)
......
...@@ -83,10 +83,10 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): ...@@ -83,10 +83,10 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
mem_array = layers.array_write(x=init, i=i) mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i) data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d1, i, array=data_array) layers.array_write(d1, i, array=data_array)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d2, i, array=data_array) layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64') i = layers.zeros(shape=[1], dtype='int64')
...@@ -112,7 +112,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): ...@@ -112,7 +112,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
prev = paddle.reshape(prev, shape=[10]) prev = paddle.reshape(prev, shape=[10])
result = layers.sums(input=[d, prev]) result = layers.sums(input=[d, prev])
i = layers.increment(x=i, in_place=True) i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array) layers.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond) paddle.assign(paddle.less_than(x=i, y=array_len), cond)
with while_op2.block(): with while_op2.block():
...@@ -122,7 +122,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): ...@@ -122,7 +122,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
prev2 = paddle.reshape(prev2, shape=[10]) prev2 = paddle.reshape(prev2, shape=[10])
result2 = layers.sums(input=[d2, prev2]) result2 = layers.sums(input=[d2, prev2])
j = layers.increment(x=j, in_place=True) j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array) layers.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
......
...@@ -16,8 +16,9 @@ import unittest ...@@ -16,8 +16,9 @@ import unittest
import numpy as np import numpy as np
import paddle
from paddle.fluid.executor import Executor from paddle.fluid.executor import Executor
from paddle.fluid.layers import array_write, data, increment, mul, zeros from paddle.fluid.layers import array_write, data, mul, zeros
class TestExecutor(unittest.TestCase): class TestExecutor(unittest.TestCase):
...@@ -26,13 +27,13 @@ class TestExecutor(unittest.TestCase): ...@@ -26,13 +27,13 @@ class TestExecutor(unittest.TestCase):
a = data(name='a', shape=[784], dtype='float32') a = data(name='a', shape=[784], dtype='float32')
array = array_write(x=a, i=i) array = array_write(x=a, i=i)
i = increment(i) i = paddle.increment(i)
b = data( b = data(
name='b', shape=[784, 100], dtype='float32', append_batch_size=False name='b', shape=[784, 100], dtype='float32', append_batch_size=False
) )
array_write(x=b, i=i, array=array) array_write(x=b, i=i, array=array)
i = increment(i) i = paddle.increment(i)
out = mul(x=a, y=b) out = mul(x=a, y=b)
array_write(x=out, i=i, array=array) array_write(x=out, i=i, array=array)
......
...@@ -18,6 +18,7 @@ import unittest ...@@ -18,6 +18,7 @@ import unittest
import numpy as np import numpy as np
from simple_nets import simple_fc_net, simple_fc_net_with_inputs from simple_nets import simple_fc_net, simple_fc_net_with_inputs
import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
...@@ -35,9 +36,9 @@ class TestFetchLoDTensorArray(unittest.TestCase): ...@@ -35,9 +36,9 @@ class TestFetchLoDTensorArray(unittest.TestCase):
opt.minimize(loss) opt.minimize(loss)
array = layers.array_write(x=img, i=i) array = layers.array_write(x=img, i=i)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(x=label, i=i, array=array) layers.array_write(x=label, i=i, array=array)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(x=loss, i=i, array=array) layers.array_write(x=loss, i=i, array=array)
return loss, array return loss, array
......
...@@ -1579,7 +1579,7 @@ class TestLayer(LayerTest): ...@@ -1579,7 +1579,7 @@ class TestLayer(LayerTest):
b = fluid.layers.fill_constant( b = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=0.23 shape=[1], dtype='float32', value=0.23
) )
out = fluid.layers.cond( out = paddle.static.nn.cond(
a >= b, a >= b,
lambda: greater_equal_branch(a, b), lambda: greater_equal_branch(a, b),
lambda: less_than_branch(a, b), lambda: less_than_branch(a, b),
...@@ -1599,12 +1599,12 @@ class TestLayer(LayerTest): ...@@ -1599,12 +1599,12 @@ class TestLayer(LayerTest):
b = fluid.dygraph.to_variable( b = fluid.dygraph.to_variable(
np.array([0.23]).astype('float32') np.array([0.23]).astype('float32')
) )
out = layers.cond( out = paddle.static.nn.cond(
a < b, a < b,
lambda: less_than_branch(a, b), lambda: less_than_branch(a, b),
lambda: greater_equal_branch(a, b), lambda: greater_equal_branch(a, b),
) )
out2 = layers.cond( out2 = paddle.static.nn.cond(
a >= b, a >= b,
lambda: greater_equal_branch(a, b), lambda: greater_equal_branch(a, b),
lambda: less_than_branch(a, b), lambda: less_than_branch(a, b),
...@@ -1615,18 +1615,18 @@ class TestLayer(LayerTest): ...@@ -1615,18 +1615,18 @@ class TestLayer(LayerTest):
eager_dynamic_res, eager_dynamic_res2 eager_dynamic_res, eager_dynamic_res2
) )
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(a < b, 'str', 'str') paddle.static.nn.cond(a < b, 'str', 'str')
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(a >= b, 'str', 'str') paddle.static.nn.cond(a >= b, 'str', 'str')
a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32')) a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32')) b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
out = layers.cond( out = paddle.static.nn.cond(
a < b, a < b,
lambda: less_than_branch(a, b), lambda: less_than_branch(a, b),
lambda: greater_equal_branch(a, b), lambda: greater_equal_branch(a, b),
) )
out2 = layers.cond( out2 = paddle.static.nn.cond(
a >= b, a >= b,
lambda: greater_equal_branch(a, b), lambda: greater_equal_branch(a, b),
lambda: less_than_branch(a, b), lambda: less_than_branch(a, b),
...@@ -1635,9 +1635,9 @@ class TestLayer(LayerTest): ...@@ -1635,9 +1635,9 @@ class TestLayer(LayerTest):
dynamic_res2 = out2.numpy() dynamic_res2 = out2.numpy()
np.testing.assert_array_equal(dynamic_res, dynamic_res2) np.testing.assert_array_equal(dynamic_res, dynamic_res2)
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(a < b, 'str', 'str') paddle.static.nn.cond(a < b, 'str', 'str')
with self.assertRaises(TypeError): with self.assertRaises(TypeError):
layers.cond(a >= b, 'str', 'str') paddle.static.nn.cond(a >= b, 'str', 'str')
np.testing.assert_array_equal(static_res, dynamic_res) np.testing.assert_array_equal(static_res, dynamic_res)
np.testing.assert_array_equal(static_res, eager_dynamic_res) np.testing.assert_array_equal(static_res, eager_dynamic_res)
......
...@@ -237,7 +237,7 @@ class TestMathOpPatches(unittest.TestCase): ...@@ -237,7 +237,7 @@ class TestMathOpPatches(unittest.TestCase):
one = paddle.ones(shape=[1], dtype='int32') one = paddle.ones(shape=[1], dtype='int32')
zero = fluid.layers.zeros(shape=[1], dtype='int32') zero = fluid.layers.zeros(shape=[1], dtype='int32')
cond = one == zero cond = one == zero
c = fluid.layers.cond(cond, lambda: a + b, lambda: a - b) c = paddle.static.nn.cond(cond, lambda: a + b, lambda: a - b)
place = fluid.CPUPlace() place = fluid.CPUPlace()
exe = fluid.Executor(place) exe = fluid.Executor(place)
......
...@@ -115,7 +115,7 @@ class SimpleNetWithCond: ...@@ -115,7 +115,7 @@ class SimpleNetWithCond:
return cond_res return cond_res
cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32')) cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32'))
sum_cond = fluid.layers.cond(cond_i > 1.0, cond_true, cond_false) sum_cond = paddle.static.nn.cond(cond_i > 1.0, cond_true, cond_false)
sum_all = paddle.add_n([sum_xy, sub_yz, sum_cond]) sum_all = paddle.add_n([sum_xy, sub_yz, sum_cond])
mean_out = paddle.mean(sum_all) mean_out = paddle.mean(sum_all)
if use_bf16: if use_bf16:
......
...@@ -50,7 +50,7 @@ class TestProfiler(unittest.TestCase): ...@@ -50,7 +50,7 @@ class TestProfiler(unittest.TestCase):
with while_op.block(): with while_op.block():
hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu') hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu')
layers.array_write(hidden_n, i, data_arr) layers.array_write(hidden_n, i, data_arr)
fluid.layers.increment(x=counter, value=1, in_place=True) paddle.increment(x=counter, value=1)
paddle.assign(paddle.less_than(x=counter, y=until), cond) paddle.assign(paddle.less_than(x=counter, y=until), cond)
hidden_n = layers.array_read(data_arr, i) hidden_n = layers.array_read(data_arr, i)
......
...@@ -46,7 +46,7 @@ class TestProgramToReadableCode(unittest.TestCase): ...@@ -46,7 +46,7 @@ class TestProgramToReadableCode(unittest.TestCase):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
pred = paddle.less_than(y, x) pred = paddle.less_than(y, x)
out = layers.cond(pred, true_func, false_func) out = paddle.static.nn.cond(pred, true_func, false_func)
def test_program_code(self): def test_program_code(self):
self.var._to_readable_code() self.var._to_readable_code()
......
...@@ -59,7 +59,7 @@ class TestApiWhileLoop(unittest.TestCase): ...@@ -59,7 +59,7 @@ class TestApiWhileLoop(unittest.TestCase):
def body(i, mem): def body(i, mem):
mem = paddle.add(x=mem, y=one) mem = paddle.add(x=mem, y=one)
i = layers.increment(i) i = paddle.increment(i)
return [i, mem] return [i, mem]
main_program = Program() main_program = Program()
...@@ -100,7 +100,7 @@ class TestApiWhileLoop(unittest.TestCase): ...@@ -100,7 +100,7 @@ class TestApiWhileLoop(unittest.TestCase):
test_list_dict[0]["test_key"] test_list_dict[0]["test_key"]
) )
i = layers.increment(i) i = paddle.increment(i)
return [i, ten, test_dict, test_list, test_list_dict] return [i, ten, test_dict, test_list, test_list_dict]
main_program = Program() main_program = Program()
...@@ -174,7 +174,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase): ...@@ -174,7 +174,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
def internal_body(j, init, sums): def internal_body(j, init, sums):
init = paddle.add(x=init, y=ones) init = paddle.add(x=init, y=ones)
sums = paddle.add(x=init, y=sums) sums = paddle.add(x=init, y=sums)
j = layers.increment(j) j = paddle.increment(j)
return [j, init, sums] return [j, init, sums]
result = paddle.static.nn.while_loop( result = paddle.static.nn.while_loop(
...@@ -184,7 +184,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase): ...@@ -184,7 +184,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
init = result[1] init = result[1]
sums = result[2] sums = result[2]
sums = paddle.add(x=init, y=sums) sums = paddle.add(x=init, y=sums)
i = layers.increment(i) i = paddle.increment(i)
return [i, j, init, sums] return [i, j, init, sums]
main_program = Program() main_program = Program()
...@@ -229,7 +229,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase): ...@@ -229,7 +229,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
def body(i, x): def body(i, x):
x = paddle.multiply(x=i, y=i) x = paddle.multiply(x=i, y=i)
i = layers.increment(i) i = paddle.increment(i)
return [i, x] return [i, x]
main_program = Program() main_program = Program()
...@@ -324,7 +324,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): ...@@ -324,7 +324,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
inner_prev = layers.array_read(array=mem_array, i=j) inner_prev = layers.array_read(array=mem_array, i=j)
inner_sum_0 = paddle.add(x=inner_data, y=inner_prev) inner_sum_0 = paddle.add(x=inner_data, y=inner_prev)
inner_sum_1 = paddle.add(x=x, y=inner_sum_0) inner_sum_1 = paddle.add(x=x, y=inner_sum_0)
j = layers.increment(x=j, in_place=True) j = paddle.increment(x=j)
layers.array_write(inner_sum_1, i=j, array=mem_array) layers.array_write(inner_sum_1, i=j, array=mem_array)
return [j, x, mem_array] return [j, x, mem_array]
...@@ -332,7 +332,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): ...@@ -332,7 +332,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
outer_prev = layers.array_read(array=mem_array, i=i) outer_prev = layers.array_read(array=mem_array, i=i)
outer_sum_0 = paddle.add(x=outer_data, y=outer_prev) outer_sum_0 = paddle.add(x=outer_data, y=outer_prev)
outer_sum_1 = paddle.add(x=x, y=outer_sum_0) outer_sum_1 = paddle.add(x=x, y=outer_sum_0)
i = layers.increment(x=i, in_place=True) i = paddle.increment(x=i)
layers.array_write(outer_sum_1, i=i, array=mem_array) layers.array_write(outer_sum_1, i=i, array=mem_array)
j, x, mem_array = paddle.static.nn.while_loop( j, x, mem_array = paddle.static.nn.while_loop(
internal_cond, internal_body, [j, x, mem_array] internal_cond, internal_body, [j, x, mem_array]
...@@ -352,9 +352,9 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): ...@@ -352,9 +352,9 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
init = layers.zeros(shape=[10], dtype='float32') init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i) mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i) data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d1, i, array=data_array) layers.array_write(d1, i, array=data_array)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d2, i, array=data_array) layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64') i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True i.stop_gradient = True
...@@ -444,7 +444,7 @@ class TestApiWhileLoop_Error(unittest.TestCase): ...@@ -444,7 +444,7 @@ class TestApiWhileLoop_Error(unittest.TestCase):
return 1 return 1
def cond_returns_not_bool_tensor(i): def cond_returns_not_bool_tensor(i):
return layers.increment(i) return paddle.increment(i)
def cond_returns_bool_tensor(i): def cond_returns_bool_tensor(i):
return paddle.less_than(i, ten) return paddle.less_than(i, ten)
...@@ -456,14 +456,14 @@ class TestApiWhileLoop_Error(unittest.TestCase): ...@@ -456,14 +456,14 @@ class TestApiWhileLoop_Error(unittest.TestCase):
return paddle.less_than(i, ten) return paddle.less_than(i, ten)
def body(i): def body(i):
return layers.increment(i) return paddle.increment(i)
def body_returns_error_length(i): def body_returns_error_length(i):
i = layers.increment(i) i = paddle.increment(i)
return [i, i] return [i, i]
def body_returns_error_type(i, ten): def body_returns_error_type(i, ten):
return layers.increment(i) return paddle.increment(i)
def cond_returns_with_mutable_dict(i, test_dict): def cond_returns_with_mutable_dict(i, test_dict):
return i > 0 return i > 0
...@@ -472,7 +472,7 @@ class TestApiWhileLoop_Error(unittest.TestCase): ...@@ -472,7 +472,7 @@ class TestApiWhileLoop_Error(unittest.TestCase):
test_dict['new_key'] = layers.fill_constant( test_dict['new_key'] = layers.fill_constant(
shape=[1], dtype='int64', value=1 shape=[1], dtype='int64', value=1
) )
return layers.increment(i), test_dict return paddle.increment(i), test_dict
def cond_returns_with_mutable_list(i, test_list): def cond_returns_with_mutable_list(i, test_list):
return i > 0 return i > 0
...@@ -481,7 +481,7 @@ class TestApiWhileLoop_Error(unittest.TestCase): ...@@ -481,7 +481,7 @@ class TestApiWhileLoop_Error(unittest.TestCase):
test_list.append( test_list.append(
layers.fill_constant(shape=[1], dtype='int64', value=1) layers.fill_constant(shape=[1], dtype='int64', value=1)
) )
return layers.increment(i), test_list return paddle.increment(i), test_list
main_program = Program() main_program = Program()
startup_program = Program() startup_program = Program()
......
...@@ -42,9 +42,9 @@ class TestWhileOp(unittest.TestCase): ...@@ -42,9 +42,9 @@ class TestWhileOp(unittest.TestCase):
init = layers.zeros(shape=[10], dtype='float32') init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i) mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i) data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d1, i, array=data_array) layers.array_write(d1, i, array=data_array)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d2, i, array=data_array) layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64') i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True i.stop_gradient = True
...@@ -63,7 +63,7 @@ class TestWhileOp(unittest.TestCase): ...@@ -63,7 +63,7 @@ class TestWhileOp(unittest.TestCase):
prev = layers.array_read(array=mem_array, i=i) prev = layers.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev]) result = layers.sums(input=[d, prev])
i = layers.increment(x=i, in_place=True) i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array) layers.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond) paddle.assign(paddle.less_than(x=i, y=array_len), cond)
...@@ -72,7 +72,7 @@ class TestWhileOp(unittest.TestCase): ...@@ -72,7 +72,7 @@ class TestWhileOp(unittest.TestCase):
prev2 = layers.array_read(array=mem_array, i=j) prev2 = layers.array_read(array=mem_array, i=j)
result2 = layers.sums(input=[d2, prev2]) result2 = layers.sums(input=[d2, prev2])
j = layers.increment(x=j, in_place=True) j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array) layers.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
sum_result = layers.array_read(array=mem_array, i=j) sum_result = layers.array_read(array=mem_array, i=j)
...@@ -134,7 +134,7 @@ class BadInputTest(unittest.TestCase): ...@@ -134,7 +134,7 @@ class BadInputTest(unittest.TestCase):
def test_bad_x(): def test_bad_x():
x = [1, 2, 3] x = [1, 2, 3]
fluid.layers.increment(x) paddle.increment(x)
self.assertRaises(TypeError, test_bad_x) self.assertRaises(TypeError, test_bad_x)
......
...@@ -41,9 +41,9 @@ class TestWhileOp(unittest.TestCase): ...@@ -41,9 +41,9 @@ class TestWhileOp(unittest.TestCase):
init = layers.zeros(shape=[10], dtype='float32') init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i) mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i) data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d1, i, array=data_array) layers.array_write(d1, i, array=data_array)
i = layers.increment(i) i = paddle.increment(i)
layers.array_write(d2, i, array=data_array) layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64') i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True i.stop_gradient = True
...@@ -62,7 +62,7 @@ class TestWhileOp(unittest.TestCase): ...@@ -62,7 +62,7 @@ class TestWhileOp(unittest.TestCase):
prev = layers.array_read(array=mem_array, i=i) prev = layers.array_read(array=mem_array, i=i)
result = layers.sums(input=[d, prev]) result = layers.sums(input=[d, prev])
i = layers.increment(x=i, in_place=True) i = paddle.increment(x=i)
layers.array_write(result, i=i, array=mem_array) layers.array_write(result, i=i, array=mem_array)
paddle.assign(paddle.less_than(x=i, y=array_len), cond) paddle.assign(paddle.less_than(x=i, y=array_len), cond)
...@@ -71,7 +71,7 @@ class TestWhileOp(unittest.TestCase): ...@@ -71,7 +71,7 @@ class TestWhileOp(unittest.TestCase):
prev2 = layers.array_read(array=mem_array, i=j) prev2 = layers.array_read(array=mem_array, i=j)
result2 = layers.sums(input=[d2, prev2]) result2 = layers.sums(input=[d2, prev2])
j = layers.increment(x=j, in_place=True) j = paddle.increment(x=j)
layers.array_write(result2, i=j, array=mem_array) layers.array_write(result2, i=j, array=mem_array)
paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
sum_result = layers.array_read(array=mem_array, i=j) sum_result = layers.array_read(array=mem_array, i=j)
......
...@@ -341,7 +341,7 @@ def get_value_for_bool_tensor(var, item): ...@@ -341,7 +341,7 @@ def get_value_for_bool_tensor(var, item):
var_shape[0] = 0 var_shape[0] = 0
return paddle.empty(var_shape, dtype=var.dtype) return paddle.empty(var_shape, dtype=var.dtype)
from .layers.control_flow import cond from paddle.static.nn import cond
return cond( return cond(
item.any(), lambda: idx_not_empty(var, item), lambda: idx_empty(var) item.any(), lambda: idx_not_empty(var, item), lambda: idx_empty(var)
...@@ -874,7 +874,7 @@ def set_value_for_bool_tensor(var, item, value): ...@@ -874,7 +874,7 @@ def set_value_for_bool_tensor(var, item, value):
out = scatter_nd_add(var, idx, gather_val_new) out = scatter_nd_add(var, idx, gather_val_new)
var[:] = out var[:] = out
from .layers.control_flow import cond from paddle.static.nn import cond
# If all the bool index is False, just do nothing # If all the bool index is False, just do nothing
cond(item.any(), lambda: idx_not_empty(var, item, value)) cond(item.any(), lambda: idx_not_empty(var, item, value))
......
...@@ -19,7 +19,7 @@ from .variable_trans_func import ( ...@@ -19,7 +19,7 @@ from .variable_trans_func import (
to_static_variable, to_static_variable,
) )
from paddle.fluid.framework import core, Variable from paddle.fluid.framework import core, Variable
from paddle.fluid.layers import Assert, Print from paddle.fluid.layers import Print
from paddle.fluid.layers import ( from paddle.fluid.layers import (
array_read, array_read,
array_write, array_write,
...@@ -33,9 +33,7 @@ from paddle.fluid.layers import ( ...@@ -33,9 +33,7 @@ from paddle.fluid.layers import (
control_flow, control_flow,
) )
from paddle.fluid.layers.control_flow import ( from paddle.fluid.layers.control_flow import (
cond,
while_loop, while_loop,
increment,
) )
from .return_transformer import ( from .return_transformer import (
RETURN_NO_VALUE_VAR_NAME, RETURN_NO_VALUE_VAR_NAME,
...@@ -395,7 +393,7 @@ def _run_paddle_cond( ...@@ -395,7 +393,7 @@ def _run_paddle_cond(
return ret return ret
try: try:
cond_outs = control_flow.cond( cond_outs = paddle.static.nn.cond(
pred, new_true_fn, new_false_fn, None, return_name_ids pred, new_true_fn, new_false_fn, None, return_name_ids
) )
except Exception as e: except Exception as e:
...@@ -734,6 +732,8 @@ def convert_assert(cond, message=""): ...@@ -734,6 +732,8 @@ def convert_assert(cond, message=""):
if isinstance(cond, Variable): if isinstance(cond, Variable):
cond = cast(cond, "bool") cond = cast(cond, "bool")
# NOTE: message is not used because Paddle Assert has no corresponding parameter to use. # NOTE: message is not used because Paddle Assert has no corresponding parameter to use.
from paddle.static.nn.control_flow import Assert
return Assert(cond) return Assert(cond)
else: else:
assert cond, message assert cond, message
...@@ -786,7 +786,8 @@ def _run_paddle_pop(array, *args): ...@@ -786,7 +786,8 @@ def _run_paddle_pop(array, *args):
def body(i, new_array): def body(i, new_array):
item = array_read(array=array, i=i) item = array_read(array=array, i=i)
array_write(item, paddle.tensor.array_length(new_array), new_array) array_write(item, paddle.tensor.array_length(new_array), new_array)
i = increment(i)
i = paddle.increment(i)
return i, new_array return i, new_array
arr_len = paddle.tensor.array_length(array) arr_len = paddle.tensor.array_length(array)
...@@ -816,7 +817,9 @@ def _slice_tensor_array(array, start, end): ...@@ -816,7 +817,9 @@ def _slice_tensor_array(array, start, end):
new_array = paddle.slice(array, starts=[start], ends=[end], axes=[0]) new_array = paddle.slice(array, starts=[start], ends=[end], axes=[0])
return new_array return new_array
new_array = cond(start == end, true_fn, lambda: false_fn(array, start, end)) new_array = paddle.static.nn.cond(
start == end, true_fn, lambda: false_fn(array, start, end)
)
return new_array return new_array
......
...@@ -29,7 +29,6 @@ from .control_flow import ( ...@@ -29,7 +29,6 @@ from .control_flow import (
from .common import bilinear_tensor_product # noqa: F401 from .common import bilinear_tensor_product # noqa: F401
from .common import py_func # noqa: F401 from .common import py_func # noqa: F401
from ...tensor.creation import create_parameter # noqa: F401 from ...tensor.creation import create_parameter # noqa: F401
from ...fluid.layers import cond # noqa: F401
from ...fluid.layers import conv2d # noqa: F401 from ...fluid.layers import conv2d # noqa: F401
from ...fluid.layers import crf_decoding # noqa: F401 from ...fluid.layers import crf_decoding # noqa: F401
from ...fluid.layers import layer_norm # noqa: F401 from ...fluid.layers import layer_norm # noqa: F401
...@@ -59,6 +58,8 @@ from ...fluid.layers.sequence_lod import sequence_scatter # noqa: F401 ...@@ -59,6 +58,8 @@ from ...fluid.layers.sequence_lod import sequence_scatter # noqa: F401
from ...fluid.layers.sequence_lod import sequence_enumerate # noqa: F401 from ...fluid.layers.sequence_lod import sequence_enumerate # noqa: F401
from ...fluid.layers.sequence_lod import sequence_reverse # noqa: F401 from ...fluid.layers.sequence_lod import sequence_reverse # noqa: F401
from .control_flow import cond
__all__ = [ # noqa __all__ = [ # noqa
'fc', 'fc',
'batch_norm', 'batch_norm',
......
...@@ -27,16 +27,90 @@ from paddle.common_ops_import import ( ...@@ -27,16 +27,90 @@ from paddle.common_ops_import import (
from paddle.fluid.framework import Operator, Program, Variable from paddle.fluid.framework import Operator, Program, Variable
# Temporary solution, it will be deleted later # Temporary solution, it will be deleted later
from paddle.fluid.layers.control_flow import cond from paddle.fluid.layers.control_flow import ConditionalBlock, select_input
from paddle.fluid.layers.tensor import assign, cast
from paddle.fluid.layers.utils import ( from paddle.fluid.layers.utils import (
assert_same_structure, assert_same_structure,
copy_mutable_vars, copy_mutable_vars,
flatten,
hold_mutable_vars, hold_mutable_vars,
is_sequence, is_sequence,
map_structure, map_structure,
pack_sequence_as,
to_sequence,
) )
def Assert(cond, data=None, summarize=20, name=None):
'''
This API creates an op that asserts the given condition is true. If the
condition is false, it prints the tensors in data. ``summarize`` specifies
the number of elements in the tensors to print.
Args:
cond (Variable): The boolean condition tensor whose numel should be 1.
data (list|tuple, optional): list or tuple of tensors to print when
condition is not true. If it's ``None``, no tensor will be printed.
The default value is ``None``.
summarize (int, optional): Number of elements in the tensor to be
printed. If its value is -1, then all elements in the tensor will
be printed. The default value is 20.
name (str, optional): The default value is ``None`` . Normally users
don't have to set this parameter. For more information, please
refer to :ref:`api_guide_Name` .
Returns:
Operator: the created operation.
Examples:
.. code-block:: python
import paddle
from paddle.static.nn.control_flow import Assert
paddle.enable_static()
x = paddle.full([2, 3], 2.0, 'float32')
condition = paddle.max(x) < 1.0 # False
Assert(condition, [x], 10, "example_assert_layer")
exe = paddle.static.Executor()
try:
exe.run(paddle.static.default_main_program())
# Print x and throws ValueError
# Example printed message for x:
#
# Variable: fill_constant_0.tmp_0
# - lod: {}
# - place: CPUPlace()
# - shape: [2, 3]
# - layout: NCHW
# - dtype: float
# - data: [2 2 2 2 2 2]
except ValueError as e:
print("Assert Exception Example")
'''
check_variable_and_dtype(
cond, "cond", ["bool"], "static.nn.control_flow.Assert"
)
check_type(
data, "data", (list, tuple, type(None)), "static.nn.control_flow.Assert"
)
check_type(summarize, "summarize", int, "static.nn.control_flow.Assert")
check_type(name, "name", (str, type(None)), "static.nn.control_flow.Assert")
layer_name = name if name else ('assert_' + cond.name)
helper = LayerHelper(layer_name, **locals())
op = helper.append_op(
type="assert",
inputs={"Cond": cond, "Data": [] if data is None else list(data)},
attrs={"summarize": summarize},
)
return op
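As a quick reference, here is a minimal before/after sketch of the Assert migration this PR performs, assuming a CPU static-graph program; the new import path mirrors the one introduced in convert_assert above, and the surrounding setup is illustrative only.

# Before (fluid API, removed by this PR):
#   from paddle.fluid.layers import Assert
# After (new location, as used in convert_assert above):
import paddle
from paddle.static.nn.control_flow import Assert

paddle.enable_static()
x = paddle.full([2, 3], 2.0, 'float32')
condition = paddle.max(x) < 1.0      # evaluates to False at run time
Assert(condition, data=[x], summarize=10)

exe = paddle.static.Executor(paddle.CPUPlace())
try:
    exe.run(paddle.static.default_main_program())
except ValueError:
    # The assert op prints the tensors in `data` and then raises ValueError.
    print("Assert fired")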
class BlockGuard: class BlockGuard:
""" """
BlockGuard class. BlockGuard class.
...@@ -795,3 +869,464 @@ def switch_case(branch_index, branch_fns, default=None, name=None): ...@@ -795,3 +869,464 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
final_fn = false_fn final_fn = false_fn
return final_fn() return final_fn()
def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
"""
This API returns ``true_fn()`` if the predicate ``pred`` is true else
``false_fn()`` . Users can also set ``true_fn`` or ``false_fn`` to
``None`` ; in that case this API treats the callable as if it simply
returned ``None`` .
``true_fn`` and ``false_fn`` should return the same nest structure of tensors,
or both return ``None`` if the user doesn't want to return anything. A nest
structure of tensors in PaddlePaddle is tensor(s), or a tuple of tensors, or
a list of tensors.
Note:
1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must have
the same structure because of the dataflow model of PaddlePaddle, while the
tensors in the tuples or the lists can have different shapes.
2. This API can be used in both static mode and dygraph mode. In dygraph
mode, the API only runs one branch based on the condition.
3. In static mode, any tensors or operations created outside or inside of
``true_fn`` and ``false_fn`` will be in net building regardless of which
branch is selected at runtime. This has frequently surprised users who
expected lazy semantics. For example:
.. code-block:: python
import paddle
a = paddle.zeros((1, 1))
b = paddle.zeros((1, 1))
c = a * b
out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
No matter whether ``a < b`` , ``c = a * b`` will be in net building and
run. ``a + c`` and ``b * b`` will be in net building, but only one
branch will be executed during runtime.
Args:
pred(Tensor): A boolean tensor whose numel should be 1. The boolean
value determines whether to return the result of ``true_fn`` or
``false_fn`` .
true_fn(callable, optional): A callable to be performed if ``pred`` is
true. The default value is ``None`` .
false_fn(callable, optional): A callable to be performed if ``pred`` is
false. The default value is ``None`` .
name(str, optional): The default value is ``None`` . Normally users
don't have to set this parameter. For more information, please
refer to :ref:`api_guide_Name` .
return_names(sequence of string, optional): The default value is ``None`` .
Normally users don't have to set this parameter. A sequence of strings
that represents the names of the returned vars. The structure of the
sequence must be the same as the return values of true_fn and false_fn.
Returns:
Tensor|list(Tensor)|tuple(Tensor): returns ``true_fn()`` if the
predicate ``pred`` is true else ``false_fn()`` .
Examples:
.. code-block:: python
import paddle
#
# pseudocode:
# if 0.1 < 0.23:
# return 1, True
# else:
# return 3, 2
#
def true_func():
return paddle.full(shape=[1, 2], dtype='int32',
fill_value=1), paddle.full(shape=[2, 3],
dtype='bool',
fill_value=True)
def false_func():
return paddle.full(shape=[3, 4], dtype='float32',
fill_value=3), paddle.full(shape=[4, 5],
dtype='int64',
fill_value=2)
x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
pred = paddle.less_than(x=x, y=y, name=None)
ret = paddle.static.nn.cond(pred, true_func, false_func)
# ret is a tuple containing 2 tensors
# ret[0] = [[1 1]]
# ret[1] = [[ True True True]
# [ True True True]]
"""
if _non_static_mode():
assert isinstance(pred, Variable), "The pred in cond must be Variable"
assert pred.size == 1, "condition input's numel should be 1"
pred = pred.numpy()[0]
if pred:
if true_fn is not None:
if not callable(true_fn):
raise TypeError(
"The true_fn in cond must be callable, but received {}".format(
type(true_fn).__name__
)
)
return true_fn()
else:
if false_fn is not None:
if not callable(false_fn):
raise TypeError(
"The false_fn in cond must be callable, but received {}".format(
type(false_fn).__name__
)
)
return false_fn()
return None
check_variable_and_dtype(pred, "pred", ['bool'], "fluid.layers.cond")
check_type(name, "name", (str, type(None)), "fluid.layers.cond")
helper = LayerHelper('cond', **locals())
true_output = None
false_output = None
copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
if true_fn is not None:
if not callable(true_fn):
raise TypeError(
"The true_fn in cond must be callable, but received {}".format(
type(true_fn).__name__
)
)
true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
with true_cond_block.block():
origin_true_output = true_fn()
if origin_true_output is not None:
true_output = map_structure(
copy_to_parent_func, origin_true_output
)
if false_fn is not None:
if not callable(false_fn):
raise TypeError(
"The false_fn in cond must be callable, but received {}".format(
type(false_fn).__name__
)
)
false_cond_block = ConditionalBlock(
[paddle.logical_not(pred)], is_scalar_condition=True
)
with false_cond_block.block():
origin_false_output = false_fn()
if origin_false_output is not None:
false_output = map_structure(
copy_to_parent_func, origin_false_output
)
if true_output is None and false_output is None:
return None
if true_output is None:
raise ValueError(
"Incompatible return values of true_fn and false_fn in cond: "
"true_fn returns None while false_fn returns non-None"
)
if false_output is None:
raise ValueError(
"Incompatible return values of true_fn and false_fn in cond: "
"true_fn returns non-None while false_fn returns None"
)
# Merge true and false output if they are not None
if return_names is None:
is_dy2staic = False
return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
else:
"""
dy2static will set the return_names and expand the return values to UndefinedVar.
"""
is_dy2staic = True
# TODO: expand_undefined_var will replace None to Undefinedvar(), to fix cases like:
# a = None
# if condition:
# a = 1
# Because we can not use variable to express 'None'
true_output, false_output = expand_undefined_var(
true_output, false_output, return_names
)
if len(_to_sequence_except_dict(true_output)) != len(
_to_sequence_except_dict(false_output)
):
raise ValueError(
"true fn returns {} vars, but false fn returns {} vars, which is not equals".format(
len(_to_sequence_except_dict(true_output)),
len(_to_sequence_except_dict(false_output)),
)
)
for true_out, false_out, return_name in zip(
_to_sequence_except_dict(true_output),
_to_sequence_except_dict(false_output),
_to_sequence_except_dict(return_names),
):
try:
assert_same_structure(true_out, false_out, check_types=False)
except ValueError as e:
raise ValueError(
"Incompatible return values of `{}` in true_fn and false_fn in cond: {}".format(
return_name, e
)
)
def check_ret_none(seq_true, seq_false, seq_names):
for f_true, f_false, f_name in zip(seq_true, seq_false, seq_names):
f_true = flatten(f_true)
f_false = flatten(f_false)
for idx in range(len(f_true)):
if (
f_true[idx] is None
and f_false[idx] is not None
or f_false[idx] is None
and f_true[idx] is not None
):
warnings.warn(
"In cond : Var '{}' or part of it is set differently in ifelse branchs, "
"<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
"'None' in ifelse block might lead to error.".format(
f_name,
type(f_true[idx]),
f_true[idx],
type(f_false[idx]),
f_false[idx],
)
)
check_ret_none(
_to_sequence_except_dict(true_output),
_to_sequence_except_dict(false_output),
_to_sequence_except_dict(return_names),
)
if is_dy2staic:
true_output, false_output = change_none_to_undefinedvar(
true_output, false_output
)
mask = cast(pred, dtype='int32')
merge_func = (
lambda name, false_var, true_var: select_input_with_buildin_type(
[false_var, true_var], mask, name
)
)
def merge_every_var_list(false_vars, true_vars, name):
return map_structure(partial(merge_func, name), false_vars, true_vars)
merged_output = list(
map(
merge_every_var_list,
_to_sequence_except_dict(false_output),
_to_sequence_except_dict(true_output),
_to_sequence_except_dict(return_names),
)
)
merged_output = pack_sequence_as(false_output, flatten(merged_output))
return merged_output
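For context, a small sketch of the dygraph fast path handled by the `_non_static_mode()` branch at the top of `cond` above: in dygraph mode only the selected callable actually executes. Assumptions: eager mode on CPU; the values mirror the dygraph tests changed earlier in this diff.

# Dygraph sketch: only the branch selected by `pred` runs.
import paddle

paddle.disable_static()
a = paddle.to_tensor([0.1], dtype='float32')
b = paddle.to_tensor([0.23], dtype='float32')

# a < b is True, so only the first lambda is evaluated here.
out = paddle.static.nn.cond(a < b, lambda: a + b, lambda: a - b)
print(out.numpy())  # approximately [0.33]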
def copy_var_to_parent_block(var, layer_helper):
if not isinstance(var, Variable):
return var
prog = layer_helper.main_program
parent_idx = prog.current_block().parent_idx
assert (
parent_idx >= 0
), "Got wrong parent block index when assigning var to parent scope in control_flow"
parent_block = prog.block(parent_idx)
if (
var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
and parent_block._find_var_recursive(var.name)
):
parent_block_var = var
else:
parent_block_var = parent_block.create_var(
dtype=var.dtype, shape=var.shape, type=var.type
)
assign(var, parent_block_var)
return parent_block_var
def select_input_with_buildin_type(inputs, mask, name):
from paddle.jit.dy2static.utils import UndefinedVar
from paddle.jit.dy2static.variable_trans_func import to_static_variable
false_var, true_var = inputs
if isinstance(false_var, UndefinedVar) and isinstance(
true_var, UndefinedVar
):
"""None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None."""
return None
if isinstance(false_var, Variable) and isinstance(true_var, Variable):
try:
return select_input(inputs, mask)
except Exception as e:
raise RuntimeError(
f"Exceptions throwed while doing select_input on {name}:\n{e}"
)
elif isinstance(false_var, support_ret_buildin_type) and isinstance(
false_var, type(true_var)
):
if false_var == true_var:
return false_var
else:
inputs = [
to_static_variable(false_var),
to_static_variable(true_var),
]
# Deal with the situations like this: false_var is int and true_var is Variable
elif (
isinstance(false_var, support_ret_buildin_type)
and isinstance(true_var, Variable)
) or (
isinstance(true_var, support_ret_buildin_type)
and isinstance(false_var, Variable)
):
inputs = [to_static_variable(false_var), to_static_variable(true_var)]
warnings.warn(
"Return results from different branches in cond are not same type: "
"false_var returned by false_fn is '{}' and true_var of true_fn is "
"'{}'".format(type(false_var), type(true_var))
)
elif (
isinstance(false_var, UndefinedVar)
and isinstance(true_var, (Variable,) + support_ret_buildin_type)
) or (
isinstance(true_var, UndefinedVar)
and isinstance(false_var, (Variable,) + support_ret_buildin_type)
):
def create_var_if_not_undefined_var(a):
if isinstance(a, UndefinedVar):
return a
return to_static_variable(a)
true_var, false_var = to_static_variable(true_var), to_static_variable(
false_var
)
inputs = [false_var, true_var]
else:
raise TypeError(
"Unsupported return type of true_fn and false_fn in cond: false_var "
"returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
type(false_var), type(true_var)
)
)
try:
return select_input(inputs, mask)
except Exception as e:
raise RuntimeError(
f"Exceptions throwed while doing select_input on {name}:\n{e}"
)
def _is_sequence_except_dict(x):
"""
In this function, dict is not viewed as sequence.
"""
if isinstance(x, dict):
return False
return is_sequence(x)
def _to_sequence_except_dict(x):
"""
In this function, dict is not viewed as sequence.
"""
if isinstance(x, dict):
return [x]
return to_sequence(x)
def expand_undefined_var(nest1, nest2, names):
"""TODO: make this function recursively.
nest1: Var1, (UndefinedVar, [1,2,3])
nest2: Var2, ([1,2,3,4], UndefinedVar)
In this case, we should not expand recursively.
"""
from paddle.jit.dy2static.return_transformer import RETURN_VALUE_PREFIX
from paddle.jit.dy2static.utils import UndefinedVar
def pack_undefined_var_as(seq):
return pack_sequence_as(
seq, [UndefinedVar("padding") for i in flatten(seq)]
)
def map_fn(n1, n2, name, order):
if not name.startswith(RETURN_VALUE_PREFIX) and (
isinstance(n1, UndefinedVar) or n1 is None
):
if n1 is None and n2 is not None:
if order == 0:
warnings.warn(
"In cond : Var '{}' or part of it is set differently in ifelse branchs, "
"<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
"'None' in ifelse block might lead to error.".format(
name, type(n1), n1, type(n2), n2
)
)
else:
warnings.warn(
"In cond : Var '{}' or part of it is set differently in ifelse branchs, "
"<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
"'None' in ifelse block might lead to error.".format(
name, type(n2), n2, type(n1), n1
)
)
return pack_undefined_var_as(n2)
return n1
nest1_out = list(
map(
map_fn,
_to_sequence_except_dict(nest1),
_to_sequence_except_dict(nest2),
_to_sequence_except_dict(names),
[0 for i in _to_sequence_except_dict(names)],
)
)
nest2_out = list(
map(
map_fn,
_to_sequence_except_dict(nest2),
_to_sequence_except_dict(nest1),
_to_sequence_except_dict(names),
[1 for i in _to_sequence_except_dict(names)],
)
)
if not _is_sequence_except_dict(nest1):
nest1_out = nest1_out[0]
if not _is_sequence_except_dict(nest2):
nest2_out = nest2_out[0]
return nest1_out, nest2_out
def change_none_to_undefinedvar(nest1, nest2):
from paddle.jit.dy2static.utils import UndefinedVar
def map_fn(x):
if x is None:
return UndefinedVar("padding")
return x
nest1_out = pack_sequence_as(nest1, list(map(map_fn, flatten(nest1))))
nest2_out = pack_sequence_as(nest2, list(map(map_fn, flatten(nest2))))
return nest1_out, nest2_out
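Taken together, the call-site changes in this diff follow a simple mapping; the sketch below summarizes it and is not exhaustive. It assumes a static-graph program; note that the in_place argument is simply dropped at the migrated increment call sites.

# Migration summary (based on the call sites changed above):
#   fluid.layers.increment(x, value=1.0, in_place=True)  ->  paddle.increment(x, value=1.0)
#   fluid.layers.cond(pred, true_fn, false_fn)           ->  paddle.static.nn.cond(pred, true_fn, false_fn)
#   fluid.layers.Assert(cond, data, summarize)           ->  paddle.static.nn.control_flow.Assert(cond, data, summarize)
import paddle

paddle.enable_static()
i = paddle.zeros(shape=[1], dtype='int64')
i = paddle.increment(i)                     # replaces fluid.layers.increment(i, in_place=True)
x = paddle.full(shape=[1], fill_value=0.1, dtype='float32')
y = paddle.full(shape=[1], fill_value=0.23, dtype='float32')
out = paddle.static.nn.cond(x < y, lambda: i + 1, lambda: i - 1)  # replaces fluid.layers.cond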