diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index c2c9a31769db39247df78b49892e6e6766c96216..8085ac077c31152af9a8ad195167a7dfec881912 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -64,7 +64,7 @@ class HybridParallelInferenceHelper: element_in_arr = layers.array_read(array=arr, i=step_idx) # write placehold data to global lod_tensor_array, # it need for send_v2 of lod_tensor_array - layers.increment(x=step_idx, value=1.0, in_place=True) + paddle.increment(x=step_idx, value=1.0) layers.array_write(element_in_arr, i=step_idx, array=arr) with paddle.fluid.device_guard(f'{device}:0'): @@ -137,7 +137,7 @@ class HybridParallelInferenceHelper: with while_op.block(): with paddle.fluid.device_guard(f'{device}:all'): input = layers.array_read(array=data, i=step_idx) - layers.increment(x=step_idx, value=1.0, in_place=True) + paddle.increment(x=step_idx, value=1.0) layers.array_write(input, i=step_idx, array=data) with paddle.fluid.device_guard(f'{device}:0'): diff --git a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py index b6a22c778e894756a28776996c013d3f28a0e599..c4ccb89d2f56fc846387fb5f88f4b9b39d7b1b0d 100644 --- a/python/paddle/distributed/passes/auto_parallel_gradient_merge.py +++ b/python/paddle/distributed/passes/auto_parallel_gradient_merge.py @@ -286,7 +286,7 @@ def _create_cond_block_and_update_optimizer( ) new_grad.op._set_attr(OP_ROLE_KEY, op_maker.OpRole.Optimize) - layers.cond(cond_var, true_fn=true_apply_gradient, false_fn=None) + paddle.static.nn.cond(cond_var, true_fn=true_apply_gradient, false_fn=None) cond_op = main_program.global_block().ops[-1] cond_op._set_attr(OP_ROLE_KEY, OpRole.Optimize) diff --git a/python/paddle/fluid/contrib/slim/quantization/adaround.py b/python/paddle/fluid/contrib/slim/quantization/adaround.py index b024c0d77399675723bb4a9542c064fd0dc23f93..f4bbccd7f1d86d1aac02564f6ad85f682a2b18e4 100644 --- a/python/paddle/fluid/contrib/slim/quantization/adaround.py +++ b/python/paddle/fluid/contrib/slim/quantization/adaround.py @@ -83,7 +83,7 @@ class AdaRoundLoss: return round_loss - round_loss = fluid.layers.cond( + round_loss = paddle.static.nn.cond( warm_start, lambda: fluid.layers.fill_constant( shape=[1], dtype='float32', value=0.0 diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index b30824019947b8408bc864b22237fdd9ceece658..ed15416fd35e36d006b6d262630fdd7dddd3420e 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -53,13 +53,10 @@ from paddle import _C_ops, _legacy_C_ops __all__ = [ 'Switch', - 'increment', 'array_write', 'array_read', - 'cond', 'StaticRNN', 'Print', - 'Assert', 'while_loop', ] @@ -100,7 +97,7 @@ def _select_input_infer_shape(first_shape, second_shape): 2. compare axis one by one: if a == b: we set axis to a if a != b: we set axis to -1 - for compatibility,non declarative mode, we just return second_shape. + for compatibility, non declarative mode, we just return second_shape. """ if len(first_shape) != len(second_shape): warnings.warn( @@ -134,6 +131,7 @@ def select_input(inputs, mask): # Select input should expand the shape. If it is - 1 and valid number, use - 1 first. 
If the dim is different, an error will be reported directly # assert inputs[0].dtype == inputs[1].dtype, f"Expect the inputs should have the same dtype, but get {inputs[0].dtype} and {inputs[1].dtype}" + output_shape = _select_input_infer_shape(inputs[0].shape, inputs[1].shape) output_dtype = inputs[1].dtype output_type = inputs[1].type @@ -149,84 +147,6 @@ def select_input(inputs, mask): return out -def select_input_with_buildin_type(inputs, mask, name): - from paddle.jit.dy2static.variable_trans_func import ( - to_static_variable, - ) - from paddle.jit.dy2static.utils import UndefinedVar - - false_var, true_var = inputs - - if isinstance(false_var, UndefinedVar) and isinstance( - true_var, UndefinedVar - ): - """None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None.""" - return None - - if isinstance(false_var, Variable) and isinstance(true_var, Variable): - try: - return select_input(inputs, mask) - except Exception as e: - raise RuntimeError( - f"Exceptions throwed while doing select_input on {name}:\n{e}" - ) - - elif isinstance(false_var, support_ret_buildin_type) and isinstance( - false_var, type(true_var) - ): - if false_var == true_var: - return false_var - else: - inputs = [ - to_static_variable(false_var), - to_static_variable(true_var), - ] - # Deal with the situations like this: false_var is int and true_var is Variable - elif ( - isinstance(false_var, support_ret_buildin_type) - and isinstance(true_var, Variable) - ) or ( - isinstance(true_var, support_ret_buildin_type) - and isinstance(false_var, Variable) - ): - inputs = [to_static_variable(false_var), to_static_variable(true_var)] - warnings.warn( - "Return results from different branches in cond are not same type: " - "false_var returned by false_fn is '{}' and true_var of true_fn is " - "'{}'".format(type(false_var), type(true_var)) - ) - elif ( - isinstance(false_var, UndefinedVar) - and isinstance(true_var, (Variable,) + support_ret_buildin_type) - ) or ( - isinstance(true_var, UndefinedVar) - and isinstance(false_var, (Variable,) + support_ret_buildin_type) - ): - - def create_var_if_not_undefined_var(a): - if isinstance(a, UndefinedVar): - return a - return to_static_variable(a) - - true_var, false_var = to_static_variable(true_var), to_static_variable( - false_var - ) - inputs = [false_var, true_var] - else: - raise TypeError( - "Unsupported return type of true_fn and false_fn in cond: false_var " - "returned by false_fn is '{}' and true_var of true_fn is '{}'".format( - type(false_var), type(true_var) - ) - ) - try: - return select_input(inputs, mask) - except Exception as e: - raise RuntimeError( - f"Exceptions throwed while doing select_input on {name}:\n{e}" - ) - - def split_lod_tensor(input, mask, level=0): """ This function takes in an input that contains the complete lod information, @@ -449,78 +369,6 @@ def Print( return output -def Assert(cond, data=None, summarize=20, name=None): - ''' - This API creates an op that asserts the given condition is true. If the - condition is false, prints the tensors in data. ``summarize`` specifies the - number of the elements in the tensors to print. - - Args: - cond (Variable): The boolean condition tensor whose numel should be 1. - data (list|tuple, optional): list or tuple of tensors to print when - condition is not true. If it's ``None``, no tensor will be printed. - The default value is ``None``. - summarize (int, optional): Number of elements in the tensor to be - printed. 
If its value is -1, then all elements in the tensor will - be printed. The default value is 20. - name (str, optional): The default value is ``None`` . Normally users - don't have to set this parameter. For more information, please - refer to :ref:`api_guide_Name` . - - Returns: - Operator: the created operation. - - Raises: - TypeError: If ``cond`` is not boolean Variable. - TypeError: If ``data`` is not a list or tuple or ``None``. - TypeError: If ``summarize`` is not int. - TypeError: If ``name`` is not a string or ``None`` . - fluid.core.EnforceNotMet: If the condition is False in running time. - - Examples: - .. code-block:: python - - import paddle.fluid as fluid - import paddle.fluid.layers as layers - - x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0) - condition = layers.reduce_max(x) < 1.0 # False - layers.Assert(condition, [x], 10, "example_assert_layer") - - exe = fluid.Executor() - try: - exe.run(fluid.default_main_program()) - # Print x and throws paddle.fluid.core.EnforceNotMet exception - # Example printed message for x: - # - # Variable: fill_constant_0.tmp_0 - # - lod: {} - # - place: CPUPlace() - # - shape: [2, 3] - # - layout: NCHW - # - dtype: float - # - data: [2 2 2 2 2 2] - except fluid.core.EnforceNotMet as e: - print("Assert Exception Example") - - ''' - check_variable_and_dtype(cond, "cond", ["bool"], "fluid.layers.Assert") - check_type(data, "data", (list, tuple, type(None)), "fluid.layers.Assert") - check_type(summarize, "summarize", int, "fluid.layers.Assert") - check_type(name, "name", (str, type(None)), "fluid.layers.Assert") - - layer_name = name if name else ('assert_' + cond.name) - helper = LayerHelper(layer_name, **locals()) - - op = helper.append_op( - type="assert", - inputs={"Cond": cond, "Data": [] if data is None else list(data)}, - attrs={"summarize": summarize}, - ) - - return op - - # (TODO: Mine) There exists dependency. It will be removed later. class BlockGuard: """ @@ -1215,7 +1063,7 @@ class While: cond = paddle.less_than(x=i, y=loop_len) while_op = fluid.layers.While(cond=cond) with while_op.block(): - i = fluid.layers.increment(x=i, value=1, in_place=True) + i = paddle.increment(x=i, value=1) paddle.assign(paddle.less_than(x=i, y=loop_len), cond) exe = fluid.Executor(fluid.CPUPlace()) @@ -1232,6 +1080,7 @@ class While: import paddle.fluid as fluid import numpy as np + paddle.enable_static() i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) one = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1) @@ -1243,7 +1092,7 @@ class While: with while_op.block(): sums_tensor = fluid.layers.elementwise_add(x=data, y=data) fluid.layers.assign(sums_tensor, sums) # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign - i = fluid.layers.increment(x=i, value=1, in_place=True) + i = paddle.increment(x=i, value=1) data = fluid.layers.elementwise_add(x=data, y=one) paddle.assign(paddle.less_than(x=i, y=loop_len), cond) @@ -1513,47 +1362,6 @@ def _deal_with_undefined_var(output_vars, loop_vars): return results -def increment(x, value=1.0, in_place=True): - """ - The OP is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`. - Notice that the number of elements in :attr:`x` must be equal to 1. - - Parameters: - x (Variable): A tensor that must always contain only one element, its data type supports - float32, float64, int32 and int64. 
- value (float, optional): The amount to increment the data of :attr:`x`. Default: 1.0. - in_place (bool, optional): Whether the OP should be performed in-place. Default: True. - - Returns: - Variable: The elementwise-incremented tensor with the same shape and data type as :attr:`x`. - - Examples: - .. code-block:: python - - import paddle.fluid as fluid - counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.] - fluid.layers.increment(counter) # [1.] - """ - if in_dygraph_mode(): - return _C_ops.increment_(x, value) - - check_variable_and_dtype( - x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment' - ) - helper = LayerHelper("increment", **locals()) - if not in_place: - out = helper.create_variable_for_type_inference(dtype=x.dtype) - else: - out = x - helper.append_op( - type='increment', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={'step': float(value)}, - ) - return out - - def array_write(x, i, array=None): """ This OP writes the input ``x`` into the i-th position of the ``array`` @@ -1936,315 +1744,6 @@ class ConditionalBlock: self.helper.main_program._sync_with_cpp() -def copy_var_to_parent_block(var, layer_helper): - if not isinstance(var, Variable): - return var - prog = layer_helper.main_program - parent_idx = prog.current_block().parent_idx - assert ( - parent_idx >= 0 - ), "Got wrong parent block index when assigning var to parent scope in control_flow" - parent_block = prog.block(parent_idx) - - if ( - var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY - and parent_block._find_var_recursive(var.name) - ): - parent_block_var = var - else: - parent_block_var = parent_block.create_var( - dtype=var.dtype, shape=var.shape, type=var.type - ) - assign(var, parent_block_var) - return parent_block_var - - -def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): - """ - This API returns ``true_fn()`` if the predicate ``pred`` is true else - ``false_fn()`` . Users could also set ``true_fn`` or ``false_fn`` to - ``None`` if do nothing and this API will treat the callable simply returns - ``None`` in this case. - - ``true_fn`` and ``false_fn`` should return same nest structure of tensors - or both return ``None`` if user doens't like to return anything. A nest - structure of tensors in PaddlePaddle is tensor(s), or tuple of tensors, or - list of tensors. - - Note: - 1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must have - the same shape because of dataflow model of PaddlePaddle while the - tensors in the tuples or the lists can have different shapes. - - 2. This API could be used under both static mode or dygraph mode. If it - is in dygraph mode, the API only runs one branch based on condition. - - 3. If it is in static mode, any tensors or operations created outside - or inside of ``true_fn`` and ``false_fn`` will be in net building - regardless of which branch is selected at runtime. This has frequently - surprised users who expected a lazy semantics. For example: - - .. code-block:: python - - import paddle - - a = paddle.zeros((1, 1)) - b = paddle.zeros((1, 1)) - c = a * b - out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b) - - No matter whether ``a < b`` , ``c = a * b`` will be in net building and - run. ``a + c`` and ``b * b`` will be in net building, but only one - branch will be executed during runtime. - - Args: - pred(Tensor): A boolean tensor whose numel should be 1. The boolean - value determines whether to return the result of ``true_fn`` or - ``false_fn`` . 
- true_fn(callable, optional): A callable to be performed if ``pred`` is - true. The default value is ``None`` . - false_fn(callable, optional): A callable to be performed if ``pred`` is - false. The default value is ``None`` . - name(str, optional): The default value is ``None`` . Normally users - don't have to set this parameter. For more information, please - refer to :ref:`api_guide_Name` . - return_names(sequence of string, optional): The default value is ``None`` . - Normally users don't have to set this parameters. A sequence of strings - to represents the name of returned vars. The structure of sequence must - be same with return values of true_fn and false_fn. - - Returns: - Tensor|list(Tensor)|tuple(Tensor): returns ``true_fn()`` if the - predicate ``pred`` is true else ``false_fn()`` . - - Raises: - TypeError: if ``true_fn`` or ``false_fn`` is not callable. - ValueError: if ``true_fn`` and ``false_fn`` don't return the same nest - structure of tensors. - - Examples: - .. code-block:: python - - import paddle - - # - # pseudocode: - # if 0.1 < 0.23: - # return 1, True - # else: - # return 3, 2 - # - - def true_func(): - return paddle.full(shape=[1, 2], dtype='int32', - fill_value=1), paddle.full(shape=[2, 3], - dtype='bool', - fill_value=True) - - - def false_func(): - return paddle.full(shape=[3, 4], dtype='float32', - fill_value=3), paddle.full(shape=[4, 5], - dtype='int64', - fill_value=2) - - - x = paddle.full(shape=[1], dtype='float32', fill_value=0.1) - y = paddle.full(shape=[1], dtype='float32', fill_value=0.23) - pred = paddle.less_than(x=x, y=y, name=None) - ret = paddle.static.nn.cond(pred, true_func, false_func) - # ret is a tuple containing 2 tensors - # ret[0] = [[1 1]] - # ret[1] = [[ True True True] - # [ True True True]] - - """ - if _non_static_mode(): - assert isinstance(pred, Variable), "The pred in cond must be Variable" - assert pred.size == 1, "condition input's numel should be 1" - pred = pred.numpy()[0] - if pred: - if true_fn is not None: - if not callable(true_fn): - raise TypeError( - "The true_fn in cond must be callable, but received {}".format( - type(true_fn).__name__ - ) - ) - return true_fn() - else: - if false_fn is not None: - if not callable(false_fn): - raise TypeError( - "The false_fn in cond must be callable, but received {}".format( - type(false_fn).__name__ - ) - ) - return false_fn() - return None - - check_variable_and_dtype(pred, "pred", ['bool'], "fluid.layers.cond") - check_type(name, "name", (str, type(None)), "fluid.layers.cond") - helper = LayerHelper('cond', **locals()) - true_output = None - false_output = None - copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper) - if true_fn is not None: - if not callable(true_fn): - raise TypeError( - "The true_fn in cond must be callable, but received {}".format( - type(true_fn).__name__ - ) - ) - true_cond_block = ConditionalBlock([pred], is_scalar_condition=True) - with true_cond_block.block(): - origin_true_output = true_fn() - if origin_true_output is not None: - true_output = map_structure( - copy_to_parent_func, origin_true_output - ) - if false_fn is not None: - if not callable(false_fn): - raise TypeError( - "The false_fn in cond must be callable, but received {}".format( - type(false_fn).__name__ - ) - ) - false_cond_block = ConditionalBlock( - [paddle.logical_not(pred)], is_scalar_condition=True - ) - with false_cond_block.block(): - origin_false_output = false_fn() - if origin_false_output is not None: - false_output = map_structure( - copy_to_parent_func, 
origin_false_output - ) - - if true_output is None and false_output is None: - return None - - if true_output is None: - raise ValueError( - "Incompatible return values of true_fn and false_fn in cond: " - "true_fn returns None while false_fn returns non-None" - ) - if false_output is None: - raise ValueError( - "Incompatible return values of true_fn and false_fn in cond: " - "true_fn returns non-None while false_fn returns None" - ) - - # Merge true and false output if they are not None - if return_names is None: - is_dy2staic = False - return_names = ["no name"] * len(_to_sequence_except_dict(true_output)) - else: - """ - dy2static will set the return_names and expand the return values to UndefinedVar. - """ - is_dy2staic = True - - # TODO: expand_undefined_var will replace None to Undefinedvar(), to fix cases like: - # a = None - # if condition: - # a = 1 - # Because we can not use variable to express 'None' - true_output, false_output = expand_undefined_var( - true_output, false_output, return_names - ) - - if len(_to_sequence_except_dict(true_output)) != len( - _to_sequence_except_dict(false_output) - ): - raise ValueError( - "true fn returns {} vars, but false fn returns {} vars, which is not equals".format( - len(_to_sequence_except_dict(true_output)), - len(_to_sequence_except_dict(false_output)), - ) - ) - for true_out, false_out, return_name in zip( - _to_sequence_except_dict(true_output), - _to_sequence_except_dict(false_output), - _to_sequence_except_dict(return_names), - ): - try: - assert_same_structure(true_out, false_out, check_types=False) - except ValueError as e: - raise ValueError( - "Incompatible return values of `{}` in true_fn and false_fn in cond: {}".format( - return_name, e - ) - ) - - def check_ret_none(seq_true, seq_false, seq_names): - for f_true, f_false, f_name in zip(seq_true, seq_false, seq_names): - f_true = flatten(f_true) - f_false = flatten(f_false) - for idx in range(len(f_true)): - if ( - f_true[idx] is None - and f_false[idx] is not None - or f_false[idx] is None - and f_true[idx] is not None - ): - warnings.warn( - "In cond : Var '{}' or part of it is set differently in ifelse branchs, " - "<{}, {}> in true branch and <{}, {}> in false branch. 
Set var to " - "'None' in ifelse block might lead to error.".format( - f_name, - type(f_true[idx]), - f_true[idx], - type(f_false[idx]), - f_false[idx], - ) - ) - - check_ret_none( - _to_sequence_except_dict(true_output), - _to_sequence_except_dict(false_output), - _to_sequence_except_dict(return_names), - ) - - if is_dy2staic: - true_output, false_output = change_none_to_undefinedvar( - true_output, false_output - ) - - mask = cast(pred, dtype='int32') - merge_func = ( - lambda name, false_var, true_var: select_input_with_buildin_type( - [false_var, true_var], mask, name - ) - ) - - def merge_every_var_list(false_vars, true_vars, name): - return map_structure(partial(merge_func, name), false_vars, true_vars) - - merged_output = list( - map( - merge_every_var_list, - _to_sequence_except_dict(false_output), - _to_sequence_except_dict(true_output), - _to_sequence_except_dict(return_names), - ) - ) - merged_output = pack_sequence_as(false_output, flatten(merged_output)) - return merged_output - - -def change_none_to_undefinedvar(nest1, nest2): - from paddle.jit.dy2static.utils import UndefinedVar - - def map_fn(x): - if x is None: - return UndefinedVar("padding") - return x - - nest1_out = pack_sequence_as(nest1, list(map(map_fn, flatten(nest1)))) - nest2_out = pack_sequence_as(nest2, list(map(map_fn, flatten(nest2)))) - return nest1_out, nest2_out - - def _to_sequence_except_dict(x): """ In this function, dict is not viewed as sequence. diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py index 31dfc905a6952c4a3ee94e539d32528903dd1378..48c8c4744147975c2920b2c1c27ed5fa89b9de43 100644 --- a/python/paddle/fluid/layers/rnn.py +++ b/python/paddle/fluid/layers/rnn.py @@ -901,7 +901,7 @@ def _dynamic_decode_imperative( next_sequence_lengths, ) - control_flow.increment(x=step_idx_tensor, value=1.0, in_place=True) + paddle.increment(x=step_idx_tensor, value=1.0) step_idx += 1 cond = paddle.logical_not(paddle.all(finished)) @@ -1060,7 +1060,8 @@ def _dynamic_decode_declarative( outputs, outputs_arrays, ) - control_flow.increment(x=step_idx, value=1.0, in_place=True) + + paddle.increment(x=step_idx, value=1.0) # update the global_finished first, since it might be also in states of # decoder, which otherwise would write a stale finished status to array tensor.assign(next_finished, global_finished) diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 42c57193941f30a5b07e94230bc7e389358aa9d6..2bf2c4542b6808fcb73293dcdd93cf008ec4b45f 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -7300,7 +7300,7 @@ class LookaheadOptimizer: dtype='int32', persistable=True, ) - layers.increment(x=step, value=1.0, in_place=True) + paddle.increment(x=step, value=1.0) # lookahead zero_var = layers.fill_constant( @@ -7534,7 +7534,7 @@ class GradientMergeOptimizer: with device_guard("cpu"): # step_var = (step_var + 1) % k_step - layers.increment(x=step_var, value=1.0, in_place=True) + paddle.increment(x=step_var, value=1.0) main_block.append_op( type='elementwise_mod', inputs={'X': step_var, 'Y': k_step_var}, @@ -7664,7 +7664,7 @@ class GradientMergeOptimizer: ) # step3. 
apply gradient - layers.cond(cond, true_fn=true_apply_gradient, false_fn=None) + paddle.static.nn.cond(cond, true_fn=true_apply_gradient, false_fn=None) return self._optimize_ops diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py index 83eb2ae8aad8c8cd9d599075f6e1b2cd4e6aad29..8b0f746aa657698b79e4a2558b872fc576a02903 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py @@ -189,7 +189,7 @@ def get_program(): cur_pred = mlp_while(pre_input) # Update the loop condition - i = fluid.layers.increment(x=i, value=1, in_place=True) + i = paddle.increment(x=i, value=1) fluid.layers.array_write(cur_pred, array=input_array, i=i) paddle.assign(paddle.less_than(x=i, y=loop_len), cond) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py index b4d1cbca4cff04f060f6feb8cf53adef795c9ae2..44bcbb6e709086fe16ae9b0837693a60924789eb 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py @@ -91,7 +91,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase): with while_op.block(): with paddle.fluid.device_guard(f'{device}:all'): input = layers.array_read(array=data, i=step_idx) - layers.increment(x=step_idx, value=1.0, in_place=True) + paddle.increment(x=step_idx, value=1.0) layers.array_write(input, i=step_idx, array=data) with paddle.fluid.device_guard(f'{device}:0'): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py index c352bc71f7315d2df207b65c14a9ccf2750932b1..f0ca20d3df735190c7b4b37c81daf53205d5e8e0 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py @@ -89,7 +89,7 @@ def dyfunc_with_if_else3(x): m = x + 2 n = x + 3 return q, x, y, z - q, x, y, z = fluid.layers.cond(paddle.mean(x)[0] < 5, lambda : + q, x, y, z = paddle.static.nn.cond(paddle.mean(x)[0] < 5, lambda : paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y), lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q, x, y)) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py index aa5fa35d9c1d47a1cf8fb57de4422b2b0fb2e85f..2c882fc332e706bd1f407d8650a0d25e89fd0486 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_dict.py @@ -97,7 +97,7 @@ class MainNetWithDict(fluid.dygraph.Layer): ), } # TODO(Aurelius84): The following code will be converted into: - # max_len = layers.cond(paddle.shape(input)[0] != max_len, + # max_len = paddle.static.nn.cond(paddle.shape(input)[0] != max_len, # lambda: paddle.shape(input)[0], lambda: max_len) # But max_len should be wrapped into tensor, which is not supported. 
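Note: every increment hunk in this patch applies the same mechanical rewrite: the removed fluid.layers.increment(x, value, in_place=True) call becomes paddle.increment(x, value). A minimal sketch of the new call, assuming static-graph mode and a one-element counter tensor (variable names are illustrative only):

    import paddle

    paddle.enable_static()

    # increment requires a tensor containing exactly one element
    i = paddle.zeros(shape=[1], dtype='int64')
    # old: i = fluid.layers.increment(x=i, value=1, in_place=True)
    i = paddle.increment(x=i, value=1)  # i now holds [1]
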
diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py index edb3a3b9d3829277f8f3a9e44c5e460e082ed981..13b95d0b31cd7634fd44bf048fe60a034680a50a 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_ifelse.py @@ -154,7 +154,7 @@ def dyfunc_ifExp_with_while(x): def body(i, ten, y): # It will be converted into `layers.cond` as followed. - # map_func(lambda x: fluid.layers.cond(i==0, lambda: x, lambda: add_fn(x), y) + # map_func(lambda x: paddle.static.nn.cond(i==0, lambda: x, lambda: add_fn(x), y) y = map_func(lambda x: x if (i == 0) is not None else add_fn(x), y) i += 1 return [i, ten, y] @@ -183,7 +183,7 @@ def dyfunc_ifExp(x): i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) # It will be converted into `layers.cond` as followed. - # map_func(lambda x: fluid.layers.cond(i==1, lambda: x, lambda: add_fn(x), y) + # map_func(lambda x: paddle.static.nn.cond(i==1, lambda: x, lambda: add_fn(x), y) # `if (Tensor) == 1` is supported in dygraph. y = map_func(lambda x: x if i == 1 else add_fn(x), y) return y[0] diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py index 3e3382b47a09f4a3602d90be689c191bae82647d..4d2a70ee1cf4b15a08e24450e2b8b84c8eeabf4f 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py @@ -83,7 +83,7 @@ def while_loop_dyfunc_with_none(x): def for_loop_dyfunc(max_len): for i in range(max_len): ret = fluid.layers.zeros(shape=[1], dtype='float32') - fluid.layers.increment(ret, value=2.0, in_place=True) + paddle.increment(ret, value=2.0) return ret @@ -104,14 +104,14 @@ def for_loop_dyfunc2(max_len): def for_loop_dyfunc3(max_len): ret = fluid.layers.zeros(shape=[1], dtype='float32') for i in range(1, 10, 2): - fluid.layers.increment(ret, value=2.0, in_place=True) + paddle.increment(ret, value=2.0) return ret def for_loop_dyfunc4(max_len): ret = fluid.layers.zeros(shape=[1], dtype='float32') for i in range(10, 1, -2): - fluid.layers.increment(ret, value=2.0, in_place=True) + paddle.increment(ret, value=2.0) return ret @@ -119,7 +119,7 @@ def for_loop_dyfunc_not_support(max_len): ret = fluid.layers.zeros(shape=[1], dtype='float32') a = -2 for i in range(10, 1, a): - fluid.layers.increment(ret, value=2.0, in_place=True) + paddle.increment(ret, value=2.0) return ret diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py index e74e06f8f9b682c49c5fc0bd91b50fa4a892a7f5..3ba8caea85099b4dc1df639c0442f2e46f73ce83 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_warning.py @@ -16,7 +16,7 @@ import unittest import warnings import paddle -from paddle.fluid.layers.control_flow import cond +from paddle.static.nn import cond @paddle.jit.to_static diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py index e61110c7164b1acaeb1f8b191253cdb9f35f20d3..4cfc7a1faaa4f90776687edffde097ff6b506bb9 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py +++ 
b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py @@ -54,7 +54,7 @@ class TestQuantizationSubGraph(unittest.TestCase): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) pred = paddle.less_than(y, x) - out = layers.cond(pred, true_func, false_func) + out = paddle.static.nn.cond(pred, true_func, false_func) core_graph = core.Graph(main_program.desc) # We should create graph for test, otherwise it will throw a diff --git a/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py index 39f11737bce6cf9638436459531b17e5b4f70b6a..2467860bd93a7331d5adcc8b225c8625fad84ae5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_increment_op_npu.py @@ -121,7 +121,7 @@ class TestIncrementInplace(unittest.TestCase): with paddle.static.program_guard(main_prog, startup_prog): a = paddle.static.data(name="a", shape=[1], dtype='float32') - b = fluid.layers.increment(a) + b = paddle.increment(a) place = paddle.NPUPlace(NPUPlace) diff --git a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py index 17b0711e91a0f717b9dfa5f65dc7d73296332dd0..c2b5d73b76d9548bb3b8787ef62ce6576087c7c5 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py @@ -43,9 +43,9 @@ class TestWhileOp(unittest.TestCase): init = layers.zeros(shape=[10], dtype='float32') mem_array = layers.array_write(x=init, i=i) data_array = layers.array_write(x=d0, i=i) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(d1, i, array=data_array) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(d2, i, array=data_array) i = layers.zeros(shape=[1], dtype='int32') i = layers.cast(i, 'int64') @@ -71,7 +71,7 @@ class TestWhileOp(unittest.TestCase): prev = layers.array_read(array=mem_array, i=i) result = layers.sums(input=[d, prev]) - i = layers.increment(x=i, in_place=True) + i = paddle.increment(x=i) layers.array_write(result, i=i, array=mem_array) paddle.assign(paddle.less_than(x=i, y=array_len), cond) @@ -80,7 +80,7 @@ class TestWhileOp(unittest.TestCase): prev2 = layers.array_read(array=mem_array, i=j) result2 = layers.sums(input=[d2, prev2]) - j = layers.increment(x=j, in_place=True) + j = paddle.increment(x=j) layers.array_write(result2, i=j, array=mem_array) paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) sum_result = layers.array_read(array=mem_array, i=j) diff --git a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py index c989ff866e8c016e9a90c5c6e2b6a3b30d7f66ef..000f1b7d4b856140707edfbdcf2783e0c28b6e58 100644 --- a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py +++ b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py @@ -57,7 +57,7 @@ class TestCompatibility(unittest.TestCase): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) pred = paddle.less_than(x, y) - out = layers.cond(pred, true_func, false_func) + out = paddle.static.nn.cond(pred, true_func, false_func) # out is a tuple containing 2 tensors return main_program, 
startup_program, out diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py index 907bb65cfce25cc43354a30675d9e4f484448416..d60239a8ad1699e7a731c75e0baf30ca4497cf6c 100644 --- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py @@ -30,17 +30,17 @@ def _test_read_write(x): i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = False arr = layers.array_write(x=x[0], i=i) - i = layers.increment(x=i) + i = paddle.increment(x=i) arr = layers.array_write(x=x[1], i=i, array=arr) - i = layers.increment(x=i) + i = paddle.increment(x=i) arr = layers.array_write(x=x[2], i=i, array=arr) i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = False a0 = layers.array_read(array=arr, i=i) - i = layers.increment(x=i) + i = paddle.increment(x=i) a1 = layers.array_read(array=arr, i=i) - i = layers.increment(x=i) + i = paddle.increment(x=i) a2 = layers.array_read(array=arr, i=i) mean_a0 = paddle.mean(a0) diff --git a/python/paddle/fluid/tests/unittests/test_assert_op.py b/python/paddle/fluid/tests/unittests/test_assert_op.py index d59194aef56cbabfb5e1aa8dc64a128b7dd9d6d7..f62ad38d459d1a4bbb63ce27fa173bf3ff0e9913 100644 --- a/python/paddle/fluid/tests/unittests/test_assert_op.py +++ b/python/paddle/fluid/tests/unittests/test_assert_op.py @@ -17,6 +17,7 @@ import unittest import paddle import paddle.fluid as fluid import paddle.fluid.layers as layers +from paddle.static.nn.control_flow import Assert class TestAssertOp(unittest.TestCase): @@ -33,7 +34,7 @@ class TestAssertOp(unittest.TestCase): condition = layers.fill_constant( shape=[1], dtype='bool', value=True ) - layers.Assert(condition, []) + Assert(condition, []) self.run_network(net_func) @@ -42,7 +43,7 @@ class TestAssertOp(unittest.TestCase): condition = layers.fill_constant( shape=[1], dtype='bool', value=False ) - layers.Assert(condition) + Assert(condition) with self.assertRaises(ValueError): self.run_network(net_func) @@ -52,7 +53,7 @@ class TestAssertOp(unittest.TestCase): condition = layers.fill_constant( shape=[1, 2], dtype='bool', value=True ) - layers.Assert(condition, []) + Assert(condition, []) with self.assertRaises(ValueError): self.run_network(net_func) @@ -62,7 +63,7 @@ class TestAssertOp(unittest.TestCase): zero = layers.fill_constant(shape=[1], dtype='int64', value=0) one = layers.fill_constant(shape=[1], dtype='int64', value=1) condition = paddle.less_than(one, zero) # False - layers.Assert(condition, [zero, one]) + Assert(condition, [zero, one]) print("test_assert_print_data") with self.assertRaises(ValueError): @@ -72,7 +73,7 @@ class TestAssertOp(unittest.TestCase): def net_func(): x = layers.fill_constant(shape=[10], dtype='float32', value=2.0) condition = paddle.max(x) < 1.0 - layers.Assert(condition, (x,), 5) + Assert(condition, (x,), 5) print("test_assert_summary") with self.assertRaises(ValueError): @@ -82,7 +83,7 @@ class TestAssertOp(unittest.TestCase): def net_func(): x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0) condition = paddle.max(x) < 1.0 - layers.Assert(condition, [x], 10, name="test") + Assert(condition, [x], 10, name="test") print("test_assert_summary_greater_than_size") with self.assertRaises(ValueError): diff --git a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py index 
b5f3ada246371e38b461d507547817a63150e054..b5f20bda5842374c7e3698c10f78303acf03bdd7 100644 --- a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py @@ -64,7 +64,7 @@ def convolutional_neural_network(use_py_reader): acc = paddle.static.accuracy(input=prediction, label=label) i = fluid.layers.zeros(shape=[1], dtype='int64') array = fluid.layers.array_write(x=prediction, i=i) - fluid.layers.increment(i) + paddle.increment(i) fluid.layers.array_write(x=acc, i=i, array=array) return array, img, label, prediction, avg_loss, acc, py_reader diff --git a/python/paddle/fluid/tests/unittests/test_cond.py b/python/paddle/fluid/tests/unittests/test_cond.py index bc5a73d048dfad3b5ee5c2136dd1d8a96798a3e5..3176ace0a381366b138cdaa99f04602706bd82ec 100644 --- a/python/paddle/fluid/tests/unittests/test_cond.py +++ b/python/paddle/fluid/tests/unittests/test_cond.py @@ -54,7 +54,7 @@ class TestCondInputOutput(unittest.TestCase): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) pred = paddle.less_than(y, x) - out = layers.cond(pred, true_func, false_func) + out = paddle.static.nn.cond(pred, true_func, false_func) # out is one tensor place = ( @@ -94,7 +94,7 @@ class TestCondInputOutput(unittest.TestCase): startup_program = Program() with program_guard(main_program, startup_program): pred = layers.fill_constant(shape=[1], dtype='bool', value=True) - out = layers.cond(pred, true_func, false_func) + out = paddle.static.nn.cond(pred, true_func, false_func) # out is a tuple containing 2 tensors place = ( @@ -138,7 +138,7 @@ class TestCondInputOutput(unittest.TestCase): a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7) i = fluid.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 - a = layers.cond( + a = paddle.static.nn.cond( pred, lambda: true_func(a, i), lambda: false_func(a, i) ) place = ( @@ -183,9 +183,9 @@ class TestCondInputOutput(unittest.TestCase): with program_guard(main_program, startup_program): i = fluid.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 - out1 = layers.cond(pred, true_func, false_func) - out2 = layers.cond(pred, None, false_func) - out3 = layers.cond(pred, true_func, None) + out1 = paddle.static.nn.cond(pred, true_func, false_func) + out2 = paddle.static.nn.cond(pred, None, false_func) + out3 = paddle.static.nn.cond(pred, true_func, None) place = ( fluid.CUDAPlace(0) if core.is_compiled_with_cuda() @@ -223,13 +223,15 @@ class TestCondInputOutput(unittest.TestCase): i = fluid.data(name="i", shape=[1], dtype='int32') pred = (i % 2) == 0 with self.assertRaises(TypeError): - out = layers.cond(pred, i, func_return_one_tensor) + out = paddle.static.nn.cond(pred, i, func_return_one_tensor) with self.assertRaises(TypeError): - out = layers.cond(pred, func_return_one_tensor, np.asarray([3])) + out = paddle.static.nn.cond( + pred, func_return_one_tensor, np.asarray([3]) + ) with self.assertRaises(Exception) as e: - out = layers.cond( + out = paddle.static.nn.cond( pred, func_return_none, func_return_one_tensor ) self.assertTrue( @@ -238,7 +240,7 @@ class TestCondInputOutput(unittest.TestCase): ) with self.assertRaises(Exception) as e: - out = layers.cond( + out = paddle.static.nn.cond( pred, func_return_two_tensors, func_return_none ) self.assertTrue( @@ -247,7 +249,7 @@ class TestCondInputOutput(unittest.TestCase): ) with self.assertRaises(Exception) as e: - out = layers.cond( + 
out = paddle.static.nn.cond( pred, func_return_one_tensor, func_return_two_tensors ) self.assertTrue( @@ -268,7 +270,7 @@ class TestCondInputOutput(unittest.TestCase): shape=[1], dtype='float32', value=1.25 ) b.stop_gradient = False - out = layers.cond(a - b < -1.0, lambda: a, lambda: b) + out = paddle.static.nn.cond(a - b < -1.0, lambda: a, lambda: b) append_backward(out) place = ( @@ -308,14 +310,14 @@ class TestCondNestedControlFlow(unittest.TestCase): paddle.enable_static() def less_than_branch(i, a): - return layers.cond( + return paddle.static.nn.cond( i >= 3.0, lambda: paddle.add(a, a), lambda: paddle.subtract(a, a), ) def greater_equal_branch(i, a): - return layers.cond( + return paddle.static.nn.cond( i < 8.0, lambda: paddle.multiply(a, a), lambda: paddle.divide(a, a), @@ -326,7 +328,7 @@ class TestCondNestedControlFlow(unittest.TestCase): with program_guard(main_program, startup_program): i = fluid.data(name="i", shape=[1], dtype='float32') a = 2.0 * i - out = layers.cond( + out = paddle.static.nn.cond( i < 5.0, lambda: less_than_branch(i, a), lambda: greater_equal_branch(i, a), @@ -370,14 +372,14 @@ class TestCondNestedControlFlow(unittest.TestCase): shape=[1], dtype='float32', value=1.24 ) b.stop_gradient = False - out = fluid.layers.cond( + out = paddle.static.nn.cond( a < b, - lambda: fluid.layers.cond( + lambda: paddle.static.nn.cond( a - b < -1.0, lambda: paddle.add(a, b), lambda: paddle.multiply(a, b), ), - lambda: fluid.layers.cond( + lambda: paddle.static.nn.cond( a == b, lambda: paddle.subtract(a, b), lambda: paddle.pow(a, b), @@ -550,7 +552,7 @@ class TestCondBackward(unittest.TestCase): def cond_func(i, img, label): predicate = (i % 2) == 0 - return layers.cond( + return paddle.static.nn.cond( predicate, lambda: simple_fc_net_with_inputs(img, label, class_num=10), lambda: batchnorm_fc_with_inputs(img, label, class_num=10), @@ -574,19 +576,19 @@ class TestCondBackward(unittest.TestCase): paddle.enable_static() def branch(i, img, label): - return layers.cond( + return paddle.static.nn.cond( (i % 2) == 0, lambda: simple_fc_net_with_inputs(img, label, class_num=10), lambda: batchnorm_fc_with_inputs(img, label, class_num=10), ) def cond_func_simple_net_at_true(i, img, label): - return layers.cond( + return paddle.static.nn.cond( i < 5, lambda: branch(i, img, label), lambda: paddle.mean(img) ) def cond_func_simple_net_at_false(i, img, label): - return layers.cond( + return paddle.static.nn.cond( i < 5, lambda: paddle.mean(img), lambda: branch(i, img, label) ) @@ -626,14 +628,14 @@ class TestCondBackward(unittest.TestCase): predicate = (i % 2) == 0 else: predicate = (i % 2) != 0 - return layers.cond( + return paddle.static.nn.cond( predicate, lambda: simple_fc_net_with_inputs(img, label, class_num=10), lambda: batchnorm_fc_with_inputs(img, label, class_num=10), ) def cond_func(i, img, label): - return layers.cond( + return paddle.static.nn.cond( i < 5, lambda: branch(i, img, label, True), lambda: branch(i, img, label, False), @@ -665,16 +667,16 @@ class TestCondWithError(unittest.TestCase): return pred with self.assertRaises(TypeError): - layers.cond(None, func, func) + paddle.static.nn.cond(None, func, func) with self.assertRaises(TypeError): - layers.cond(pred, func, set()) + paddle.static.nn.cond(pred, func, set()) with self.assertRaises(TypeError): - layers.cond(pred, set(), func) + paddle.static.nn.cond(pred, set(), func) with self.assertRaises(TypeError): - layers.cond(pred, func, func, set()) + paddle.static.nn.cond(pred, func, func, set()) class 
TestCondWithDict(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py index 52ee114ae8383d1d61f11b3fd006c0e16ed26a0b..3e1881dda4b043879b4a278b8b2d012eab0a27d3 100644 --- a/python/paddle/fluid/tests/unittests/test_desc_clone.py +++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py @@ -228,7 +228,7 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase): hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6) return hidden2 - hidden2 = fluid.layers.cond(cond, true_fn, false_fn) + hidden2 = paddle.static.nn.cond(cond, true_fn, false_fn) loss = paddle.nn.functional.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), @@ -271,7 +271,7 @@ class TestCloneWithRaise(unittest.TestCase): hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6) return hidden2 - hidden2 = fluid.layers.cond(cond, true_fn, false_fn) + hidden2 = paddle.static.nn.cond(cond, true_fn, false_fn) loss = paddle.nn.functional.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), label=fluid.layers.data(name='label', shape=[1], dtype='int64'), diff --git a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py index f4071ac4149c936afd1d5439216fa269004d9ca2..0fae89509808318a111e7ad0dc0ba8a09a3f4220 100644 --- a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py @@ -53,7 +53,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): topk_coordinates = paddle.stack([batch_pos, indices], axis=2) topk_coordinates.stop_gradient = stop_gradient score = paddle.gather_nd(x, topk_coordinates) - layers.increment(x=step_idx, value=1.0, in_place=True) + paddle.increment(x=step_idx, value=1.0) layers.array_write(score, i=step_idx, array=scores) length_cond = paddle.less_than(x=step_idx, y=max_len) layers.assign(length_cond, cond) diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py index 13704cb6105c2811f0bf717cd99154b449170096..ce23dcb54e5b0e169a67319e711fb59dfd4a5eab 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py @@ -83,10 +83,10 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): mem_array = layers.array_write(x=init, i=i) data_array = layers.array_write(x=d0, i=i) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(d1, i, array=data_array) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(d2, i, array=data_array) i = layers.zeros(shape=[1], dtype='int64') @@ -112,7 +112,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): prev = paddle.reshape(prev, shape=[10]) result = layers.sums(input=[d, prev]) - i = layers.increment(x=i, in_place=True) + i = paddle.increment(x=i) layers.array_write(result, i=i, array=mem_array) paddle.assign(paddle.less_than(x=i, y=array_len), cond) with while_op2.block(): @@ -122,7 +122,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): prev2 = paddle.reshape(prev2, shape=[10]) result2 = layers.sums(input=[d2, prev2]) - j = layers.increment(x=j, in_place=True) + j = paddle.increment(x=j) layers.array_write(result2, i=j, array=mem_array) paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) diff --git 
a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py index 639f84295b24b75cedc8884324de50860ee35996..bf54cef74b9881d8b4d024fd4110c7b3d14f8b56 100644 --- a/python/paddle/fluid/tests/unittests/test_executor_and_mul.py +++ b/python/paddle/fluid/tests/unittests/test_executor_and_mul.py @@ -16,8 +16,9 @@ import unittest import numpy as np +import paddle from paddle.fluid.executor import Executor -from paddle.fluid.layers import array_write, data, increment, mul, zeros +from paddle.fluid.layers import array_write, data, mul, zeros class TestExecutor(unittest.TestCase): @@ -26,13 +27,13 @@ class TestExecutor(unittest.TestCase): a = data(name='a', shape=[784], dtype='float32') array = array_write(x=a, i=i) - i = increment(i) + i = paddle.increment(i) b = data( name='b', shape=[784, 100], dtype='float32', append_batch_size=False ) array_write(x=b, i=i, array=array) - i = increment(i) + i = paddle.increment(i) out = mul(x=a, y=b) array_write(x=out, i=i, array=array) diff --git a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py index b2869d88882375c4c81748663ddb851cd5074195..a55ecbad35b405dd81d09ef334b6b22747823157 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_lod_tensor_array.py @@ -18,6 +18,7 @@ import unittest import numpy as np from simple_nets import simple_fc_net, simple_fc_net_with_inputs +import paddle import paddle.fluid as fluid import paddle.fluid.layers as layers @@ -35,9 +36,9 @@ class TestFetchLoDTensorArray(unittest.TestCase): opt.minimize(loss) array = layers.array_write(x=img, i=i) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(x=label, i=i, array=array) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(x=loss, i=i, array=array) return loss, array diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 83cec6d60443fad89d8f5d586c41904099d6d161..121a8f8b9d47f12481fba9aa8bb79e374158b031 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -1579,7 +1579,7 @@ class TestLayer(LayerTest): b = fluid.layers.fill_constant( shape=[1], dtype='float32', value=0.23 ) - out = fluid.layers.cond( + out = paddle.static.nn.cond( a >= b, lambda: greater_equal_branch(a, b), lambda: less_than_branch(a, b), @@ -1599,12 +1599,12 @@ class TestLayer(LayerTest): b = fluid.dygraph.to_variable( np.array([0.23]).astype('float32') ) - out = layers.cond( + out = paddle.static.nn.cond( a < b, lambda: less_than_branch(a, b), lambda: greater_equal_branch(a, b), ) - out2 = layers.cond( + out2 = paddle.static.nn.cond( a >= b, lambda: greater_equal_branch(a, b), lambda: less_than_branch(a, b), @@ -1615,18 +1615,18 @@ class TestLayer(LayerTest): eager_dynamic_res, eager_dynamic_res2 ) with self.assertRaises(TypeError): - layers.cond(a < b, 'str', 'str') + paddle.static.nn.cond(a < b, 'str', 'str') with self.assertRaises(TypeError): - layers.cond(a >= b, 'str', 'str') + paddle.static.nn.cond(a >= b, 'str', 'str') a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32')) b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32')) - out = layers.cond( + out = paddle.static.nn.cond( a < b, lambda: less_than_branch(a, b), lambda: greater_equal_branch(a, b), ) - out2 = layers.cond( + out2 = 
paddle.static.nn.cond( a >= b, lambda: greater_equal_branch(a, b), lambda: less_than_branch(a, b), @@ -1635,9 +1635,9 @@ class TestLayer(LayerTest): dynamic_res2 = out2.numpy() np.testing.assert_array_equal(dynamic_res, dynamic_res2) with self.assertRaises(TypeError): - layers.cond(a < b, 'str', 'str') + paddle.static.nn.cond(a < b, 'str', 'str') with self.assertRaises(TypeError): - layers.cond(a >= b, 'str', 'str') + paddle.static.nn.cond(a >= b, 'str', 'str') np.testing.assert_array_equal(static_res, dynamic_res) np.testing.assert_array_equal(static_res, eager_dynamic_res) diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py index fb3b342131beec945ac517b08d9ae070a83c681a..41cd39088f379a000afdf20a556545b93950d224 100644 --- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py +++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py @@ -237,7 +237,7 @@ class TestMathOpPatches(unittest.TestCase): one = paddle.ones(shape=[1], dtype='int32') zero = fluid.layers.zeros(shape=[1], dtype='int32') cond = one == zero - c = fluid.layers.cond(cond, lambda: a + b, lambda: a - b) + c = paddle.static.nn.cond(cond, lambda: a + b, lambda: a - b) place = fluid.CPUPlace() exe = fluid.Executor(place) diff --git a/python/paddle/fluid/tests/unittests/test_optimizer_grad.py b/python/paddle/fluid/tests/unittests/test_optimizer_grad.py index e20d563ebdc03aef2aa3f930ed86d22d401a53da..99c4d79bb3168dd49aa4b5fa9c4db7ce4cce60a3 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer_grad.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer_grad.py @@ -115,7 +115,7 @@ class SimpleNetWithCond: return cond_res cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32')) - sum_cond = fluid.layers.cond(cond_i > 1.0, cond_true, cond_false) + sum_cond = paddle.static.nn.cond(cond_i > 1.0, cond_true, cond_false) sum_all = paddle.add_n([sum_xy, sub_yz, sum_cond]) mean_out = paddle.mean(sum_all) if use_bf16: diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index 62e5acf3992c3582ab09474ce0ad1fa638442f79..bf4fa80a1246f32cf1ee0990681eb058eeb4face 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -50,7 +50,7 @@ class TestProfiler(unittest.TestCase): with while_op.block(): hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu') layers.array_write(hidden_n, i, data_arr) - fluid.layers.increment(x=counter, value=1, in_place=True) + paddle.increment(x=counter, value=1) paddle.assign(paddle.less_than(x=counter, y=until), cond) hidden_n = layers.array_read(data_arr, i) diff --git a/python/paddle/fluid/tests/unittests/test_program_code.py b/python/paddle/fluid/tests/unittests/test_program_code.py index e60706794f5b1cb626cc655e8801e9aaf7f90e4f..3ecd2619c15fcfea026ad019824d7fe28ed644ac 100644 --- a/python/paddle/fluid/tests/unittests/test_program_code.py +++ b/python/paddle/fluid/tests/unittests/test_program_code.py @@ -46,7 +46,7 @@ class TestProgramToReadableCode(unittest.TestCase): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) pred = paddle.less_than(y, x) - out = layers.cond(pred, true_func, false_func) + out = paddle.static.nn.cond(pred, true_func, false_func) def test_program_code(self): self.var._to_readable_code() diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py 
b/python/paddle/fluid/tests/unittests/test_while_loop_op.py index 3bee6eef639501497f5883430bad28276e135648..533429d5b0fc8f11c943e998ff3998d9d12c0f4f 100644 --- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py @@ -59,7 +59,7 @@ class TestApiWhileLoop(unittest.TestCase): def body(i, mem): mem = paddle.add(x=mem, y=one) - i = layers.increment(i) + i = paddle.increment(i) return [i, mem] main_program = Program() @@ -100,7 +100,7 @@ class TestApiWhileLoop(unittest.TestCase): test_list_dict[0]["test_key"] ) - i = layers.increment(i) + i = paddle.increment(i) return [i, ten, test_dict, test_list, test_list_dict] main_program = Program() @@ -174,7 +174,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase): def internal_body(j, init, sums): init = paddle.add(x=init, y=ones) sums = paddle.add(x=init, y=sums) - j = layers.increment(j) + j = paddle.increment(j) return [j, init, sums] result = paddle.static.nn.while_loop( @@ -184,7 +184,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase): init = result[1] sums = result[2] sums = paddle.add(x=init, y=sums) - i = layers.increment(i) + i = paddle.increment(i) return [i, j, init, sums] main_program = Program() @@ -229,7 +229,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase): def body(i, x): x = paddle.multiply(x=i, y=i) - i = layers.increment(i) + i = paddle.increment(i) return [i, x] main_program = Program() @@ -324,7 +324,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): inner_prev = layers.array_read(array=mem_array, i=j) inner_sum_0 = paddle.add(x=inner_data, y=inner_prev) inner_sum_1 = paddle.add(x=x, y=inner_sum_0) - j = layers.increment(x=j, in_place=True) + j = paddle.increment(x=j) layers.array_write(inner_sum_1, i=j, array=mem_array) return [j, x, mem_array] @@ -332,7 +332,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): outer_prev = layers.array_read(array=mem_array, i=i) outer_sum_0 = paddle.add(x=outer_data, y=outer_prev) outer_sum_1 = paddle.add(x=x, y=outer_sum_0) - i = layers.increment(x=i, in_place=True) + i = paddle.increment(x=i) layers.array_write(outer_sum_1, i=i, array=mem_array) j, x, mem_array = paddle.static.nn.while_loop( internal_cond, internal_body, [j, x, mem_array] @@ -352,9 +352,9 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): init = layers.zeros(shape=[10], dtype='float32') mem_array = layers.array_write(x=init, i=i) data_array = layers.array_write(x=d0, i=i) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(d1, i, array=data_array) - i = layers.increment(i) + i = paddle.increment(i) layers.array_write(d2, i, array=data_array) i = layers.zeros(shape=[1], dtype='int64') i.stop_gradient = True @@ -444,7 +444,7 @@ class TestApiWhileLoop_Error(unittest.TestCase): return 1 def cond_returns_not_bool_tensor(i): - return layers.increment(i) + return paddle.increment(i) def cond_returns_bool_tensor(i): return paddle.less_than(i, ten) @@ -456,14 +456,14 @@ class TestApiWhileLoop_Error(unittest.TestCase): return paddle.less_than(i, ten) def body(i): - return layers.increment(i) + return paddle.increment(i) def body_returns_error_length(i): - i = layers.increment(i) + i = paddle.increment(i) return [i, i] def body_returns_error_type(i, ten): - return layers.increment(i) + return paddle.increment(i) def cond_returns_with_mutable_dict(i, test_dict): return i > 0 @@ -472,7 +472,7 @@ class TestApiWhileLoop_Error(unittest.TestCase): 
diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py
index 06b3d2d8f25543926d2477eeb775d70fdb0caed5..d9b8e521728719ab70d660bbc0d74e9c6cb4cd3c 100644
--- a/python/paddle/fluid/tests/unittests/test_while_op.py
+++ b/python/paddle/fluid/tests/unittests/test_while_op.py
@@ -42,9 +42,9 @@ class TestWhileOp(unittest.TestCase):
         init = layers.zeros(shape=[10], dtype='float32')
         mem_array = layers.array_write(x=init, i=i)
         data_array = layers.array_write(x=d0, i=i)
-        i = layers.increment(i)
+        i = paddle.increment(i)
         layers.array_write(d1, i, array=data_array)
-        i = layers.increment(i)
+        i = paddle.increment(i)
         layers.array_write(d2, i, array=data_array)
         i = layers.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
@@ -63,7 +63,7 @@
             prev = layers.array_read(array=mem_array, i=i)
             result = layers.sums(input=[d, prev])
 
-            i = layers.increment(x=i, in_place=True)
+            i = paddle.increment(x=i)
             layers.array_write(result, i=i, array=mem_array)
             paddle.assign(paddle.less_than(x=i, y=array_len), cond)
@@ -72,7 +72,7 @@
                 prev2 = layers.array_read(array=mem_array, i=j)
                 result2 = layers.sums(input=[d2, prev2])
 
-                j = layers.increment(x=j, in_place=True)
+                j = paddle.increment(x=j)
                 layers.array_write(result2, i=j, array=mem_array)
                 paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
         sum_result = layers.array_read(array=mem_array, i=j)
@@ -134,7 +134,7 @@ class BadInputTest(unittest.TestCase):
         def test_bad_x():
             x = [1, 2, 3]
-            fluid.layers.increment(x)
+            paddle.increment(x)
 
         self.assertRaises(TypeError, test_bad_x)
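The increment here is what advances the loop condition; the equivalent pattern with the public paddle.static.nn.while_loop API, sketched with hypothetical names:

import paddle

paddle.enable_static()

def cond(i, ten):
    return i < ten

def body(i, ten):
    i = paddle.increment(i)  # replaces layers.increment(i) inside loop bodies
    return [i, ten]

main = paddle.static.Program()
with paddle.static.program_guard(main):
    i = paddle.zeros(shape=[1], dtype='int64')
    ten = paddle.full(shape=[1], dtype='int64', fill_value=10)
    i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])

exe = paddle.static.Executor(paddle.CPUPlace())
(res,) = exe.run(main, fetch_list=[i])
print(res)  # [10]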
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py
index 6b2d658067bc7aa2d1e4fc9233e7b6482eb006b3..d627b8fb35d724671ac05c19cff00413a85ddc32 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py
@@ -41,9 +41,9 @@ class TestWhileOp(unittest.TestCase):
         init = layers.zeros(shape=[10], dtype='float32')
         mem_array = layers.array_write(x=init, i=i)
         data_array = layers.array_write(x=d0, i=i)
-        i = layers.increment(i)
+        i = paddle.increment(i)
         layers.array_write(d1, i, array=data_array)
-        i = layers.increment(i)
+        i = paddle.increment(i)
         layers.array_write(d2, i, array=data_array)
         i = layers.zeros(shape=[1], dtype='int64')
         i.stop_gradient = True
@@ -62,7 +62,7 @@
             prev = layers.array_read(array=mem_array, i=i)
             result = layers.sums(input=[d, prev])
 
-            i = layers.increment(x=i, in_place=True)
+            i = paddle.increment(x=i)
             layers.array_write(result, i=i, array=mem_array)
             paddle.assign(paddle.less_than(x=i, y=array_len), cond)
@@ -71,7 +71,7 @@
                 prev2 = layers.array_read(array=mem_array, i=j)
                 result2 = layers.sums(input=[d2, prev2])
 
-                j = layers.increment(x=j, in_place=True)
+                j = paddle.increment(x=j)
                 layers.array_write(result2, i=j, array=mem_array)
                 paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
         sum_result = layers.array_read(array=mem_array, i=j)
diff --git a/python/paddle/fluid/variable_index.py b/python/paddle/fluid/variable_index.py
index 31d587269db71c9fd1f80c6b02ad206873fea630..6081f3d9a3e570616eb3907184b27f999fdfdbd0 100644
--- a/python/paddle/fluid/variable_index.py
+++ b/python/paddle/fluid/variable_index.py
@@ -341,7 +341,7 @@ def get_value_for_bool_tensor(var, item):
         var_shape[0] = 0
         return paddle.empty(var_shape, dtype=var.dtype)
 
-    from .layers.control_flow import cond
+    from paddle.static.nn import cond
 
     return cond(
         item.any(), lambda: idx_not_empty(var, item), lambda: idx_empty(var)
@@ -874,7 +874,7 @@ def set_value_for_bool_tensor(var, item, value):
             out = scatter_nd_add(var, idx, gather_val_new)
             var[:] = out
 
-        from .layers.control_flow import cond
+        from paddle.static.nn import cond
 
         # If all the bool index is False, just do nothing
         cond(item.any(), lambda: idx_not_empty(var, item, value))
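The variable_index.py hunks only change where cond is imported from; the guard itself is unchanged: cond(item.any(), ...) steers an all-False boolean mask into an empty-result branch instead of indexing with an empty index set. A standalone sketch of that guard (names and values hypothetical):

import paddle
from paddle.static.nn import cond  # the import path the patch switches to

paddle.enable_static()

main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.arange(5, dtype='float32')
    mask = x > 2.0  # a bool mask that, in general, may be all False
    out = cond(
        mask.any(),
        # mask has hits: gather the selected elements
        lambda: paddle.gather(x, paddle.nonzero(mask).flatten()),
        # mask is all False: return an empty tensor of matching dtype
        lambda: paddle.empty(shape=[0], dtype=x.dtype),
    )

exe = paddle.static.Executor(paddle.CPUPlace())
(res,) = exe.run(main, fetch_list=[out])
print(res)  # [3. 4.]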
diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py
index f67e1dd1585b35846ecb47783de6ff7faa85cd7d..9e39c6df445f3ed854bb183deba538ab4cabfe46 100644
--- a/python/paddle/jit/dy2static/convert_operators.py
+++ b/python/paddle/jit/dy2static/convert_operators.py
@@ -19,7 +19,7 @@ from .variable_trans_func import (
     to_static_variable,
 )
 from paddle.fluid.framework import core, Variable
-from paddle.fluid.layers import Assert, Print
+from paddle.fluid.layers import Print
 from paddle.fluid.layers import (
     array_read,
     array_write,
@@ -33,9 +33,7 @@ from paddle.fluid.layers import (
     control_flow,
 )
 from paddle.fluid.layers.control_flow import (
-    cond,
     while_loop,
-    increment,
 )
 from .return_transformer import (
     RETURN_NO_VALUE_VAR_NAME,
@@ -395,7 +393,7 @@ def _run_paddle_cond(
         return ret
 
     try:
-        cond_outs = control_flow.cond(
+        cond_outs = paddle.static.nn.cond(
             pred, new_true_fn, new_false_fn, None, return_name_ids
         )
     except Exception as e:
@@ -734,6 +732,8 @@ def convert_assert(cond, message=""):
     if isinstance(cond, Variable):
         cond = cast(cond, "bool")
         # NOTE: message is not used because Paddle Assert has no corresponding parameter to use.
+        from paddle.static.nn.control_flow import Assert
+
         return Assert(cond)
     else:
         assert cond, message
@@ -786,7 +786,8 @@ def _run_paddle_pop(array, *args):
     def body(i, new_array):
         item = array_read(array=array, i=i)
         array_write(item, paddle.tensor.array_length(new_array), new_array)
-        i = increment(i)
+
+        i = paddle.increment(i)
         return i, new_array
 
     arr_len = paddle.tensor.array_length(array)
@@ -816,7 +817,9 @@ def _slice_tensor_array(array, start, end):
         new_array = paddle.slice(array, starts=[start], ends=[end], axes=[0])
         return new_array
 
-    new_array = cond(start == end, true_fn, lambda: false_fn(array, start, end))
+    new_array = paddle.static.nn.cond(
+        start == end, true_fn, lambda: false_fn(array, start, end)
+    )
     return new_array
diff --git a/python/paddle/static/nn/__init__.py b/python/paddle/static/nn/__init__.py
index 7d449c16be557654a9589a98bdd4c45f9d667f5d..f446769837eb6a7b1db09e8d57d8785987bd83e9 100755
--- a/python/paddle/static/nn/__init__.py
+++ b/python/paddle/static/nn/__init__.py
@@ -29,7 +29,6 @@ from .control_flow import (
 from .common import bilinear_tensor_product  # noqa: F401
 from .common import py_func  # noqa: F401
 from ...tensor.creation import create_parameter  # noqa: F401
-from ...fluid.layers import cond  # noqa: F401
 from ...fluid.layers import conv2d  # noqa: F401
 from ...fluid.layers import crf_decoding  # noqa: F401
 from ...fluid.layers import layer_norm  # noqa: F401
@@ -59,6 +58,8 @@ from ...fluid.layers.sequence_lod import sequence_scatter  # noqa: F401
 from ...fluid.layers.sequence_lod import sequence_enumerate  # noqa: F401
 from ...fluid.layers.sequence_lod import sequence_reverse  # noqa: F401
 
+from .control_flow import cond
+
 __all__ = [  # noqa
     'fc',
     'batch_norm',
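convert_assert now pulls Assert from its new home, so a bare Python assert in to_static code still lowers to the same assert op; the message argument stays unused because the op has no parameter for it. A hedged sketch of the user-visible behavior, with a hypothetical function:

import paddle

@paddle.jit.to_static
def checked_scale(x):
    # Under to_static this statement is rewritten through convert_assert(),
    # which emits the Assert op defined in paddle/static/nn/control_flow.py;
    # the message only takes effect when the code runs eagerly.
    assert paddle.mean(x) > 0, "mean must be positive"
    return x * 2

print(checked_scale(paddle.ones([3])))  # passes: mean is 1.0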
diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py
index 9374f654b4c063e431776873faf7f42054fd07fa..3b2ae2537cded061f8db3289b10a608db47a6cc8 100644
--- a/python/paddle/static/nn/control_flow.py
+++ b/python/paddle/static/nn/control_flow.py
@@ -27,16 +27,90 @@ from paddle.common_ops_import import (
 from paddle.fluid.framework import Operator, Program, Variable
 
 # Temporary solution, it will be deleted later
-from paddle.fluid.layers.control_flow import cond
+from paddle.fluid.layers.control_flow import ConditionalBlock, select_input
+from paddle.fluid.layers.tensor import assign, cast
 from paddle.fluid.layers.utils import (
     assert_same_structure,
     copy_mutable_vars,
+    flatten,
     hold_mutable_vars,
     is_sequence,
     map_structure,
+    pack_sequence_as,
+    to_sequence,
 )
 
 
+def Assert(cond, data=None, summarize=20, name=None):
+    '''
+    This API creates an op that asserts the given condition is true. If the
+    condition is false, prints the tensors in data. ``summarize`` specifies the
+    number of the elements in the tensors to print.
+
+    Args:
+        cond (Variable): The boolean condition tensor whose numel should be 1.
+        data (list|tuple, optional): list or tuple of tensors to print when
+            condition is not true. If it's ``None``, no tensor will be printed.
+            The default value is ``None``.
+        summarize (int, optional): Number of elements in the tensor to be
+            printed. If its value is -1, then all elements in the tensor will
+            be printed. The default value is 20.
+        name (str, optional): The default value is ``None`` . Normally users
+            don't have to set this parameter. For more information, please
+            refer to :ref:`api_guide_Name` .
+
+    Returns:
+        Operator: the created operation.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            from paddle.static.nn.control_flow import Assert
+
+            paddle.enable_static()
+            x = paddle.full([2, 3], 2.0, 'float32')
+            condition = paddle.max(x) < 1.0  # False
+            Assert(condition, [x], 10, "example_assert_layer")
+
+            exe = paddle.static.Executor()
+            try:
+                exe.run(paddle.static.default_main_program())
+                # Print x and throws ValueError
+                # Example printed message for x:
+                #
+                # Variable: fill_constant_0.tmp_0
+                #   - lod: {}
+                #   - place: CPUPlace()
+                #   - shape: [2, 3]
+                #   - layout: NCHW
+                #   - dtype: float
+                #   - data: [2 2 2 2 2 2]
+            except ValueError as e:
+                print("Assert Exception Example")
+
+    '''
+    check_variable_and_dtype(
+        cond, "cond", ["bool"], "static.nn.control_flow.Assert"
+    )
+    check_type(
+        data, "data", (list, tuple, type(None)), "static.nn.control_flow.Assert"
+    )
+    check_type(summarize, "summarize", int, "static.nn.control_flow.Assert")
+    check_type(name, "name", (str, type(None)), "static.nn.control_flow.Assert")
+
+    layer_name = name if name else ('assert_' + cond.name)
+    helper = LayerHelper(layer_name, **locals())
+
+    op = helper.append_op(
+        type="assert",
+        inputs={"Cond": cond, "Data": [] if data is None else list(data)},
+        attrs={"summarize": summarize},
+    )
+
+    return op
+
+
 class BlockGuard:
     """
     BlockGuard class.
@@ -795,3 +869,464 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
             final_fn = false_fn
 
     return final_fn()
+
+
+def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
+    """
+    This API returns ``true_fn()`` if the predicate ``pred`` is true, else
+    ``false_fn()`` . Users could also set ``true_fn`` or ``false_fn`` to
+    ``None`` if nothing needs to be done in that branch; this API then treats
+    the callable as simply returning ``None`` .
+
+    ``true_fn`` and ``false_fn`` should return the same nest structure of
+    tensors, or both return ``None`` if the user doesn't want to return
+    anything. A nest structure of tensors in PaddlePaddle is tensor(s), or
+    tuple of tensors, or list of tensors.
+
+    Note:
+        1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must
+        have the same structure because of the dataflow model of PaddlePaddle,
+        while the tensors in the tuples or the lists can have different shapes.
+
+        2. This API can be used in both static graph mode and dygraph mode. In
+        dygraph mode, it only runs the branch selected by the condition.
+
+        3. In static graph mode, any tensors or operations created outside
+        or inside of ``true_fn`` and ``false_fn`` will be built into the
+        network regardless of which branch is selected at runtime. This has
+        frequently surprised users who expected lazy semantics. For example:
+
+        .. code-block:: python
+
+            import paddle
+
+            a = paddle.zeros((1, 1))
+            b = paddle.zeros((1, 1))
+            c = a * b
+            out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
+
+        Regardless of whether ``a < b`` , ``c = a * b`` will be built and run.
+        ``a + c`` and ``b * b`` will both be built into the network, but only
+        one branch will be executed during runtime.
+
+    Args:
+        pred(Tensor): A boolean tensor whose numel should be 1. The boolean
+            value determines whether to return the result of ``true_fn`` or
+            ``false_fn`` .
+        true_fn(callable, optional): A callable to be performed if ``pred`` is
+            true. The default value is ``None`` .
+        false_fn(callable, optional): A callable to be performed if ``pred`` is
+            false. The default value is ``None`` .
+        name(str, optional): The default value is ``None`` . Normally users
+            don't have to set this parameter.
+            For more information, please refer to :ref:`api_guide_Name` .
+        return_names(sequence of string, optional): The default value is
+            ``None`` . Normally users don't have to set this parameter. A
+            sequence of strings to represent the names of the returned
+            variables. The structure of the sequence must be the same as the
+            return values of ``true_fn`` and ``false_fn`` .
+
+    Returns:
+        Tensor|list(Tensor)|tuple(Tensor): returns ``true_fn()`` if the
+        predicate ``pred`` is true else ``false_fn()`` .
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            #
+            # pseudocode:
+            # if 0.1 < 0.23:
+            #     return 1, True
+            # else:
+            #     return 3, 2
+            #
+
+            def true_func():
+                return paddle.full(shape=[1, 2], dtype='int32',
+                                   fill_value=1), paddle.full(shape=[2, 3],
+                                                              dtype='bool',
+                                                              fill_value=True)
+
+
+            def false_func():
+                return paddle.full(shape=[3, 4], dtype='float32',
+                                   fill_value=3), paddle.full(shape=[4, 5],
+                                                              dtype='int64',
+                                                              fill_value=2)
+
+
+            x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
+            y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
+            pred = paddle.less_than(x=x, y=y, name=None)
+            ret = paddle.static.nn.cond(pred, true_func, false_func)
+            # ret is a tuple containing 2 tensors
+            # ret[0] = [[1 1]]
+            # ret[1] = [[ True  True  True]
+            #           [ True  True  True]]
+
+    """
+    if _non_static_mode():
+        assert isinstance(pred, Variable), "The pred in cond must be Variable"
+        assert pred.size == 1, "condition input's numel should be 1"
+        pred = pred.numpy()[0]
+        if pred:
+            if true_fn is not None:
+                if not callable(true_fn):
+                    raise TypeError(
+                        "The true_fn in cond must be callable, but received {}".format(
+                            type(true_fn).__name__
+                        )
+                    )
+                return true_fn()
+        else:
+            if false_fn is not None:
+                if not callable(false_fn):
+                    raise TypeError(
+                        "The false_fn in cond must be callable, but received {}".format(
+                            type(false_fn).__name__
+                        )
+                    )
+                return false_fn()
+        return None
+
+    check_variable_and_dtype(pred, "pred", ['bool'], "paddle.static.nn.cond")
+    check_type(name, "name", (str, type(None)), "paddle.static.nn.cond")
+    helper = LayerHelper('cond', **locals())
+    true_output = None
+    false_output = None
+    copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
+    if true_fn is not None:
+        if not callable(true_fn):
+            raise TypeError(
+                "The true_fn in cond must be callable, but received {}".format(
+                    type(true_fn).__name__
+                )
+            )
+        true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
+        with true_cond_block.block():
+            origin_true_output = true_fn()
+            if origin_true_output is not None:
+                true_output = map_structure(
+                    copy_to_parent_func, origin_true_output
+                )
+    if false_fn is not None:
+        if not callable(false_fn):
+            raise TypeError(
+                "The false_fn in cond must be callable, but received {}".format(
+                    type(false_fn).__name__
+                )
+            )
+        false_cond_block = ConditionalBlock(
+            [paddle.logical_not(pred)], is_scalar_condition=True
+        )
+        with false_cond_block.block():
+            origin_false_output = false_fn()
+            if origin_false_output is not None:
+                false_output = map_structure(
+                    copy_to_parent_func, origin_false_output
+                )
+
+    if true_output is None and false_output is None:
+        return None
+
+    if true_output is None:
+        raise ValueError(
+            "Incompatible return values of true_fn and false_fn in cond: "
+            "true_fn returns None while false_fn returns non-None"
+        )
+    if false_output is None:
+        raise ValueError(
+            "Incompatible return values of true_fn and false_fn in cond: "
+            "true_fn returns non-None while false_fn returns None"
+        )
+
+    # Merge true and false output if they are not None
+    if return_names is None:
+        is_dy2static = False
+        return_names = ["no name"] * len(_to_sequence_except_dict(true_output))
+    else:
+        """
+        dy2static will set the return_names and expand the return values to UndefinedVar.
+        """
+        is_dy2static = True
+
+        # TODO: expand_undefined_var will replace None to Undefinedvar(), to fix cases like:
+        #       a = None
+        #       if condition:
+        #           a = 1
+        # Because we can not use variable to express 'None'
+        true_output, false_output = expand_undefined_var(
+            true_output, false_output, return_names
+        )
+
+    if len(_to_sequence_except_dict(true_output)) != len(
+        _to_sequence_except_dict(false_output)
+    ):
+        raise ValueError(
+            "true fn returns {} vars, but false fn returns {} vars, which is not equal".format(
+                len(_to_sequence_except_dict(true_output)),
+                len(_to_sequence_except_dict(false_output)),
+            )
+        )
+    for true_out, false_out, return_name in zip(
+        _to_sequence_except_dict(true_output),
+        _to_sequence_except_dict(false_output),
+        _to_sequence_except_dict(return_names),
+    ):
+        try:
+            assert_same_structure(true_out, false_out, check_types=False)
+        except ValueError as e:
+            raise ValueError(
+                "Incompatible return values of `{}` in true_fn and false_fn in cond: {}".format(
+                    return_name, e
+                )
+            )
+
+    def check_ret_none(seq_true, seq_false, seq_names):
+        for f_true, f_false, f_name in zip(seq_true, seq_false, seq_names):
+            f_true = flatten(f_true)
+            f_false = flatten(f_false)
+            for idx in range(len(f_true)):
+                if (
+                    f_true[idx] is None
+                    and f_false[idx] is not None
+                    or f_false[idx] is None
+                    and f_true[idx] is not None
+                ):
+                    warnings.warn(
+                        "In cond : Var '{}' or part of it is set differently in ifelse branches, "
+                        "<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
+                        "'None' in ifelse block might lead to error.".format(
+                            f_name,
+                            type(f_true[idx]),
+                            f_true[idx],
+                            type(f_false[idx]),
+                            f_false[idx],
+                        )
+                    )
+
+    check_ret_none(
+        _to_sequence_except_dict(true_output),
+        _to_sequence_except_dict(false_output),
+        _to_sequence_except_dict(return_names),
+    )
+
+    if is_dy2static:
+        true_output, false_output = change_none_to_undefinedvar(
+            true_output, false_output
+        )
+
+    mask = cast(pred, dtype='int32')
+    merge_func = (
+        lambda name, false_var, true_var: select_input_with_buildin_type(
+            [false_var, true_var], mask, name
+        )
+    )
+
+    def merge_every_var_list(false_vars, true_vars, name):
+        return map_structure(partial(merge_func, name), false_vars, true_vars)
+
+    merged_output = list(
+        map(
+            merge_every_var_list,
+            _to_sequence_except_dict(false_output),
+            _to_sequence_except_dict(true_output),
+            _to_sequence_except_dict(return_names),
+        )
+    )
+    merged_output = pack_sequence_as(false_output, flatten(merged_output))
+    return merged_output
+
+
+def copy_var_to_parent_block(var, layer_helper):
+    if not isinstance(var, Variable):
+        return var
+    prog = layer_helper.main_program
+    parent_idx = prog.current_block().parent_idx
+    assert (
+        parent_idx >= 0
+    ), "Got wrong parent block index when assigning var to parent scope in control_flow"
+    parent_block = prog.block(parent_idx)
+
+    if (
+        var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY
+        and parent_block._find_var_recursive(var.name)
+    ):
+        parent_block_var = var
+    else:
+        parent_block_var = parent_block.create_var(
+            dtype=var.dtype, shape=var.shape, type=var.type
+        )
+        assign(var, parent_block_var)
+    return parent_block_var
+
+
+def select_input_with_buildin_type(inputs, mask, name):
+    from paddle.jit.dy2static.utils import UndefinedVar
+    from paddle.jit.dy2static.variable_trans_func import to_static_variable
+
+    false_var, true_var = inputs
+
+    if isinstance(false_var, UndefinedVar) and isinstance(
+        true_var, UndefinedVar
+    ):
+        """None -> UndefinedVar, so the real value is a [None, UndefinedVar] or [None, None], we just return None."""
+        return None
+
+    if isinstance(false_var, Variable) and isinstance(true_var, Variable):
+        try:
+            return select_input(inputs, mask)
+        except Exception as e:
+            raise RuntimeError(
+                f"Exceptions thrown while doing select_input on {name}:\n{e}"
+            )
+
+    elif isinstance(false_var, support_ret_buildin_type) and isinstance(
+        false_var, type(true_var)
+    ):
+        if false_var == true_var:
+            return false_var
+        else:
+            inputs = [
+                to_static_variable(false_var),
+                to_static_variable(true_var),
+            ]
+    # Deal with the situations like this: false_var is int and true_var is Variable
+    elif (
+        isinstance(false_var, support_ret_buildin_type)
+        and isinstance(true_var, Variable)
+    ) or (
+        isinstance(true_var, support_ret_buildin_type)
+        and isinstance(false_var, Variable)
+    ):
+        inputs = [to_static_variable(false_var), to_static_variable(true_var)]
+        warnings.warn(
+            "Return results from different branches in cond are not same type: "
+            "false_var returned by false_fn is '{}' and true_var of true_fn is "
+            "'{}'".format(type(false_var), type(true_var))
+        )
+    elif (
+        isinstance(false_var, UndefinedVar)
+        and isinstance(true_var, (Variable,) + support_ret_buildin_type)
+    ) or (
+        isinstance(true_var, UndefinedVar)
+        and isinstance(false_var, (Variable,) + support_ret_buildin_type)
+    ):
+
+        def create_var_if_not_undefined_var(a):
+            if isinstance(a, UndefinedVar):
+                return a
+            return to_static_variable(a)
+
+        true_var, false_var = to_static_variable(true_var), to_static_variable(
+            false_var
+        )
+        inputs = [false_var, true_var]
+    else:
+        raise TypeError(
+            "Unsupported return type of true_fn and false_fn in cond: false_var "
+            "returned by false_fn is '{}' and true_var of true_fn is '{}'".format(
+                type(false_var), type(true_var)
+            )
+        )
+    try:
+        return select_input(inputs, mask)
+    except Exception as e:
+        raise RuntimeError(
+            f"Exceptions thrown while doing select_input on {name}:\n{e}"
+        )
+
+
+def _is_sequence_except_dict(x):
+    """
+    In this function, dict is not viewed as sequence.
+    """
+    if isinstance(x, dict):
+        return False
+    return is_sequence(x)
+
+
+def _to_sequence_except_dict(x):
+    """
+    In this function, dict is not viewed as sequence.
+    """
+    if isinstance(x, dict):
+        return [x]
+    return to_sequence(x)
+
+
+def expand_undefined_var(nest1, nest2, names):
+    """TODO: make this function recursive.
+    nest1: Var1, (UndefinedVar, [1,2,3])
+    nest2: Var2, ([1,2,3,4], UndefinedVar)
+    In this case, we should not expand recursively.
+    """
+    from paddle.jit.dy2static.return_transformer import RETURN_VALUE_PREFIX
+    from paddle.jit.dy2static.utils import UndefinedVar
+
+    def pack_undefined_var_as(seq):
+        return pack_sequence_as(
+            seq, [UndefinedVar("padding") for i in flatten(seq)]
+        )
+
+    def map_fn(n1, n2, name, order):
+        if not name.startswith(RETURN_VALUE_PREFIX) and (
+            isinstance(n1, UndefinedVar) or n1 is None
+        ):
+            if n1 is None and n2 is not None:
+                if order == 0:
+                    warnings.warn(
+                        "In cond : Var '{}' or part of it is set differently in ifelse branches, "
+                        "<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
+                        "'None' in ifelse block might lead to error.".format(
+                            name, type(n1), n1, type(n2), n2
+                        )
+                    )
+                else:
+                    warnings.warn(
+                        "In cond : Var '{}' or part of it is set differently in ifelse branches, "
+                        "<{}, {}> in true branch and <{}, {}> in false branch. Set var to "
+                        "'None' in ifelse block might lead to error.".format(
+                            name, type(n2), n2, type(n1), n1
+                        )
+                    )
+            return pack_undefined_var_as(n2)
+        return n1
+
+    nest1_out = list(
+        map(
+            map_fn,
+            _to_sequence_except_dict(nest1),
+            _to_sequence_except_dict(nest2),
+            _to_sequence_except_dict(names),
+            [0 for i in _to_sequence_except_dict(names)],
+        )
+    )
+    nest2_out = list(
+        map(
+            map_fn,
+            _to_sequence_except_dict(nest2),
+            _to_sequence_except_dict(nest1),
+            _to_sequence_except_dict(names),
+            [1 for i in _to_sequence_except_dict(names)],
+        )
+    )
+    if not _is_sequence_except_dict(nest1):
+        nest1_out = nest1_out[0]
+    if not _is_sequence_except_dict(nest2):
+        nest2_out = nest2_out[0]
+    return nest1_out, nest2_out
+
+
+def change_none_to_undefinedvar(nest1, nest2):
+    from paddle.jit.dy2static.utils import UndefinedVar
+
+    def map_fn(x):
+        if x is None:
+            return UndefinedVar("padding")
+        return x
+
+    nest1_out = pack_sequence_as(nest1, list(map(map_fn, flatten(nest1))))
+    nest2_out = pack_sequence_as(nest2, list(map(map_fn, flatten(nest2))))
+    return nest1_out, nest2_out
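The merge path at the end of cond (map_structure over select_input_with_buildin_type, then pack_sequence_as) is what lets both branches return a whole nest of tensors. A compact sketch of that round trip (values hypothetical):

import paddle

paddle.enable_static()

main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
    # Both branches return the same 2-tuple structure; cond merges the
    # nests leaf by leaf with select_input and repacks the tuple.
    a, b = paddle.static.nn.cond(
        x < 0.5,
        lambda: (x + 1.0, x + 2.0),
        lambda: (x - 1.0, x - 2.0),
    )

exe = paddle.static.Executor(paddle.CPUPlace())
res_a, res_b = exe.run(main, fetch_list=[a, b])
print(res_a, res_b)  # [1.3] [2.3] -- the true branch was selected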