From 1c7ae95401fd5aa7e2e1405295a331a5a7c7ce00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A7=9C=E6=B0=B8=E4=B9=85?= <34344716+yjjiang11@users.noreply.github.com> Date: Thu, 29 Dec 2022 09:01:36 +0800 Subject: [PATCH] rm in_legacy part8 (#49386) * rm legacy layers part6 * rm non_static_mode * modify non_static * minor change * rm loss * rm in_legacy part8 * minor change --- python/paddle/fluid/layers/collective.py | 61 +- python/paddle/fluid/layers/control_flow.py | 58 +- .../fluid/layers/layer_function_generator.py | 18 +- .../fluid/layers/learning_rate_scheduler.py | 25 +- python/paddle/fluid/layers/nn.py | 255 ++++--- python/paddle/fluid/layers/sequence_lod.py | 28 +- python/paddle/fluid/layers/tensor.py | 394 +++++------ python/paddle/geometric/math.py | 177 +++-- .../geometric/message_passing/send_recv.py | 300 ++++----- .../utils/cpp_extension/extension_utils.py | 17 +- python/paddle/vision/ops.py | 629 ++++++------------ 11 files changed, 794 insertions(+), 1168 deletions(-) diff --git a/python/paddle/fluid/layers/collective.py b/python/paddle/fluid/layers/collective.py index 7764235dc2..db451bcce9 100644 --- a/python/paddle/fluid/layers/collective.py +++ b/python/paddle/fluid/layers/collective.py @@ -13,7 +13,7 @@ # limitations under the License. from ..layer_helper import LayerHelper, unique_name -from ..framework import Variable, in_dygraph_mode, _in_legacy_dygraph +from ..framework import Variable, in_dygraph_mode import paddle from paddle import _C_ops, _legacy_C_ops @@ -120,42 +120,31 @@ def _c_allgather(x, nranks, ring_id=0, use_calc_stream=False): task = group.process_group.all_gather(x, out) task.wait() return out - - if _in_legacy_dygraph(): - attrs = ( - 'nranks', - nranks, - 'ring_id', - ring_id, - 'use_calc_stream', - use_calc_stream, + else: + helper = LayerHelper(op_type, **locals()) + out_shape = list(x.shape[:]) + if out_shape[0] > 0: + out_shape[0] *= nranks + out = helper.create_variable( + name=unique_name.generate_with_ignorable_key( + '.'.join([x.name, op_type]) + ), + shape=out_shape, + dtype=x.dtype, + type=x.type, + persistable=x.persistable, ) - return _legacy_C_ops.c_allgather(x, *attrs) - - helper = LayerHelper(op_type, **locals()) - out_shape = list(x.shape[:]) - if out_shape[0] > 0: - out_shape[0] *= nranks - out = helper.create_variable( - name=unique_name.generate_with_ignorable_key( - '.'.join([x.name, op_type]) - ), - shape=out_shape, - dtype=x.dtype, - type=x.type, - persistable=x.persistable, - ) - helper.append_op( - type=op_type, - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={ - 'nranks': nranks, - 'ring_id': ring_id, - 'use_calc_stream': use_calc_stream, - }, - ) - return out + helper.append_op( + type=op_type, + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={ + 'nranks': nranks, + 'ring_id': ring_id, + 'use_calc_stream': use_calc_stream, + }, + ) + return out def _c_reducescatter(x, nranks, ring_id=0, use_calc_stream=False): diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 60fb065470..9e24e18e29 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -21,9 +21,7 @@ from ..framework import ( Program, Variable, Operator, - _non_static_mode, static_only, - _in_legacy_dygraph, in_dygraph_mode, ) from ..layer_helper import LayerHelper, unique_name @@ -1154,7 +1152,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): "but given shape as {0}.".format(list(pre_cond.shape)) ) - if _non_static_mode(): + if 
in_dygraph_mode(): now_cond = pre_cond.numpy()[0] while now_cond: output_vars = body(*loop_vars) @@ -1168,33 +1166,33 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): now_cond = cond(*output_vars).numpy()[0] map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars) return loop_vars - - while_loop_block = While(pre_cond, is_test, name) - has_mutable_vars_in_loop = hold_mutable_vars(loop_vars) - with while_loop_block.block(): - # If a variable with mutable type is included in loop_vars, like `dict/list`, - # modifying it in the body function will cause origin variable to be modified - # synchronously. This will raise an assignment error out of while block. - # Here we make a copy of the mutable vars to avoid this problem. - if has_mutable_vars_in_loop: - new_loop_vars = copy_mutable_vars(loop_vars) - output_vars = body(*new_loop_vars) - else: - output_vars = body(*loop_vars) - if not isinstance(output_vars, (list, tuple)): - output_vars = [output_vars] - try: - loop_vars = _deal_with_undefined_var(output_vars, loop_vars) - assert_same_structure(output_vars, loop_vars, check_types=False) - except ValueError as e: - raise ValueError( - "body in while_loop should return the same arity " - "(length and structure) as loop_vars: {0}".format(e) - ) - now_cond = cond(*output_vars) - map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars) - assign(now_cond, pre_cond) - return loop_vars + else: + while_loop_block = While(pre_cond, is_test, name) + has_mutable_vars_in_loop = hold_mutable_vars(loop_vars) + with while_loop_block.block(): + # If a variable with mutable type is included in loop_vars, like `dict/list`, + # modifying it in the body function will cause origin variable to be modified + # synchronously. This will raise an assignment error out of while block. + # Here we make a copy of the mutable vars to avoid this problem. + if has_mutable_vars_in_loop: + new_loop_vars = copy_mutable_vars(loop_vars) + output_vars = body(*new_loop_vars) + else: + output_vars = body(*loop_vars) + if not isinstance(output_vars, (list, tuple)): + output_vars = [output_vars] + try: + loop_vars = _deal_with_undefined_var(output_vars, loop_vars) + assert_same_structure(output_vars, loop_vars, check_types=False) + except ValueError as e: + raise ValueError( + "body in while_loop should return the same arity " + "(length and structure) as loop_vars: {0}".format(e) + ) + now_cond = cond(*output_vars) + map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars) + assign(now_cond, pre_cond) + return loop_vars # (TODO: Mine) There exists dependency. It will be removed later. diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py index 8de02b495c..bb5d06157e 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/fluid/layers/layer_function_generator.py @@ -24,13 +24,10 @@ from ..framework import ( Variable, core, convert_np_dtype_to_dtype_, - _non_static_mode, in_dygraph_mode, - _in_legacy_dygraph, ) from ..layer_helper import LayerHelper from ..data_feeder import check_variable_and_dtype -from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph from paddle import _C_ops, _legacy_C_ops __all__ = [ @@ -276,7 +273,7 @@ def generate_activation_fn(op_type): return op(x) # TODO(dev): Because some ops' yaml has not been migrated. # Replace it with _in_legacy_dygraph while all yaml work is done. 
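# Illustrative sketch, not part of this patch: the two-branch shape that
# this PR converges every API onto. `in_dygraph_mode()` picks the eager
# C++ kernel; the `else` branch builds a static-graph op through
# LayerHelper. `my_relu` is a hypothetical name; `in_dygraph_mode`,
# `_C_ops` and `LayerHelper` are the same symbols imported above.
def my_relu(x):
    if in_dygraph_mode():
        return _C_ops.relu(x)  # eager: dispatch straight to the C++ op
    else:
        helper = LayerHelper('relu', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
        return out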
- if _non_static_mode(): + if in_dygraph_mode() and hasattr(_legacy_C_ops, op_type): op = getattr(_legacy_C_ops, op_type) return op(x) @@ -327,15 +324,16 @@ def generate_inplace_fn(inplace_op_type): origin_op_type = inplace_op_type[:-1] def func(x, name=None): - if _non_static_mode(): + if in_dygraph_mode(): op = getattr(_legacy_C_ops, inplace_op_type) return op(x) - warnings.warn( - "In static mode, {}() is the same as {}() and does not perform inplace operation.".format( - inplace_op_type, origin_op_type + else: + warnings.warn( + "In static mode, {}() is the same as {}() and does not perform inplace operation.".format( + inplace_op_type, origin_op_type + ) ) - ) - return generate_activation_fn(origin_op_type)(x, name) + return generate_activation_fn(origin_op_type)(x, name) func.__name__ = inplace_op_type func.__doc__ = """ diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index 49399a8cca..8521b9357c 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -27,9 +27,14 @@ import paddle from . import control_flow from . import nn from . import tensor -from ..framework import default_main_program, Parameter, unique_name, name_scope +from ..framework import ( + default_main_program, + Parameter, + unique_name, + name_scope, + in_dygraph_mode, +) from ..framework import Variable -from ..framework import _non_static_mode from ..dygraph import learning_rate_scheduler as imperate_lr from ..data_feeder import check_variable_and_dtype, check_type @@ -99,7 +104,7 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0): learning_rate) """ with default_main_program()._lr_schedule_guard(): - if _non_static_mode(): + if in_dygraph_mode(): decay = imperate_lr.NoamDecay( d_model, warmup_steps, learning_rate=learning_rate ) @@ -160,7 +165,7 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False): """ with default_main_program()._lr_schedule_guard(): - if _non_static_mode(): + if in_dygraph_mode(): decay = imperate_lr.ExponentialDecay( learning_rate, decay_steps, decay_rate, staircase ) @@ -222,7 +227,7 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False): """ with default_main_program()._lr_schedule_guard(): - if _non_static_mode(): + if in_dygraph_mode(): decay = imperate_lr.NaturalExpDecay( learning_rate, decay_steps, decay_rate, staircase ) @@ -282,7 +287,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False): staircase=True)) """ with default_main_program()._lr_schedule_guard(): - if _non_static_mode(): + if in_dygraph_mode(): decay = imperate_lr.InverseTimeDecay( learning_rate, decay_steps, decay_rate, staircase ) @@ -337,7 +342,7 @@ def polynomial_decay( """ with default_main_program()._lr_schedule_guard(): - if _non_static_mode(): + if in_dygraph_mode(): decay = imperate_lr.PolynomialDecay( learning_rate, decay_steps, end_learning_rate, power, cycle ) @@ -414,7 +419,7 @@ def piecewise_decay(boundaries, values): if len(values) - len(boundaries) != 1: raise ValueError("len(values) - len(boundaries) should be 1") - if _non_static_mode(): + if in_dygraph_mode(): decay = imperate_lr.PiecewiseDecay(boundaries, values, 0) return decay else: @@ -488,7 +493,7 @@ def cosine_decay(learning_rate, step_each_epoch, epochs): ) with default_main_program()._lr_schedule_guard(): - if _non_static_mode(): + if in_dygraph_mode(): decay = imperate_lr.CosineDecay( learning_rate, 
step_each_epoch, epochs ) @@ -569,7 +574,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr): linear_step = float(end_lr) - float(start_lr) with default_main_program()._lr_schedule_guard(): - if _non_static_mode(): + if in_dygraph_mode(): lr = imperate_lr.LinearLrWarmup( learning_rate, warmup_steps, start_lr, end_lr ) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 56c5753ac2..264c8ce6da 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -22,19 +22,16 @@ import numpy as np import paddle from ..layer_helper import LayerHelper -from paddle.fluid.framework import _in_legacy_dygraph from ..initializer import Normal, Constant from ..framework import ( Variable, OpProtoHolder, - _non_static_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags, - _in_legacy_dygraph, in_dygraph_mode, ) from ..framework import _current_expected_place @@ -128,10 +125,6 @@ def _elementwise_op_in_dygraph( OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name, ) out = op(x, y) - - if _in_legacy_dygraph(): - op = getattr(_legacy_C_ops, op_name) - out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) return dygraph_utils._append_activation_in_dygraph( out, act, use_mkldnn=use_mkldnn ) @@ -794,26 +787,25 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): if in_dygraph_mode(): return _C_ops.sum(input, dim, None, keep_dim) - elif _in_legacy_dygraph(): - return _legacy_C_ops.reduce_sum( - input, 'dim', dim, 'keep_dim', keep_dim, 'reduce_all', reduce_all + else: + attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all} + check_variable_and_dtype( + input, + 'input', + ['float16', 'float32', 'float64', 'int32', 'int64'], + 'reduce_sum', ) - attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all} - check_variable_and_dtype( - input, - 'input', - ['float16', 'float32', 'float64', 'int32', 'int64'], - 'reduce_sum', - ) - helper = LayerHelper('reduce_sum', **locals()) - out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - helper.append_op( - type='reduce_sum', - inputs={'X': input}, - outputs={'Out': out}, - attrs=attrs, - ) - return out + helper = LayerHelper('reduce_sum', **locals()) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype() + ) + helper.append_op( + type='reduce_sum', + inputs={'X': input}, + outputs={'Out': out}, + attrs=attrs, + ) + return out def autoincreased_step_counter(counter_name=None, begin=1, step=1): @@ -895,7 +887,7 @@ def unsqueeze(input, axes, name=None): y = fluid.layers.unsqueeze(input=x, axes=[1]) """ - if _non_static_mode(): + if in_dygraph_mode(): if isinstance(axes, int): axes = [axes] elif isinstance(axes, Variable): @@ -905,98 +897,106 @@ def unsqueeze(input, axes, name=None): item.numpy().item(0) if isinstance(item, Variable) else item for item in axes ] - if _in_legacy_dygraph(): - out, _ = _legacy_C_ops.unsqueeze2(input, 'axes', axes) - return out return _C_ops.unsqueeze(input, axes) + else: + check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') + check_variable_and_dtype( + input, + 'input', + [ + 'float16', + 'float32', + 'float64', + 'bool', + 'int8', + 'int16', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'unsqueeze', + ) + helper = LayerHelper("unsqueeze2", **locals()) + inputs = {"X": input} + attrs = {} - check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze') - check_variable_and_dtype( - 
input, - 'input', - [ - 'float16', - 'float32', - 'float64', - 'bool', - 'int8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - ], - 'unsqueeze', - ) - helper = LayerHelper("unsqueeze2", **locals()) - inputs = {"X": input} - attrs = {} - - if isinstance(axes, int): - axes = [axes] - if isinstance(axes, Variable): - axes.stop_gradient = True - inputs["AxesTensor"] = axes - elif isinstance(axes, (list, tuple)): - if utils._contain_var(axes): - inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes) - else: - attrs["axes"] = axes + if isinstance(axes, int): + axes = [axes] + if isinstance(axes, Variable): + axes.stop_gradient = True + inputs["AxesTensor"] = axes + elif isinstance(axes, (list, tuple)): + if utils._contain_var(axes): + inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes) + else: + attrs["axes"] = axes - out = helper.create_variable_for_type_inference(dtype=input.dtype) - x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) - helper.append_op( - type="unsqueeze2", - inputs=inputs, - attrs=attrs, - outputs={"Out": out, "XShape": x_shape}, - ) + out = helper.create_variable_for_type_inference(dtype=input.dtype) + x_shape = helper.create_variable_for_type_inference(dtype=input.dtype) + helper.append_op( + type="unsqueeze2", + inputs=inputs, + attrs=attrs, + outputs={"Out": out, "XShape": x_shape}, + ) - return out + return out def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): - if _non_static_mode(): + if in_dygraph_mode(): op = getattr(_legacy_C_ops, op_name) if binary_op: return op(x, y) else: return op(x) - check_variable_and_dtype( - x, - "x", - ["bool", "int8", "int16", "int32", "int64", "float32", "float64"], - op_name, - ) - if y is not None: + else: check_variable_and_dtype( - y, - "y", + x, + "x", ["bool", "int8", "int16", "int32", "int64", "float32", "float64"], op_name, ) - if out is not None: - check_type(out, "out", Variable, op_name) + if y is not None: + check_variable_and_dtype( + y, + "y", + [ + "bool", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + ], + op_name, + ) + if out is not None: + check_type(out, "out", Variable, op_name) - helper = LayerHelper(op_name, **locals()) + helper = LayerHelper(op_name, **locals()) - if binary_op and x.dtype != y.dtype: - raise ValueError( - "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s." - % (op_name, x.dtype, y.dtype) - ) + if binary_op and x.dtype != y.dtype: + raise ValueError( + "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s." 
+ % (op_name, x.dtype, y.dtype) + ) - if out is None: - out = helper.create_variable_for_type_inference(dtype=x.dtype) + if out is None: + out = helper.create_variable_for_type_inference(dtype=x.dtype) - if binary_op: - helper.append_op( - type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out} - ) - else: - helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out}) + if binary_op: + helper.append_op( + type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out} + ) + else: + helper.append_op( + type=op_name, inputs={"X": x}, outputs={"Out": out} + ) - return out + return out @templatedoc() @@ -1082,30 +1082,28 @@ def clip_by_norm(x, max_norm, name=None): if in_dygraph_mode(): return _C_ops.clip_by_norm(x, max_norm) - if _non_static_mode(): - return _legacy_C_ops.clip_by_norm(x, 'max_norm', max_norm) + else: + helper = LayerHelper("clip_by_norm", **locals()) + check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm') + check_type(max_norm, 'max_norm', (float), 'clip_by_norm') - helper = LayerHelper("clip_by_norm", **locals()) - check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm') - check_type(max_norm, 'max_norm', (float), 'clip_by_norm') + if name is None: + name = unique_name.generate_with_ignorable_key( + ".".join([helper.name, 'tmp']) + ) - if name is None: - name = unique_name.generate_with_ignorable_key( - ".".join([helper.name, 'tmp']) + out = helper.create_variable( + type=x.type, name=name, dtype=x.dtype, persistable=False ) - out = helper.create_variable( - type=x.type, name=name, dtype=x.dtype, persistable=False - ) - - helper.append_op( - type="clip_by_norm", - inputs={"X": x}, - attrs={"max_norm": max_norm}, - outputs={"Out": out}, - ) + helper.append_op( + type="clip_by_norm", + inputs={"X": x}, + attrs={"max_norm": max_norm}, + outputs={"Out": out}, + ) - return out + return out @templatedoc() @@ -1132,19 +1130,16 @@ def merge_selected_rows(x, name=None): """ if in_dygraph_mode(): return _C_ops.merge_selected_rows(x) - - if _non_static_mode(): - return _legacy_C_ops.merge_selected_rows(x) - - helper = LayerHelper("merge_selected_rows", **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - helper.append_op( - type="merge_selected_rows", - inputs={"X": x}, - attrs={}, - outputs={"Out": out}, - ) - return out + else: + helper = LayerHelper("merge_selected_rows", **locals()) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + helper.append_op( + type="merge_selected_rows", + inputs={"X": x}, + attrs={}, + outputs={"Out": out}, + ) + return out @templatedoc() diff --git a/python/paddle/fluid/layers/sequence_lod.py b/python/paddle/fluid/layers/sequence_lod.py index a09ef08daf..99ed064a48 100644 --- a/python/paddle/fluid/layers/sequence_lod.py +++ b/python/paddle/fluid/layers/sequence_lod.py @@ -17,9 +17,7 @@ from .layer_function_generator import templatedoc from ..framework import ( core, Variable, - _non_static_mode, in_dygraph_mode, - _in_legacy_dygraph, convert_np_dtype_to_dtype_, ) from ..layer_helper import LayerHelper @@ -156,7 +154,7 @@ def sequence_conv( """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." 
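# Illustrative sketch, not part of this patch: every sequence_* layer in
# this file keeps the assert above, so callers must switch to static
# graph mode first. A minimal sequence_softmax setup under that
# assumption:
import paddle

paddle.enable_static()
x = paddle.static.data(name='x', shape=[7, 1], dtype='float32', lod_level=1)
out = paddle.static.nn.sequence_softmax(input=x)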
check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'sequence_conv' @@ -258,7 +256,7 @@ def sequence_softmax(input, use_cudnn=False, name=None): x_sequence_softmax_2 = paddle.static.nn.sequence_softmax(input=y) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_softmax', **locals()) check_variable_and_dtype( @@ -363,7 +361,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0): first_x = paddle.static.nn.sequence_pool(input=x, pool_type='first') """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'sequence_pool' @@ -441,7 +439,7 @@ def sequence_concat(input, name=None): out = paddle.static.nn.sequence_concat(input=[x, y]) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_concat', **locals()) @@ -640,7 +638,7 @@ def sequence_slice(input, offset, length, name=None): length=length) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper("sequence_slice", **locals()) @@ -794,7 +792,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): # data: [1 2 1 2 3 4 3 4] """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand' @@ -916,7 +914,7 @@ def sequence_expand_as(x, y, name=None): # data: [1 1 1 2 2 2 3 4] """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand_as' @@ -1019,7 +1017,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_pad', **locals()) check_variable_and_dtype( @@ -1108,7 +1106,7 @@ def sequence_unpad(x, length, name=None): """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_unpad', **locals()) check_variable_and_dtype( @@ -1183,7 +1181,7 @@ def sequence_reshape(input, new_dim): x_reshaped = paddle.static.nn.sequence_reshape(input=x, new_dim=4) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_reshape', **locals()) check_variable_and_dtype( @@ -1268,7 +1266,7 @@ def sequence_scatter(input, index, updates, name=None): """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_scatter', **locals()) @@ -1350,7 +1348,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None): out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." 
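# Illustrative sketch, not part of this patch: under eager mode the
# assert above fails fast, before any dtype check runs. A hypothetical
# demonstration:
import paddle

paddle.disable_static()
try:
    paddle.static.nn.sequence_enumerate(paddle.to_tensor([[1], [2]]), win_size=2)
except AssertionError as err:
    print(err)  # sequence layer is not supported in dygraph mode yet.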
check_variable_and_dtype( input, 'input', ['int32', 'int64'], 'sequence_enumerate' @@ -1479,7 +1477,7 @@ def sequence_reverse(x, name=None): x_reversed = paddle.static.nn.sequence_reverse(x) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper("sequence_reverse", **locals()) check_variable_and_dtype( diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 15ab8ba5f6..08c352a3f0 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -19,9 +19,7 @@ from ..layer_helper import LayerHelper from ..framework import ( _current_expected_place, convert_np_dtype_to_dtype_, - _non_static_mode, _varbase_creator, - _in_legacy_dygraph, in_dygraph_mode, ) from ..framework import Variable @@ -81,59 +79,53 @@ def cast(x, dtype): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) return _C_ops.cast(x, dtype) + else: + check_variable_and_dtype( + x, + 'x', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int16', + 'int32', + 'int64', + 'uint8', + 'uint16', + ], + 'cast', + ) + check_dtype( + dtype, + 'dtype', + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'int8', + 'int16', + 'int32', + 'int64', + 'uint8', + 'uint16', + ], + 'cast', + ) - if _non_static_mode(): - if not isinstance(dtype, core.VarDesc.VarType): - dtype = convert_np_dtype_to_dtype_(dtype) - out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype) + helper = LayerHelper('cast', **locals()) + out = helper.create_variable_for_type_inference( + dtype=dtype, stop_gradient=x.stop_gradient + ) + helper.append_op( + type='cast', + inputs={'X': [x]}, + outputs={'Out': [out]}, + attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype}, + ) return out - check_variable_and_dtype( - x, - 'x', - [ - 'bool', - 'float16', - 'float32', - 'float64', - 'int16', - 'int32', - 'int64', - 'uint8', - 'uint16', - ], - 'cast', - ) - check_dtype( - dtype, - 'dtype', - [ - 'bool', - 'float16', - 'float32', - 'float64', - 'int8', - 'int16', - 'int32', - 'int64', - 'uint8', - 'uint16', - ], - 'cast', - ) - - helper = LayerHelper('cast', **locals()) - out = helper.create_variable_for_type_inference( - dtype=dtype, stop_gradient=x.stop_gradient - ) - helper.append_op( - type='cast', - inputs={'X': [x]}, - outputs={'Out': [out]}, - attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype}, - ) - return out - def concat(input, axis=0, name=None): """ @@ -191,73 +183,69 @@ def concat(input, axis=0, name=None): input = [t for t in input if t.shape.count(0) == 0] out = _C_ops.concat(input, axis) return out - - if _in_legacy_dygraph(): - if isinstance(axis, Variable): - axis = axis.numpy() - axis = axis.item(0) + else: + check_type(input, 'input', (list, tuple, Variable), 'concat') if not isinstance(input, Variable): - input = [t for t in input if t.shape.count(0) == 0] - out = _varbase_creator() - _legacy_C_ops.concat(input, out, 'axis', axis) - return out + for id, x in enumerate(input): + check_variable_and_dtype( + x, + 'input[' + str(id) + ']', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'concat', + ) + if x.dtype != input[0].dtype: + raise TypeError( + "All the Tensors in the input must have the same data type." 
+ ) + else: + input = [input] + check_type(axis, 'axis', (int, Variable), 'concat') - check_type(input, 'input', (list, tuple, Variable), 'concat') - if not isinstance(input, Variable): - for id, x in enumerate(input): - check_variable_and_dtype( - x, - 'input[' + str(id) + ']', - ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + if isinstance(axis, Variable): + check_dtype( + axis.dtype, + 'axis', + ['int32', 'int64'], 'concat', + "The data type of axis must be int32 or int64 when axis is a Tensor", ) - if x.dtype != input[0].dtype: - raise TypeError( - "All the Tensors in the input must have the same data type." - ) - else: - input = [input] - check_type(axis, 'axis', (int, Variable), 'concat') - if isinstance(axis, Variable): - check_dtype( - axis.dtype, - 'axis', - ['int32', 'int64'], - 'concat', - "The data type of axis must be int32 or int64 when axis is a Tensor", + helper = LayerHelper('concat', **locals()) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype() ) - helper = LayerHelper('concat', **locals()) - out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) - - if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY: - # NOTE(liym27): Don't remove this if branch! - # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0] - # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode. + if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY: + # NOTE(liym27): Don't remove this if branch! + # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0] + # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode. - assert len(input) == 1, ( - "If the elements of 'input' in concat are Variable(LoDTensorArray), " - "number of the elements must be 1, but received %s." % len(input) - ) - out_index = helper.create_variable_for_type_inference(dtype="int32") - helper.append_op( - type='tensor_array_to_tensor', - inputs={'X': input[0]}, - outputs={'Out': [out], 'OutIndex': [out_index]}, - attrs={'axis': axis, 'use_stack': False}, - ) - else: - inputs = {'X': input} - attrs = {} - if isinstance(axis, Variable): - axis.stop_gradient = True - attrs['axis'] = axis + assert len(input) == 1, ( + "If the elements of 'input' in concat are Variable(LoDTensorArray), " + "number of the elements must be 1, but received %s." + % len(input) + ) + out_index = helper.create_variable_for_type_inference(dtype="int32") + helper.append_op( + type='tensor_array_to_tensor', + inputs={'X': input[0]}, + outputs={'Out': [out], 'OutIndex': [out_index]}, + attrs={'axis': axis, 'use_stack': False}, + ) + else: + inputs = {'X': input} + attrs = {} + if isinstance(axis, Variable): + axis.stop_gradient = True + attrs['axis'] = axis - helper.append_op( - type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs - ) - return out + helper.append_op( + type='concat', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + ) + return out def sums(input, out=None): @@ -391,22 +379,15 @@ def assign(input, output=None): input = numpy.array(input) # NOTE(Aurelius84): Why we judge core.VarBase? # In case of @to_static, a VarBase can be as input of `assign`, - # but _non_static_mode()==False under @to_static, which means + # but in_dygraph_mode()==False under @to_static, which means # isinstance(VarBase, Variable) == False. It will cause return None # after this api. 
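# Illustrative sketch, not part of this patch: both eager branches of
# assign below, exercised via the public API (assumption: paddle.assign
# routes to this implementation). output=None allocates a fresh tensor;
# an explicit output is written in place through assign_out_.
import paddle

x = paddle.to_tensor([1.0, 2.0])
y = paddle.assign(x)        # output=None -> _C_ops.assign allocates
z = paddle.empty([2])
paddle.assign(x, output=z)  # output given -> _C_ops.assign_out_ writes into z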
if isinstance(input, (Variable, core.VarBase)): - if _non_static_mode(): - if in_dygraph_mode() and output is None: + if in_dygraph_mode(): + if output is None: output = _C_ops.assign(input) - elif in_dygraph_mode() and output is not None: - _C_ops.assign_out_(input, output) else: - if output is None: - if _in_legacy_dygraph(): - output = core.VarBase() - else: - output = core.eager.Tensor() - _legacy_C_ops.assign(input, output) + _C_ops.assign_out_(input, output) else: check_dtype( input.dtype, @@ -480,18 +461,6 @@ def assign(input, output=None): values, _current_expected_place(), ) - elif _in_legacy_dygraph(): - if output is None: - output = core.VarBase() - _legacy_C_ops.assign_value( - output, - 'shape', - list(input.shape), - 'dtype', - dtype, - value_name, - values, - ) else: if output is None: output = helper.create_variable_for_type_inference( @@ -507,7 +476,7 @@ def assign(input, output=None): }, ) - if is_inplace and _non_static_mode(): + if is_inplace and in_dygraph_mode(): output._bump_inplace_version() return output @@ -591,83 +560,56 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): _C_ops.full_(out, shape, float(value), dtype, place) out.stop_gradient = True return out - - if _in_legacy_dygraph(): - shape = utils.convert_shape_to_list(shape) - if out is None: - out = _varbase_creator(dtype=dtype) - + else: + helper = LayerHelper("fill_constant", **locals()) + inputs = {} if isinstance(value, Variable): - if dtype in ['uint8', 'int16', 'int32', 'int64']: - attrs['str_value'] = str(int(value.numpy().item(0))) - else: - attrs['str_value'] = str(float(value.numpy().item(0))) - - _legacy_C_ops.fill_constant( - out, - 'value', - float(value), - 'force_cpu', - force_cpu, + if convert_dtype(value.dtype) != dtype: + value = cast(value, dtype) + inputs['ValueTensor'] = value + + check_shape(shape) + check_dtype( + dtype, 'dtype', - out.dtype, - 'str_value', - attrs['str_value'], - 'shape', - shape, + [ + 'bool', + 'float16', + 'float32', + 'float64', + 'uint8', + 'int16', + 'int32', + 'int64', + 'complex64', + 'complex128', + ], + 'fill_constant', ) - out.stop_gradient = True - return out + check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant') - helper = LayerHelper("fill_constant", **locals()) - inputs = {} - if isinstance(value, Variable): - if convert_dtype(value.dtype) != dtype: - value = cast(value, dtype) - inputs['ValueTensor'] = value - - check_shape(shape) - check_dtype( - dtype, - 'dtype', - [ - 'bool', - 'float16', - 'float32', - 'float64', - 'uint8', - 'int16', - 'int32', - 'int64', - 'complex64', - 'complex128', - ], - 'fill_constant', - ) - check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant') + if out is not None: + check_variable_and_dtype( + out, 'out', [convert_dtype(dtype)], 'fill_constant' + ) - if out is not None: - check_variable_and_dtype( - out, 'out', [convert_dtype(dtype)], 'fill_constant' + helper = LayerHelper("fill_constant", **locals()) + utils.get_shape_tensor_inputs( + inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant' ) - helper = LayerHelper("fill_constant", **locals()) - utils.get_shape_tensor_inputs( - inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant' - ) - - if out is None: - out = helper.create_variable_for_type_inference(dtype=dtype) - attrs['dtype'] = out.dtype - helper.append_op( - type='fill_constant', - inputs=inputs, - outputs={'Out': [out]}, - attrs=attrs, - stop_gradient=True, - ) - out.stop_gradient = True - return out + if out is None: + out = 
helper.create_variable_for_type_inference(dtype=dtype) + attrs['dtype'] = out.dtype + helper.append_op( + type='fill_constant', + inputs=inputs, + outputs={'Out': [out]}, + attrs=attrs, + stop_gradient=True, + ) + out.stop_gradient = True + return out @deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant") @@ -727,29 +669,29 @@ def fill_constant_batch_size_like( ) out.stop_gradient = True return out - - helper = LayerHelper("fill_constant_batch_size_like", **locals()) - out = helper.create_variable_for_type_inference(dtype=dtype) - attrs = { - 'shape': shape, - 'dtype': out.dtype, - 'value': float(value), - 'input_dim_idx': input_dim_idx, - 'output_dim_idx': output_dim_idx, - 'force_cpu': force_cpu, - } - if convert_dtype(dtype) in ['int64', 'int32']: - attrs['str_value'] = str(int(value)) else: - attrs['str_value'] = str(float(value)) - helper.append_op( - type='fill_constant_batch_size_like', - inputs={'Input': input}, - outputs={'Out': [out]}, - attrs=attrs, - ) - out.stop_gradient = True - return out + helper = LayerHelper("fill_constant_batch_size_like", **locals()) + out = helper.create_variable_for_type_inference(dtype=dtype) + attrs = { + 'shape': shape, + 'dtype': out.dtype, + 'value': float(value), + 'input_dim_idx': input_dim_idx, + 'output_dim_idx': output_dim_idx, + 'force_cpu': force_cpu, + } + if convert_dtype(dtype) in ['int64', 'int32']: + attrs['str_value'] = str(int(value)) + else: + attrs['str_value'] = str(float(value)) + helper.append_op( + type='fill_constant_batch_size_like', + inputs={'Input': input}, + outputs={'Out': [out]}, + attrs=attrs, + ) + out.stop_gradient = True + return out def argmin(x, axis=0): diff --git a/python/paddle/geometric/math.py b/python/paddle/geometric/math.py index 4ed370da0c..fabaab5efc 100644 --- a/python/paddle/geometric/math.py +++ b/python/paddle/geometric/math.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
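# Illustrative sketch, not part of this patch: the eager branch of the
# fill_constant path refactored above ends in _C_ops.full_; a minimal
# call looks like this.
import paddle.fluid as fluid

out = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=7)
# out: a [2, 3] int64 tensor, every element 7, with stop_gradient=True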
-from paddle import _C_ops, _legacy_C_ops +from paddle import _C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode +from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper __all__ = [] @@ -52,32 +52,27 @@ def segment_sum(data, segment_ids, name=None): """ if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "SUM")[0] - if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool( - data, segment_ids, 'pooltype', "SUM" + else: + check_variable_and_dtype( + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" ) - return out - check_variable_and_dtype( - data, - "X", - ("float32", "float64", "int32", "int64", "float16"), - "segment_pool", - ) - check_variable_and_dtype( - segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" - ) - - helper = LayerHelper("segment_sum", **locals()) - out = helper.create_variable_for_type_inference(dtype=data.dtype) - summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op( - type="segment_pool", - inputs={"X": data, "SegmentIds": segment_ids}, - outputs={"Out": out, "SummedIds": summed_ids}, - attrs={"pooltype": "SUM"}, - ) - return out + helper = LayerHelper("segment_sum", **locals()) + out = helper.create_variable_for_type_inference(dtype=data.dtype) + summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "SUM"}, + ) + return out def segment_mean(data, segment_ids, name=None): @@ -114,32 +109,28 @@ def segment_mean(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "MEAN")[0] - if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool( - data, segment_ids, 'pooltype', "MEAN" + else: + + check_variable_and_dtype( + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" ) - return out - check_variable_and_dtype( - data, - "X", - ("float32", "float64", "int32", "int64", "float16"), - "segment_pool", - ) - check_variable_and_dtype( - segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" - ) - - helper = LayerHelper("segment_mean", **locals()) - out = helper.create_variable_for_type_inference(dtype=data.dtype) - summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op( - type="segment_pool", - inputs={"X": data, "SegmentIds": segment_ids}, - outputs={"Out": out, "SummedIds": summed_ids}, - attrs={"pooltype": "MEAN"}, - ) - return out + helper = LayerHelper("segment_mean", **locals()) + out = helper.create_variable_for_type_inference(dtype=data.dtype) + summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MEAN"}, + ) + return out def segment_min(data, segment_ids, name=None): @@ -175,32 +166,27 @@ def segment_min(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "MIN")[0] - if _in_legacy_dygraph(): - out, tmp = 
_legacy_C_ops.segment_pool( - data, segment_ids, 'pooltype', "MIN" + else: + check_variable_and_dtype( + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" ) - return out - check_variable_and_dtype( - data, - "X", - ("float32", "float64", "int32", "int64", "float16"), - "segment_pool", - ) - check_variable_and_dtype( - segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" - ) - - helper = LayerHelper("segment_min", **locals()) - out = helper.create_variable_for_type_inference(dtype=data.dtype) - summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op( - type="segment_pool", - inputs={"X": data, "SegmentIds": segment_ids}, - outputs={"Out": out, "SummedIds": summed_ids}, - attrs={"pooltype": "MIN"}, - ) - return out + helper = LayerHelper("segment_min", **locals()) + out = helper.create_variable_for_type_inference(dtype=data.dtype) + summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MIN"}, + ) + return out def segment_max(data, segment_ids, name=None): @@ -236,29 +222,24 @@ def segment_max(data, segment_ids, name=None): if in_dygraph_mode(): return _C_ops.segment_pool(data, segment_ids, "MAX")[0] - if _in_legacy_dygraph(): - out, tmp = _legacy_C_ops.segment_pool( - data, segment_ids, 'pooltype', "MAX" + else: + check_variable_and_dtype( + data, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "segment_pool", + ) + check_variable_and_dtype( + segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" ) - return out - check_variable_and_dtype( - data, - "X", - ("float32", "float64", "int32", "int64", "float16"), - "segment_pool", - ) - check_variable_and_dtype( - segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool" - ) - - helper = LayerHelper("segment_max", **locals()) - out = helper.create_variable_for_type_inference(dtype=data.dtype) - summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) - helper.append_op( - type="segment_pool", - inputs={"X": data, "SegmentIds": segment_ids}, - outputs={"Out": out, "SummedIds": summed_ids}, - attrs={"pooltype": "MAX"}, - ) - return out + helper = LayerHelper("segment_max", **locals()) + out = helper.create_variable_for_type_inference(dtype=data.dtype) + summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype) + helper.append_op( + type="segment_pool", + inputs={"X": data, "SegmentIds": segment_ids}, + outputs={"Out": out, "SummedIds": summed_ids}, + attrs={"pooltype": "MAX"}, + ) + return out diff --git a/python/paddle/geometric/message_passing/send_recv.py b/python/paddle/geometric/message_passing/send_recv.py index 76627e9827..5f3ca7afe3 100644 --- a/python/paddle/geometric/message_passing/send_recv.py +++ b/python/paddle/geometric/message_passing/send_recv.py @@ -14,13 +14,13 @@ import numpy as np -from paddle import _C_ops, _legacy_C_ops +from paddle import _C_ops from paddle.fluid.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, ) -from paddle.fluid.framework import Variable, _in_legacy_dygraph, in_dygraph_mode +from paddle.fluid.framework import Variable, in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper from .utils import ( @@ -118,68 +118,61 @@ def send_u_recv( # TODO(daisiming): Should we 
add judgement for out_size: max(dst_index) + 1. - if _in_legacy_dygraph(): - out_size = convert_out_size_to_list(out_size) - out, tmp = _legacy_C_ops.graph_send_recv( - x, - src_index, - dst_index, - None, - 'reduce_op', - reduce_op.upper(), - 'out_size', - out_size, - ) - return out if in_dygraph_mode(): out_size = convert_out_size_to_list(out_size) return _C_ops.send_u_recv( x, src_index, dst_index, reduce_op.upper(), out_size ) - - check_variable_and_dtype( - x, - "X", - ("float32", "float64", "int32", "int64", "float16"), - "graph_send_recv", - ) - check_variable_and_dtype( - src_index, "Src_index", ("int32", "int64"), "graph_send_recv" - ) - check_variable_and_dtype( - dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv" - ) - if out_size: - check_type( - out_size, - 'out_size', - (int, np.int32, np.int64, Variable), - 'graph_send_recv', + else: + check_variable_and_dtype( + x, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "graph_send_recv", + ) + check_variable_and_dtype( + src_index, "Src_index", ("int32", "int64"), "graph_send_recv" ) - if isinstance(out_size, Variable): - check_dtype( - out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_recv' + check_variable_and_dtype( + dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv" ) + if out_size: + check_type( + out_size, + 'out_size', + (int, np.int32, np.int64, Variable), + 'graph_send_recv', + ) + if isinstance(out_size, Variable): + check_dtype( + out_size.dtype, + 'out_size', + ['int32', 'int64'], + 'graph_send_recv', + ) - helper = LayerHelper("send_u_recv", **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - dst_count = helper.create_variable_for_type_inference( - dtype="int32", stop_gradient=True - ) + helper = LayerHelper("send_u_recv", **locals()) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + dst_count = helper.create_variable_for_type_inference( + dtype="int32", stop_gradient=True + ) - inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index} - attrs = {"reduce_op": reduce_op.upper()} - get_out_size_tensor_inputs( - inputs=inputs, attrs=attrs, out_size=out_size, op_type='graph_send_recv' - ) + inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index} + attrs = {"reduce_op": reduce_op.upper()} + get_out_size_tensor_inputs( + inputs=inputs, + attrs=attrs, + out_size=out_size, + op_type='graph_send_recv', + ) - helper.append_op( - type="graph_send_recv", - inputs=inputs, - outputs={"Out": out, "Dst_count": dst_count}, - attrs=attrs, - ) - return out + helper.append_op( + type="graph_send_recv", + inputs=inputs, + outputs={"Out": out, "Dst_count": dst_count}, + attrs=attrs, + ) + return out def send_ue_recv( @@ -302,86 +295,81 @@ def send_ue_recv( # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. 
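# Illustrative sketch, not part of this patch: the eager branch of
# send_u_recv refactored above gathers rows of x by src_index and
# reduces them into the slots named by dst_index.
import paddle

x = paddle.to_tensor([[0., 2., 3.], [1., 4., 5.], [2., 6., 7.]])
src_index = paddle.to_tensor([0, 1, 2, 0], dtype='int32')
dst_index = paddle.to_tensor([1, 2, 1, 0], dtype='int32')
out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
# out[0] == x[0]; out[1] == x[0] + x[2]; out[2] == x[1]
# i.e. [[0., 2., 3.], [2., 8., 10.], [1., 4., 5.]]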
- if _in_legacy_dygraph(): + if in_dygraph_mode(): out_size = convert_out_size_to_list(out_size) - out, tmp = _legacy_C_ops.graph_send_ue_recv( + return _C_ops.send_ue_recv( x, y, src_index, dst_index, - None, - 'message_op', message_op.upper(), - 'reduce_op', reduce_op.upper(), - 'out_size', out_size, ) - return out - if in_dygraph_mode(): - out_size = convert_out_size_to_list(out_size) - return _C_ops.send_ue_recv( + else: + check_variable_and_dtype( x, + "X", + ("float32", "float64", "int32", "int64", "float16"), + "graph_send_ue_recv", + ) + check_variable_and_dtype( y, - src_index, - dst_index, - message_op.upper(), - reduce_op.upper(), - out_size, + "Y", + ("float32", "float64", "int32", "int64", "float16"), + "graph_send_ue_recv", ) + check_variable_and_dtype( + src_index, "Src_index", ("int32", "int64"), "graph_send_ue_recv" + ) + check_variable_and_dtype( + dst_index, "Dst_index", ("int32", "int64"), "graph_send_ue_recv" + ) + if out_size: + check_type( + out_size, + 'out_size', + (int, np.int32, np.int64, Variable), + 'graph_send_ue_recv', + ) + if isinstance(out_size, Variable): + check_dtype( + out_size.dtype, + 'out_size', + ['int32', 'int64'], + 'graph_send_ue_recv', + ) - check_variable_and_dtype( - x, - "X", - ("float32", "float64", "int32", "int64", "float16"), - "graph_send_ue_recv", - ) - check_variable_and_dtype( - y, - "Y", - ("float32", "float64", "int32", "int64", "float16"), - "graph_send_ue_recv", - ) - check_variable_and_dtype( - src_index, "Src_index", ("int32", "int64"), "graph_send_ue_recv" - ) - check_variable_and_dtype( - dst_index, "Dst_index", ("int32", "int64"), "graph_send_ue_recv" - ) - if out_size: - check_type( - out_size, - 'out_size', - (int, np.int32, np.int64, Variable), - 'graph_send_ue_recv', + helper = LayerHelper("send_ue_recv", **locals()) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + dst_count = helper.create_variable_for_type_inference( + dtype="int32", stop_gradient=True ) - if isinstance(out_size, Variable): - check_dtype( - out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_ue_recv' + + inputs = { + "X": x, + "Y": y, + "Src_index": src_index, + "Dst_index": dst_index, + } + attrs = { + "message_op": message_op.upper(), + "reduce_op": reduce_op.upper(), + } + get_out_size_tensor_inputs( + inputs=inputs, + attrs=attrs, + out_size=out_size, + op_type='graph_send_ue_recv', ) - helper = LayerHelper("send_ue_recv", **locals()) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - dst_count = helper.create_variable_for_type_inference( - dtype="int32", stop_gradient=True - ) - - inputs = {"X": x, "Y": y, "Src_index": src_index, "Dst_index": dst_index} - attrs = {"message_op": message_op.upper(), "reduce_op": reduce_op.upper()} - get_out_size_tensor_inputs( - inputs=inputs, - attrs=attrs, - out_size=out_size, - op_type='graph_send_ue_recv', - ) - - helper.append_op( - type="graph_send_ue_recv", - inputs=inputs, - outputs={"Out": out, "Dst_count": dst_count}, - attrs=attrs, - ) - return out + helper.append_op( + type="graph_send_ue_recv", + inputs=inputs, + outputs={"Out": out, "Dst_count": dst_count}, + attrs=attrs, + ) + return out def send_uv(x, y, src_index, dst_index, message_op="add", name=None): @@ -466,43 +454,39 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None): if in_dygraph_mode(): return _C_ops.send_uv(x, y, src_index, dst_index, message_op.upper()) else: - if _in_legacy_dygraph(): - return _legacy_C_ops.graph_send_uv( - x, y, src_index, dst_index, "message_op", 
message_op.upper() - ) - else: - helper = LayerHelper("graph_send_uv", **locals()) - check_variable_and_dtype( - x, - 'x', - ['int32', 'int64', 'float32', 'float64', 'float16'], - 'graph_send_uv', - ) - check_variable_and_dtype( - y, - 'y', - ['int32', 'int64', 'float32', 'float64', 'float16'], - 'graph_send_uv', - ) - check_variable_and_dtype( - src_index, 'src_index', ['int32', 'int64'], 'graph_send_uv' - ) - check_variable_and_dtype( - dst_index, 'dst_index', ['int32', 'int64'], 'graph_send_uv' - ) - out = helper.create_variable_for_type_inference(dtype=x.dtype) - - inputs = { - 'x': x, - 'y': y, - 'src_index': src_index, - 'dst_index': dst_index, - } - attrs = {'message_op': message_op.upper()} - helper.append_op( - type="graph_send_uv", - inputs=inputs, - attrs=attrs, - outputs={"out": out}, - ) - return out + + helper = LayerHelper("graph_send_uv", **locals()) + check_variable_and_dtype( + x, + 'x', + ['int32', 'int64', 'float32', 'float64', 'float16'], + 'graph_send_uv', + ) + check_variable_and_dtype( + y, + 'y', + ['int32', 'int64', 'float32', 'float64', 'float16'], + 'graph_send_uv', + ) + check_variable_and_dtype( + src_index, 'src_index', ['int32', 'int64'], 'graph_send_uv' + ) + check_variable_and_dtype( + dst_index, 'dst_index', ['int32', 'int64'], 'graph_send_uv' + ) + out = helper.create_variable_for_type_inference(dtype=x.dtype) + + inputs = { + 'x': x, + 'y': y, + 'src_index': src_index, + 'dst_index': dst_index, + } + attrs = {'message_op': message_op.upper()} + helper.append_op( + type="graph_send_uv", + inputs=inputs, + attrs=attrs, + outputs={"out": out}, + ) + return out diff --git a/python/paddle/utils/cpp_extension/extension_utils.py b/python/paddle/utils/cpp_extension/extension_utils.py index 29a4deeb1c..4b3d5b9880 100644 --- a/python/paddle/utils/cpp_extension/extension_utils.py +++ b/python/paddle/utils/cpp_extension/extension_utils.py @@ -1001,7 +1001,7 @@ def _custom_api_content(op_name): """ import paddle.fluid.core as core from paddle.fluid.core import VarBase, CustomOpKernelContext - from paddle.fluid.framework import _non_static_mode, _dygraph_tracer, _in_legacy_dygraph, in_dygraph_mode + from paddle.fluid.framework import _dygraph_tracer, in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper def {op_name}({inputs}): @@ -1024,16 +1024,11 @@ def _custom_api_content(op_name): ctx.add_outputs(outs[out_name]) core.eager._run_custom_op(ctx, "{op_name}", True) else: - if _in_legacy_dygraph(): - for out_name in out_names: - outs[out_name] = VarBase() - _dygraph_tracer().trace_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs) - else: - helper = LayerHelper("{op_name}", **locals()) - for out_name in out_names: - outs[out_name] = helper.create_variable(dtype='float32') - - helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs) + helper = LayerHelper("{op_name}", **locals()) + for out_name in out_names: + outs[out_name] = helper.create_variable(dtype='float32') + + helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs) res = [outs[out_name] for out_name in out_names] diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py index 76872bda2f..2c69804d48 100755 --- a/python/paddle/vision/ops.py +++ b/python/paddle/vision/ops.py @@ -18,12 +18,7 @@ from paddle import _C_ops, _legacy_C_ops from paddle.tensor.math import _add_with_axis from ..fluid.data_feeder import check_type, check_variable_and_dtype -from ..fluid.framework import ( - Variable, - _in_legacy_dygraph, - _non_static_mode, - 
in_dygraph_mode, -) +from ..fluid.framework import Variable, in_dygraph_mode from ..fluid.initializer import Normal from ..fluid.layer_helper import LayerHelper from ..fluid.layers import utils @@ -211,76 +206,56 @@ def yolo_loss( ) return loss - if _non_static_mode(): - loss, _, _ = _legacy_C_ops.yolov3_loss( - x, - gt_box, - gt_label, - gt_score, - 'anchors', - anchors, - 'anchor_mask', - anchor_mask, - 'class_num', - class_num, - 'ignore_thresh', - ignore_thresh, - 'downsample_ratio', - downsample_ratio, - 'use_label_smooth', - use_label_smooth, - 'scale_x_y', - scale_x_y, + else: + helper = LayerHelper('yolov3_loss', **locals()) + + check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_loss') + check_variable_and_dtype( + gt_box, 'gt_box', ['float32', 'float64'], 'yolo_loss' ) - return loss + check_variable_and_dtype(gt_label, 'gt_label', 'int32', 'yolo_loss') + check_type(anchors, 'anchors', (list, tuple), 'yolo_loss') + check_type(anchor_mask, 'anchor_mask', (list, tuple), 'yolo_loss') + check_type(class_num, 'class_num', int, 'yolo_loss') + check_type(ignore_thresh, 'ignore_thresh', float, 'yolo_loss') + check_type(use_label_smooth, 'use_label_smooth', bool, 'yolo_loss') - helper = LayerHelper('yolov3_loss', **locals()) + loss = helper.create_variable_for_type_inference(dtype=x.dtype) - check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_loss') - check_variable_and_dtype( - gt_box, 'gt_box', ['float32', 'float64'], 'yolo_loss' - ) - check_variable_and_dtype(gt_label, 'gt_label', 'int32', 'yolo_loss') - check_type(anchors, 'anchors', (list, tuple), 'yolo_loss') - check_type(anchor_mask, 'anchor_mask', (list, tuple), 'yolo_loss') - check_type(class_num, 'class_num', int, 'yolo_loss') - check_type(ignore_thresh, 'ignore_thresh', float, 'yolo_loss') - check_type(use_label_smooth, 'use_label_smooth', bool, 'yolo_loss') - - loss = helper.create_variable_for_type_inference(dtype=x.dtype) - - objectness_mask = helper.create_variable_for_type_inference(dtype='int32') - gt_match_mask = helper.create_variable_for_type_inference(dtype='int32') - - inputs = { - "X": x, - "GTBox": gt_box, - "GTLabel": gt_label, - } - if gt_score is not None: - inputs["GTScore"] = gt_score - - attrs = { - "anchors": anchors, - "anchor_mask": anchor_mask, - "class_num": class_num, - "ignore_thresh": ignore_thresh, - "downsample_ratio": downsample_ratio, - "use_label_smooth": use_label_smooth, - "scale_x_y": scale_x_y, - } - - helper.append_op( - type='yolov3_loss', - inputs=inputs, - outputs={ - 'Loss': loss, - 'ObjectnessMask': objectness_mask, - 'GTMatchMask': gt_match_mask, - }, - attrs=attrs, - ) - return loss + objectness_mask = helper.create_variable_for_type_inference( + dtype='int32' + ) + gt_match_mask = helper.create_variable_for_type_inference(dtype='int32') + + inputs = { + "X": x, + "GTBox": gt_box, + "GTLabel": gt_label, + } + if gt_score is not None: + inputs["GTScore"] = gt_score + + attrs = { + "anchors": anchors, + "anchor_mask": anchor_mask, + "class_num": class_num, + "ignore_thresh": ignore_thresh, + "downsample_ratio": downsample_ratio, + "use_label_smooth": use_label_smooth, + "scale_x_y": scale_x_y, + } + + helper.append_op( + type='yolov3_loss', + inputs=inputs, + outputs={ + 'Loss': loss, + 'ObjectnessMask': objectness_mask, + 'GTMatchMask': gt_match_mask, + }, + attrs=attrs, + ) + return loss def yolo_box( @@ -409,64 +384,42 @@ def yolo_box( ) return boxes, scores - if _non_static_mode(): - boxes, scores = _legacy_C_ops.yolo_box( - x, - img_size, - 'anchors', - 
anchors, - 'class_num', - class_num, - 'conf_thresh', - conf_thresh, - 'downsample_ratio', - downsample_ratio, - 'clip_bbox', - clip_bbox, - 'scale_x_y', - scale_x_y, - 'iou_aware', - iou_aware, - 'iou_aware_factor', - iou_aware_factor, + else: + helper = LayerHelper('yolo_box', **locals()) + + check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_box') + check_variable_and_dtype(img_size, 'img_size', 'int32', 'yolo_box') + check_type(anchors, 'anchors', (list, tuple), 'yolo_box') + check_type(conf_thresh, 'conf_thresh', float, 'yolo_box') + + boxes = helper.create_variable_for_type_inference(dtype=x.dtype) + scores = helper.create_variable_for_type_inference(dtype=x.dtype) + + attrs = { + "anchors": anchors, + "class_num": class_num, + "conf_thresh": conf_thresh, + "downsample_ratio": downsample_ratio, + "clip_bbox": clip_bbox, + "scale_x_y": scale_x_y, + "iou_aware": iou_aware, + "iou_aware_factor": iou_aware_factor, + } + + helper.append_op( + type='yolo_box', + inputs={ + "X": x, + "ImgSize": img_size, + }, + outputs={ + 'Boxes': boxes, + 'Scores': scores, + }, + attrs=attrs, ) return boxes, scores - helper = LayerHelper('yolo_box', **locals()) - - check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_box') - check_variable_and_dtype(img_size, 'img_size', 'int32', 'yolo_box') - check_type(anchors, 'anchors', (list, tuple), 'yolo_box') - check_type(conf_thresh, 'conf_thresh', float, 'yolo_box') - - boxes = helper.create_variable_for_type_inference(dtype=x.dtype) - scores = helper.create_variable_for_type_inference(dtype=x.dtype) - - attrs = { - "anchors": anchors, - "class_num": class_num, - "conf_thresh": conf_thresh, - "downsample_ratio": downsample_ratio, - "clip_bbox": clip_bbox, - "scale_x_y": scale_x_y, - "iou_aware": iou_aware, - "iou_aware_factor": iou_aware_factor, - } - - helper.append_op( - type='yolo_box', - inputs={ - "X": x, - "ImgSize": img_size, - }, - outputs={ - 'Boxes': boxes, - 'Scores': scores, - }, - attrs=attrs, - ) - return boxes, scores - def prior_box( input, @@ -587,31 +540,6 @@ def prior_box( ) return box, var - if _in_legacy_dygraph(): - attrs = ( - 'min_sizes', - min_sizes, - 'aspect_ratios', - aspect_ratios, - 'variances', - variance, - 'flip', - flip, - 'clip', - clip, - 'step_w', - steps[0], - 'step_h', - steps[1], - 'offset', - offset, - 'min_max_aspect_ratios_order', - min_max_aspect_ratios_order, - ) - if cur_max_sizes is not None: - attrs += ('max_sizes', cur_max_sizes) - box, var = _legacy_C_ops.prior_box(input, image, *attrs) - return box, var else: attrs = { 'min_sizes': min_sizes, @@ -783,36 +711,6 @@ def box_coder( raise TypeError("Input prior_box_var must be Variable or list") return output_box - if _in_legacy_dygraph(): - if isinstance(prior_box_var, Variable): - output_box = _legacy_C_ops.box_coder( - prior_box, - prior_box_var, - target_box, - "code_type", - code_type, - "box_normalized", - box_normalized, - "axis", - axis, - ) - elif isinstance(prior_box_var, list): - output_box = _legacy_C_ops.box_coder( - prior_box, - None, - target_box, - "code_type", - code_type, - "box_normalized", - box_normalized, - "axis", - axis, - "variance", - prior_box_var, - ) - else: - raise TypeError("Input prior_box_var must be Variable or list") - return output_box else: helper = LayerHelper("box_coder", **locals()) @@ -989,35 +887,6 @@ def deform_conv2d( out = _add_with_axis(pre_bias, bias, axis=1) else: out = pre_bias - elif _in_legacy_dygraph(): - attrs = ( - 'strides', - stride, - 'paddings', - padding, - 'dilations', - dilation, - 
-            'deformable_groups',
-            deformable_groups,
-            'groups',
-            groups,
-            'im2col_step',
-            1,
-        )
-        if use_deform_conv2d_v1:
-            op_type = 'deformable_conv_v1'
-            pre_bias = getattr(_legacy_C_ops, op_type)(
-                x, offset, weight, *attrs
-            )
-        else:
-            op_type = 'deformable_conv'
-            pre_bias = getattr(_legacy_C_ops, op_type)(
-                x, offset, mask, weight, *attrs
-            )
-        if bias is not None:
-            out = _add_with_axis(pre_bias, bias, axis=1)
-        else:
-            out = pre_bias
     else:
         check_variable_and_dtype(
             x, "x", ['float32', 'float64'], 'deform_conv2d'
         )
@@ -1370,31 +1239,6 @@ def distribute_fpn_proposals(
         )
         return multi_rois, restore_ind, rois_num_per_level
 
-    if _non_static_mode():
-        assert (
-            rois_num is not None
-        ), "rois_num should not be None in dygraph mode."
-        attrs = (
-            'min_level',
-            min_level,
-            'max_level',
-            max_level,
-            'refer_level',
-            refer_level,
-            'refer_scale',
-            refer_scale,
-            'pixel_offset',
-            pixel_offset,
-        )
-        (
-            multi_rois,
-            restore_ind,
-            rois_num_per_level,
-        ) = _legacy_C_ops.distribute_fpn_proposals(
-            fpn_rois, rois_num, num_lvl, num_lvl, *attrs
-        )
-        return multi_rois, restore_ind, rois_num_per_level
-
     else:
         check_variable_and_dtype(
            fpn_rois,
@@ -1472,19 +1316,19 @@ def read_file(filename, name=None):
             # [142915]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.read_file('filename', filename)
+    else:
+        inputs = dict()
+        attrs = {'filename': filename}
 
-    inputs = dict()
-    attrs = {'filename': filename}
-
-    helper = LayerHelper("read_file", **locals())
-    out = helper.create_variable_for_type_inference('uint8')
-    helper.append_op(
-        type="read_file", inputs=inputs, attrs=attrs, outputs={"Out": out}
-    )
+        helper = LayerHelper("read_file", **locals())
+        out = helper.create_variable_for_type_inference('uint8')
+        helper.append_op(
+            type="read_file", inputs=inputs, attrs=attrs, outputs={"Out": out}
+        )
 
-    return out
+        return out
 
 
 def decode_jpeg(x, mode='unchanged', name=None):
@@ -1524,19 +1368,17 @@ def decode_jpeg(x, mode='unchanged', name=None):
     """
     if in_dygraph_mode():
         return _C_ops.decode_jpeg(x, mode, _current_expected_place())
-    elif _non_static_mode():
-        return _legacy_C_ops.decode_jpeg(x, "mode", mode)
-
-    inputs = {'X': x}
-    attrs = {"mode": mode}
+    else:
+        inputs = {'X': x}
+        attrs = {"mode": mode}
 
-    helper = LayerHelper("decode_jpeg", **locals())
-    out = helper.create_variable_for_type_inference('uint8')
-    helper.append_op(
-        type="decode_jpeg", inputs=inputs, attrs=attrs, outputs={"Out": out}
-    )
+        helper = LayerHelper("decode_jpeg", **locals())
+        out = helper.create_variable_for_type_inference('uint8')
+        helper.append_op(
+            type="decode_jpeg", inputs=inputs, attrs=attrs, outputs={"Out": out}
+        )
 
-    return out
+        return out
 
 
 def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
@@ -1594,36 +1436,22 @@ def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
             output_channels,
             spatial_scale,
         )
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.psroi_pool(
-            x,
-            boxes,
-            boxes_num,
-            "output_channels",
-            output_channels,
-            "spatial_scale",
-            spatial_scale,
-            "pooled_height",
-            pooled_height,
-            "pooled_width",
-            pooled_width,
+    else:
+        helper = LayerHelper('psroi_pool', **locals())
+        dtype = helper.input_dtype()
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(
+            type='psroi_pool',
+            inputs={'X': x, 'ROIs': boxes},
+            outputs={'Out': out},
+            attrs={
+                'output_channels': output_channels,
+                'spatial_scale': spatial_scale,
+                'pooled_height': pooled_height,
+                'pooled_width': pooled_width,
+            },
         )
-
-    helper = LayerHelper('psroi_pool', **locals())
-    dtype = helper.input_dtype()
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type='psroi_pool',
-        inputs={'X': x, 'ROIs': boxes},
-        outputs={'Out': out},
-        attrs={
-            'output_channels': output_channels,
-            'spatial_scale': spatial_scale,
-            'pooled_height': pooled_height,
-            'pooled_width': pooled_width,
-        },
-    )
-    return out
+        return out
 
 
 class PSRoIPool(Layer):
@@ -1721,23 +1549,6 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
         return _C_ops.roi_pool(
             x, boxes, boxes_num, pooled_height, pooled_width, spatial_scale
         )
-    if _in_legacy_dygraph():
-        assert (
-            boxes_num is not None
-        ), "boxes_num should not be None in dygraph mode."
-        pool_out, argmaxes = _legacy_C_ops.roi_pool(
-            x,
-            boxes,
-            boxes_num,
-            "pooled_height",
-            pooled_height,
-            "pooled_width",
-            pooled_width,
-            "spatial_scale",
-            spatial_scale,
-        )
-        return pool_out
-
     else:
         check_variable_and_dtype(x, 'x', ['float32'], 'roi_pool')
         check_variable_and_dtype(boxes, 'boxes', ['float32'], 'roi_pool')
@@ -1903,27 +1714,6 @@ def roi_align(
             sampling_ratio,
             aligned,
         )
-    if _in_legacy_dygraph():
-        assert (
-            boxes_num is not None
-        ), "boxes_num should not be None in dygraph mode."
-        align_out = _legacy_C_ops.roi_align(
-            x,
-            boxes,
-            boxes_num,
-            "pooled_height",
-            pooled_height,
-            "pooled_width",
-            pooled_width,
-            "spatial_scale",
-            spatial_scale,
-            "sampling_ratio",
-            sampling_ratio,
-            "aligned",
-            aligned,
-        )
-        return align_out
-
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'roi_align')
         check_variable_and_dtype(
@@ -2143,18 +1933,16 @@ def nms(
     if in_dygraph_mode():
         return _C_ops.nms(boxes, iou_threshold)
 
-    if _non_static_mode():
-        return _legacy_C_ops.nms(boxes, 'iou_threshold', iou_threshold)
-
-    helper = LayerHelper('nms', **locals())
-    out = helper.create_variable_for_type_inference('int64')
-    helper.append_op(
-        type='nms',
-        inputs={'Boxes': boxes},
-        outputs={'KeepBoxesIdxs': out},
-        attrs={'iou_threshold': iou_threshold},
-    )
-    return out
+    else:
+        helper = LayerHelper('nms', **locals())
+        out = helper.create_variable_for_type_inference('int64')
+        helper.append_op(
+            type='nms',
+            inputs={'Boxes': boxes},
+            outputs={'KeepBoxesIdxs': out},
+            attrs={'iou_threshold': iou_threshold},
+        )
+        return out
 
     if scores is None:
         return _nms(boxes, iou_threshold)
@@ -2222,7 +2010,7 @@ def nms(
     if top_k is None:
         return keep_boxes_idxs[sorted_sub_indices]
 
-    if _non_static_mode():
+    if in_dygraph_mode():
         top_k = shape if shape < top_k else top_k
         _, topk_sub_indices = paddle.topk(scores[keep_boxes_idxs], top_k)
         return keep_boxes_idxs[topk_sub_indices]
@@ -2331,92 +2119,70 @@ def generate_proposals(
         )
         return rpn_rois, rpn_roi_probs, rpn_rois_num
 
-    elif _non_static_mode():
-        assert (
-            return_rois_num
-        ), "return_rois_num should be True in dygraph mode."
-        attrs = (
-            'pre_nms_topN',
-            pre_nms_top_n,
-            'post_nms_topN',
-            post_nms_top_n,
-            'nms_thresh',
-            nms_thresh,
-            'min_size',
-            min_size,
-            'eta',
-            eta,
-            'pixel_offset',
-            pixel_offset,
+    else:
+        helper = LayerHelper('generate_proposals_v2', **locals())
+
+        check_variable_and_dtype(
+            scores, 'scores', ['float32'], 'generate_proposals_v2'
         )
-        (
-            rpn_rois,
-            rpn_roi_probs,
-            rpn_rois_num,
-        ) = _legacy_C_ops.generate_proposals_v2(
-            scores, bbox_deltas, img_size, anchors, variances, *attrs
+        check_variable_and_dtype(
+            bbox_deltas, 'bbox_deltas', ['float32'], 'generate_proposals_v2'
+        )
+        check_variable_and_dtype(
+            img_size,
+            'img_size',
+            ['float32', 'float64'],
+            'generate_proposals_v2',
+        )
+        check_variable_and_dtype(
+            anchors, 'anchors', ['float32'], 'generate_proposals_v2'
+        )
+        check_variable_and_dtype(
+            variances, 'variances', ['float32'], 'generate_proposals_v2'
        )
-        return rpn_rois, rpn_roi_probs, rpn_rois_num
-
-    helper = LayerHelper('generate_proposals_v2', **locals())
-
-    check_variable_and_dtype(
-        scores, 'scores', ['float32'], 'generate_proposals_v2'
-    )
-    check_variable_and_dtype(
-        bbox_deltas, 'bbox_deltas', ['float32'], 'generate_proposals_v2'
-    )
-    check_variable_and_dtype(
-        img_size, 'img_size', ['float32', 'float64'], 'generate_proposals_v2'
-    )
-    check_variable_and_dtype(
-        anchors, 'anchors', ['float32'], 'generate_proposals_v2'
-    )
-    check_variable_and_dtype(
-        variances, 'variances', ['float32'], 'generate_proposals_v2'
-    )
+        rpn_rois = helper.create_variable_for_type_inference(
+            dtype=bbox_deltas.dtype
+        )
+        rpn_roi_probs = helper.create_variable_for_type_inference(
+            dtype=scores.dtype
+        )
+        outputs = {
+            'RpnRois': rpn_rois,
+            'RpnRoiProbs': rpn_roi_probs,
+        }
+        if return_rois_num:
+            rpn_rois_num = helper.create_variable_for_type_inference(
+                dtype='int32'
+            )
+            rpn_rois_num.stop_gradient = True
+            outputs['RpnRoisNum'] = rpn_rois_num
 
-    rpn_rois = helper.create_variable_for_type_inference(
-        dtype=bbox_deltas.dtype
-    )
-    rpn_roi_probs = helper.create_variable_for_type_inference(
-        dtype=scores.dtype
-    )
-    outputs = {
-        'RpnRois': rpn_rois,
-        'RpnRoiProbs': rpn_roi_probs,
-    }
-    if return_rois_num:
-        rpn_rois_num = helper.create_variable_for_type_inference(dtype='int32')
-        rpn_rois_num.stop_gradient = True
-        outputs['RpnRoisNum'] = rpn_rois_num
-
-    helper.append_op(
-        type="generate_proposals_v2",
-        inputs={
-            'Scores': scores,
-            'BboxDeltas': bbox_deltas,
-            'ImShape': img_size,
-            'Anchors': anchors,
-            'Variances': variances,
-        },
-        attrs={
-            'pre_nms_topN': pre_nms_top_n,
-            'post_nms_topN': post_nms_top_n,
-            'nms_thresh': nms_thresh,
-            'min_size': min_size,
-            'eta': eta,
-            'pixel_offset': pixel_offset,
-        },
-        outputs=outputs,
-    )
-    rpn_rois.stop_gradient = True
-    rpn_roi_probs.stop_gradient = True
-    if not return_rois_num:
-        rpn_rois_num = None
+        helper.append_op(
+            type="generate_proposals_v2",
+            inputs={
+                'Scores': scores,
+                'BboxDeltas': bbox_deltas,
+                'ImShape': img_size,
+                'Anchors': anchors,
+                'Variances': variances,
+            },
+            attrs={
+                'pre_nms_topN': pre_nms_top_n,
+                'post_nms_topN': post_nms_top_n,
+                'nms_thresh': nms_thresh,
+                'min_size': min_size,
+                'eta': eta,
+                'pixel_offset': pixel_offset,
+            },
+            outputs=outputs,
+        )
+        rpn_rois.stop_gradient = True
+        rpn_roi_probs.stop_gradient = True
+        if not return_rois_num:
+            rpn_rois_num = None
 
-    return rpn_rois, rpn_roi_probs, rpn_rois_num
+        return rpn_rois, rpn_roi_probs, rpn_rois_num
 
 
 def matrix_nms(
@@ -2535,31 +2301,6 @@ def matrix_nms(
         if not return_rois_num:
             rois_num = None
         return out, rois_num, index
-    elif _in_legacy_dygraph():
-        attrs = (
-            'background_label',
-            background_label,
-            'score_threshold',
-            score_threshold,
-            'post_threshold',
-            post_threshold,
-            'nms_top_k',
-            nms_top_k,
-            'gaussian_sigma',
-            gaussian_sigma,
-            'use_gaussian',
-            use_gaussian,
-            'keep_top_k',
-            keep_top_k,
-            'normalized',
-            normalized,
-        )
-        out, index, rois_num = _legacy_C_ops.matrix_nms(bboxes, scores, *attrs)
-        if not return_index:
-            index = None
-        if not return_rois_num:
-            rois_num = None
-        return out, rois_num, index
     else:
         helper = LayerHelper('matrix_nms', **locals())
         output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
--
GitLab