Unverified · Commit 1c7ae954 · authored by 姜永久 · committed by GitHub

rm in_legacy part8 (#49386)

* rm legacy layers part6

* rm non_static_mode

* modify non_static

* minor change

* rm loss

* rm in_legacy part8

* minor change
Parent 0c52e8a8
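Orientation note: the pattern repeated throughout this diff is that each fluid API previously dispatched three ways — new dygraph via `_C_ops`, legacy dygraph via `_legacy_C_ops` behind `_in_legacy_dygraph()` / `_non_static_mode()`, and static graph via `LayerHelper`. This PR deletes the legacy branch and folds the static path under an explicit `else:`. A minimal sketch of the resulting two-way shape, modeled on the `merge_selected_rows` hunk below (imports listed for context; illustrative, not part of the diff):

    from paddle import _C_ops
    from paddle.fluid.framework import in_dygraph_mode
    from paddle.fluid.layer_helper import LayerHelper

    def merge_selected_rows(x, name=None):
        if in_dygraph_mode():
            # Eager execution: call the final-state C++ op directly.
            return _C_ops.merge_selected_rows(x)
        else:
            # Static graph: append the op to the current program.
            helper = LayerHelper("merge_selected_rows", **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type="merge_selected_rows",
                inputs={"X": x},
                attrs={},
                outputs={"Out": out},
            )
            return out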
@@ -13,7 +13,7 @@
 # limitations under the License.
 from ..layer_helper import LayerHelper, unique_name
-from ..framework import Variable, in_dygraph_mode, _in_legacy_dygraph
+from ..framework import Variable, in_dygraph_mode
 import paddle
 from paddle import _C_ops, _legacy_C_ops
@@ -120,42 +120,31 @@ def _c_allgather(x, nranks, ring_id=0, use_calc_stream=False):
         task = group.process_group.all_gather(x, out)
         task.wait()
         return out
-
-    if _in_legacy_dygraph():
-        attrs = (
-            'nranks',
-            nranks,
-            'ring_id',
-            ring_id,
-            'use_calc_stream',
-            use_calc_stream,
-        )
-        return _legacy_C_ops.c_allgather(x, *attrs)
-
-    helper = LayerHelper(op_type, **locals())
-    out_shape = list(x.shape[:])
-    if out_shape[0] > 0:
-        out_shape[0] *= nranks
-    out = helper.create_variable(
-        name=unique_name.generate_with_ignorable_key(
-            '.'.join([x.name, op_type])
-        ),
-        shape=out_shape,
-        dtype=x.dtype,
-        type=x.type,
-        persistable=x.persistable,
-    )
-    helper.append_op(
-        type=op_type,
-        inputs={'X': [x]},
-        outputs={'Out': [out]},
-        attrs={
-            'nranks': nranks,
-            'ring_id': ring_id,
-            'use_calc_stream': use_calc_stream,
-        },
-    )
-    return out
+    else:
+        helper = LayerHelper(op_type, **locals())
+        out_shape = list(x.shape[:])
+        if out_shape[0] > 0:
+            out_shape[0] *= nranks
+        out = helper.create_variable(
+            name=unique_name.generate_with_ignorable_key(
+                '.'.join([x.name, op_type])
+            ),
+            shape=out_shape,
+            dtype=x.dtype,
+            type=x.type,
+            persistable=x.persistable,
+        )
+        helper.append_op(
+            type=op_type,
+            inputs={'X': [x]},
+            outputs={'Out': [out]},
+            attrs={
+                'nranks': nranks,
+                'ring_id': ring_id,
+                'use_calc_stream': use_calc_stream,
+            },
+        )
+        return out
 
 
 def _c_reducescatter(x, nranks, ring_id=0, use_calc_stream=False):
......
@@ -21,9 +21,7 @@ from ..framework import (
     Program,
     Variable,
     Operator,
-    _non_static_mode,
     static_only,
-    _in_legacy_dygraph,
     in_dygraph_mode,
 )
 from ..layer_helper import LayerHelper, unique_name
@@ -1154,7 +1152,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
             "but given shape as {0}.".format(list(pre_cond.shape))
         )
 
-    if _non_static_mode():
+    if in_dygraph_mode():
         now_cond = pre_cond.numpy()[0]
         while now_cond:
             output_vars = body(*loop_vars)
@@ -1168,33 +1166,33 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
             now_cond = cond(*output_vars).numpy()[0]
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars
-
-    while_loop_block = While(pre_cond, is_test, name)
-    has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
-    with while_loop_block.block():
-        # If a variable with mutable type is included in loop_vars, like `dict/list`,
-        # modifying it in the body function will cause origin variable to be modified
-        # synchronously. This will raise an assignment error out of while block.
-        # Here we make a copy of the mutable vars to avoid this problem.
-        if has_mutable_vars_in_loop:
-            new_loop_vars = copy_mutable_vars(loop_vars)
-            output_vars = body(*new_loop_vars)
-        else:
-            output_vars = body(*loop_vars)
-        if not isinstance(output_vars, (list, tuple)):
-            output_vars = [output_vars]
-        try:
-            loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
-            assert_same_structure(output_vars, loop_vars, check_types=False)
-        except ValueError as e:
-            raise ValueError(
-                "body in while_loop should return the same arity "
-                "(length and structure) as loop_vars: {0}".format(e)
-            )
-        now_cond = cond(*output_vars)
-        map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
-        assign(now_cond, pre_cond)
-    return loop_vars
+    else:
+        while_loop_block = While(pre_cond, is_test, name)
+        has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
+        with while_loop_block.block():
+            # If a variable with mutable type is included in loop_vars, like `dict/list`,
+            # modifying it in the body function will cause origin variable to be modified
+            # synchronously. This will raise an assignment error out of while block.
+            # Here we make a copy of the mutable vars to avoid this problem.
+            if has_mutable_vars_in_loop:
+                new_loop_vars = copy_mutable_vars(loop_vars)
+                output_vars = body(*new_loop_vars)
+            else:
+                output_vars = body(*loop_vars)
+            if not isinstance(output_vars, (list, tuple)):
+                output_vars = [output_vars]
+            try:
+                loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
+                assert_same_structure(output_vars, loop_vars, check_types=False)
+            except ValueError as e:
+                raise ValueError(
+                    "body in while_loop should return the same arity "
+                    "(length and structure) as loop_vars: {0}".format(e)
+                )
+            now_cond = cond(*output_vars)
+            map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
+            assign(now_cond, pre_cond)
+        return loop_vars
 
 
 # (TODO: Mine) There exists dependency. It will be removed later.
......
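After this change, `while_loop` has exactly two behaviors: eager Python iteration in dygraph mode (the `numpy()[0]` branch above), and a `While` block appended to the program in static mode. A minimal usage sketch (assuming Paddle 2.x, where `paddle.static.nn.while_loop` is the public entry to this code):

    import paddle

    def cond(i, ten):
        return i < ten

    def body(i, ten):
        return [i + 1, ten]

    i = paddle.full(shape=[1], fill_value=0, dtype='int64')
    ten = paddle.full(shape=[1], fill_value=10, dtype='int64')
    # In dygraph mode this loops eagerly; under paddle.enable_static()
    # the same call builds a While block into the program instead.
    i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
    print(i)  # Tensor holding 10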
@@ -24,13 +24,10 @@ from ..framework import (
     Variable,
     core,
     convert_np_dtype_to_dtype_,
-    _non_static_mode,
     in_dygraph_mode,
-    _in_legacy_dygraph,
 )
 from ..layer_helper import LayerHelper
 from ..data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
 from paddle import _C_ops, _legacy_C_ops
 
 __all__ = [
@@ -276,7 +273,7 @@ def generate_activation_fn(op_type):
             return op(x)
         # TODO(dev): Because some ops' yaml has not been migrated.
         # Replace it with _in_legacy_dygraph while all yaml work is done.
-        if _non_static_mode():
+        if in_dygraph_mode() and hasattr(_legacy_C_ops, op_type):
             op = getattr(_legacy_C_ops, op_type)
             return op(x)
@@ -327,15 +324,16 @@ def generate_inplace_fn(inplace_op_type):
     origin_op_type = inplace_op_type[:-1]
 
     def func(x, name=None):
-        if _non_static_mode():
+        if in_dygraph_mode():
             op = getattr(_legacy_C_ops, inplace_op_type)
             return op(x)
-        warnings.warn(
-            "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
-                inplace_op_type, origin_op_type
+        else:
+            warnings.warn(
+                "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
+                    inplace_op_type, origin_op_type
+                )
             )
-        )
-        return generate_activation_fn(origin_op_type)(x, name)
+            return generate_activation_fn(origin_op_type)(x, name)
 
     func.__name__ = inplace_op_type
     func.__doc__ = """
......
@@ -27,9 +27,14 @@ import paddle
 from . import control_flow
 from . import nn
 from . import tensor
-from ..framework import default_main_program, Parameter, unique_name, name_scope
+from ..framework import (
+    default_main_program,
+    Parameter,
+    unique_name,
+    name_scope,
+    in_dygraph_mode,
+)
 from ..framework import Variable
-from ..framework import _non_static_mode
 from ..dygraph import learning_rate_scheduler as imperate_lr
 from ..data_feeder import check_variable_and_dtype, check_type
@@ -99,7 +104,7 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0):
             learning_rate)
     """
     with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
             decay = imperate_lr.NoamDecay(
                 d_model, warmup_steps, learning_rate=learning_rate
             )
@@ -160,7 +165,7 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
     with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
             decay = imperate_lr.ExponentialDecay(
                 learning_rate, decay_steps, decay_rate, staircase
             )
@@ -222,7 +227,7 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
     """
     with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
             decay = imperate_lr.NaturalExpDecay(
                 learning_rate, decay_steps, decay_rate, staircase
             )
@@ -282,7 +287,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
             staircase=True))
     """
     with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
             decay = imperate_lr.InverseTimeDecay(
                 learning_rate, decay_steps, decay_rate, staircase
             )
@@ -337,7 +342,7 @@ def polynomial_decay(
     """
     with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
             decay = imperate_lr.PolynomialDecay(
                 learning_rate, decay_steps, end_learning_rate, power, cycle
             )
@@ -414,7 +419,7 @@ def piecewise_decay(boundaries, values):
         if len(values) - len(boundaries) != 1:
             raise ValueError("len(values) - len(boundaries) should be 1")
 
-        if _non_static_mode():
+        if in_dygraph_mode():
             decay = imperate_lr.PiecewiseDecay(boundaries, values, 0)
             return decay
         else:
@@ -488,7 +493,7 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
     )
     with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
             decay = imperate_lr.CosineDecay(
                 learning_rate, step_each_epoch, epochs
             )
@@ -569,7 +574,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
     linear_step = float(end_lr) - float(start_lr)
     with default_main_program()._lr_schedule_guard():
-        if _non_static_mode():
+        if in_dygraph_mode():
             lr = imperate_lr.LinearLrWarmup(
                 learning_rate, warmup_steps, start_lr, end_lr
             )
......
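All of these schedulers now branch only on `in_dygraph_mode()`: dygraph returns an imperative scheduler object from `imperate_lr`, while static mode emits decay ops into the program under `_lr_schedule_guard()`. A usage sketch of the static path (assuming the fluid-era API surface, e.g. `paddle.fluid.layers.noam_decay`):

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()
    # Builds the Noam decay computation into the current program's
    # learning-rate schedule scope and returns the LR variable.
    lr = fluid.layers.noam_decay(d_model=512, warmup_steps=4000, learning_rate=1.0)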
@@ -22,19 +22,16 @@ import numpy as np
 import paddle
 from ..layer_helper import LayerHelper
-from paddle.fluid.framework import _in_legacy_dygraph
 from ..initializer import Normal, Constant
 from ..framework import (
     Variable,
     OpProtoHolder,
-    _non_static_mode,
     dygraph_only,
     _dygraph_tracer,
     default_main_program,
     _varbase_creator,
     static_only,
     _global_flags,
-    _in_legacy_dygraph,
     in_dygraph_mode,
 )
 from ..framework import _current_expected_place
@@ -128,10 +125,6 @@ def _elementwise_op_in_dygraph(
             OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
         )
         out = op(x, y)
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
-
     return dygraph_utils._append_activation_in_dygraph(
         out, act, use_mkldnn=use_mkldnn
     )
@@ -794,26 +787,25 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.sum(input, dim, None, keep_dim)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_sum(
-            input, 'dim', dim, 'keep_dim', keep_dim, 'reduce_all', reduce_all
-        )
-    attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
-    check_variable_and_dtype(
-        input,
-        'input',
-        ['float16', 'float32', 'float64', 'int32', 'int64'],
-        'reduce_sum',
-    )
-    helper = LayerHelper('reduce_sum', **locals())
-    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-    helper.append_op(
-        type='reduce_sum',
-        inputs={'X': input},
-        outputs={'Out': out},
-        attrs=attrs,
-    )
-    return out
+    else:
+        attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
+        check_variable_and_dtype(
+            input,
+            'input',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'reduce_sum',
+        )
+        helper = LayerHelper('reduce_sum', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+        helper.append_op(
+            type='reduce_sum',
+            inputs={'X': input},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out
 
 
 def autoincreased_step_counter(counter_name=None, begin=1, step=1):
@@ -895,7 +887,7 @@ def unsqueeze(input, axes, name=None):
         y = fluid.layers.unsqueeze(input=x, axes=[1])
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         if isinstance(axes, int):
             axes = [axes]
         elif isinstance(axes, Variable):
@@ -905,98 +897,106 @@ def unsqueeze(input, axes, name=None):
             item.numpy().item(0) if isinstance(item, Variable) else item
             for item in axes
         ]
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.unsqueeze2(input, 'axes', axes)
-            return out
         return _C_ops.unsqueeze(input, axes)
-
-    check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
-    check_variable_and_dtype(
-        input,
-        'input',
-        [
-            'float16',
-            'float32',
-            'float64',
-            'bool',
-            'int8',
-            'int16',
-            'int32',
-            'int64',
-            'complex64',
-            'complex128',
-        ],
-        'unsqueeze',
-    )
-    helper = LayerHelper("unsqueeze2", **locals())
-    inputs = {"X": input}
-    attrs = {}
-
-    if isinstance(axes, int):
-        axes = [axes]
-    if isinstance(axes, Variable):
-        axes.stop_gradient = True
-        inputs["AxesTensor"] = axes
-    elif isinstance(axes, (list, tuple)):
-        if utils._contain_var(axes):
-            inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
-        else:
-            attrs["axes"] = axes
-
-    out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
-    helper.append_op(
-        type="unsqueeze2",
-        inputs=inputs,
-        attrs=attrs,
-        outputs={"Out": out, "XShape": x_shape},
-    )
-
-    return out
+    else:
+        check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
+        check_variable_and_dtype(
+            input,
+            'input',
+            [
+                'float16',
+                'float32',
+                'float64',
+                'bool',
+                'int8',
+                'int16',
+                'int32',
+                'int64',
+                'complex64',
+                'complex128',
+            ],
+            'unsqueeze',
+        )
+        helper = LayerHelper("unsqueeze2", **locals())
+        inputs = {"X": input}
+        attrs = {}
+
+        if isinstance(axes, int):
+            axes = [axes]
+        if isinstance(axes, Variable):
+            axes.stop_gradient = True
+            inputs["AxesTensor"] = axes
+        elif isinstance(axes, (list, tuple)):
+            if utils._contain_var(axes):
+                inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
+            else:
+                attrs["axes"] = axes
+
+        out = helper.create_variable_for_type_inference(dtype=input.dtype)
+        x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
+        helper.append_op(
+            type="unsqueeze2",
+            inputs=inputs,
+            attrs=attrs,
+            outputs={"Out": out, "XShape": x_shape},
+        )
+
+        return out
 
 
 def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
-    if _non_static_mode():
+    if in_dygraph_mode():
         op = getattr(_legacy_C_ops, op_name)
         if binary_op:
             return op(x, y)
         else:
             return op(x)
-    check_variable_and_dtype(
-        x,
-        "x",
-        ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
-        op_name,
-    )
-    if y is not None:
-        check_variable_and_dtype(
-            y,
-            "y",
-            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
-            op_name,
-        )
-    if out is not None:
-        check_type(out, "out", Variable, op_name)
-
-    helper = LayerHelper(op_name, **locals())
-
-    if binary_op and x.dtype != y.dtype:
-        raise ValueError(
-            "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
-            % (op_name, x.dtype, y.dtype)
-        )
-
-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-    if binary_op:
-        helper.append_op(
-            type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
-        )
-    else:
-        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
-
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            "x",
+            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
+            op_name,
+        )
+        if y is not None:
+            check_variable_and_dtype(
+                y,
+                "y",
+                [
+                    "bool",
+                    "int8",
+                    "int16",
+                    "int32",
+                    "int64",
+                    "float32",
+                    "float64",
+                ],
+                op_name,
+            )
+        if out is not None:
+            check_type(out, "out", Variable, op_name)
+
+        helper = LayerHelper(op_name, **locals())
+
+        if binary_op and x.dtype != y.dtype:
+            raise ValueError(
+                "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
+                % (op_name, x.dtype, y.dtype)
+            )
+
+        if out is None:
+            out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        if binary_op:
+            helper.append_op(
+                type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
+            )
+        else:
+            helper.append_op(
+                type=op_name, inputs={"X": x}, outputs={"Out": out}
            )
+
+        return out
@@ -1082,30 +1082,28 @@ def clip_by_norm(x, max_norm, name=None):
     if in_dygraph_mode():
         return _C_ops.clip_by_norm(x, max_norm)
-    if _non_static_mode():
-        return _legacy_C_ops.clip_by_norm(x, 'max_norm', max_norm)
-
-    helper = LayerHelper("clip_by_norm", **locals())
-    check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm')
-    check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
-
-    if name is None:
-        name = unique_name.generate_with_ignorable_key(
-            ".".join([helper.name, 'tmp'])
-        )
-
-    out = helper.create_variable(
-        type=x.type, name=name, dtype=x.dtype, persistable=False
-    )
-
-    helper.append_op(
-        type="clip_by_norm",
-        inputs={"X": x},
-        attrs={"max_norm": max_norm},
-        outputs={"Out": out},
-    )
-
-    return out
+    else:
+        helper = LayerHelper("clip_by_norm", **locals())
+        check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm')
+        check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
+
+        if name is None:
+            name = unique_name.generate_with_ignorable_key(
+                ".".join([helper.name, 'tmp'])
+            )
+
+        out = helper.create_variable(
+            type=x.type, name=name, dtype=x.dtype, persistable=False
+        )
+
+        helper.append_op(
+            type="clip_by_norm",
+            inputs={"X": x},
+            attrs={"max_norm": max_norm},
+            outputs={"Out": out},
+        )
+
+        return out
@@ -1132,19 +1130,16 @@ def merge_selected_rows(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.merge_selected_rows(x)
-
-    if _non_static_mode():
-        return _legacy_C_ops.merge_selected_rows(x)
-
-    helper = LayerHelper("merge_selected_rows", **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type="merge_selected_rows",
-        inputs={"X": x},
-        attrs={},
-        outputs={"Out": out},
-    )
-    return out
+    else:
+        helper = LayerHelper("merge_selected_rows", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="merge_selected_rows",
+            inputs={"X": x},
+            attrs={},
+            outputs={"Out": out},
+        )
+        return out
 
 
 @templatedoc()
......
@@ -17,9 +17,7 @@ from .layer_function_generator import templatedoc
 from ..framework import (
     core,
     Variable,
-    _non_static_mode,
     in_dygraph_mode,
-    _in_legacy_dygraph,
     convert_np_dtype_to_dtype_,
 )
 from ..layer_helper import LayerHelper
@@ -156,7 +154,7 @@ def sequence_conv(
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     check_variable_and_dtype(
         input, 'input', ['float32', 'float64'], 'sequence_conv'
@@ -258,7 +256,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
         x_sequence_softmax_2 = paddle.static.nn.sequence_softmax(input=y)
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper('sequence_softmax', **locals())
     check_variable_and_dtype(
@@ -363,7 +361,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
         first_x = paddle.static.nn.sequence_pool(input=x, pool_type='first')
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     check_variable_and_dtype(
         input, 'input', ['float32', 'float64'], 'sequence_pool'
@@ -441,7 +439,7 @@ def sequence_concat(input, name=None):
         out = paddle.static.nn.sequence_concat(input=[x, y])
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper('sequence_concat', **locals())
@@ -640,7 +638,7 @@ def sequence_slice(input, offset, length, name=None):
                                                length=length)
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper("sequence_slice", **locals())
@@ -794,7 +792,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
             # data: [1 2 1 2 3 4 3 4]
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand'
@@ -916,7 +914,7 @@ def sequence_expand_as(x, y, name=None):
             # data: [1 1 1 2 2 2 3 4]
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand_as'
@@ -1019,7 +1017,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper('sequence_pad', **locals())
     check_variable_and_dtype(
@@ -1108,7 +1106,7 @@ def sequence_unpad(x, length, name=None):
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper('sequence_unpad', **locals())
     check_variable_and_dtype(
@@ -1183,7 +1181,7 @@ def sequence_reshape(input, new_dim):
         x_reshaped = paddle.static.nn.sequence_reshape(input=x, new_dim=4)
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper('sequence_reshape', **locals())
     check_variable_and_dtype(
@@ -1268,7 +1266,7 @@ def sequence_scatter(input, index, updates, name=None):
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper('sequence_scatter', **locals())
@@ -1350,7 +1348,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
         out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0)
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
     ), "sequence layer is not supported in dygraph mode yet."
     check_variable_and_dtype(
         input, 'input', ['int32', 'int64'], 'sequence_enumerate'
@@ -1479,7 +1477,7 @@ def sequence_reverse(x, name=None):
         x_reversed = paddle.static.nn.sequence_reverse(x)
     """
     assert (
-        not _non_static_mode()
+        not in_dygraph_mode()
    ), "sequence layer is not supported in dygraph mode yet."
     helper = LayerHelper("sequence_reverse", **locals())
     check_variable_and_dtype(
......
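The sequence layers keep their hard assertion — they exist only in static graph mode — so the check simply becomes `not in_dygraph_mode()`. A usage sketch consistent with the docstrings above (e.g. `paddle.static.nn.sequence_pool`):

    import paddle

    paddle.enable_static()
    # LoD (variable-length) input; sequence ops require static mode,
    # hence the assertion in every function above.
    x = paddle.static.data(name='x', shape=[None, 10], dtype='float32', lod_level=1)
    avg_x = paddle.static.nn.sequence_pool(input=x, pool_type='average')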
@@ -19,9 +19,7 @@ from ..layer_helper import LayerHelper
 from ..framework import (
     _current_expected_place,
     convert_np_dtype_to_dtype_,
-    _non_static_mode,
     _varbase_creator,
-    _in_legacy_dygraph,
     in_dygraph_mode,
 )
 from ..framework import Variable
@@ -81,59 +79,53 @@ def cast(x, dtype):
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
         return _C_ops.cast(x, dtype)
-
-    if _non_static_mode():
-        if not isinstance(dtype, core.VarDesc.VarType):
-            dtype = convert_np_dtype_to_dtype_(dtype)
-        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return out
-
-    check_variable_and_dtype(
-        x,
-        'x',
-        [
-            'bool',
-            'float16',
-            'float32',
-            'float64',
-            'int16',
-            'int32',
-            'int64',
-            'uint8',
-            'uint16',
-        ],
-        'cast',
-    )
-    check_dtype(
-        dtype,
-        'dtype',
-        [
-            'bool',
-            'float16',
-            'float32',
-            'float64',
-            'int8',
-            'int16',
-            'int32',
-            'int64',
-            'uint8',
-            'uint16',
-        ],
-        'cast',
-    )
-
-    helper = LayerHelper('cast', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=dtype, stop_gradient=x.stop_gradient
-    )
-    helper.append_op(
-        type='cast',
-        inputs={'X': [x]},
-        outputs={'Out': [out]},
-        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'int16',
+                'int32',
+                'int64',
+                'uint8',
+                'uint16',
+            ],
+            'cast',
+        )
+        check_dtype(
+            dtype,
+            'dtype',
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'int8',
+                'int16',
+                'int32',
+                'int64',
+                'uint8',
+                'uint16',
+            ],
+            'cast',
+        )
+
+        helper = LayerHelper('cast', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=dtype, stop_gradient=x.stop_gradient
+        )
+        helper.append_op(
+            type='cast',
+            inputs={'X': [x]},
+            outputs={'Out': [out]},
+            attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
+        )
+        return out
 
 
 def concat(input, axis=0, name=None):
     """
@@ -191,73 +183,69 @@ def concat(input, axis=0, name=None):
             input = [t for t in input if t.shape.count(0) == 0]
         out = _C_ops.concat(input, axis)
         return out
-
-    if _in_legacy_dygraph():
-        if isinstance(axis, Variable):
-            axis = axis.numpy()
-            axis = axis.item(0)
-        if not isinstance(input, Variable):
-            input = [t for t in input if t.shape.count(0) == 0]
-        out = _varbase_creator()
-        _legacy_C_ops.concat(input, out, 'axis', axis)
-        return out
-
-    check_type(input, 'input', (list, tuple, Variable), 'concat')
-    if not isinstance(input, Variable):
-        for id, x in enumerate(input):
-            check_variable_and_dtype(
-                x,
-                'input[' + str(id) + ']',
-                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-                'concat',
-            )
-            if x.dtype != input[0].dtype:
-                raise TypeError(
-                    "All the Tensors in the input must have the same data type."
-                )
-    else:
-        input = [input]
-    check_type(axis, 'axis', (int, Variable), 'concat')
-
-    if isinstance(axis, Variable):
-        check_dtype(
-            axis.dtype,
-            'axis',
-            ['int32', 'int64'],
-            'concat',
-            "The data type of axis must be int32 or int64 when axis is a Tensor",
-        )
-
-    helper = LayerHelper('concat', **locals())
-    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-
-    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
-        # NOTE(liym27): Don't remove this if branch!
-        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
-        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.
-
-        assert len(input) == 1, (
-            "If the elements of 'input' in concat are Variable(LoDTensorArray), "
-            "number of the elements must be 1, but received %s." % len(input)
-        )
-        out_index = helper.create_variable_for_type_inference(dtype="int32")
-        helper.append_op(
-            type='tensor_array_to_tensor',
-            inputs={'X': input[0]},
-            outputs={'Out': [out], 'OutIndex': [out_index]},
-            attrs={'axis': axis, 'use_stack': False},
-        )
-    else:
-        inputs = {'X': input}
-        attrs = {}
-        if isinstance(axis, Variable):
-            axis.stop_gradient = True
-        attrs['axis'] = axis
-
-        helper.append_op(
-            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
-        )
-    return out
+    else:
+        check_type(input, 'input', (list, tuple, Variable), 'concat')
+        if not isinstance(input, Variable):
+            for id, x in enumerate(input):
+                check_variable_and_dtype(
+                    x,
+                    'input[' + str(id) + ']',
+                    ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+                    'concat',
+                )
+                if x.dtype != input[0].dtype:
+                    raise TypeError(
+                        "All the Tensors in the input must have the same data type."
+                    )
+        else:
+            input = [input]
+        check_type(axis, 'axis', (int, Variable), 'concat')
+
+        if isinstance(axis, Variable):
+            check_dtype(
+                axis.dtype,
+                'axis',
+                ['int32', 'int64'],
+                'concat',
+                "The data type of axis must be int32 or int64 when axis is a Tensor",
+            )
+
+        helper = LayerHelper('concat', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+
+        if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
+            # NOTE(liym27): Don't remove this if branch!
+            # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
+            # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.
+
+            assert len(input) == 1, (
+                "If the elements of 'input' in concat are Variable(LoDTensorArray), "
+                "number of the elements must be 1, but received %s."
+                % len(input)
+            )
+            out_index = helper.create_variable_for_type_inference(dtype="int32")
+            helper.append_op(
+                type='tensor_array_to_tensor',
+                inputs={'X': input[0]},
+                outputs={'Out': [out], 'OutIndex': [out_index]},
+                attrs={'axis': axis, 'use_stack': False},
+            )
+        else:
+            inputs = {'X': input}
+            attrs = {}
+            if isinstance(axis, Variable):
+                axis.stop_gradient = True
+            attrs['axis'] = axis
+
+            helper.append_op(
+                type='concat',
+                inputs=inputs,
+                outputs={'Out': [out]},
+                attrs=attrs,
+            )
+        return out
 
 
 def sums(input, out=None):
@@ -391,22 +379,15 @@ def assign(input, output=None):
         input = numpy.array(input)
 
     # NOTE(Aurelius84): Why we judge core.VarBase?
     # In case of @to_static, a VarBase can be as input of `assign`,
-    # but _non_static_mode()==False under @to_static, which means
+    # but in_dygraph_mode()==False under @to_static, which means
     # isinstance(VarBase, Variable) == False. It will cause return None
     # after this api.
     if isinstance(input, (Variable, core.VarBase)):
-        if _non_static_mode():
-            if in_dygraph_mode() and output is None:
+        if in_dygraph_mode():
+            if output is None:
                 output = _C_ops.assign(input)
-            elif in_dygraph_mode() and output is not None:
-                _C_ops.assign_out_(input, output)
             else:
-                if output is None:
-                    if _in_legacy_dygraph():
-                        output = core.VarBase()
-                    else:
-                        output = core.eager.Tensor()
-                _legacy_C_ops.assign(input, output)
+                _C_ops.assign_out_(input, output)
         else:
             check_dtype(
                 input.dtype,
@@ -480,18 +461,6 @@ def assign(input, output=None):
                 values,
                 _current_expected_place(),
             )
-        elif _in_legacy_dygraph():
-            if output is None:
-                output = core.VarBase()
-            _legacy_C_ops.assign_value(
-                output,
-                'shape',
-                list(input.shape),
-                'dtype',
-                dtype,
-                value_name,
-                values,
-            )
         else:
             if output is None:
                 output = helper.create_variable_for_type_inference(
@@ -507,7 +476,7 @@ def assign(input, output=None):
             },
         )
 
-    if is_inplace and _non_static_mode():
+    if is_inplace and in_dygraph_mode():
         output._bump_inplace_version()
 
     return output
@@ -591,83 +560,56 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
         _C_ops.full_(out, shape, float(value), dtype, place)
         out.stop_gradient = True
         return out
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        if out is None:
-            out = _varbase_creator(dtype=dtype)
-
-        if isinstance(value, Variable):
-            if dtype in ['uint8', 'int16', 'int32', 'int64']:
-                attrs['str_value'] = str(int(value.numpy().item(0)))
-            else:
-                attrs['str_value'] = str(float(value.numpy().item(0)))
-
-        _legacy_C_ops.fill_constant(
-            out,
-            'value',
-            float(value),
-            'force_cpu',
-            force_cpu,
-            'dtype',
-            out.dtype,
-            'str_value',
-            attrs['str_value'],
-            'shape',
-            shape,
-        )
-        out.stop_gradient = True
-        return out
-
-    helper = LayerHelper("fill_constant", **locals())
-    inputs = {}
-    if isinstance(value, Variable):
-        if convert_dtype(value.dtype) != dtype:
-            value = cast(value, dtype)
-        inputs['ValueTensor'] = value
-
-    check_shape(shape)
-    check_dtype(
-        dtype,
-        'dtype',
-        [
-            'bool',
-            'float16',
-            'float32',
-            'float64',
-            'uint8',
-            'int16',
-            'int32',
-            'int64',
-            'complex64',
-            'complex128',
-        ],
-        'fill_constant',
-    )
-    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
-
-    if out is not None:
-        check_variable_and_dtype(
-            out, 'out', [convert_dtype(dtype)], 'fill_constant'
-        )
-
-    helper = LayerHelper("fill_constant", **locals())
-    utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
-    )
-
-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=dtype)
-    attrs['dtype'] = out.dtype
-    helper.append_op(
-        type='fill_constant',
-        inputs=inputs,
-        outputs={'Out': [out]},
-        attrs=attrs,
-        stop_gradient=True,
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        helper = LayerHelper("fill_constant", **locals())
+        inputs = {}
+        if isinstance(value, Variable):
+            if convert_dtype(value.dtype) != dtype:
+                value = cast(value, dtype)
+            inputs['ValueTensor'] = value
+
+        check_shape(shape)
+        check_dtype(
+            dtype,
+            'dtype',
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'uint8',
+                'int16',
+                'int32',
+                'int64',
+                'complex64',
+                'complex128',
+            ],
+            'fill_constant',
+        )
+        check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
+
+        if out is not None:
+            check_variable_and_dtype(
+                out, 'out', [convert_dtype(dtype)], 'fill_constant'
+            )
+
+        helper = LayerHelper("fill_constant", **locals())
+        utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
+        )
+
+        if out is None:
+            out = helper.create_variable_for_type_inference(dtype=dtype)
+        attrs['dtype'] = out.dtype
+        helper.append_op(
+            type='fill_constant',
+            inputs=inputs,
+            outputs={'Out': [out]},
+            attrs=attrs,
+            stop_gradient=True,
+        )
+        out.stop_gradient = True
+        return out
 
 
 @deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@@ -727,29 +669,29 @@ def fill_constant_batch_size_like(
         )
         out.stop_gradient = True
         return out
-
-    helper = LayerHelper("fill_constant_batch_size_like", **locals())
-    out = helper.create_variable_for_type_inference(dtype=dtype)
-    attrs = {
-        'shape': shape,
-        'dtype': out.dtype,
-        'value': float(value),
-        'input_dim_idx': input_dim_idx,
-        'output_dim_idx': output_dim_idx,
-        'force_cpu': force_cpu,
-    }
-    if convert_dtype(dtype) in ['int64', 'int32']:
-        attrs['str_value'] = str(int(value))
-    else:
-        attrs['str_value'] = str(float(value))
-    helper.append_op(
-        type='fill_constant_batch_size_like',
-        inputs={'Input': input},
-        outputs={'Out': [out]},
-        attrs=attrs,
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        helper = LayerHelper("fill_constant_batch_size_like", **locals())
+        out = helper.create_variable_for_type_inference(dtype=dtype)
+        attrs = {
+            'shape': shape,
+            'dtype': out.dtype,
+            'value': float(value),
+            'input_dim_idx': input_dim_idx,
+            'output_dim_idx': output_dim_idx,
+            'force_cpu': force_cpu,
+        }
+        if convert_dtype(dtype) in ['int64', 'int32']:
+            attrs['str_value'] = str(int(value))
+        else:
+            attrs['str_value'] = str(float(value))
+        helper.append_op(
+            type='fill_constant_batch_size_like',
+            inputs={'Input': input},
+            outputs={'Out': [out]},
+            attrs=attrs,
+        )
+        out.stop_gradient = True
+        return out
 
 
 def argmin(x, axis=0):
......
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid.layer_helper import LayerHelper
 
 __all__ = []
@@ -52,32 +52,27 @@ def segment_sum(data, segment_ids, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.segment_pool(data, segment_ids, "SUM")[0]
-    if _in_legacy_dygraph():
-        out, tmp = _legacy_C_ops.segment_pool(
-            data, segment_ids, 'pooltype', "SUM"
-        )
-        return out
-
-    check_variable_and_dtype(
-        data,
-        "X",
-        ("float32", "float64", "int32", "int64", "float16"),
-        "segment_pool",
-    )
-    check_variable_and_dtype(
-        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
-    )
-
-    helper = LayerHelper("segment_sum", **locals())
-    out = helper.create_variable_for_type_inference(dtype=data.dtype)
-    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
-    helper.append_op(
-        type="segment_pool",
-        inputs={"X": data, "SegmentIds": segment_ids},
-        outputs={"Out": out, "SummedIds": summed_ids},
-        attrs={"pooltype": "SUM"},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            data,
+            "X",
+            ("float32", "float64", "int32", "int64", "float16"),
+            "segment_pool",
+        )
+        check_variable_and_dtype(
+            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
+        )
+
+        helper = LayerHelper("segment_sum", **locals())
+        out = helper.create_variable_for_type_inference(dtype=data.dtype)
+        summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
+        helper.append_op(
+            type="segment_pool",
+            inputs={"X": data, "SegmentIds": segment_ids},
+            outputs={"Out": out, "SummedIds": summed_ids},
+            attrs={"pooltype": "SUM"},
+        )
+        return out
 
 
 def segment_mean(data, segment_ids, name=None):
@@ -114,32 +109,28 @@ def segment_mean(data, segment_ids, name=None):
     if in_dygraph_mode():
         return _C_ops.segment_pool(data, segment_ids, "MEAN")[0]
-    if _in_legacy_dygraph():
-        out, tmp = _legacy_C_ops.segment_pool(
-            data, segment_ids, 'pooltype', "MEAN"
-        )
-        return out
-
-    check_variable_and_dtype(
-        data,
-        "X",
-        ("float32", "float64", "int32", "int64", "float16"),
-        "segment_pool",
-    )
-    check_variable_and_dtype(
-        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
-    )
-
-    helper = LayerHelper("segment_mean", **locals())
-    out = helper.create_variable_for_type_inference(dtype=data.dtype)
-    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
-    helper.append_op(
-        type="segment_pool",
-        inputs={"X": data, "SegmentIds": segment_ids},
-        outputs={"Out": out, "SummedIds": summed_ids},
-        attrs={"pooltype": "MEAN"},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            data,
+            "X",
+            ("float32", "float64", "int32", "int64", "float16"),
+            "segment_pool",
+        )
+        check_variable_and_dtype(
+            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
+        )
+
+        helper = LayerHelper("segment_mean", **locals())
+        out = helper.create_variable_for_type_inference(dtype=data.dtype)
+        summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
+        helper.append_op(
+            type="segment_pool",
+            inputs={"X": data, "SegmentIds": segment_ids},
+            outputs={"Out": out, "SummedIds": summed_ids},
+            attrs={"pooltype": "MEAN"},
+        )
+        return out
 
 
 def segment_min(data, segment_ids, name=None):
@@ -175,32 +166,27 @@ def segment_min(data, segment_ids, name=None):
     if in_dygraph_mode():
         return _C_ops.segment_pool(data, segment_ids, "MIN")[0]
-    if _in_legacy_dygraph():
-        out, tmp = _legacy_C_ops.segment_pool(
-            data, segment_ids, 'pooltype', "MIN"
-        )
-        return out
-
-    check_variable_and_dtype(
-        data,
-        "X",
-        ("float32", "float64", "int32", "int64", "float16"),
-        "segment_pool",
-    )
-    check_variable_and_dtype(
-        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
-    )
-
-    helper = LayerHelper("segment_min", **locals())
-    out = helper.create_variable_for_type_inference(dtype=data.dtype)
-    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
-    helper.append_op(
-        type="segment_pool",
-        inputs={"X": data, "SegmentIds": segment_ids},
-        outputs={"Out": out, "SummedIds": summed_ids},
-        attrs={"pooltype": "MIN"},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            data,
+            "X",
+            ("float32", "float64", "int32", "int64", "float16"),
+            "segment_pool",
+        )
+        check_variable_and_dtype(
+            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
+        )
+
+        helper = LayerHelper("segment_min", **locals())
+        out = helper.create_variable_for_type_inference(dtype=data.dtype)
+        summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
+        helper.append_op(
+            type="segment_pool",
+            inputs={"X": data, "SegmentIds": segment_ids},
+            outputs={"Out": out, "SummedIds": summed_ids},
+            attrs={"pooltype": "MIN"},
+        )
+        return out
 
 
 def segment_max(data, segment_ids, name=None):
@@ -236,29 +222,24 @@ def segment_max(data, segment_ids, name=None):
     if in_dygraph_mode():
         return _C_ops.segment_pool(data, segment_ids, "MAX")[0]
-    if _in_legacy_dygraph():
-        out, tmp = _legacy_C_ops.segment_pool(
-            data, segment_ids, 'pooltype', "MAX"
-        )
-        return out
-
-    check_variable_and_dtype(
-        data,
-        "X",
-        ("float32", "float64", "int32", "int64", "float16"),
-        "segment_pool",
-    )
-    check_variable_and_dtype(
-        segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
-    )
-
-    helper = LayerHelper("segment_max", **locals())
-    out = helper.create_variable_for_type_inference(dtype=data.dtype)
-    summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
-    helper.append_op(
-        type="segment_pool",
-        inputs={"X": data, "SegmentIds": segment_ids},
-        outputs={"Out": out, "SummedIds": summed_ids},
-        attrs={"pooltype": "MAX"},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            data,
+            "X",
+            ("float32", "float64", "int32", "int64", "float16"),
+            "segment_pool",
+        )
+        check_variable_and_dtype(
+            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
+        )
+
+        helper = LayerHelper("segment_max", **locals())
+        out = helper.create_variable_for_type_inference(dtype=data.dtype)
+        summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
+        helper.append_op(
+            type="segment_pool",
+            inputs={"X": data, "SegmentIds": segment_ids},
+            outputs={"Out": out, "SummedIds": summed_ids},
+            attrs={"pooltype": "MAX"},
+        )
+        return out
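Each segment reduction above now maps to `_C_ops.segment_pool` in dygraph mode or a `segment_pool` op in static mode. A usage sketch (assuming these helpers are exposed as `paddle.geometric.segment_sum` and friends in this branch):

    import paddle

    data = paddle.to_tensor([[1., 2., 3.], [3., 2., 1.], [4., 5., 6.]])
    segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
    # Rows 0 and 1 belong to segment 0 and are summed; row 2 is segment 1.
    out = paddle.geometric.segment_sum(data, segment_ids)
    # out: [[4., 4., 4.], [4., 5., 6.]]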
...@@ -14,13 +14,13 @@ ...@@ -14,13 +14,13 @@
import numpy as np import numpy as np
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops
from paddle.fluid.data_feeder import ( from paddle.fluid.data_feeder import (
check_dtype, check_dtype,
check_type, check_type,
check_variable_and_dtype, check_variable_and_dtype,
) )
from paddle.fluid.framework import Variable, _in_legacy_dygraph, in_dygraph_mode from paddle.fluid.framework import Variable, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layer_helper import LayerHelper
from .utils import ( from .utils import (
...@@ -118,68 +118,61 @@ def send_u_recv( ...@@ -118,68 +118,61 @@ def send_u_recv(
# TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1.
if _in_legacy_dygraph():
out_size = convert_out_size_to_list(out_size)
out, tmp = _legacy_C_ops.graph_send_recv(
x,
src_index,
dst_index,
None,
'reduce_op',
reduce_op.upper(),
'out_size',
out_size,
)
return out
if in_dygraph_mode(): if in_dygraph_mode():
out_size = convert_out_size_to_list(out_size) out_size = convert_out_size_to_list(out_size)
return _C_ops.send_u_recv( return _C_ops.send_u_recv(
x, src_index, dst_index, reduce_op.upper(), out_size x, src_index, dst_index, reduce_op.upper(), out_size
) )
else:
check_variable_and_dtype( check_variable_and_dtype(
x, x,
"X", "X",
("float32", "float64", "int32", "int64", "float16"), ("float32", "float64", "int32", "int64", "float16"),
"graph_send_recv", "graph_send_recv",
) )
check_variable_and_dtype( check_variable_and_dtype(
src_index, "Src_index", ("int32", "int64"), "graph_send_recv" src_index, "Src_index", ("int32", "int64"), "graph_send_recv"
)
check_variable_and_dtype(
dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv"
)
if out_size:
check_type(
out_size,
'out_size',
(int, np.int32, np.int64, Variable),
'graph_send_recv',
) )
if isinstance(out_size, Variable): check_variable_and_dtype(
check_dtype( dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv"
out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_recv'
) )
if out_size:
check_type(
out_size,
'out_size',
(int, np.int32, np.int64, Variable),
'graph_send_recv',
)
if isinstance(out_size, Variable):
check_dtype(
out_size.dtype,
'out_size',
['int32', 'int64'],
'graph_send_recv',
)
helper = LayerHelper("send_u_recv", **locals()) helper = LayerHelper("send_u_recv", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
dst_count = helper.create_variable_for_type_inference( dst_count = helper.create_variable_for_type_inference(
dtype="int32", stop_gradient=True dtype="int32", stop_gradient=True
) )
inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index} inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index}
attrs = {"reduce_op": reduce_op.upper()} attrs = {"reduce_op": reduce_op.upper()}
get_out_size_tensor_inputs( get_out_size_tensor_inputs(
inputs=inputs, attrs=attrs, out_size=out_size, op_type='graph_send_recv' inputs=inputs,
) attrs=attrs,
out_size=out_size,
op_type='graph_send_recv',
)
helper.append_op( helper.append_op(
type="graph_send_recv", type="graph_send_recv",
inputs=inputs, inputs=inputs,
outputs={"Out": out, "Dst_count": dst_count}, outputs={"Out": out, "Dst_count": dst_count},
attrs=attrs, attrs=attrs,
) )
return out return out
def send_ue_recv( def send_ue_recv(
...@@ -302,86 +295,81 @@ def send_ue_recv( ...@@ -302,86 +295,81 @@ def send_ue_recv(
# TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1.
if _in_legacy_dygraph(): if in_dygraph_mode():
out_size = convert_out_size_to_list(out_size) out_size = convert_out_size_to_list(out_size)
out, tmp = _legacy_C_ops.graph_send_ue_recv( return _C_ops.send_ue_recv(
x, x,
y, y,
src_index, src_index,
dst_index, dst_index,
None,
'message_op',
message_op.upper(), message_op.upper(),
'reduce_op',
reduce_op.upper(), reduce_op.upper(),
'out_size',
out_size, out_size,
) )
return out else:
if in_dygraph_mode(): check_variable_and_dtype(
out_size = convert_out_size_to_list(out_size)
return _C_ops.send_ue_recv(
x, x,
"X",
("float32", "float64", "int32", "int64", "float16"),
"graph_send_ue_recv",
)
check_variable_and_dtype(
y, y,
src_index, "Y",
dst_index, ("float32", "float64", "int32", "int64", "float16"),
message_op.upper(), "graph_send_ue_recv",
reduce_op.upper(),
out_size,
) )
check_variable_and_dtype(
src_index, "Src_index", ("int32", "int64"), "graph_send_ue_recv"
)
check_variable_and_dtype(
dst_index, "Dst_index", ("int32", "int64"), "graph_send_ue_recv"
)
if out_size:
check_type(
out_size,
'out_size',
(int, np.int32, np.int64, Variable),
'graph_send_ue_recv',
)
if isinstance(out_size, Variable):
check_dtype(
out_size.dtype,
'out_size',
['int32', 'int64'],
'graph_send_ue_recv',
)
check_variable_and_dtype( helper = LayerHelper("send_ue_recv", **locals())
x, out = helper.create_variable_for_type_inference(dtype=x.dtype)
"X", dst_count = helper.create_variable_for_type_inference(
("float32", "float64", "int32", "int64", "float16"), dtype="int32", stop_gradient=True
"graph_send_ue_recv",
)
check_variable_and_dtype(
y,
"Y",
("float32", "float64", "int32", "int64", "float16"),
"graph_send_ue_recv",
)
check_variable_and_dtype(
src_index, "Src_index", ("int32", "int64"), "graph_send_ue_recv"
)
check_variable_and_dtype(
dst_index, "Dst_index", ("int32", "int64"), "graph_send_ue_recv"
)
if out_size:
check_type(
out_size,
'out_size',
(int, np.int32, np.int64, Variable),
'graph_send_ue_recv',
) )
if isinstance(out_size, Variable):
check_dtype( inputs = {
out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_ue_recv' "X": x,
"Y": y,
"Src_index": src_index,
"Dst_index": dst_index,
}
attrs = {
"message_op": message_op.upper(),
"reduce_op": reduce_op.upper(),
}
get_out_size_tensor_inputs(
inputs=inputs,
attrs=attrs,
out_size=out_size,
op_type='graph_send_ue_recv',
) )
helper = LayerHelper("send_ue_recv", **locals()) helper.append_op(
out = helper.create_variable_for_type_inference(dtype=x.dtype) type="graph_send_ue_recv",
dst_count = helper.create_variable_for_type_inference( inputs=inputs,
dtype="int32", stop_gradient=True outputs={"Out": out, "Dst_count": dst_count},
) attrs=attrs,
)
inputs = {"X": x, "Y": y, "Src_index": src_index, "Dst_index": dst_index} return out
attrs = {"message_op": message_op.upper(), "reduce_op": reduce_op.upper()}
get_out_size_tensor_inputs(
inputs=inputs,
attrs=attrs,
out_size=out_size,
op_type='graph_send_ue_recv',
)
helper.append_op(
type="graph_send_ue_recv",
inputs=inputs,
outputs={"Out": out, "Dst_count": dst_count},
attrs=attrs,
)
return out
def send_uv(x, y, src_index, dst_index, message_op="add", name=None):

@@ -466,43 +454,39 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None):
     if in_dygraph_mode():
         return _C_ops.send_uv(x, y, src_index, dst_index, message_op.upper())
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.graph_send_uv(
-                x, y, src_index, dst_index, "message_op", message_op.upper()
-            )
-        else:
+        helper = LayerHelper("graph_send_uv", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['int32', 'int64', 'float32', 'float64', 'float16'],
+            'graph_send_uv',
+        )
+        check_variable_and_dtype(
+            y,
+            'y',
+            ['int32', 'int64', 'float32', 'float64', 'float16'],
+            'graph_send_uv',
+        )
+        check_variable_and_dtype(
+            src_index, 'src_index', ['int32', 'int64'], 'graph_send_uv'
+        )
+        check_variable_and_dtype(
+            dst_index, 'dst_index', ['int32', 'int64'], 'graph_send_uv'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        inputs = {
+            'x': x,
+            'y': y,
+            'src_index': src_index,
+            'dst_index': dst_index,
+        }
+        attrs = {'message_op': message_op.upper()}
+        helper.append_op(
+            type="graph_send_uv",
+            inputs=inputs,
+            attrs=attrs,
+            outputs={"out": out},
+        )
+        return out
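Review note: the public signature is unchanged; only the legacy trace path is gone. A usage sketch, assuming the wrapper is exposed as `paddle.geometric.send_uv` (the module path is an assumption; the signature is taken from the `def` above):

```python
import paddle

x = paddle.to_tensor([[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]])
y = paddle.to_tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
src_index = paddle.to_tensor([0, 1, 2], dtype="int32")
dst_index = paddle.to_tensor([1, 2, 0], dtype="int32")
# One message per edge; in eager mode this now goes straight to
# _C_ops.send_uv as the hunk shows.
out = paddle.geometric.send_uv(x, y, src_index, dst_index, message_op="add")
print(out.shape)  # [3, 2]
```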
@@ -1001,7 +1001,7 @@ def _custom_api_content(op_name):
     """
     import paddle.fluid.core as core
     from paddle.fluid.core import VarBase, CustomOpKernelContext
-    from paddle.fluid.framework import _non_static_mode, _dygraph_tracer, _in_legacy_dygraph, in_dygraph_mode
+    from paddle.fluid.framework import _dygraph_tracer, in_dygraph_mode
     from paddle.fluid.layer_helper import LayerHelper

     def {op_name}({inputs}):
@@ -1024,16 +1024,11 @@ def _custom_api_content(op_name):
             ctx.add_outputs(outs[out_name])
             core.eager._run_custom_op(ctx, "{op_name}", True)
         else:
-            if _in_legacy_dygraph():
-                for out_name in out_names:
-                    outs[out_name] = VarBase()
-                _dygraph_tracer().trace_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)
-            else:
-                helper = LayerHelper("{op_name}", **locals())
-                for out_name in out_names:
-                    outs[out_name] = helper.create_variable(dtype='float32')
-                helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)
+            helper = LayerHelper("{op_name}", **locals())
+            for out_name in out_names:
+                outs[out_name] = helper.create_variable(dtype='float32')
+
+            helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)

         res = [outs[out_name] for out_name in out_names]
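Review note: with the `VarBase` trace path gone, the generated custom-op API has exactly two branches. A minimal sketch of the dispatch shape the template now emits, with the built-in `relu` op standing in for `{op_name}` (illustrative only, not the generated source; the real code builds `ins`/`outs`/`attrs` from the op spec):

```python
import paddle
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper

def my_op(x):  # hypothetical wrapper with the post-PR two-branch shape
    if in_dygraph_mode():
        # stand-in for the CustomOpKernelContext / _run_custom_op branch
        return paddle.nn.functional.relu(x)
    # static graph: the single LayerHelper branch kept by this PR
    helper = LayerHelper("relu", **locals())
    out = helper.create_variable(dtype='float32')
    helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
    return out

print(my_op(paddle.to_tensor([-1.0, 2.0])))  # eager path: [0., 2.]
```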
@@ -18,12 +18,7 @@ from paddle import _C_ops, _legacy_C_ops
 from paddle.tensor.math import _add_with_axis
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import (
-    Variable,
-    _in_legacy_dygraph,
-    _non_static_mode,
-    in_dygraph_mode,
-)
+from ..fluid.framework import Variable, in_dygraph_mode
 from ..fluid.initializer import Normal
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.layers import utils
@@ -211,76 +206,56 @@ def yolo_loss(
         )
         return loss

-    if _non_static_mode():
-        loss, _, _ = _legacy_C_ops.yolov3_loss(
-            x,
-            gt_box,
-            gt_label,
-            gt_score,
-            'anchors',
-            anchors,
-            'anchor_mask',
-            anchor_mask,
-            'class_num',
-            class_num,
-            'ignore_thresh',
-            ignore_thresh,
-            'downsample_ratio',
-            downsample_ratio,
-            'use_label_smooth',
-            use_label_smooth,
-            'scale_x_y',
-            scale_x_y,
-        )
-        return loss
-
+    else:
+        helper = LayerHelper('yolov3_loss', **locals())
+
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_loss')
+        check_variable_and_dtype(
+            gt_box, 'gt_box', ['float32', 'float64'], 'yolo_loss'
+        )
+        check_variable_and_dtype(gt_label, 'gt_label', 'int32', 'yolo_loss')
+        check_type(anchors, 'anchors', (list, tuple), 'yolo_loss')
+        check_type(anchor_mask, 'anchor_mask', (list, tuple), 'yolo_loss')
+        check_type(class_num, 'class_num', int, 'yolo_loss')
+        check_type(ignore_thresh, 'ignore_thresh', float, 'yolo_loss')
+        check_type(use_label_smooth, 'use_label_smooth', bool, 'yolo_loss')
+
+        loss = helper.create_variable_for_type_inference(dtype=x.dtype)
+        objectness_mask = helper.create_variable_for_type_inference(
+            dtype='int32'
+        )
+        gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
+
+        inputs = {
+            "X": x,
+            "GTBox": gt_box,
+            "GTLabel": gt_label,
+        }
+        if gt_score is not None:
+            inputs["GTScore"] = gt_score
+
+        attrs = {
+            "anchors": anchors,
+            "anchor_mask": anchor_mask,
+            "class_num": class_num,
+            "ignore_thresh": ignore_thresh,
+            "downsample_ratio": downsample_ratio,
+            "use_label_smooth": use_label_smooth,
+            "scale_x_y": scale_x_y,
+        }
+
+        helper.append_op(
+            type='yolov3_loss',
+            inputs=inputs,
+            outputs={
+                'Loss': loss,
+                'ObjectnessMask': objectness_mask,
+                'GTMatchMask': gt_match_mask,
+            },
+            attrs=attrs,
+        )
+        return loss
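Review note: a sanity-check sketch for the kept branches, assuming the public entry point `paddle.vision.ops.yolo_loss`; the channel count must equal `len(anchor_mask) * (5 + class_num)` for the attrs above:

```python
import paddle

x = paddle.rand([2, 14, 8, 8])           # 2 * (5 + 2) = 14 channels
gt_box = paddle.rand([2, 10, 4])
gt_label = paddle.randint(0, 2, [2, 10], dtype='int32')
loss = paddle.vision.ops.yolo_loss(
    x, gt_box, gt_label,
    anchors=[10, 13, 16, 30],
    anchor_mask=[0, 1],
    class_num=2,
    ignore_thresh=0.7,
    downsample_ratio=8,
)
print(loss)  # per-image loss
```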
def yolo_box(

@@ -409,64 +384,42 @@ def yolo_box(
         )
         return boxes, scores

-    if _non_static_mode():
-        boxes, scores = _legacy_C_ops.yolo_box(
-            x,
-            img_size,
-            'anchors',
-            anchors,
-            'class_num',
-            class_num,
-            'conf_thresh',
-            conf_thresh,
-            'downsample_ratio',
-            downsample_ratio,
-            'clip_bbox',
-            clip_bbox,
-            'scale_x_y',
-            scale_x_y,
-            'iou_aware',
-            iou_aware,
-            'iou_aware_factor',
-            iou_aware_factor,
-        )
-        return boxes, scores
-
+    else:
+        helper = LayerHelper('yolo_box', **locals())
+
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'yolo_box')
+        check_variable_and_dtype(img_size, 'img_size', 'int32', 'yolo_box')
+        check_type(anchors, 'anchors', (list, tuple), 'yolo_box')
+        check_type(conf_thresh, 'conf_thresh', float, 'yolo_box')
+
+        boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
+        scores = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        attrs = {
+            "anchors": anchors,
+            "class_num": class_num,
+            "conf_thresh": conf_thresh,
+            "downsample_ratio": downsample_ratio,
+            "clip_bbox": clip_bbox,
+            "scale_x_y": scale_x_y,
+            "iou_aware": iou_aware,
+            "iou_aware_factor": iou_aware_factor,
+        }
+        helper.append_op(
+            type='yolo_box',
+            inputs={
+                "X": x,
+                "ImgSize": img_size,
+            },
+            outputs={
+                'Boxes': boxes,
+                'Scores': scores,
+            },
+            attrs=attrs,
+        )
+        return boxes, scores
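Review note: companion sketch to the `yolo_loss` one above, assuming `paddle.vision.ops.yolo_box`; the same channel constraint applies (`len(anchors)//2 * (5 + class_num)`):

```python
import paddle

x = paddle.rand([2, 14, 8, 8])              # 2 anchors * (5 + 2 classes)
img_size = paddle.ones((2, 2), dtype='int32')
boxes, scores = paddle.vision.ops.yolo_box(
    x, img_size,
    anchors=[10, 13, 16, 30],
    class_num=2,
    conf_thresh=0.01,
    downsample_ratio=32,
)
print(boxes.shape, scores.shape)
```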
def prior_box(
    input,

@@ -587,31 +540,6 @@ def prior_box(
         )
         return box, var

-    if _in_legacy_dygraph():
-        attrs = (
-            'min_sizes',
-            min_sizes,
-            'aspect_ratios',
-            aspect_ratios,
-            'variances',
-            variance,
-            'flip',
-            flip,
-            'clip',
-            clip,
-            'step_w',
-            steps[0],
-            'step_h',
-            steps[1],
-            'offset',
-            offset,
-            'min_max_aspect_ratios_order',
-            min_max_aspect_ratios_order,
-        )
-        if cur_max_sizes is not None:
-            attrs += ('max_sizes', cur_max_sizes)
-        box, var = _legacy_C_ops.prior_box(input, image, *attrs)
-        return box, var
     else:
         attrs = {
             'min_sizes': min_sizes,
@@ -783,36 +711,6 @@ def box_coder(
             raise TypeError("Input prior_box_var must be Variable or list")
         return output_box

-    if _in_legacy_dygraph():
-        if isinstance(prior_box_var, Variable):
-            output_box = _legacy_C_ops.box_coder(
-                prior_box,
-                prior_box_var,
-                target_box,
-                "code_type",
-                code_type,
-                "box_normalized",
-                box_normalized,
-                "axis",
-                axis,
-            )
-        elif isinstance(prior_box_var, list):
-            output_box = _legacy_C_ops.box_coder(
-                prior_box,
-                None,
-                target_box,
-                "code_type",
-                code_type,
-                "box_normalized",
-                box_normalized,
-                "axis",
-                axis,
-                "variance",
-                prior_box_var,
-            )
-        else:
-            raise TypeError("Input prior_box_var must be Variable or list")
-        return output_box
     else:
         helper = LayerHelper("box_coder", **locals())
@@ -989,35 +887,6 @@ def deform_conv2d(
             out = _add_with_axis(pre_bias, bias, axis=1)
         else:
             out = pre_bias
-    elif _in_legacy_dygraph():
-        attrs = (
-            'strides',
-            stride,
-            'paddings',
-            padding,
-            'dilations',
-            dilation,
-            'deformable_groups',
-            deformable_groups,
-            'groups',
-            groups,
-            'im2col_step',
-            1,
-        )
-        if use_deform_conv2d_v1:
-            op_type = 'deformable_conv_v1'
-            pre_bias = getattr(_legacy_C_ops, op_type)(
-                x, offset, weight, *attrs
-            )
-        else:
-            op_type = 'deformable_conv'
-            pre_bias = getattr(_legacy_C_ops, op_type)(
-                x, offset, mask, weight, *attrs
-            )
-        if bias is not None:
-            out = _add_with_axis(pre_bias, bias, axis=1)
-        else:
-            out = pre_bias
     else:
         check_variable_and_dtype(
             x, "x", ['float32', 'float64'], 'deform_conv2d'
@@ -1370,31 +1239,6 @@ def distribute_fpn_proposals(
         )
         return multi_rois, restore_ind, rois_num_per_level

-    if _non_static_mode():
-        assert (
-            rois_num is not None
-        ), "rois_num should not be None in dygraph mode."
-        attrs = (
-            'min_level',
-            min_level,
-            'max_level',
-            max_level,
-            'refer_level',
-            refer_level,
-            'refer_scale',
-            refer_scale,
-            'pixel_offset',
-            pixel_offset,
-        )
-        (
-            multi_rois,
-            restore_ind,
-            rois_num_per_level,
-        ) = _legacy_C_ops.distribute_fpn_proposals(
-            fpn_rois, rois_num, num_lvl, num_lvl, *attrs
-        )
-        return multi_rois, restore_ind, rois_num_per_level
     else:
         check_variable_and_dtype(
             fpn_rois,
@@ -1472,19 +1316,19 @@ def read_file(filename, name=None):
             # [142915]
     """
-    if _non_static_mode():
-        return _legacy_C_ops.read_file('filename', filename)
-
-    inputs = dict()
-    attrs = {'filename': filename}
-
-    helper = LayerHelper("read_file", **locals())
-    out = helper.create_variable_for_type_inference('uint8')
-    helper.append_op(
-        type="read_file", inputs=inputs, attrs=attrs, outputs={"Out": out}
-    )
+    if in_dygraph_mode():
+        return _legacy_C_ops.read_file('filename', filename)
+    else:
+        inputs = dict()
+        attrs = {'filename': filename}
+
+        helper = LayerHelper("read_file", **locals())
+        out = helper.create_variable_for_type_inference('uint8')
+        helper.append_op(
+            type="read_file", inputs=inputs, attrs=attrs, outputs={"Out": out}
+        )

     return out
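Review note: the eager branch still routes through `_legacy_C_ops.read_file`; only the mode check changed to `in_dygraph_mode()`. A round-trip usage sketch, assuming `paddle.vision.ops.read_file` (the file name and the cv2 dependency are illustrative):

```python
import cv2
import numpy as np
import paddle

fake_img = (np.random.random((400, 300, 3)) * 255).astype('uint8')
cv2.imwrite('fake.jpg', fake_img)
img_bytes = paddle.vision.ops.read_file('fake.jpg')
print(img_bytes.shape)  # 1-D uint8 tensor of raw file bytes
```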
def decode_jpeg(x, mode='unchanged', name=None):

@@ -1524,19 +1368,17 @@ def decode_jpeg(x, mode='unchanged', name=None):
     """
     if in_dygraph_mode():
         return _C_ops.decode_jpeg(x, mode, _current_expected_place())
-    elif _non_static_mode():
-        return _legacy_C_ops.decode_jpeg(x, "mode", mode)
-
-    inputs = {'X': x}
-    attrs = {"mode": mode}
-
-    helper = LayerHelper("decode_jpeg", **locals())
-    out = helper.create_variable_for_type_inference('uint8')
-    helper.append_op(
-        type="decode_jpeg", inputs=inputs, attrs=attrs, outputs={"Out": out}
-    )
+    else:
+        inputs = {'X': x}
+        attrs = {"mode": mode}
+
+        helper = LayerHelper("decode_jpeg", **locals())
+        out = helper.create_variable_for_type_inference('uint8')
+        helper.append_op(
+            type="decode_jpeg", inputs=inputs, attrs=attrs, outputs={"Out": out}
+        )

     return out
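Review note: pairs naturally with `read_file` above. A self-contained sketch, assuming `paddle.vision.ops.decode_jpeg` with the default `mode='unchanged'`:

```python
import cv2
import numpy as np
import paddle

fake_img = (np.random.random((40, 30, 3)) * 255).astype('uint8')
cv2.imwrite('fake.jpg', fake_img)
img_bytes = paddle.vision.ops.read_file('fake.jpg')
img = paddle.vision.ops.decode_jpeg(img_bytes)  # CHW uint8 tensor
print(img.shape)
```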
def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):

@@ -1594,36 +1436,22 @@ def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
             output_channels,
             spatial_scale,
         )
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.psroi_pool(
-            x,
-            boxes,
-            boxes_num,
-            "output_channels",
-            output_channels,
-            "spatial_scale",
-            spatial_scale,
-            "pooled_height",
-            pooled_height,
-            "pooled_width",
-            pooled_width,
-        )
-
-    helper = LayerHelper('psroi_pool', **locals())
-    dtype = helper.input_dtype()
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type='psroi_pool',
-        inputs={'X': x, 'ROIs': boxes},
-        outputs={'Out': out},
-        attrs={
-            'output_channels': output_channels,
-            'spatial_scale': spatial_scale,
-            'pooled_height': pooled_height,
-            'pooled_width': pooled_width,
-        },
-    )
-    return out
+    else:
+        helper = LayerHelper('psroi_pool', **locals())
+        dtype = helper.input_dtype()
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(
+            type='psroi_pool',
+            inputs={'X': x, 'ROIs': boxes},
+            outputs={'Out': out},
+            attrs={
+                'output_channels': output_channels,
+                'spatial_scale': spatial_scale,
+                'pooled_height': pooled_height,
+                'pooled_width': pooled_width,
+            },
+        )
+        return out
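Review note: usage sketch, assuming `paddle.vision.ops.psroi_pool`; the input channel count must be `output_channels * output_size**2`:

```python
import paddle

x = paddle.uniform([2, 490, 28, 28], dtype='float32')  # 490 = 10 * 7 * 7
boxes = paddle.to_tensor(
    [[1.0, 5.0, 8.0, 10.0], [4.0, 2.0, 6.0, 7.0], [12.0, 12.0, 19.0, 21.0]]
)
boxes_num = paddle.to_tensor([1, 2], dtype='int32')     # boxes per image
pool_out = paddle.vision.ops.psroi_pool(x, boxes, boxes_num, output_size=7)
print(pool_out.shape)  # [3, 10, 7, 7]
```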
class PSRoIPool(Layer):

@@ -1721,23 +1549,6 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
         return _C_ops.roi_pool(
             x, boxes, boxes_num, pooled_height, pooled_width, spatial_scale
         )
-    if _in_legacy_dygraph():
-        assert (
-            boxes_num is not None
-        ), "boxes_num should not be None in dygraph mode."
-        pool_out, argmaxes = _legacy_C_ops.roi_pool(
-            x,
-            boxes,
-            boxes_num,
-            "pooled_height",
-            pooled_height,
-            "pooled_width",
-            pooled_width,
-            "spatial_scale",
-            spatial_scale,
-        )
-        return pool_out
     else:
         check_variable_and_dtype(x, 'x', ['float32'], 'roi_pool')
         check_variable_and_dtype(boxes, 'boxes', ['float32'], 'roi_pool')
@@ -1903,27 +1714,6 @@ def roi_align(
             sampling_ratio,
             aligned,
         )
-    if _in_legacy_dygraph():
-        assert (
-            boxes_num is not None
-        ), "boxes_num should not be None in dygraph mode."
-        align_out = _legacy_C_ops.roi_align(
-            x,
-            boxes,
-            boxes_num,
-            "pooled_height",
-            pooled_height,
-            "pooled_width",
-            pooled_width,
-            "spatial_scale",
-            spatial_scale,
-            "sampling_ratio",
-            sampling_ratio,
-            "aligned",
-            aligned,
-        )
-        return align_out
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'roi_align')
         check_variable_and_dtype(
@@ -2143,18 +1933,16 @@ def nms(
     if in_dygraph_mode():
         return _C_ops.nms(boxes, iou_threshold)
-    if _non_static_mode():
-        return _legacy_C_ops.nms(boxes, 'iou_threshold', iou_threshold)
-
-    helper = LayerHelper('nms', **locals())
-    out = helper.create_variable_for_type_inference('int64')
-    helper.append_op(
-        type='nms',
-        inputs={'Boxes': boxes},
-        outputs={'KeepBoxesIdxs': out},
-        attrs={'iou_threshold': iou_threshold},
-    )
-    return out
+    else:
+        helper = LayerHelper('nms', **locals())
+        out = helper.create_variable_for_type_inference('int64')
+        helper.append_op(
+            type='nms',
+            inputs={'Boxes': boxes},
+            outputs={'KeepBoxesIdxs': out},
+            attrs={'iou_threshold': iou_threshold},
+        )
+        return out

     if scores is None:
         return _nms(boxes, iou_threshold)

@@ -2222,7 +2010,7 @@ def nms(
     if top_k is None:
         return keep_boxes_idxs[sorted_sub_indices]

-    if _non_static_mode():
+    if in_dygraph_mode():
         top_k = shape if shape < top_k else top_k
     _, topk_sub_indices = paddle.topk(scores[keep_boxes_idxs], top_k)
     return keep_boxes_idxs[topk_sub_indices]
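Review note: with `scores=None` the call takes the hard-NMS path kept above; otherwise the category-aware wrapper runs. A usage sketch, assuming `paddle.vision.ops.nms`:

```python
import numpy as np
import paddle

b = np.random.rand(4, 4).astype('float32')
b[:, 2:4] = b[:, 0:2] + b[:, 2:4]   # convert to (x1, y1, x2, y2)
keep = paddle.vision.ops.nms(paddle.to_tensor(b), iou_threshold=0.1)
print(keep)  # indices of kept boxes -- the static branch's 'KeepBoxesIdxs'
```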
@@ -2331,92 +2119,70 @@ def generate_proposals(
         )
         return rpn_rois, rpn_roi_probs, rpn_rois_num

-    elif _non_static_mode():
-        assert (
-            return_rois_num
-        ), "return_rois_num should be True in dygraph mode."
-        attrs = (
-            'pre_nms_topN',
-            pre_nms_top_n,
-            'post_nms_topN',
-            post_nms_top_n,
-            'nms_thresh',
-            nms_thresh,
-            'min_size',
-            min_size,
-            'eta',
-            eta,
-            'pixel_offset',
-            pixel_offset,
-        )
-        (
-            rpn_rois,
-            rpn_roi_probs,
-            rpn_rois_num,
-        ) = _legacy_C_ops.generate_proposals_v2(
-            scores, bbox_deltas, img_size, anchors, variances, *attrs
-        )
-        return rpn_rois, rpn_roi_probs, rpn_rois_num
-
+    else:
+        helper = LayerHelper('generate_proposals_v2', **locals())
+
+        check_variable_and_dtype(
+            scores, 'scores', ['float32'], 'generate_proposals_v2'
+        )
+        check_variable_and_dtype(
+            bbox_deltas, 'bbox_deltas', ['float32'], 'generate_proposals_v2'
+        )
+        check_variable_and_dtype(
+            img_size,
+            'img_size',
+            ['float32', 'float64'],
+            'generate_proposals_v2',
+        )
+        check_variable_and_dtype(
+            anchors, 'anchors', ['float32'], 'generate_proposals_v2'
+        )
+        check_variable_and_dtype(
+            variances, 'variances', ['float32'], 'generate_proposals_v2'
+        )
+
+        rpn_rois = helper.create_variable_for_type_inference(
+            dtype=bbox_deltas.dtype
+        )
+        rpn_roi_probs = helper.create_variable_for_type_inference(
+            dtype=scores.dtype
+        )
+        outputs = {
+            'RpnRois': rpn_rois,
+            'RpnRoiProbs': rpn_roi_probs,
+        }
+        if return_rois_num:
+            rpn_rois_num = helper.create_variable_for_type_inference(
+                dtype='int32'
+            )
+            rpn_rois_num.stop_gradient = True
+            outputs['RpnRoisNum'] = rpn_rois_num
+
+        helper.append_op(
+            type="generate_proposals_v2",
+            inputs={
+                'Scores': scores,
+                'BboxDeltas': bbox_deltas,
+                'ImShape': img_size,
+                'Anchors': anchors,
+                'Variances': variances,
+            },
+            attrs={
+                'pre_nms_topN': pre_nms_top_n,
+                'post_nms_topN': post_nms_top_n,
+                'nms_thresh': nms_thresh,
+                'min_size': min_size,
+                'eta': eta,
+                'pixel_offset': pixel_offset,
+            },
+            outputs=outputs,
+        )
+        rpn_rois.stop_gradient = True
+        rpn_roi_probs.stop_gradient = True
+        if not return_rois_num:
+            rpn_rois_num = None

     return rpn_rois, rpn_roi_probs, rpn_rois_num
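Review note: usage sketch, assuming `paddle.vision.ops.generate_proposals`; the shapes below are one consistent layout (scores `[N, A, H, W]`, deltas `[N, 4*A, H, W]`, anchors/variances `[H, W, A, 4]`), not the only one the op accepts:

```python
import paddle

scores = paddle.rand((2, 4, 5, 5), dtype='float32')        # [N, A, H, W]
bbox_deltas = paddle.rand((2, 16, 5, 5), dtype='float32')  # [N, 4*A, H, W]
img_size = paddle.to_tensor([[200.0, 200.0], [200.0, 200.0]])
anchors = paddle.rand((5, 5, 4, 4), dtype='float32')       # [H, W, A, 4]
variances = paddle.rand((5, 5, 4, 4), dtype='float32')
rois, roi_probs, roi_nums = paddle.vision.ops.generate_proposals(
    scores, bbox_deltas, img_size, anchors, variances, return_rois_num=True
)
print(roi_nums)  # proposals kept per image
```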
def matrix_nms(

@@ -2535,31 +2301,6 @@ def matrix_nms(
         if not return_rois_num:
             rois_num = None
         return out, rois_num, index
-    elif _in_legacy_dygraph():
-        attrs = (
-            'background_label',
-            background_label,
-            'score_threshold',
-            score_threshold,
-            'post_threshold',
-            post_threshold,
-            'nms_top_k',
-            nms_top_k,
-            'gaussian_sigma',
-            gaussian_sigma,
-            'use_gaussian',
-            use_gaussian,
-            'keep_top_k',
-            keep_top_k,
-            'normalized',
-            normalized,
-        )
-        out, index, rois_num = _legacy_C_ops.matrix_nms(bboxes, scores, *attrs)
-        if not return_index:
-            index = None
-        if not return_rois_num:
-            rois_num = None
-        return out, rois_num, index
     else:
         helper = LayerHelper('matrix_nms', **locals())
         output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
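Review note: usage sketch, assuming `paddle.vision.ops.matrix_nms` with the attrs the static branch forwards (`bboxes` is `[N, M, 4]`, `scores` is `[N, C, M]`):

```python
import paddle

boxes = paddle.rand([4, 1, 4])
boxes[..., 2] = boxes[..., 0] + boxes[..., 2]   # to (x1, y1, x2, y2)
boxes[..., 3] = boxes[..., 1] + boxes[..., 3]
scores = paddle.rand([4, 80, 1])
out = paddle.vision.ops.matrix_nms(
    bboxes=boxes, scores=scores, background_label=0,
    score_threshold=0.5, post_threshold=0.1,
    nms_top_k=400, keep_top_k=200, normalized=False,
)
```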