Unverified commit 1c7ae954, authored by 姜永久, committed by GitHub

rm in_legacy part8 (#49386)

* rm legacy layers part6

* rm non_static_mode

* modify non_static

* minor change

* rm loss

* rm in_legacy part8

* minor change
Parent 0c52e8a8
......@@ -13,7 +13,7 @@
# limitations under the License.
from ..layer_helper import LayerHelper, unique_name
from ..framework import Variable, in_dygraph_mode, _in_legacy_dygraph
from ..framework import Variable, in_dygraph_mode
import paddle
from paddle import _C_ops, _legacy_C_ops
......@@ -120,42 +120,31 @@ def _c_allgather(x, nranks, ring_id=0, use_calc_stream=False):
task = group.process_group.all_gather(x, out)
task.wait()
return out
    if _in_legacy_dygraph():
        attrs = (
            'nranks',
            nranks,
            'ring_id',
            ring_id,
            'use_calc_stream',
            use_calc_stream,
        )
        return _legacy_C_ops.c_allgather(x, *attrs)
    else:
        helper = LayerHelper(op_type, **locals())
        out_shape = list(x.shape[:])
        if out_shape[0] > 0:
            out_shape[0] *= nranks
        out = helper.create_variable(
            name=unique_name.generate_with_ignorable_key(
                '.'.join([x.name, op_type])
            ),
            shape=out_shape,
            dtype=x.dtype,
            type=x.type,
            persistable=x.persistable,
        )
helper = LayerHelper(op_type, **locals())
out_shape = list(x.shape[:])
if out_shape[0] > 0:
out_shape[0] *= nranks
out = helper.create_variable(
name=unique_name.generate_with_ignorable_key(
'.'.join([x.name, op_type])
),
shape=out_shape,
dtype=x.dtype,
type=x.type,
persistable=x.persistable,
)
helper.append_op(
type=op_type,
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={
'nranks': nranks,
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
},
)
return out
helper.append_op(
type=op_type,
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={
'nranks': nranks,
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
},
)
return out
def _c_reducescatter(x, nranks, ring_id=0, use_calc_stream=False):
......
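Every hunk in this PR applies the same mechanical rewrite: the old three-way dispatch (`in_dygraph_mode()` fast path, `_in_legacy_dygraph()` fallback, static-graph default) collapses into a two-way `if in_dygraph_mode(): ... else: ...`. A minimal sketch of the surviving shape, using `relu` purely as an illustrative op (not code from this PR):

```python
import paddle
from paddle import _C_ops
from paddle.fluid.layer_helper import LayerHelper

def my_relu(x):
    if paddle.in_dynamic_mode():
        # Dygraph: call the C++ kernel eagerly.
        return _C_ops.relu(x)
    else:
        # Static graph: record a relu operator in the current program.
        helper = LayerHelper('relu', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
        return out
```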
......@@ -21,9 +21,7 @@ from ..framework import (
Program,
Variable,
Operator,
_non_static_mode,
static_only,
_in_legacy_dygraph,
in_dygraph_mode,
)
from ..layer_helper import LayerHelper, unique_name
......@@ -1154,7 +1152,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
"but given shape as {0}.".format(list(pre_cond.shape))
)
if _non_static_mode():
if in_dygraph_mode():
now_cond = pre_cond.numpy()[0]
while now_cond:
output_vars = body(*loop_vars)
......@@ -1168,33 +1166,33 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
now_cond = cond(*output_vars).numpy()[0]
map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
return loop_vars
while_loop_block = While(pre_cond, is_test, name)
has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
with while_loop_block.block():
# If a variable with mutable type is included in loop_vars, like `dict/list`,
# modifying it in the body function will cause origin variable to be modified
# synchronously. This will raise an assignment error out of while block.
# Here we make a copy of the mutable vars to avoid this problem.
if has_mutable_vars_in_loop:
new_loop_vars = copy_mutable_vars(loop_vars)
output_vars = body(*new_loop_vars)
else:
output_vars = body(*loop_vars)
if not isinstance(output_vars, (list, tuple)):
output_vars = [output_vars]
try:
loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
assert_same_structure(output_vars, loop_vars, check_types=False)
except ValueError as e:
raise ValueError(
"body in while_loop should return the same arity "
"(length and structure) as loop_vars: {0}".format(e)
)
now_cond = cond(*output_vars)
map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
assign(now_cond, pre_cond)
return loop_vars
else:
while_loop_block = While(pre_cond, is_test, name)
has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
with while_loop_block.block():
# If a variable with mutable type is included in loop_vars, like `dict/list`,
# modifying it in the body function will cause origin variable to be modified
# synchronously. This will raise an assignment error out of while block.
# Here we make a copy of the mutable vars to avoid this problem.
if has_mutable_vars_in_loop:
new_loop_vars = copy_mutable_vars(loop_vars)
output_vars = body(*new_loop_vars)
else:
output_vars = body(*loop_vars)
if not isinstance(output_vars, (list, tuple)):
output_vars = [output_vars]
try:
loop_vars = _deal_with_undefined_var(output_vars, loop_vars)
assert_same_structure(output_vars, loop_vars, check_types=False)
except ValueError as e:
raise ValueError(
"body in while_loop should return the same arity "
"(length and structure) as loop_vars: {0}".format(e)
)
now_cond = cond(*output_vars)
map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
assign(now_cond, pre_cond)
return loop_vars
# (TODO: Mine) There exists dependency. It will be removed later.
......
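For context, the dygraph branch of `while_loop` above simply executes the Python loop eagerly, while the static branch builds a `While` block in the program. A short usage sketch of the public entry point:

```python
import paddle

def cond(i, ten):
    return i < ten

def body(i, ten):
    return [i + 1, ten]

i = paddle.full(shape=[1], fill_value=0, dtype='int64')
ten = paddle.full(shape=[1], fill_value=10, dtype='int64')
# Dygraph: runs the loop immediately; static: appends a While op.
i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
print(i)  # Tensor containing 10
```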
......@@ -24,13 +24,10 @@ from ..framework import (
Variable,
core,
convert_np_dtype_to_dtype_,
_non_static_mode,
in_dygraph_mode,
_in_legacy_dygraph,
)
from ..layer_helper import LayerHelper
from ..data_feeder import check_variable_and_dtype
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from paddle import _C_ops, _legacy_C_ops
__all__ = [
......@@ -276,7 +273,7 @@ def generate_activation_fn(op_type):
return op(x)
# TODO(dev): Because some ops' yaml has not been migrated.
# Replace it with _in_legacy_dygraph while all yaml work is done.
if _non_static_mode():
if in_dygraph_mode() and hasattr(_legacy_C_ops, op_type):
op = getattr(_legacy_C_ops, op_type)
return op(x)
......@@ -327,15 +324,16 @@ def generate_inplace_fn(inplace_op_type):
origin_op_type = inplace_op_type[:-1]
def func(x, name=None):
if _non_static_mode():
if in_dygraph_mode():
op = getattr(_legacy_C_ops, inplace_op_type)
return op(x)
        warnings.warn(
            "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
                inplace_op_type, origin_op_type
            )
        )
        return generate_activation_fn(origin_op_type)(x, name)
        else:
            warnings.warn(
                "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
                    inplace_op_type, origin_op_type
                )
            )
            return generate_activation_fn(origin_op_type)(x, name)
func.__name__ = inplace_op_type
func.__doc__ = """
......
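The wrappers produced by `generate_inplace_fn` (e.g. `relu_`) now branch only on dygraph vs. static mode: in dygraph they dispatch to the real inplace kernel, in static mode they warn and fall back to the out-of-place op. A usage sketch:

```python
import paddle

x = paddle.to_tensor([-1.0, 0.5, 2.0])
y = paddle.nn.functional.relu_(x)  # mutates x in place in dygraph mode
print(x.numpy())  # [0.  0.5 2. ]; y aliases x
```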
......@@ -27,9 +27,14 @@ import paddle
from . import control_flow
from . import nn
from . import tensor
from ..framework import default_main_program, Parameter, unique_name, name_scope
from ..framework import (
default_main_program,
Parameter,
unique_name,
name_scope,
in_dygraph_mode,
)
from ..framework import Variable
from ..framework import _non_static_mode
from ..dygraph import learning_rate_scheduler as imperate_lr
from ..data_feeder import check_variable_and_dtype, check_type
......@@ -99,7 +104,7 @@ def noam_decay(d_model, warmup_steps, learning_rate=1.0):
learning_rate)
"""
with default_main_program()._lr_schedule_guard():
if _non_static_mode():
if in_dygraph_mode():
decay = imperate_lr.NoamDecay(
d_model, warmup_steps, learning_rate=learning_rate
)
......@@ -160,7 +165,7 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
with default_main_program()._lr_schedule_guard():
if _non_static_mode():
if in_dygraph_mode():
decay = imperate_lr.ExponentialDecay(
learning_rate, decay_steps, decay_rate, staircase
)
......@@ -222,7 +227,7 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
"""
with default_main_program()._lr_schedule_guard():
if _non_static_mode():
if in_dygraph_mode():
decay = imperate_lr.NaturalExpDecay(
learning_rate, decay_steps, decay_rate, staircase
)
......@@ -282,7 +287,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
staircase=True))
"""
with default_main_program()._lr_schedule_guard():
if _non_static_mode():
if in_dygraph_mode():
decay = imperate_lr.InverseTimeDecay(
learning_rate, decay_steps, decay_rate, staircase
)
......@@ -337,7 +342,7 @@ def polynomial_decay(
"""
with default_main_program()._lr_schedule_guard():
if _non_static_mode():
if in_dygraph_mode():
decay = imperate_lr.PolynomialDecay(
learning_rate, decay_steps, end_learning_rate, power, cycle
)
......@@ -414,7 +419,7 @@ def piecewise_decay(boundaries, values):
if len(values) - len(boundaries) != 1:
raise ValueError("len(values) - len(boundaries) should be 1")
if _non_static_mode():
if in_dygraph_mode():
decay = imperate_lr.PiecewiseDecay(boundaries, values, 0)
return decay
else:
......@@ -488,7 +493,7 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
)
with default_main_program()._lr_schedule_guard():
if _non_static_mode():
if in_dygraph_mode():
decay = imperate_lr.CosineDecay(
learning_rate, step_each_epoch, epochs
)
......@@ -569,7 +574,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
linear_step = float(end_lr) - float(start_lr)
with default_main_program()._lr_schedule_guard():
if _non_static_mode():
if in_dygraph_mode():
lr = imperate_lr.LinearLrWarmup(
learning_rate, warmup_steps, start_lr, end_lr
)
......
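All the schedulers in this file follow the same split: in dygraph mode they return an imperative scheduler object, in static mode they build the decay subgraph. The 2.x replacement for `noam_decay`, shown as a usage sketch with illustrative hyperparameters:

```python
import paddle

scheduler = paddle.optimizer.lr.NoamDecay(
    d_model=512, warmup_steps=4000, learning_rate=1.0
)
for step in range(3):
    print(scheduler.get_lr())  # lr grows linearly during warmup
    scheduler.step()
```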
......@@ -22,19 +22,16 @@ import numpy as np
import paddle
from ..layer_helper import LayerHelper
from paddle.fluid.framework import _in_legacy_dygraph
from ..initializer import Normal, Constant
from ..framework import (
Variable,
OpProtoHolder,
_non_static_mode,
dygraph_only,
_dygraph_tracer,
default_main_program,
_varbase_creator,
static_only,
_global_flags,
_in_legacy_dygraph,
in_dygraph_mode,
)
from ..framework import _current_expected_place
......@@ -128,10 +125,6 @@ def _elementwise_op_in_dygraph(
OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
)
out = op(x, y)
if _in_legacy_dygraph():
op = getattr(_legacy_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn
)
......@@ -794,26 +787,25 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
if in_dygraph_mode():
return _C_ops.sum(input, dim, None, keep_dim)
    elif _in_legacy_dygraph():
        return _legacy_C_ops.reduce_sum(
            input, 'dim', dim, 'keep_dim', keep_dim, 'reduce_all', reduce_all
        )
    else:
        attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
        check_variable_and_dtype(
            input,
            'input',
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'reduce_sum',
        )
attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
check_variable_and_dtype(
input,
'input',
['float16', 'float32', 'float64', 'int32', 'int64'],
'reduce_sum',
)
helper = LayerHelper('reduce_sum', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs,
)
return out
helper = LayerHelper('reduce_sum', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
helper.append_op(
type='reduce_sum',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs,
)
return out
def autoincreased_step_counter(counter_name=None, begin=1, step=1):
......@@ -895,7 +887,7 @@ def unsqueeze(input, axes, name=None):
y = fluid.layers.unsqueeze(input=x, axes=[1])
"""
if _non_static_mode():
if in_dygraph_mode():
if isinstance(axes, int):
axes = [axes]
elif isinstance(axes, Variable):
......@@ -905,98 +897,106 @@ def unsqueeze(input, axes, name=None):
item.numpy().item(0) if isinstance(item, Variable) else item
for item in axes
]
if _in_legacy_dygraph():
out, _ = _legacy_C_ops.unsqueeze2(input, 'axes', axes)
return out
return _C_ops.unsqueeze(input, axes)
else:
check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
check_variable_and_dtype(
input,
'input',
[
'float16',
'float32',
'float64',
'bool',
'int8',
'int16',
'int32',
'int64',
'complex64',
'complex128',
],
'unsqueeze',
)
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
check_variable_and_dtype(
input,
'input',
[
'float16',
'float32',
'float64',
'bool',
'int8',
'int16',
'int32',
'int64',
'complex64',
'complex128',
],
'unsqueeze',
)
helper = LayerHelper("unsqueeze2", **locals())
inputs = {"X": input}
attrs = {}
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes):
inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
else:
attrs["axes"] = axes
if isinstance(axes, int):
axes = [axes]
if isinstance(axes, Variable):
axes.stop_gradient = True
inputs["AxesTensor"] = axes
elif isinstance(axes, (list, tuple)):
if utils._contain_var(axes):
inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
else:
attrs["axes"] = axes
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="unsqueeze2",
inputs=inputs,
attrs=attrs,
outputs={"Out": out, "XShape": x_shape},
)
return out
return out
def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if _non_static_mode():
if in_dygraph_mode():
op = getattr(_legacy_C_ops, op_name)
if binary_op:
return op(x, y)
else:
return op(x)
check_variable_and_dtype(
x,
"x",
["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
op_name,
)
    if y is not None:
        check_variable_and_dtype(
            y,
            "y",
            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
            op_name,
        )
    if out is not None:
        check_type(out, "out", Variable, op_name)
    helper = LayerHelper(op_name, **locals())
    else:
        check_variable_and_dtype(
            x,
            "x",
            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
            op_name,
        )
        if y is not None:
            check_variable_and_dtype(
                y,
                "y",
                [
                    "bool",
                    "int8",
                    "int16",
                    "int32",
                    "int64",
                    "float32",
                    "float64",
                ],
                op_name,
            )
        if out is not None:
            check_type(out, "out", Variable, op_name)
        helper = LayerHelper(op_name, **locals())
if binary_op and x.dtype != y.dtype:
raise ValueError(
"(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
% (op_name, x.dtype, y.dtype)
)
if binary_op and x.dtype != y.dtype:
raise ValueError(
"(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
% (op_name, x.dtype, y.dtype)
)
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if out is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
if binary_op:
helper.append_op(
type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
)
else:
helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
if binary_op:
helper.append_op(
type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
)
else:
helper.append_op(
type=op_name, inputs={"X": x}, outputs={"Out": out}
)
return out
return out
@templatedoc()
......@@ -1082,30 +1082,28 @@ def clip_by_norm(x, max_norm, name=None):
if in_dygraph_mode():
return _C_ops.clip_by_norm(x, max_norm)
    if _non_static_mode():
        return _legacy_C_ops.clip_by_norm(x, 'max_norm', max_norm)
    helper = LayerHelper("clip_by_norm", **locals())
    check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm')
    check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
    if name is None:
        name = unique_name.generate_with_ignorable_key(
            ".".join([helper.name, 'tmp'])
        )
    out = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False
    )
    else:
        helper = LayerHelper("clip_by_norm", **locals())
        check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm')
        check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
        if name is None:
            name = unique_name.generate_with_ignorable_key(
                ".".join([helper.name, 'tmp'])
            )
        out = helper.create_variable(
            type=x.type, name=name, dtype=x.dtype, persistable=False
        )
helper.append_op(
type="clip_by_norm",
inputs={"X": x},
attrs={"max_norm": max_norm},
outputs={"Out": out},
)
helper.append_op(
type="clip_by_norm",
inputs={"X": x},
attrs={"max_norm": max_norm},
outputs={"Out": out},
)
return out
return out
@templatedoc()
......@@ -1132,19 +1130,16 @@ def merge_selected_rows(x, name=None):
"""
if in_dygraph_mode():
return _C_ops.merge_selected_rows(x)
if _non_static_mode():
return _legacy_C_ops.merge_selected_rows(x)
helper = LayerHelper("merge_selected_rows", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="merge_selected_rows",
inputs={"X": x},
attrs={},
outputs={"Out": out},
)
return out
else:
helper = LayerHelper("merge_selected_rows", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="merge_selected_rows",
inputs={"X": x},
attrs={},
outputs={"Out": out},
)
return out
@templatedoc()
......
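The fluid layers rewritten above (`reduce_sum`, `unsqueeze`, the logical ops, `clip_by_norm`, `merge_selected_rows`) all have 2.x equivalents that take the `_C_ops` fast path in dygraph mode. A quick usage sketch:

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
s = paddle.sum(x, axis=1, keepdim=True)  # dygraph -> _C_ops.sum
u = paddle.unsqueeze(x, axis=[0])        # dygraph -> _C_ops.unsqueeze
print(s.shape, u.shape)                  # [2, 1] [1, 2, 2]
```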
......@@ -17,9 +17,7 @@ from .layer_function_generator import templatedoc
from ..framework import (
core,
Variable,
_non_static_mode,
in_dygraph_mode,
_in_legacy_dygraph,
convert_np_dtype_to_dtype_,
)
from ..layer_helper import LayerHelper
......@@ -156,7 +154,7 @@ def sequence_conv(
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'sequence_conv'
......@@ -258,7 +256,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
x_sequence_softmax_2 = paddle.static.nn.sequence_softmax(input=y)
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_softmax', **locals())
check_variable_and_dtype(
......@@ -363,7 +361,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
first_x = paddle.static.nn.sequence_pool(input=x, pool_type='first')
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'sequence_pool'
......@@ -441,7 +439,7 @@ def sequence_concat(input, name=None):
out = paddle.static.nn.sequence_concat(input=[x, y])
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_concat', **locals())
......@@ -640,7 +638,7 @@ def sequence_slice(input, offset, length, name=None):
length=length)
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper("sequence_slice", **locals())
......@@ -794,7 +792,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
# data: [1 2 1 2 3 4 3 4]
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand'
......@@ -916,7 +914,7 @@ def sequence_expand_as(x, y, name=None):
# data: [1 1 1 2 2 2 3 4]
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand_as'
......@@ -1019,7 +1017,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_pad', **locals())
check_variable_and_dtype(
......@@ -1108,7 +1106,7 @@ def sequence_unpad(x, length, name=None):
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_unpad', **locals())
check_variable_and_dtype(
......@@ -1183,7 +1181,7 @@ def sequence_reshape(input, new_dim):
x_reshaped = paddle.static.nn.sequence_reshape(input=x, new_dim=4)
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_reshape', **locals())
check_variable_and_dtype(
......@@ -1268,7 +1266,7 @@ def sequence_scatter(input, index, updates, name=None):
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper('sequence_scatter', **locals())
......@@ -1350,7 +1348,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0)
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
check_variable_and_dtype(
input, 'input', ['int32', 'int64'], 'sequence_enumerate'
......@@ -1479,7 +1477,7 @@ def sequence_reverse(x, name=None):
x_reversed = paddle.static.nn.sequence_reverse(x)
"""
assert (
not _non_static_mode()
not in_dygraph_mode()
), "sequence layer is not supported in dygraph mode yet."
helper = LayerHelper("sequence_reverse", **locals())
check_variable_and_dtype(
......
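Since every `sequence_*` layer asserts it is not running in dygraph mode, swapping `_non_static_mode()` for `in_dygraph_mode()` in those asserts preserves the guard. These layers are usable only under a static program, e.g. (a sketch):

```python
import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    # lod_level=1 declares x as a LoDTensor, which sequence ops require.
    x = paddle.static.data(name='x', shape=[-1, 10], dtype='float32', lod_level=1)
    out = paddle.static.nn.sequence_pool(input=x, pool_type='max')
```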
......@@ -19,9 +19,7 @@ from ..layer_helper import LayerHelper
from ..framework import (
_current_expected_place,
convert_np_dtype_to_dtype_,
_non_static_mode,
_varbase_creator,
_in_legacy_dygraph,
in_dygraph_mode,
)
from ..framework import Variable
......@@ -81,59 +79,53 @@ def cast(x, dtype):
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
return _C_ops.cast(x, dtype)
else:
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'cast',
)
check_dtype(
dtype,
'dtype',
[
'bool',
'float16',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'cast',
)
    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out
helper = LayerHelper('cast', **locals())
out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=x.stop_gradient
)
helper.append_op(
type='cast',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
)
return out
check_variable_and_dtype(
x,
'x',
[
'bool',
'float16',
'float32',
'float64',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'cast',
)
check_dtype(
dtype,
'dtype',
[
'bool',
'float16',
'float32',
'float64',
'int8',
'int16',
'int32',
'int64',
'uint8',
'uint16',
],
'cast',
)
helper = LayerHelper('cast', **locals())
out = helper.create_variable_for_type_inference(
dtype=dtype, stop_gradient=x.stop_gradient
)
helper.append_op(
type='cast',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
)
return out
def concat(input, axis=0, name=None):
"""
......@@ -191,73 +183,69 @@ def concat(input, axis=0, name=None):
input = [t for t in input if t.shape.count(0) == 0]
out = _C_ops.concat(input, axis)
return out
    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _legacy_C_ops.concat(input, out, 'axis', axis)
        return out
    else:
        check_type(input, 'input', (list, tuple, Variable), 'concat')
for id, x in enumerate(input):
check_variable_and_dtype(
x,
'input[' + str(id) + ']',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
'concat',
)
if x.dtype != input[0].dtype:
raise TypeError(
"All the Tensors in the input must have the same data type."
)
else:
input = [input]
check_type(axis, 'axis', (int, Variable), 'concat')
check_type(input, 'input', (list, tuple, Variable), 'concat')
if not isinstance(input, Variable):
for id, x in enumerate(input):
check_variable_and_dtype(
x,
'input[' + str(id) + ']',
['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
if isinstance(axis, Variable):
check_dtype(
axis.dtype,
'axis',
['int32', 'int64'],
'concat',
"The data type of axis must be int32 or int64 when axis is a Tensor",
)
if x.dtype != input[0].dtype:
raise TypeError(
"All the Tensors in the input must have the same data type."
)
else:
input = [input]
check_type(axis, 'axis', (int, Variable), 'concat')
if isinstance(axis, Variable):
check_dtype(
axis.dtype,
'axis',
['int32', 'int64'],
'concat',
"The data type of axis must be int32 or int64 when axis is a Tensor",
helper = LayerHelper('concat', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
helper = LayerHelper('concat', **locals())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
# NOTE(liym27): Don't remove this if branch!
# This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
# is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.
if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
# NOTE(liym27): Don't remove this if branch!
# This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
# is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.
assert len(input) == 1, (
"If the elements of 'input' in concat are Variable(LoDTensorArray), "
"number of the elements must be 1, but received %s." % len(input)
)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': input[0]},
outputs={'Out': [out], 'OutIndex': [out_index]},
attrs={'axis': axis, 'use_stack': False},
)
else:
inputs = {'X': input}
attrs = {}
if isinstance(axis, Variable):
axis.stop_gradient = True
attrs['axis'] = axis
assert len(input) == 1, (
"If the elements of 'input' in concat are Variable(LoDTensorArray), "
"number of the elements must be 1, but received %s."
% len(input)
)
out_index = helper.create_variable_for_type_inference(dtype="int32")
helper.append_op(
type='tensor_array_to_tensor',
inputs={'X': input[0]},
outputs={'Out': [out], 'OutIndex': [out_index]},
attrs={'axis': axis, 'use_stack': False},
)
else:
inputs = {'X': input}
attrs = {}
if isinstance(axis, Variable):
axis.stop_gradient = True
attrs['axis'] = axis
helper.append_op(
type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
)
return out
helper.append_op(
type='concat',
inputs=inputs,
outputs={'Out': [out]},
attrs=attrs,
)
return out
def sums(input, out=None):
......@@ -391,22 +379,15 @@ def assign(input, output=None):
input = numpy.array(input)
# NOTE(Aurelius84): Why we judge core.VarBase?
# In case of @to_static, a VarBase can be as input of `assign`,
# but _non_static_mode()==False under @to_static, which means
# but in_dygraph_mode()==False under @to_static, which means
# isinstance(VarBase, Variable) == False. It will cause return None
# after this api.
if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.assign(input)
            elif in_dygraph_mode() and output is not None:
                _C_ops.assign_out_(input, output)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _legacy_C_ops.assign(input, output)
        if in_dygraph_mode():
            if output is None:
                output = _C_ops.assign(input)
            else:
                _C_ops.assign_out_(input, output)
else:
check_dtype(
input.dtype,
......@@ -480,18 +461,6 @@ def assign(input, output=None):
values,
_current_expected_place(),
)
elif _in_legacy_dygraph():
if output is None:
output = core.VarBase()
_legacy_C_ops.assign_value(
output,
'shape',
list(input.shape),
'dtype',
dtype,
value_name,
values,
)
else:
if output is None:
output = helper.create_variable_for_type_inference(
......@@ -507,7 +476,7 @@ def assign(input, output=None):
},
)
if is_inplace and _non_static_mode():
if is_inplace and in_dygraph_mode():
output._bump_inplace_version()
return output
......@@ -591,83 +560,56 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
_C_ops.full_(out, shape, float(value), dtype, place)
out.stop_gradient = True
return out
    if _in_legacy_dygraph():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)
        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))
        _legacy_C_ops.fill_constant(
            out,
            'value',
            float(value),
            'force_cpu',
            force_cpu,
            'dtype',
            out.dtype,
            'str_value',
            attrs['str_value'],
            'shape',
            shape,
        )
        out.stop_gradient = True
        return out
    else:
        helper = LayerHelper("fill_constant", **locals())
        inputs = {}
        if isinstance(value, Variable):
            if convert_dtype(value.dtype) != dtype:
                value = cast(value, dtype)
            inputs['ValueTensor'] = value
        check_shape(shape)
        check_dtype(
            dtype,
            'dtype',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'uint8',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'fill_constant',
        )
        check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
helper = LayerHelper("fill_constant", **locals())
inputs = {}
if isinstance(value, Variable):
if convert_dtype(value.dtype) != dtype:
value = cast(value, dtype)
inputs['ValueTensor'] = value
check_shape(shape)
check_dtype(
dtype,
'dtype',
[
'bool',
'float16',
'float32',
'float64',
'uint8',
'int16',
'int32',
'int64',
'complex64',
'complex128',
],
'fill_constant',
)
check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
if out is not None:
check_variable_and_dtype(
out, 'out', [convert_dtype(dtype)], 'fill_constant'
)
if out is not None:
check_variable_and_dtype(
out, 'out', [convert_dtype(dtype)], 'fill_constant'
helper = LayerHelper("fill_constant", **locals())
utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
)
helper = LayerHelper("fill_constant", **locals())
utils.get_shape_tensor_inputs(
inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
)
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
attrs['dtype'] = out.dtype
helper.append_op(
type='fill_constant',
inputs=inputs,
outputs={'Out': [out]},
attrs=attrs,
stop_gradient=True,
)
out.stop_gradient = True
return out
if out is None:
out = helper.create_variable_for_type_inference(dtype=dtype)
attrs['dtype'] = out.dtype
helper.append_op(
type='fill_constant',
inputs=inputs,
outputs={'Out': [out]},
attrs=attrs,
stop_gradient=True,
)
out.stop_gradient = True
return out
@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
......@@ -727,29 +669,29 @@ def fill_constant_batch_size_like(
)
out.stop_gradient = True
return out
helper = LayerHelper("fill_constant_batch_size_like", **locals())
out = helper.create_variable_for_type_inference(dtype=dtype)
attrs = {
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'force_cpu': force_cpu,
}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
else:
attrs['str_value'] = str(float(value))
helper.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': input},
outputs={'Out': [out]},
attrs=attrs,
)
out.stop_gradient = True
return out
helper = LayerHelper("fill_constant_batch_size_like", **locals())
out = helper.create_variable_for_type_inference(dtype=dtype)
attrs = {
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'force_cpu': force_cpu,
}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
else:
attrs['str_value'] = str(float(value))
helper.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': input},
outputs={'Out': [out]},
attrs=attrs,
)
out.stop_gradient = True
return out
def argmin(x, axis=0):
......
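In the 2.x API these tensor utilities surface as `paddle.cast`, `paddle.concat`, `paddle.assign`, and `paddle.full`; after this PR each dygraph call goes straight to its `_C_ops` kernel. A usage sketch:

```python
import paddle

x = paddle.full(shape=[2, 2], fill_value=1.5, dtype='float32')  # fill_constant
y = paddle.cast(x, 'int64')                                     # cast
z = paddle.concat([y, y], axis=0)                               # concat
w = paddle.assign(z)                                            # assign
print(w.dtype, w.shape)  # paddle.int64, [4, 2]
```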
......@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import _C_ops, _legacy_C_ops
from paddle import _C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
__all__ = []
......@@ -52,32 +52,27 @@ def segment_sum(data, segment_ids, name=None):
"""
if in_dygraph_mode():
return _C_ops.segment_pool(data, segment_ids, "SUM")[0]
    if _in_legacy_dygraph():
        out, tmp = _legacy_C_ops.segment_pool(
            data, segment_ids, 'pooltype', "SUM"
        )
        return out
    else:
        check_variable_and_dtype(
            data,
            "X",
            ("float32", "float64", "int32", "int64", "float16"),
            "segment_pool",
        )
        check_variable_and_dtype(
            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
        )
check_variable_and_dtype(
data,
"X",
("float32", "float64", "int32", "int64", "float16"),
"segment_pool",
)
check_variable_and_dtype(
segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
)
helper = LayerHelper("segment_sum", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "SUM"},
)
return out
helper = LayerHelper("segment_sum", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "SUM"},
)
return out
def segment_mean(data, segment_ids, name=None):
......@@ -114,32 +109,28 @@ def segment_mean(data, segment_ids, name=None):
if in_dygraph_mode():
return _C_ops.segment_pool(data, segment_ids, "MEAN")[0]
    if _in_legacy_dygraph():
        out, tmp = _legacy_C_ops.segment_pool(
            data, segment_ids, 'pooltype', "MEAN"
        )
        return out
    else:
        check_variable_and_dtype(
            data,
            "X",
            ("float32", "float64", "int32", "int64", "float16"),
            "segment_pool",
        )
        check_variable_and_dtype(
            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
        )
check_variable_and_dtype(
data,
"X",
("float32", "float64", "int32", "int64", "float16"),
"segment_pool",
)
check_variable_and_dtype(
segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
)
helper = LayerHelper("segment_mean", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "MEAN"},
)
return out
helper = LayerHelper("segment_mean", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "MEAN"},
)
return out
def segment_min(data, segment_ids, name=None):
......@@ -175,32 +166,27 @@ def segment_min(data, segment_ids, name=None):
if in_dygraph_mode():
return _C_ops.segment_pool(data, segment_ids, "MIN")[0]
    if _in_legacy_dygraph():
        out, tmp = _legacy_C_ops.segment_pool(
            data, segment_ids, 'pooltype', "MIN"
        )
        return out
    else:
        check_variable_and_dtype(
            data,
            "X",
            ("float32", "float64", "int32", "int64", "float16"),
            "segment_pool",
        )
        check_variable_and_dtype(
            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
        )
check_variable_and_dtype(
data,
"X",
("float32", "float64", "int32", "int64", "float16"),
"segment_pool",
)
check_variable_and_dtype(
segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
)
helper = LayerHelper("segment_min", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "MIN"},
)
return out
helper = LayerHelper("segment_min", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "MIN"},
)
return out
def segment_max(data, segment_ids, name=None):
......@@ -236,29 +222,24 @@ def segment_max(data, segment_ids, name=None):
if in_dygraph_mode():
return _C_ops.segment_pool(data, segment_ids, "MAX")[0]
    if _in_legacy_dygraph():
        out, tmp = _legacy_C_ops.segment_pool(
            data, segment_ids, 'pooltype', "MAX"
        )
        return out
    else:
        check_variable_and_dtype(
            data,
            "X",
            ("float32", "float64", "int32", "int64", "float16"),
            "segment_pool",
        )
        check_variable_and_dtype(
            segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
        )
check_variable_and_dtype(
data,
"X",
("float32", "float64", "int32", "int64", "float16"),
"segment_pool",
)
check_variable_and_dtype(
segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
)
helper = LayerHelper("segment_max", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "MAX"},
)
return out
helper = LayerHelper("segment_max", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "MAX"},
)
return out
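The four segment reductions differ only in the pooltype attribute ("SUM", "MEAN", "MIN", "MAX") passed to the shared segment_pool op. A usage sketch, assuming the paddle.geometric entry point that wraps these helpers:

```python
import paddle

data = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
segment_ids = paddle.to_tensor([0, 0, 1], dtype='int32')
# Rows with the same segment id are reduced together.
out = paddle.geometric.segment_sum(data, segment_ids)
print(out)  # [[4., 6.], [5., 6.]]
```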
......@@ -14,13 +14,13 @@
import numpy as np
from paddle import _C_ops, _legacy_C_ops
from paddle import _C_ops
from paddle.fluid.data_feeder import (
check_dtype,
check_type,
check_variable_and_dtype,
)
from paddle.fluid.framework import Variable, _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import Variable, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
from .utils import (
......@@ -118,68 +118,61 @@ def send_u_recv(
# TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1.
if _in_legacy_dygraph():
out_size = convert_out_size_to_list(out_size)
out, tmp = _legacy_C_ops.graph_send_recv(
x,
src_index,
dst_index,
None,
'reduce_op',
reduce_op.upper(),
'out_size',
out_size,
)
return out
if in_dygraph_mode():
out_size = convert_out_size_to_list(out_size)
return _C_ops.send_u_recv(
x, src_index, dst_index, reduce_op.upper(), out_size
)
check_variable_and_dtype(
x,
"X",
("float32", "float64", "int32", "int64", "float16"),
"graph_send_recv",
)
check_variable_and_dtype(
src_index, "Src_index", ("int32", "int64"), "graph_send_recv"
)
check_variable_and_dtype(
dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv"
)
    if out_size:
        check_type(
            out_size,
            'out_size',
            (int, np.int32, np.int64, Variable),
            'graph_send_recv',
        )
    if isinstance(out_size, Variable):
        check_dtype(
            out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_recv'
        )
    else:
        check_variable_and_dtype(
            x,
            "X",
            ("float32", "float64", "int32", "int64", "float16"),
            "graph_send_recv",
        )
        check_variable_and_dtype(
            src_index, "Src_index", ("int32", "int64"), "graph_send_recv"
        )
        check_variable_and_dtype(
            dst_index, "Dst_index", ("int32", "int64"), "graph_send_recv"
        )
        if out_size:
            check_type(
                out_size,
                'out_size',
                (int, np.int32, np.int64, Variable),
                'graph_send_recv',
            )
        if isinstance(out_size, Variable):
            check_dtype(
                out_size.dtype,
                'out_size',
                ['int32', 'int64'],
                'graph_send_recv',
            )
helper = LayerHelper("send_u_recv", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
dst_count = helper.create_variable_for_type_inference(
dtype="int32", stop_gradient=True
)
helper = LayerHelper("send_u_recv", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
dst_count = helper.create_variable_for_type_inference(
dtype="int32", stop_gradient=True
)
inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index}
attrs = {"reduce_op": reduce_op.upper()}
get_out_size_tensor_inputs(
inputs=inputs, attrs=attrs, out_size=out_size, op_type='graph_send_recv'
)
inputs = {"X": x, "Src_index": src_index, "Dst_index": dst_index}
attrs = {"reduce_op": reduce_op.upper()}
get_out_size_tensor_inputs(
inputs=inputs,
attrs=attrs,
out_size=out_size,
op_type='graph_send_recv',
)
helper.append_op(
type="graph_send_recv",
inputs=inputs,
outputs={"Out": out, "Dst_count": dst_count},
attrs=attrs,
)
return out
helper.append_op(
type="graph_send_recv",
inputs=inputs,
outputs={"Out": out, "Dst_count": dst_count},
attrs=attrs,
)
return out
def send_ue_recv(
......@@ -302,86 +295,81 @@ def send_ue_recv(
# TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1.
    if _in_legacy_dygraph():
        out_size = convert_out_size_to_list(out_size)
        out, tmp = _legacy_C_ops.graph_send_ue_recv(
            x,
            y,
            src_index,
            dst_index,
            None,
            'message_op',
            message_op.upper(),
            'reduce_op',
            reduce_op.upper(),
            'out_size',
            out_size,
        )
        return out
    if in_dygraph_mode():
        out_size = convert_out_size_to_list(out_size)
        return _C_ops.send_ue_recv(
            x,
            y,
            src_index,
            dst_index,
            message_op.upper(),
            reduce_op.upper(),
            out_size,
        )
    else:
        check_variable_and_dtype(
            x,
            "X",
            ("float32", "float64", "int32", "int64", "float16"),
            "graph_send_ue_recv",
        )
        check_variable_and_dtype(
            y,
            "Y",
            ("float32", "float64", "int32", "int64", "float16"),
            "graph_send_ue_recv",
        )
check_variable_and_dtype(
src_index, "Src_index", ("int32", "int64"), "graph_send_ue_recv"
)
check_variable_and_dtype(
dst_index, "Dst_index", ("int32", "int64"), "graph_send_ue_recv"
)
if out_size:
check_type(
out_size,
'out_size',
(int, np.int32, np.int64, Variable),
'graph_send_ue_recv',
)
if isinstance(out_size, Variable):
check_dtype(
out_size.dtype,
'out_size',
['int32', 'int64'],
'graph_send_ue_recv',
)
check_variable_and_dtype(
x,
"X",
("float32", "float64", "int32", "int64", "float16"),
"graph_send_ue_recv",
)
check_variable_and_dtype(
y,
"Y",
("float32", "float64", "int32", "int64", "float16"),
"graph_send_ue_recv",
)
check_variable_and_dtype(
src_index, "Src_index", ("int32", "int64"), "graph_send_ue_recv"
)
check_variable_and_dtype(
dst_index, "Dst_index", ("int32", "int64"), "graph_send_ue_recv"
)
    if out_size:
        check_type(
            out_size,
            'out_size',
            (int, np.int32, np.int64, Variable),
            'graph_send_ue_recv',
        )
    if isinstance(out_size, Variable):
        check_dtype(
            out_size.dtype, 'out_size', ['int32', 'int64'], 'graph_send_ue_recv'
        )
        helper = LayerHelper("send_ue_recv", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        dst_count = helper.create_variable_for_type_inference(
            dtype="int32", stop_gradient=True
        )
        inputs = {
            "X": x,
            "Y": y,
            "Src_index": src_index,
            "Dst_index": dst_index,
        }
        attrs = {
            "message_op": message_op.upper(),
            "reduce_op": reduce_op.upper(),
        }
        get_out_size_tensor_inputs(
            inputs=inputs,
            attrs=attrs,
            out_size=out_size,
            op_type='graph_send_ue_recv',
        )
helper = LayerHelper("send_ue_recv", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
dst_count = helper.create_variable_for_type_inference(
dtype="int32", stop_gradient=True
)
inputs = {"X": x, "Y": y, "Src_index": src_index, "Dst_index": dst_index}
attrs = {"message_op": message_op.upper(), "reduce_op": reduce_op.upper()}
get_out_size_tensor_inputs(
inputs=inputs,
attrs=attrs,
out_size=out_size,
op_type='graph_send_ue_recv',
)
helper.append_op(
type="graph_send_ue_recv",
inputs=inputs,
outputs={"Out": out, "Dst_count": dst_count},
attrs=attrs,
)
return out
helper.append_op(
type="graph_send_ue_recv",
inputs=inputs,
outputs={"Out": out, "Dst_count": dst_count},
attrs=attrs,
)
return out
def send_uv(x, y, src_index, dst_index, message_op="add", name=None):
......@@ -466,43 +454,39 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None):
if in_dygraph_mode():
return _C_ops.send_uv(x, y, src_index, dst_index, message_op.upper())
else:
if _in_legacy_dygraph():
return _legacy_C_ops.graph_send_uv(
x, y, src_index, dst_index, "message_op", message_op.upper()
)
else:
helper = LayerHelper("graph_send_uv", **locals())
check_variable_and_dtype(
x,
'x',
['int32', 'int64', 'float32', 'float64', 'float16'],
'graph_send_uv',
)
check_variable_and_dtype(
y,
'y',
['int32', 'int64', 'float32', 'float64', 'float16'],
'graph_send_uv',
)
check_variable_and_dtype(
src_index, 'src_index', ['int32', 'int64'], 'graph_send_uv'
)
check_variable_and_dtype(
dst_index, 'dst_index', ['int32', 'int64'], 'graph_send_uv'
)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
inputs = {
'x': x,
'y': y,
'src_index': src_index,
'dst_index': dst_index,
}
attrs = {'message_op': message_op.upper()}
helper.append_op(
type="graph_send_uv",
inputs=inputs,
attrs=attrs,
outputs={"out": out},
)
return out
helper = LayerHelper("graph_send_uv", **locals())
check_variable_and_dtype(
x,
'x',
['int32', 'int64', 'float32', 'float64', 'float16'],
'graph_send_uv',
)
check_variable_and_dtype(
y,
'y',
['int32', 'int64', 'float32', 'float64', 'float16'],
'graph_send_uv',
)
check_variable_and_dtype(
src_index, 'src_index', ['int32', 'int64'], 'graph_send_uv'
)
check_variable_and_dtype(
dst_index, 'dst_index', ['int32', 'int64'], 'graph_send_uv'
)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
inputs = {
'x': x,
'y': y,
'src_index': src_index,
'dst_index': dst_index,
}
attrs = {'message_op': message_op.upper()}
helper.append_op(
type="graph_send_uv",
inputs=inputs,
attrs=attrs,
outputs={"out": out},
)
return out
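These message-passing helpers gather source-node (and optionally edge) features along `src_index`/`dst_index` pairs and reduce them at destination nodes. A usage sketch, assuming the paddle.geometric wrappers:

```python
import paddle

x = paddle.to_tensor([[0.0, 2.0], [1.0, 3.0], [2.0, 4.0]])
src_index = paddle.to_tensor([0, 1, 2, 0], dtype='int32')
dst_index = paddle.to_tensor([1, 2, 1, 0], dtype='int32')
# For edge i, x[src_index[i]] is summed into row dst_index[i] of the output.
out = paddle.geometric.send_u_recv(x, src_index, dst_index, reduce_op="sum")
print(out)  # [[0., 2.], [2., 6.], [1., 3.]]
```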
......@@ -1001,7 +1001,7 @@ def _custom_api_content(op_name):
"""
import paddle.fluid.core as core
from paddle.fluid.core import VarBase, CustomOpKernelContext
from paddle.fluid.framework import _non_static_mode, _dygraph_tracer, _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import _dygraph_tracer, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
def {op_name}({inputs}):
......@@ -1024,16 +1024,11 @@ def _custom_api_content(op_name):
ctx.add_outputs(outs[out_name])
core.eager._run_custom_op(ctx, "{op_name}", True)
else:
if _in_legacy_dygraph():
for out_name in out_names:
outs[out_name] = VarBase()
_dygraph_tracer().trace_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)
else:
helper = LayerHelper("{op_name}", **locals())
for out_name in out_names:
outs[out_name] = helper.create_variable(dtype='float32')
helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)
helper = LayerHelper("{op_name}", **locals())
for out_name in out_names:
outs[out_name] = helper.create_variable(dtype='float32')
helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)
res = [outs[out_name] for out_name in out_names]
......
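`_custom_api_content` string-formats the Python wrapper for a JIT-compiled custom operator; after this PR the generated code keeps only the eager branch and the LayerHelper branch. Loading and calling such an op looks roughly like this (source file and op names are illustrative placeholders):

```python
import paddle
from paddle.utils.cpp_extension import load

# JIT-compile the C++/CUDA sources; `load` generates the Python wrapper
# via _custom_api_content above.
custom_ops = load(name='custom_jit_ops', sources=['relu_op.cc', 'relu_op.cu'])
x = paddle.randn([4, 10])
out = custom_ops.custom_relu(x)
```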