Unverified commit 0a837cb2 — authored by 姜永久, committed via GitHub

Remove the `_in_legacy_dygraph` code paths, part 3 (#49264)

Parent commit: 140d786d
......@@ -18,9 +18,9 @@ import numpy as np
import paddle
from . import _C_ops, _legacy_C_ops
from . import _C_ops
from .fluid.data_feeder import check_variable_and_dtype
from .fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from .fluid.framework import in_dygraph_mode
from .fluid.layer_helper import LayerHelper
from .tensor.attribute import is_floating_point, is_integer
from .tensor.creation import _complex_to_real_dtype, _real_to_complex_dtype
......@@ -1445,9 +1445,6 @@ def fft_c2c(x, n, axis, norm, forward, name):
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], op_type)
if in_dygraph_mode():
out = _C_ops.fft_c2c(x, axes, norm, forward)
elif _in_legacy_dygraph():
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1480,18 +1477,6 @@ def fft_r2c(x, n, axis, norm, forward, onesided, name):
if in_dygraph_mode():
out = _C_ops.fft_r2c(x, axes, norm, forward, onesided)
elif _in_legacy_dygraph():
attrs = (
'axes',
axes,
'normalization',
norm,
'forward',
forward,
'onesided',
onesided,
)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1536,21 +1521,6 @@ def fft_c2r(x, n, axis, norm, forward, name):
out = _C_ops.fft_c2r(x, axes, norm, forward, n)
else:
out = _C_ops.fft_c2r(x, axes, norm, forward, 0)
elif _in_legacy_dygraph():
if n is not None:
attrs = (
'axes',
axes,
'normalization',
norm,
'forward',
forward,
'last_dim_size',
n,
)
else:
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1607,9 +1577,6 @@ def fftn_c2c(x, s, axes, norm, forward, name):
if in_dygraph_mode():
out = _C_ops.fft_c2c(x, axes, norm, forward)
elif _in_legacy_dygraph():
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1661,18 +1628,6 @@ def fftn_r2c(x, s, axes, norm, forward, onesided, name):
if in_dygraph_mode():
out = _C_ops.fft_r2c(x, axes, norm, forward, onesided)
elif _in_legacy_dygraph():
attrs = (
'axes',
axes,
'normalization',
norm,
'forward',
forward,
'onesided',
onesided,
)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......@@ -1739,21 +1694,6 @@ def fftn_c2r(x, s, axes, norm, forward, name):
out = _C_ops.fft_c2r(x, axes, norm, forward, s[-1])
else:
out = _C_ops.fft_c2r(x, axes, norm, forward, 0)
elif _in_legacy_dygraph():
if s:
attrs = (
'axes',
axes,
'normalization',
norm,
'forward',
forward,
'last_dim_size',
s[-1],
)
else:
attrs = ('axes', axes, 'normalization', norm, 'forward', forward)
out = getattr(_legacy_C_ops, op_type)(x, *attrs)
else:
inputs = {
'X': [x],
......
......@@ -15,14 +15,14 @@
import numpy as np
import paddle.utils.deprecated as deprecated
from paddle import _C_ops, _legacy_C_ops
from paddle import _C_ops
from paddle.fluid.data_feeder import (
check_dtype,
check_type,
check_variable_and_dtype,
convert_dtype,
)
from paddle.fluid.framework import Variable, _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import Variable, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import cast
......@@ -124,19 +124,6 @@ def graph_send_recv(
# TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1.
if _in_legacy_dygraph():
out_size = convert_out_size_to_list(out_size)
out, tmp = _legacy_C_ops.graph_send_recv(
x,
src_index,
dst_index,
None,
'reduce_op',
pool_type.upper(),
'out_size',
out_size,
)
return out
if in_dygraph_mode():
out_size = convert_out_size_to_list(out_size)
return _C_ops.send_u_recv(
......
......@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import _C_ops, _legacy_C_ops
from paddle import _C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
__all__ = []
......@@ -49,35 +49,32 @@ def _npu_identity(x, format=-1):
"""
if in_dygraph_mode():
return _C_ops.npu_identity(x, format)
else:
check_variable_and_dtype(
x,
'x',
[
'bool',
'int8',
'uint8',
'int16',
'int32',
'int64',
'float16',
'float32',
'float64',
],
'npu_identity',
)
if _in_legacy_dygraph():
return _legacy_C_ops.npu_identity(x, 'format', format)
check_variable_and_dtype(
x,
'x',
[
'bool',
'int8',
'uint8',
'int16',
'int32',
'int64',
'float16',
'float32',
'float64',
],
'npu_identity',
)
helper = LayerHelper('npu_identity', **locals())
out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=x.stop_gradient
)
helper.append_op(
type='npu_identity',
inputs={'x': [x]},
outputs={'out': [out]},
attrs={'format': format},
)
return out
helper = LayerHelper('npu_identity', **locals())
out = helper.create_variable_for_type_inference(
dtype=x.dtype, stop_gradient=x.stop_gradient
)
helper.append_op(
type='npu_identity',
inputs={'x': [x]},
outputs={'out': [out]},
attrs={'format': format},
)
return out
......@@ -15,7 +15,7 @@
import paddle.utils.deprecated as deprecated
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper, _non_static_mode
__all__ = []
......@@ -65,29 +65,24 @@ def segment_sum(data, segment_ids, name=None):
"""
if in_dygraph_mode():
return _C_ops.segment_pool(data, segment_ids, "SUM")[0]
if _in_legacy_dygraph():
out, tmp = _legacy_C_ops.segment_pool(
data, segment_ids, 'pooltype', "SUM"
else:
check_variable_and_dtype(
data, "X", ("float32", "float64", "int32", "int64"), "segment_pool"
)
check_variable_and_dtype(
segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
)
return out
check_variable_and_dtype(
data, "X", ("float32", "float64", "int32", "int64"), "segment_pool"
)
check_variable_and_dtype(
segment_ids, "SegmentIds", ("int32", "int64"), "segment_pool"
)
helper = LayerHelper("segment_sum", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "SUM"},
)
return out
helper = LayerHelper("segment_sum", **locals())
out = helper.create_variable_for_type_inference(dtype=data.dtype)
summed_ids = helper.create_variable_for_type_inference(dtype=data.dtype)
helper.append_op(
type="segment_pool",
inputs={"X": data, "SegmentIds": segment_ids},
outputs={"Out": out, "SummedIds": summed_ids},
attrs={"pooltype": "SUM"},
)
return out
@deprecated(
......
......@@ -14,7 +14,7 @@
import paddle
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import in_dygraph_mode
from .fft import fft_c2c, fft_c2r, fft_r2c
from .fluid.data_feeder import check_variable_and_dtype
......@@ -125,23 +125,10 @@ def frame(x, frame_length, hop_length, axis=-1, name=None):
f'but got ({frame_length}) > ({x.shape[axis]}).'
)
op_type = 'frame'
if in_dygraph_mode():
return _C_ops.frame(x, frame_length, hop_length, axis)
if _in_legacy_dygraph():
attrs = (
'frame_length',
frame_length,
'hop_length',
hop_length,
'axis',
axis,
)
op = getattr(_legacy_C_ops, op_type)
out = op(x, *attrs)
else:
op_type = 'frame'
check_variable_and_dtype(
x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], op_type
)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
想要评论请 注册