Unverified commit 3ffcd693, authored by 姜永久, committed by GitHub

Yj/rm legacy part 0 (#49424)

* rm legacy

* clear in_legacy

* fix tracer
Parent 839e1499
......@@ -24,7 +24,6 @@ from paddle.fluid.framework import ( # noqa: F401
OpProtoHolder,
Variable,
_dygraph_tracer,
_in_legacy_dygraph,
_non_static_mode,
_varbase_creator,
convert_np_dtype_to_dtype_,
......
......@@ -306,7 +306,7 @@ class Tracer(core.Tracer):
stop_gradient=False,
inplace_map=None,
):
if not framework._in_legacy_dygraph():
if framework.in_dygraph_mode():
# inputs : {"sum": [tensor], ...}
# outputs : {"sum": [tensor], ...}
if type in name_mapping.keys():
......
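With the legacy branch gone, the tracer's eager fast path is guarded by `in_dygraph_mode()` directly instead of `not _in_legacy_dygraph()`. A minimal sketch of the equivalent check through the public API, assuming only that `paddle.in_dynamic_mode()` is the user-facing counterpart of the internal `framework.in_dygraph_mode()`:

```python
import paddle

def trace_op_fast_path():
    # was: `if not framework._in_legacy_dygraph():`
    # now: a single positive check for eager/dygraph mode
    if paddle.in_dynamic_mode():
        return "eager fast path"
    return "static-graph path"

print(trace_op_fast_path())  # "eager fast path" by default in Paddle 2.x
```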
......@@ -98,11 +98,10 @@ _dy2st_enable_standalone_executor_ = os.environ.get(
# 2. dygraph_mode():
# This flag indicates we are now running in dygraph mode, which was called eager mode before.
# 3. _in_legacy_dygraph():
# This flag indicates we are now running in legacy dygraph mode
# This flag has been deprecated
#
# They have a relationship as below:
# Both dygraph_mode and _in_legacy_dygraph imply _non_static_mode, but running in
# dygraph mode means you are not in _in_legacy_dygraph.
# Since _in_legacy_dygraph is deprecated, dygraph_mode is equivalent to _non_static_mode
#
# Why do we have to differentiate between _in_legacy_dygraph and dygraph_mode?
# In some performance investigations, we found that a Python if statement caused a severe performance problem
......@@ -237,10 +236,6 @@ def in_dygraph_mode():
return (_dygraph_tracer_ is not None) and _in_eager_mode_
def _in_legacy_dygraph():
return (not _in_eager_mode_) and (_dygraph_tracer_ is not None)
def _non_static_mode():
return _dygraph_tracer_ is not None
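The deleted `_in_legacy_dygraph()` was the only predicate that distinguished the two tracer states; once it is gone, `in_dygraph_mode()` and `_non_static_mode()` coincide whenever eager mode is on. An illustrative sketch with plain values standing in for the internal `_dygraph_tracer_` and `_in_eager_mode_` globals:

```python
# Stand-in predicates mirroring the definitions in the hunk above.
def in_dygraph_mode(tracer, in_eager):
    return (tracer is not None) and in_eager

def non_static_mode(tracer):
    return tracer is not None

# With legacy dygraph removed, in_eager is always True whenever a tracer
# exists, so the two predicates agree for every tracer state:
for tracer in (object(), None):
    assert in_dygraph_mode(tracer, in_eager=True) == non_static_mode(tracer)
```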
......@@ -1334,8 +1329,6 @@ class VariableMetaClass(type):
if in_dygraph_mode():
return issubclass(t, core.eager.Tensor)
else:
if _in_legacy_dygraph():
return issubclass(t, core.VarBase)
return issubclass(t, Variable)
......@@ -1346,8 +1339,6 @@ class ParameterMetaClass(VariableMetaClass):
if in_dygraph_mode():
return issubclass(t, EagerParamBase)
else:
if _in_legacy_dygraph():
return issubclass(t, ParamBase)
return issubclass(t, Parameter)
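Both metaclasses now reduce to a two-way dispatch: eager tensor classes when dygraph mode is active, graph-mode classes otherwise. A self-contained sketch of the same `__instancecheck__` pattern, with stand-in classes instead of `core.eager.Tensor` and Paddle's real `Variable`:

```python
class EagerTensor: ...
class StaticVariable: ...

IN_DYGRAPH = True  # stand-in for in_dygraph_mode()

class VariableMeta(type):
    def __instancecheck__(cls, instance):
        t = type(instance)
        if IN_DYGRAPH:
            return issubclass(t, EagerTensor)
        return issubclass(t, StaticVariable)

class Variable(metaclass=VariableMeta):
    pass

print(isinstance(EagerTensor(), Variable))     # True in dygraph mode
print(isinstance(StaticVariable(), Variable))  # False in dygraph mode
```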
......@@ -3892,19 +3883,6 @@ class Block:
regularizer=regularizer,
error_clip=error_clip,
)
else:
if _in_legacy_dygraph():
var = ParamBase(
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
error_clip=error_clip,
)
else:
var = Parameter(
self,
......@@ -3945,9 +3923,6 @@ class Block:
param = None
if in_dygraph_mode():
param = EagerParamBase(*args, **kwargs)
else:
if _in_legacy_dygraph():
param = ParamBase(*args, **kwargs)
else:
param = Parameter(global_block, *args, **kwargs)
......@@ -4261,20 +4236,6 @@ class Block:
error_clip=p.error_clip,
name=v.name,
)
else:
if _in_legacy_dygraph():
new_p = ParamBase(
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
error_clip=p.error_clip,
name=v.name,
)
else:
new_p = Parameter(
block=self,
......
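All three `Block` hunks follow the same shape: the middle `ParamBase` branch for legacy dygraph disappears, leaving `EagerParamBase` for dygraph mode and `Parameter` for the static graph. A toy factory showing the collapsed branch (class names are stand-ins, not Paddle's real constructors):

```python
class EagerParamBase: ...
class Parameter: ...

def make_param(in_dygraph_mode):
    if in_dygraph_mode:
        return EagerParamBase()
    # the ParamBase branch for legacy dygraph is gone
    return Parameter()

assert isinstance(make_param(True), EagerParamBase)
assert isinstance(make_param(False), Parameter)
```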
......@@ -272,7 +272,6 @@ def generate_activation_fn(op_type):
op = getattr(_C_ops, op_type)
return op(x)
# TODO(dev): Some ops' yaml has not been migrated yet.
# Replace it with _in_legacy_dygraph once all yaml work is done.
if in_dygraph_mode() and hasattr(_legacy_C_ops, op_type):
op = getattr(_legacy_C_ops, op_type)
return op(x)
......
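`generate_activation_fn` resolves the kernel by name: migrated ops come from `_C_ops`, and ops whose yaml has not been migrated fall back to `_legacy_C_ops`. A runnable sketch of that `getattr`-based dispatch, using plain namespaces in place of the two extension modules:

```python
import types

# Toy stand-ins for paddle._C_ops and paddle._legacy_C_ops.
_C_ops = types.SimpleNamespace(relu=lambda x: max(x, 0))
_legacy_C_ops = types.SimpleNamespace(softsign=lambda x: x / (1 + abs(x)))

def activation(op_type, x):
    if hasattr(_C_ops, op_type):          # migrated op: new C++ API
        return getattr(_C_ops, op_type)(x)
    if hasattr(_legacy_C_ops, op_type):   # yaml not migrated yet: legacy API
        return getattr(_legacy_C_ops, op_type)(x)
    raise AttributeError(op_type)

print(activation("relu", -3.0))     # 0
print(activation("softsign", 3.0))  # 0.75
```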
......@@ -38,8 +38,8 @@ from paddle.fluid.framework import (
_dygraph_tracer,
_enable_legacy_dygraph,
_in_eager_without_dygraph_check,
_in_legacy_dygraph,
_test_eager_guard,
in_dygraph_mode,
)
from paddle.fluid.op import Operator
from paddle.jit.dy2static.utils import parse_arg_and_kwargs
......@@ -716,7 +716,7 @@ class OpTest(unittest.TestCase):
if if_return_inputs_grad_dict:
v.stop_gradient = False
if not _in_legacy_dygraph():
if hasattr(v, "retain_grads"):
v.retain_grads()
if has_lod:
......@@ -2515,7 +2515,7 @@ class OpTest(unittest.TestCase):
for no_grad_val in no_grad_set:
del inputs[no_grad_val]
if not _in_legacy_dygraph():
if in_dygraph_mode():
core.eager.run_backward(
fluid.layers.utils.flatten(outputs), grad_outputs, False
)
......
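The test helper now calls `retain_grads()` behind a `hasattr` guard and runs backward through `core.eager.run_backward` whenever `in_dygraph_mode()` holds. A hedged public-API example of what `retain_grads()` buys in eager mode (it keeps gradients on a non-leaf tensor that would otherwise be dropped):

```python
import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
y = x * 2          # non-leaf tensor: its grad is dropped unless retained
y.retain_grads()
z = (y * y).sum()
z.backward()
print(y.grad)      # gradient kept on the intermediate tensor: 2 * y
```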
......@@ -64,7 +64,6 @@ from ..fluid.framework import _dygraph_tracer # noqa: F401
from ..fluid.layer_helper import LayerHelper # noqa: F401
from ..fluid.framework import in_dygraph_mode # noqa: F401
from ..fluid.framework import _in_legacy_dygraph # noqa: F401
from ..fluid.framework import _global_flags # noqa: F401
from ..fluid.framework import _apply_pass # noqa: F401
from ..fluid.framework import switch_main_program
......
......@@ -17,8 +17,8 @@ import numbers
# TODO: define normalization api
import paddle
import paddle.fluid as fluid
from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle import _C_ops, in_dynamic_mode
from paddle.fluid.framework import in_dygraph_mode
from ...fluid import dygraph_utils
from ...fluid.data_feeder import check_type, check_variable_and_dtype
......@@ -336,18 +336,7 @@ def layer_norm(
out, _, _ = _C_ops.layer_norm(x, weight, bias, epsilon, begin_norm_axis)
return out
if _in_legacy_dygraph():
out, _, _ = _legacy_C_ops.layer_norm(
x,
weight,
bias,
'epsilon',
epsilon,
'begin_norm_axis',
begin_norm_axis,
)
return out
else:
check_variable_and_dtype(
x, 'input', ['float16', 'float32', 'float64'], 'LayerNorm'
)
......
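The `layer_norm` fast path now goes straight to `_C_ops.layer_norm`; the `_legacy_C_ops` branch with its attribute-style `'epsilon'` / `'begin_norm_axis'` arguments is gone. The user-facing call is unchanged, e.g.:

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 3, 4])
weight = paddle.ones([4])
bias = paddle.zeros([4])
# In dygraph mode this now always routes through _C_ops.layer_norm.
out = F.layer_norm(x, normalized_shape=x.shape[-1:],
                   weight=weight, bias=bias, epsilon=1e-5)
print(out.shape)  # [2, 3, 4]
```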