Unverified commit fcc90531, authored by zhangkaihuo and committed by GitHub

Fix the paddle/static/amp/__init__.py (#49791)

Parent commit: 86fa1715
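The hunks below drop the module-level `__all__` declarations from the amp package, including the aggregation idiom in `__init__.py` that built the package's export list out of its submodules' lists. As a point of reference, here is a minimal, self-contained sketch of that idiom and what it controls; the file and package names are illustrative only, not Paddle's actual layout, and the submodule body is reduced to a stub.

```python
# --- pkg/fp16_utils.py (illustrative submodule, not Paddle's real file) ---
__all__ = ["fp16_guard"]          # names exported by `from pkg.fp16_utils import *`

def fp16_guard():
    ...

# --- pkg/__init__.py (the aggregation idiom being deleted in the hunks below) ---
from . import fp16_utils
from .fp16_utils import fp16_guard

__all__ = []
__all__ += fp16_utils.__all__     # package star-import re-exports ["fp16_guard"]
```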
@@ -19,9 +19,3 @@ from .fp16_lists import CustomOpLists, AutoMixedPrecisionLists
from . import fp16_utils
from .fp16_utils import fp16_guard, cast_model_to_fp16, cast_parameters_to_fp16
from . import bf16
from .bf16 import bf16_guard
__all__ = []
__all__ += decorator.__all__
__all__ += fp16_lists.__all__
__all__ += fp16_utils.__all__
@@ -18,8 +18,6 @@ from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
from paddle.fluid.framework import Variable, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper
__all__ = ['check_finite_and_unscale', 'update_loss_scaling']
def check_finite_and_unscale(x, scale, name=None, float_status=None):
"""
......
@@ -24,8 +24,3 @@ from .amp_utils import (
)
from . import decorator
from .decorator import decorate_bf16
__all__ = []
__all__ += decorator.__all__
__all__ += amp_lists.__all__
__all__ += amp_utils.__all__
@@ -20,8 +20,6 @@ from ..fp16_lists import black_list as black_list_fp16
from ..fp16_lists import gray_list as gray_list_fp16
from ..fp16_lists import white_list as white_list_fp16
__all__ = ["AutoMixedPrecisionListsBF16"]
class AutoMixedPrecisionListsBF16:
"""
......
@@ -31,14 +31,6 @@ from ..fp16_utils import (
)
from .amp_lists import AutoMixedPrecisionListsBF16
__all__ = [
"bf16_guard",
"rewrite_program_bf16",
"cast_model_to_bf16",
"cast_parameters_to_bf16",
"convert_float_to_uint16",
]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'
)
......
@@ -25,8 +25,6 @@ from .amp_utils import (
rewrite_program_bf16,
)
__all__ = ["decorate_bf16"]
class OptimizerWithMixedPrecision:
"""
......
@@ -34,8 +34,6 @@ from .fp16_utils import (
update_role_var_grad,
)
__all__ = ["decorate"]
class OptimizerWithMixedPrecision:
"""
......
@@ -16,8 +16,6 @@ import copy
from paddle.fluid import core
__all__ = ["CustomOpLists", "AutoMixedPrecisionLists"]
# lookup_table fp16 is slower than fp32, though fp16 is supported.
_extra_unsupported_fp16_list = {
'lookup_table',
......
@@ -23,8 +23,6 @@ from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
from .fp16_lists import AutoMixedPrecisionLists
__all__ = ["fp16_guard", "cast_model_to_fp16", "cast_parameters_to_fp16"]
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s'
)
......
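With `__all__` gone, a star-import of the package falls back to Python's default of exposing every top-level name that does not start with an underscore, while explicit imports of the names still re-imported in `__init__.py` are unaffected. A small hedged check of that behaviour, assuming the package path named in the commit title (it may differ across Paddle versions):

```python
# Hedged sketch: the module path follows the commit title and may not match
# every Paddle release.
import paddle.static.amp as amp

# After this commit the package should no longer define an aggregated __all__.
print(hasattr(amp, "__all__"))        # expected: False

# The explicit re-imports in __init__.py keep these names reachable.
from paddle.static.amp import fp16_guard, cast_model_to_fp16
print(fp16_guard, cast_model_to_fp16)
```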