Unverified commit da3e9d66, authored by zhangkaihuo, committed by GitHub

move fluid.dygraph.amp to paddle.amp (#49193)

Parent 343bff7b
......@@ -13,7 +13,18 @@
# limitations under the License.
from .auto_cast import auto_cast # noqa: F401
from .grad_scaler import GradScaler # noqa: F401
from .auto_cast import decorate # noqa: F401
from .auto_cast import amp_guard # noqa: F401
from .auto_cast import amp_decorate # noqa: F401
from .auto_cast import low_precision_op_list # noqa: F401
from .auto_cast import WHITE_LIST # noqa: F401
from .auto_cast import BLACK_LIST # noqa: F401
from .auto_cast import PURE_FP16_WHITE_LIST # noqa: F401
from .auto_cast import PURE_FP16_BLACK_LIST # noqa: F401
from . import grad_scaler # noqa: F401
from .grad_scaler import GradScaler # noqa: F401
from .grad_scaler import AmpScaler # noqa: F401
from .grad_scaler import OptimizerState # noqa: F401
__all__ = ['auto_cast', 'GradScaler', 'decorate']
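With these re-exports in place, the AMP helpers that previously lived under `paddle.fluid.dygraph.amp` become reachable directly from `paddle.amp`. A minimal sketch of the relocated entry points (the `Conv2D` layer and tensor shapes are illustrative; fp16 casting only takes effect where AMP is actually supported, e.g. on a CUDA device):

```python
import paddle

conv = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
data = paddle.rand([1, 3, 32, 32])

# O1 auto-cast region via the relocated amp_guard: white-listed ops such as
# conv2d run in fp16, black-listed ops stay in fp32.
with paddle.amp.amp_guard(True):
    out = conv(data)

# Loss scaling via the relocated AmpScaler.
scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
scaled = scaler.scale(out.mean())
```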
This diff is collapsed.
This diff is collapsed.
......@@ -28,9 +28,6 @@ from .parallel import *
from . import learning_rate_scheduler
from .learning_rate_scheduler import *
from . import amp
from .amp import *
from .math_op_patch import monkey_patch_math_varbase
__all__ = []
......@@ -38,4 +35,3 @@ __all__ += layers.__all__
__all__ += base.__all__
__all__ += parallel.__all__
__all__ += learning_rate_scheduler.__all__
__all__ += amp.__all__
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import auto_cast
from .auto_cast import *
from . import loss_scaler
from .loss_scaler import *
__all__ = []
__all__ += auto_cast.__all__
__all__ += loss_scaler.__all__
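For downstream code, the test updates in this commit imply a straightforward path mapping from the fluid namespace to the public `paddle.amp` package. A sketch of the before/after imports (the old paths are quoted from the tests further down, as they stood before this change):

```python
# Before this commit (fluid namespace):
#   paddle.fluid.dygraph.amp_guard(...)
#   paddle.fluid.dygraph.AmpScaler(...)
#   paddle.fluid.dygraph.amp_decorate(...)
#   paddle.fluid.dygraph.amp.auto_cast.low_precision_op_list()

# After this commit (public namespace):
from paddle.amp import AmpScaler, amp_decorate, amp_guard, low_precision_op_list
```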
This diff is collapsed.
......@@ -60,10 +60,10 @@ class TestAutoCast(unittest.TestCase):
with fluid.dygraph.guard():
conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
data = fluid.dygraph.to_variable(data)
with fluid.dygraph.amp_guard(True):
with paddle.amp.amp_guard(True):
out_fp16 = conv2d(data)
with fluid.dygraph.amp_guard(False):
with paddle.amp.amp_guard(False):
out_fp32 = conv2d(data)
self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32)
......@@ -77,7 +77,7 @@ class TestAutoCast(unittest.TestCase):
data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
with fluid.dygraph.guard():
data = fluid.dygraph.to_variable(data)
with fluid.dygraph.amp_guard(True):
with paddle.amp.amp_guard(True):
out_fp32 = paddle.mean(data)
self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32)
......@@ -89,9 +89,9 @@ class TestAutoCast(unittest.TestCase):
def custom_op_list(self):
with fluid.dygraph.guard():
tracer = fluid.framework._dygraph_tracer()
base_white_list = fluid.dygraph.amp.auto_cast.WHITE_LIST
base_black_list = fluid.dygraph.amp.auto_cast.BLACK_LIST
with fluid.dygraph.amp_guard(
base_white_list = paddle.amp.WHITE_LIST
base_black_list = paddle.amp.BLACK_LIST
with paddle.amp.amp_guard(
custom_white_list=["log"], custom_black_list=["conv2d"]
):
white_list, black_list = tracer._get_amp_op_list()
......@@ -105,9 +105,9 @@ class TestAutoCast(unittest.TestCase):
== (set(base_black_list) - {"log"}) | {"conv2d"}
)
base_white_list = fluid.dygraph.amp.auto_cast.PURE_FP16_WHITE_LIST
base_black_list = fluid.dygraph.amp.auto_cast.PURE_FP16_BLACK_LIST
with fluid.dygraph.amp_guard(
base_white_list = paddle.amp.PURE_FP16_WHITE_LIST
base_black_list = paddle.amp.PURE_FP16_BLACK_LIST
with paddle.amp.amp_guard(
custom_white_list=["log"],
custom_black_list=["conv2d"],
level='O2',
......@@ -138,7 +138,7 @@ class TestAutoCast(unittest.TestCase):
stride=2,
act='relu',
)
with fluid.dygraph.amp_guard(
with paddle.amp.amp_guard(
custom_white_list=["conv2d"], custom_black_list=["conv2d"]
):
inp = fluid.dygraph.to_variable(inp_np)
......@@ -154,13 +154,13 @@ class TestAutoCast(unittest.TestCase):
with fluid.dygraph.guard():
conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
data = fluid.dygraph.to_variable(data)
with fluid.dygraph.amp_guard(True):
with paddle.amp.amp_guard(True):
out_amp_fp16 = conv2d(data)
out_amp_fp32 = paddle.expand_as(
out_amp_fp16, out_amp_fp16
) # expand_as_v2 has no fp16 kernel
with fluid.dygraph.amp_guard(True, level='O2'):
with paddle.amp.amp_guard(True, level='O2'):
out_purefp16_fp16 = conv2d(data)
out_purefp16_fp32 = paddle.expand_as(
out_purefp16_fp16, out_purefp16_fp16
......@@ -184,7 +184,7 @@ class TestAutoCast(unittest.TestCase):
with fluid.dygraph.guard():
conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
data = fluid.dygraph.to_variable(data)
with fluid.dygraph.amp_guard(level='O'):
with paddle.amp.amp_guard(level='O'):
out = conv2d(data)
self.assertRaises(ValueError, func)
......@@ -197,7 +197,7 @@ class TestAmpScaler(unittest.TestCase):
def scale(self):
with fluid.dygraph.guard():
data = paddle.rand([10, 1024])
scaler = paddle.fluid.dygraph.AmpScaler(init_loss_scaling=1024)
scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
scaled_data = scaler.scale(data)
self.assertEqual(
np.array_equal(scaled_data.numpy(), data.numpy() * 1024), True
......@@ -223,7 +223,7 @@ class TestAmpScaler(unittest.TestCase):
optimizer = fluid.optimizer.SGDOptimizer(
learning_rate=0.01, parameter_list=model.parameters()
)
scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)
scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
data = fluid.dygraph.to_variable(inp_np)
out = model(data)
......@@ -332,7 +332,7 @@ class TestAmpScaler(unittest.TestCase):
optimizer = fluid.optimizer.SGDOptimizer(
learning_rate=0.01, parameter_list=model.parameters()
)
scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)
scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
data = fluid.dygraph.to_variable(inp_np)
out = model(data)
......@@ -1262,12 +1262,12 @@ class TestResnet(unittest.TestCase):
dy_param_init_value[param.name] = param.numpy()
program = None
scaler = paddle.fluid.dygraph.AmpScaler(
scaler = paddle.amp.AmpScaler(
enable=enable_amp, init_loss_scaling=2.0**10
)
if enable_amp and (level == 'O2'):
resnet, optimizer = paddle.fluid.dygraph.amp_decorate(
resnet, optimizer = paddle.amp.amp_decorate(
models=resnet, optimizers=optimizer, level='O2'
)
......@@ -1290,9 +1290,7 @@ class TestResnet(unittest.TestCase):
img = fluid.dygraph.to_variable(dy_x_data)
label = fluid.dygraph.to_variable(y_data)
label.stop_gradient = True
with paddle.fluid.dygraph.amp_guard(
enable=enable_amp, level=level
):
with paddle.amp.amp_guard(enable=enable_amp, level=level):
out = resnet(img)
loss = paddle.nn.functional.cross_entropy(
......
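Taken together, the updated ResNet test exercises the relocated O2 workflow: `amp_decorate` on model and optimizer, `amp_guard` around the forward pass, and `AmpScaler` for loss scaling. A condensed sketch of that pattern under the new namespace (the Linear model, SGD optimizer, and random data are placeholders standing in for the ResNet setup in the test; assumes a CUDA device for fp16 execution):

```python
import paddle

model = paddle.nn.Linear(10, 10)  # placeholder for the ResNet in the test
opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())
scaler = paddle.amp.AmpScaler(enable=True, init_loss_scaling=2.0**10)

# Pure-fp16 (O2) decoration of model and optimizer, as in the test above.
model, opt = paddle.amp.amp_decorate(models=model, optimizers=opt, level='O2')

x = paddle.rand([4, 10])
with paddle.amp.amp_guard(enable=True, level='O2'):
    loss = model(x).mean()

scaled = scaler.scale(loss)
scaled.backward()
scaler.minimize(opt, scaled)
```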
......@@ -28,7 +28,7 @@ class TestAMPList(unittest.TestCase):
with paddle.amp.auto_cast():
conv = conv2d(data)
c = a + b
paddle.fluid.dygraph.amp.auto_cast.low_precision_op_list()
paddle.amp.low_precision_op_list()
op_list = paddle.fluid.core.get_low_precision_op_list()
print(conv.dtype)
if conv.dtype == paddle.float16:
......
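The AMP-list test now reaches `low_precision_op_list` through its public path. A short sketch mirroring that test (the exact contents returned by `paddle.fluid.core.get_low_precision_op_list()` are not shown in this diff and will vary by device and version):

```python
import paddle

conv2d = paddle.nn.Conv2D(3, 2, 3)
data = paddle.rand([1, 3, 32, 32])

with paddle.amp.auto_cast():
    conv = conv2d(data)

# Relocated helper used by the test; presumably prints/collects the
# low-precision op statistics gathered during the auto_cast region.
paddle.amp.low_precision_op_list()
op_list = paddle.fluid.core.get_low_precision_op_list()
print(conv.dtype, op_list)
```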
......@@ -18,6 +18,7 @@ import numpy as np
import paddle
from paddle import _legacy_C_ops
from paddle.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard
from paddle.fluid import backward, core, framework, program_guard
from paddle.fluid.compiler import BuildStrategy
from paddle.fluid.contrib.mixed_precision.decorator import (
......@@ -28,10 +29,6 @@ from paddle.fluid.contrib.mixed_precision.fp16_utils import (
rewrite_program,
)
from paddle.fluid.dygraph import layers
from paddle.fluid.dygraph.amp.auto_cast import (
_in_amp_guard,
_in_pure_fp16_guard,
)
from paddle.fluid.dygraph.base import switch_to_static_graph
from paddle.fluid.executor import (
_is_dy2st_enable_standalone_executor,
......
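The dygraph-to-static partial-program module now imports the AMP guard checks from `paddle.amp.auto_cast` instead of the fluid path. These are private helpers, so the snippet below is only a hypothetical illustration of branching on the guard state with the new import; it is not the actual `partial_program` logic:

```python
from paddle.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard

def _current_amp_level():
    # Hypothetical helper: maps the private guard checks to an AMP level name.
    if _in_pure_fp16_guard():
        return 'O2'
    if _in_amp_guard():
        return 'O1'
    return 'O0'
```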
......@@ -331,7 +331,6 @@ packages=['paddle',
'paddle.inference.contrib.utils',
'paddle.fluid',
'paddle.fluid.dygraph',
'paddle.fluid.dygraph.amp',
'paddle.fluid.proto',
'paddle.fluid.proto.profiler',
'paddle.fluid.distributed',
......
......@@ -1202,7 +1202,6 @@ def get_setup_parameters():
'paddle.inference.contrib.utils',
'paddle.fluid',
'paddle.fluid.dygraph',
'paddle.fluid.dygraph.amp',
'paddle.fluid.proto',
'paddle.fluid.proto.profiler',
'paddle.fluid.distributed',
......