PaddlePaddle / Paddle · Commit 69d01eb9
Commit 69d01eb9 (unverified)
Authored Jan 12, 2023 by zhangkaihuo; committed via GitHub on Jan 12, 2023
move fluid.contrib.mixed_precision to paddle.static.amp (#49412)
Parent: 8e291bf7
Showing 42 changed files with 161 additions and 247 deletions (+161 -247).
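For downstream code the change is an import-path move with no claimed behavior change; a hedged summary of the mapping implied by the file renames below (Python comments only, new homes on the right):

    # Removed location                                       New location
    # paddle.fluid.contrib.mixed_precision.decorate       -> paddle.static.amp.decorate
    # paddle.fluid.contrib.mixed_precision
    #     .AutoMixedPrecisionLists                         -> paddle.static.amp.AutoMixedPrecisionLists
    # paddle.fluid.contrib.mixed_precision.fp16_lists      -> paddle.static.amp.fp16_lists
    # paddle.fluid.contrib.mixed_precision.fp16_utils      -> paddle.static.amp.fp16_utils
    # paddle.fluid.contrib.mixed_precision.amp_nn          -> paddle.static.amp.amp_nn
    # paddle.fluid.contrib.mixed_precision.bf16            -> paddle.static.amp.bf16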
Changed files:

python/paddle/amp/auto_cast.py (+2 -1)
python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py (+1 -1)
python/paddle/distributed/passes/auto_parallel_amp.py (+5 -5)
python/paddle/distributed/passes/auto_parallel_bf16.py (+6 -8)
python/paddle/distributed/passes/auto_parallel_fp16.py (+4 -4)
python/paddle/fluid/contrib/__init__.py (+0 -3)
python/paddle/fluid/contrib/layers/nn.py (+1 -1)
python/paddle/fluid/contrib/mixed_precision/__init__.py (+0 -26)
python/paddle/fluid/contrib/tests/test_amp_list.py (+1 -1)
python/paddle/fluid/contrib/tests/test_bf16_utils.py (+1 -1)
python/paddle/fluid/contrib/tests/test_fp16_utils.py (+1 -1)
python/paddle/fluid/contrib/tests/test_image_classification_fp16.py (+32 -82)
python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py (+1 -1)
python/paddle/fluid/incubate/fleet/base/fleet_base.py (+1 -1)
python/paddle/fluid/incubate/fleet/collective/__init__.py (+1 -1)
python/paddle/fluid/optimizer.py (+1 -1)
python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py (+1 -5)
python/paddle/fluid/tests/unittests/auto_parallel/test_pass_bf16.py (+2 -2)
python/paddle/fluid/tests/unittests/collective/test_collective_optimizer.py (+2 -1)
python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py (+1 -1)
python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py (+1 -1)
python/paddle/fluid/tests/unittests/npu/test_mixed_precision_npu.py (+1 -1)
python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py (+1 -1)
python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py (+1 -1)
python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py (+1 -1)
python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py (+1 -1)
python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py (+2 -2)
python/paddle/fluid/tests/unittests/test_layer_norm_op.py (+1 -3)
python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py (+1 -1)
python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py (+1 -1)
python/paddle/jit/dy2static/partial_program.py (+5 -10)
python/paddle/static/amp/__init__.py (+14 -12)
python/paddle/static/amp/amp_nn.py (+4 -4)
python/paddle/static/amp/bf16/__init__.py (+9 -3)
python/paddle/static/amp/bf16/amp_lists.py (+5 -6)
python/paddle/static/amp/bf16/amp_utils.py (+13 -14)
python/paddle/static/amp/bf16/decorator.py (+7 -11)
python/paddle/static/amp/decorator.py (+18 -15)
python/paddle/static/amp/fp16_lists.py (+2 -1)
python/paddle/static/amp/fp16_utils.py (+7 -7)
python/setup.py.in (+1 -2)
setup.py (+1 -2)
python/paddle/amp/auto_cast.py

@@ -59,6 +59,7 @@ BLACK_LIST = {
     'trilinear_interp_v2',
 }

 AMP_RELATED_FLAGS = [
     'FLAGS_cudnn_exhaustive_search',
     'FLAGS_conv_workspace_size_limit',

@@ -127,7 +128,7 @@ def amp_state():
     return _g_amp_state_

-# NOTE(zhiqiu): similar as paddle.fluid.contrib.mixed_precision.fp16_lists.AutoMixedPrecisionLists._update_list
+# NOTE(zhiqiu): similar as paddle.static.amp.fp16_lists.AutoMixedPrecisionLists._update_list
 # The reason why not use AutoMixedPrecisionLists is that custom_black_varnames is not suitable for imperative mode.
 def _update_list(
     custom_white_list, custom_black_list, level='O1', dtype='float16'
python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and

-import paddle.fluid.contrib.mixed_precision as mixed_precision
+import paddle.static.amp as mixed_precision
 from .meta_optimizer_base import MetaOptimizerBase
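For callers that imported the old package under an alias, as this optimizer does, the migration is a one-line import change; a minimal sketch, with the surrounding optimizer code being illustrative rather than part of the diff:

import paddle
import paddle.static.amp as mixed_precision  # was: import paddle.fluid.contrib.mixed_precision as mixed_precision

paddle.enable_static()
adam = paddle.fluid.optimizer.Adam(learning_rate=0.001)
# decorate() wraps the optimizer with loss scaling; the call itself is unchanged by the move
adam = mixed_precision.decorate(
    adam, init_loss_scaling=1.0, use_dynamic_loss_scaling=True
)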
python/paddle/distributed/passes/auto_parallel_amp.py

@@ -23,7 +23,9 @@ from paddle.distributed.auto_parallel.utils import (
     set_var_dist_attr,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
-from paddle.fluid.contrib.mixed_precision.fp16_utils import (
+from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
+from paddle.framework import core
+from paddle.static.amp.fp16_utils import (
     AutoMixedPrecisionLists,
     _dtype_to_str,
     _is_in_black_varnames,

@@ -35,8 +37,6 @@ from paddle.fluid.contrib.mixed_precision.fp16_utils import (
     find_true_post_op,
     find_true_prev_op,
 )
-from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
-from paddle.framework import core
 from paddle.utils import unique_name
 from ..auto_parallel.process_mesh import ProcessMesh

@@ -83,7 +83,7 @@ class AMPState:
     def _mark_black_white_ops(self, amp_lists):
         """
-        this function is modified from paddle.fluid.contrib.mixed_precision
+        this function is modified from paddle.static.amp
         """
         self._block._sync_with_cpp()
         ops = self._block.ops

@@ -179,7 +179,7 @@ class AMPState:
     ):
         """
         only for forward cast
-        modified from paddle.fluid.contrib.mixed_precision
+        modified from paddle.static.amp
         """
         num_cast_ops = 0
         var_name_dict = {}
python/paddle/distributed/passes/auto_parallel_bf16.py

@@ -26,22 +26,20 @@ from paddle.distributed.auto_parallel.utils import (
 from paddle.distributed.fleet.meta_optimizers.common import OpRole
 from paddle.distributed.passes.pass_base import PassBase, register_pass
 from paddle.fluid import unique_name
-from paddle.fluid.contrib.mixed_precision.bf16 import (
-    AutoMixedPrecisionListsBF16,
-)
-from paddle.fluid.contrib.mixed_precision.bf16.amp_utils import (
+from paddle.fluid.framework import Block
+from paddle.framework import core
+from paddle.static.amp.bf16 import AutoMixedPrecisionListsBF16
+from paddle.static.amp.bf16.amp_utils import (
     _dtype_to_str,
     _is_in_fp32_varnames,
     _valid_types,
     find_op_index,
     find_true_post_op,
 )
-from paddle.fluid.contrib.mixed_precision.fp16_utils import (
+from paddle.static.amp.fp16_utils import (
     _rename_arg,
     find_op_index,
     find_true_prev_op,
 )
-from paddle.fluid.framework import Block
-from paddle.framework import core
 from ..auto_parallel.utils import is_backward_op, is_forward_op, is_loss_op
python/paddle/distributed/passes/auto_parallel_fp16.py

@@ -26,16 +26,16 @@ from paddle.distributed.auto_parallel.utils import (
     set_var_dist_attr,
 )
 from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
-from paddle.fluid.contrib.mixed_precision.fp16_utils import (
+from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
+from paddle.framework import core
+from paddle.static import default_main_program, default_startup_program
+from paddle.static.amp.fp16_utils import (
     AutoMixedPrecisionLists,
     _dtype_to_str,
     _keep_layer_norm_scale_bias_to_fp32,
     _need_keep_fp32,
     _valid_types,
 )
-from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
-from paddle.framework import core
-from paddle.static import default_main_program, default_startup_program
 from paddle.utils import unique_name
 from ..auto_parallel.process_mesh import ProcessMesh
python/paddle/fluid/contrib/__init__.py

@@ -22,8 +22,6 @@ from . import extend_optimizer
 from .extend_optimizer import *
 from . import model_stat
 from .model_stat import *
-from . import mixed_precision
-from .mixed_precision import *
 from . import layers
 from .layers import *
 from . import optimizer

@@ -34,6 +32,5 @@ __all__ = []
 __all__ += memory_usage_calc.__all__
 __all__ += op_frequence.__all__
 __all__ += extend_optimizer.__all__
-__all__ += ['mixed_precision']
 __all__ += layers.__all__
 __all__ += optimizer.__all__
python/paddle/fluid/contrib/layers/nn.py

@@ -1587,7 +1587,7 @@ def fused_bn_add_act(
             )
             loss = paddle.mean(loss)
             sgd = fluid.optimizer.SGD(learning_rate=0.001)
-            sgd = fluid.contrib.mixed_precision.decorate(
+            sgd = paddle.static.amp.decorate(
                 sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0)
             sgd.minimize(loss)
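The docstring above already uses the new spelling; a slightly fuller sketch of static-graph FP16 training with the relocated decorate() (the toy network is illustrative, the decorate arguments mirror the docstring):

import paddle
import paddle.fluid as fluid

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
    y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
    pred = paddle.static.nn.fc(x, size=1)
    loss = paddle.mean(paddle.nn.functional.square_error_cost(pred, y))

    sgd = fluid.optimizer.SGD(learning_rate=0.001)
    # previously fluid.contrib.mixed_precision.decorate(...)
    sgd = paddle.static.amp.decorate(
        sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0
    )
    sgd.minimize(loss)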
python/paddle/fluid/contrib/mixed_precision/__init__.py
deleted (file mode 100644 → 0)

-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import decorator
-from .decorator import *
-from . import fp16_lists
-from .fp16_lists import *
-from . import fp16_utils
-from .fp16_utils import *
-from . import bf16
-
-__all__ = []
-__all__ += decorator.__all__
-__all__ += fp16_lists.__all__
-__all__ += fp16_utils.__all__
python/paddle/fluid/contrib/tests/test_amp_list.py

@@ -14,7 +14,7 @@
 import paddle
 import unittest
-from paddle.fluid.contrib.mixed_precision.fp16_lists import (
+from paddle.static.amp.fp16_lists import (
     AutoMixedPrecisionLists,
 )
python/paddle/fluid/contrib/tests/test_bf16_utils.py

@@ -110,7 +110,7 @@ class AMPTest2(unittest.TestCase):
     def test_find_op_index(self):
         block = fluid.default_main_program().global_block()
         op_desc = core.OpDesc()
-        idx = amp.bf16.amp_utils.find_op_index(block.desc, op_desc)
+        idx = amp.fp16_utils.find_op_index(block.desc, op_desc)
         assert idx == -1

     def test_is_in_fp32_varnames(self):
python/paddle/fluid/contrib/tests/test_fp16_utils.py

@@ -15,7 +15,7 @@
 import unittest
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.contrib.mixed_precision import fp16_utils
+from paddle.static.amp import fp16_utils
 import paddle

 paddle.enable_static()
python/paddle/fluid/contrib/tests/test_image_classification_fp16.py

@@ -136,7 +136,7 @@ def train(net_type, use_cuda, save_dirname, is_local):
     optimizer = fluid.optimizer.Lamb(learning_rate=0.001)
-    amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
+    amp_lists = paddle.static.amp.AutoMixedPrecisionLists(
         custom_black_varnames={"loss", "conv2d_0.w_0"}
     )
     mp_optimizer = decorate(

@@ -313,102 +313,66 @@ class TestImageClassification(unittest.TestCase):
         # infer(use_cuda, save_dirname)

     def test_amp_lists(self):
-        white_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.white_list)
-        black_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.black_list)
-        gray_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.gray_list)
+        white_list = copy.copy(paddle.static.amp.fp16_lists.white_list)
+        black_list = copy.copy(paddle.static.amp.fp16_lists.black_list)
+        gray_list = copy.copy(paddle.static.amp.fp16_lists.gray_list)

-        amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists()
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists()
         self.assertEqual(amp_lists.white_list, white_list)
         self.assertEqual(amp_lists.black_list, black_list)
         self.assertEqual(amp_lists.gray_list, gray_list)

     def test_amp_lists_1(self):
-        white_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.white_list)
-        black_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.black_list)
-        gray_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.gray_list)
+        white_list = copy.copy(paddle.static.amp.fp16_lists.white_list)
+        black_list = copy.copy(paddle.static.amp.fp16_lists.black_list)
+        gray_list = copy.copy(paddle.static.amp.fp16_lists.gray_list)

         # 1. w={'exp}, b=None
         white_list.add('exp')
         black_list.remove('exp')

-        amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists({'exp'})
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists({'exp'})
         self.assertEqual(amp_lists.white_list, white_list)
         self.assertEqual(amp_lists.black_list, black_list)
         self.assertEqual(amp_lists.gray_list, gray_list)

     def test_amp_lists_2(self):
-        white_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.white_list)
-        black_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.black_list)
-        gray_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.gray_list)
+        white_list = copy.copy(paddle.static.amp.fp16_lists.white_list)
+        black_list = copy.copy(paddle.static.amp.fp16_lists.black_list)
+        gray_list = copy.copy(paddle.static.amp.fp16_lists.gray_list)

         # 2. w={'tanh'}, b=None
         white_list.add('tanh')
         gray_list.remove('tanh')

-        amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists({'tanh'})
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists({'tanh'})
         self.assertEqual(amp_lists.white_list, white_list)
         self.assertEqual(amp_lists.black_list, black_list)
         self.assertEqual(amp_lists.gray_list, gray_list)

     def test_amp_lists_3(self):
-        white_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.white_list)
-        black_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.black_list)
-        gray_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.gray_list)
+        white_list = copy.copy(paddle.static.amp.fp16_lists.white_list)
+        black_list = copy.copy(paddle.static.amp.fp16_lists.black_list)
+        gray_list = copy.copy(paddle.static.amp.fp16_lists.gray_list)

         # 3. w={'lstm'}, b=None
         white_list.add('lstm')

-        amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists({'lstm'})
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists({'lstm'})
         self.assertEqual(amp_lists.white_list, white_list)
         self.assertEqual(amp_lists.black_list, black_list)
         self.assertEqual(amp_lists.gray_list, gray_list)

     def test_amp_lists_4(self):
-        white_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.white_list)
-        black_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.black_list)
-        gray_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.gray_list)
+        white_list = copy.copy(paddle.static.amp.fp16_lists.white_list)
+        black_list = copy.copy(paddle.static.amp.fp16_lists.black_list)
+        gray_list = copy.copy(paddle.static.amp.fp16_lists.gray_list)

         # 4. w=None, b={'conv2d'}
         white_list.remove('conv2d')
         black_list.add('conv2d')

-        amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists(
             custom_black_list={'conv2d'}
         )
         self.assertEqual(amp_lists.white_list, white_list)

@@ -416,21 +380,15 @@ class TestImageClassification(unittest.TestCase):
         self.assertEqual(amp_lists.gray_list, gray_list)

     def test_amp_lists_5(self):
-        white_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.white_list)
-        black_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.black_list)
-        gray_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.gray_list)
+        white_list = copy.copy(paddle.static.amp.fp16_lists.white_list)
+        black_list = copy.copy(paddle.static.amp.fp16_lists.black_list)
+        gray_list = copy.copy(paddle.static.amp.fp16_lists.gray_list)

         # 5. w=None, b={'tanh'}
         black_list.add('tanh')
         gray_list.remove('tanh')

-        amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists(
             custom_black_list={'tanh'}
         )
         self.assertEqual(amp_lists.white_list, white_list)

@@ -438,20 +396,14 @@ class TestImageClassification(unittest.TestCase):
         self.assertEqual(amp_lists.gray_list, gray_list)

     def test_amp_lists_6(self):
-        white_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.white_list)
-        black_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.black_list)
-        gray_list = copy.copy(fluid.contrib.mixed_precision.fp16_lists.gray_list)
+        white_list = copy.copy(paddle.static.amp.fp16_lists.white_list)
+        black_list = copy.copy(paddle.static.amp.fp16_lists.black_list)
+        gray_list = copy.copy(paddle.static.amp.fp16_lists.gray_list)

         # 6. w=None, b={'lstm'}
         black_list.add('lstm')

-        amp_lists = fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists(
             custom_black_list={'lstm'}
         )
         self.assertEqual(amp_lists.white_list, white_list)

@@ -463,7 +415,7 @@ class TestImageClassification(unittest.TestCase):
         # raise ValueError
         self.assertRaises(
             ValueError,
-            fluid.contrib.mixed_precision.AutoMixedPrecisionLists,
+            paddle.static.amp.AutoMixedPrecisionLists,
             {'lstm'},
             {'lstm'},
         )

@@ -515,10 +467,8 @@ class TestAmpWithNonIterableDataLoader(unittest.TestCase):
         avg_cost = paddle.mean(cost)

         optimizer = fluid.optimizer.Lamb(learning_rate=0.001)
-        amp_lists = (
-            fluid.contrib.mixed_precision.AutoMixedPrecisionLists(
-                custom_black_varnames={"loss", "conv2d_0.w_0"}
-            )
-        )
+        amp_lists = paddle.static.amp.AutoMixedPrecisionLists(
+            custom_black_varnames={"loss", "conv2d_0.w_0"}
+        )
         mp_optimizer = decorate(
             optimizer=optimizer,
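The tests above exercise the relocated op lists; a short sketch of the same customization through the new path (the op names come from the tests, the assertion mirrors their expectations):

import copy
import paddle

# default white/black/gray op lists now live under paddle.static.amp.fp16_lists
default_white = copy.copy(paddle.static.amp.fp16_lists.white_list)
# a custom white list promotes an op to fp16, a custom black list forces fp32
amp_lists = paddle.static.amp.AutoMixedPrecisionLists(
    custom_white_list={'tanh'},
    custom_black_list={'conv2d'},
)
assert 'tanh' in amp_lists.white_list and 'conv2d' in amp_lists.black_list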
python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py

@@ -18,7 +18,7 @@ import contextlib
 import unittest
 import numpy as np
 from paddle.io import Dataset
-from paddle.fluid.contrib.mixed_precision.fp16_utils import cast_model_to_fp16
+from paddle.static.amp.fp16_utils import cast_model_to_fp16

 paddle.enable_static()
python/paddle/fluid/incubate/fleet/base/fleet_base.py

@@ -21,7 +21,7 @@ from paddle.optimizer import SGD as SGD_v2
 from paddle.fluid.incubate.fleet.base.mode import Mode
 from paddle.distributed.fleet.base.role_maker import RoleMakerBase
-from paddle.fluid.contrib.mixed_precision.decorator import (
+from paddle.static.amp.decorator import (
     OptimizerWithMixedPrecision,
 )
 from . import mode
python/paddle/fluid/incubate/fleet/collective/__init__.py

@@ -555,7 +555,7 @@ class CollectiveOptimizer(DistributedOptimizer):
                 self.raiseOptimizeError(
                     "mixed_precision", self._optimizer.__class__.__name__
                 )
-            self._optimizer = fluid.contrib.mixed_precision.decorate(
+            self._optimizer = paddle.static.amp.decorate(
                 self._optimizer,
                 init_loss_scaling=self._amp_loss_scaling,
                 use_dynamic_loss_scaling=True,
python/paddle/fluid/optimizer.py

@@ -4404,7 +4404,7 @@ class PipelineOptimizer:
         valid_optimizers = (
             Optimizer,
             paddle.optimizer.Optimizer,
-            paddle.fluid.contrib.mixed_precision.decorator.OptimizerWithMixedPrecision,
+            paddle.static.amp.decorator.OptimizerWithMixedPrecision,
         )
         if not isinstance(optimizer, valid_optimizers):
             raise ValueError(
python/paddle/fluid/tests/unittests/asp/test_asp_optimize_static.py

@@ -215,11 +215,7 @@ class TestASPStaticOptimize(unittest.TestCase):
         if core.is_compiled_with_cuda():
             place = paddle.CUDAPlace(0)
         with fluid.program_guard(self.main_program, self.startup_program):
-            self.optimizer = (
-                fluid.contrib.mixed_precision.decorator.decorate(
-                    self.optimizer
-                )
-            )
+            self.optimizer = paddle.static.amp.decorate(self.optimizer)
             self.optimizer = paddle.incubate.asp.decorate(self.optimizer)
             self.optimizer.minimize(self.loss, self.startup_program)
python/paddle/fluid/tests/unittests/auto_parallel/test_pass_bf16.py

@@ -21,10 +21,10 @@ import paddle
 import paddle.fluid.core as core
 import paddle.nn as nn
 from paddle.distributed.fleet import auto
-from paddle.fluid.contrib.mixed_precision.bf16.amp_utils import _valid_types
-from paddle.fluid.contrib.mixed_precision.fp16_utils import find_true_prev_op
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.static import InputSpec
+from paddle.static.amp.bf16.amp_utils import _valid_types
+from paddle.static.amp.fp16_utils import find_true_prev_op
 from paddle.vision.datasets import MNIST

 paddle.enable_static()
python/paddle/fluid/tests/unittests/collective/test_collective_optimizer.py

@@ -26,6 +26,7 @@
 import unittest

+import paddle
 import paddle.fluid as fluid
 from paddle.fluid.incubate.fleet.collective import (
     CollectiveOptimizer,

@@ -61,7 +62,7 @@ class CollectiveOptimizerTest(unittest.TestCase):
     def test_amp_strategy(self):
         optimizer = fluid.optimizer.AdamOptimizer()
-        optimizer = fluid.contrib.mixed_precision.decorate(
+        optimizer = paddle.static.amp.decorate(
             optimizer, init_loss_scaling=1.0, use_dynamic_loss_scaling=True
         )
         dist_strategy = DistributedStrategy()
python/paddle/fluid/tests/unittests/mlu/test_layer_norm_op_mlu.py

@@ -25,7 +25,7 @@ import sys
 sys.path.append('..')
 from op_test import _set_use_system_allocator
 from paddle.fluid import Program, program_guard
-from paddle.fluid.contrib.mixed_precision.fp16_utils import (
+from paddle.static.amp.fp16_utils import (
     _keep_layer_norm_scale_bias_to_fp32,
 )
 from test_layer_norm_op import (
python/paddle/fluid/tests/unittests/npu/test_amp_check_finite_and_scale_op_npu.py

@@ -21,7 +21,7 @@ from op_test import OpTest, skip_check_grad_ci
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
-from paddle.fluid.contrib.mixed_precision.amp_nn import check_finite_and_unscale
+from paddle.static.amp.amp_nn import check_finite_and_unscale

 paddle.enable_static()
python/paddle/fluid/tests/unittests/npu/test_mixed_precision_npu.py

@@ -17,7 +17,7 @@ import sys
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
-from paddle.fluid.contrib.mixed_precision import fp16_utils
+from paddle.static.amp import fp16_utils
 import paddle.nn as nn
 import paddle.static as static
 import numpy as np
python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_min_op_npu.py

@@ -21,7 +21,7 @@ sys.path.append("..")
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
+import paddle.static.amp.amp_nn as amp_nn
 from test_update_loss_scaling_op_npu import TestUpdateLossScalingOpBad

 paddle.enable_static()
python/paddle/fluid/tests/unittests/npu/test_update_loss_scaling_op_npu.py

@@ -20,7 +20,7 @@ sys.path.append("..")
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
+import paddle.static.amp.amp_nn as amp_nn

 paddle.enable_static()
 SEED = 2021
python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py

@@ -17,7 +17,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
-import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
+import paddle.static.amp.amp_nn as amp_nn


 def check_finite_and_unscale_wrapper(x, scale):
python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py

@@ -61,7 +61,7 @@ class TestFuseBatchNormActPass(unittest.TestCase):
             loss = paddle.mean(loss)
             sgd = fluid.optimizer.SGD(learning_rate=0.001)
             if use_cuda:
-                sgd = fluid.contrib.mixed_precision.decorate(
+                sgd = paddle.static.amp.decorate(
                     sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0
                 )
             sgd.minimize(loss)
python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py

@@ -112,7 +112,7 @@ class TestFusedBnAddActAPI(unittest.TestCase):
             )
             loss = paddle.mean(loss)
             sgd = fluid.optimizer.SGD(learning_rate=0.001)
-            sgd = fluid.contrib.mixed_precision.decorate(
+            sgd = paddle.static.amp.decorate(
                 sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0
             )
             sgd.minimize(loss)

@@ -173,7 +173,7 @@ class TestFusedBnAddActAPI(unittest.TestCase):
             )
             loss = paddle.mean(loss)
             sgd = fluid.optimizer.SGD(learning_rate=0.001)
-            sgd = fluid.contrib.mixed_precision.decorate(
+            sgd = paddle.static.amp.decorate(
                 sgd, use_dynamic_loss_scaling=True, init_loss_scaling=128.0
             )
             sgd.minimize(loss)
python/paddle/fluid/tests/unittests/test_layer_norm_op.py

@@ -24,9 +24,7 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
 from paddle.fluid import Program, program_guard
-from paddle.fluid.contrib.mixed_precision.fp16_utils import (
-    _keep_layer_norm_scale_bias_to_fp32,
-)
+from paddle.static.amp.fp16_utils import _keep_layer_norm_scale_bias_to_fp32

 paddle.enable_static()
python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py

@@ -18,7 +18,7 @@ import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
-import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
+import paddle.static.amp.amp_nn as amp_nn


 def update_loss_scaling_wrapper(
python/paddle/fluid/tests/unittests/xpu/test_update_loss_scaling_op_xpu.py

@@ -26,7 +26,7 @@ from xpu.get_test_cover_info import (
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn
+import paddle.static.amp.amp_nn as amp_nn

 paddle.enable_static()
python/paddle/jit/dy2static/partial_program.py

@@ -21,13 +21,6 @@ from paddle import _legacy_C_ops
 from paddle.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard
 from paddle.fluid import backward, core, framework, program_guard
 from paddle.fluid.compiler import BuildStrategy
-from paddle.fluid.contrib.mixed_precision.decorator import (
-    AutoMixedPrecisionLists,
-)
-from paddle.fluid.contrib.mixed_precision.fp16_utils import (
-    cast_model_to_fp16,
-    rewrite_program,
-)
 from paddle.fluid.dygraph import layers
 from paddle.fluid.dygraph.base import switch_to_static_graph
 from paddle.fluid.executor import (

@@ -189,7 +182,7 @@ class PartialProgramLayer:
         if tracer:
             custom_white_list, custom_black_list = tracer._get_amp_op_list()
         # For AMP training
-        self._amp_list = AutoMixedPrecisionLists(
+        self._amp_list = paddle.static.amp.fp16_lists.AutoMixedPrecisionLists(
             custom_white_list=custom_white_list,
             custom_black_list=custom_black_list,
         )

@@ -238,7 +231,9 @@ class PartialProgramLayer:
     def _create_amp_program(self, is_infer_mode=False):
         amp_program = self._origin_main_program.clone(for_test=is_infer_mode)
         with program_guard(amp_program):
-            rewrite_program(amp_program, self._amp_list)
+            paddle.static.amp.fp16_utils.rewrite_program(
+                amp_program, self._amp_list
+            )
         if is_infer_mode:
             return amp_program
         else:

@@ -252,7 +247,7 @@ class PartialProgramLayer:
             for_test=is_infer_mode
         )
         with program_guard(pure_fp16_program):
-            cast_model_to_fp16(
+            paddle.static.amp.fp16_utils.cast_model_to_fp16(
                 pure_fp16_program, self._amp_list, use_fp16_guard=False
            )
         if is_infer_mode:
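The two fp16_utils entry points used above can also be called directly under the new namespace; a minimal sketch, with the program construction being illustrative and the call signatures taken from this file's usage:

import paddle

paddle.enable_static()
main_prog, startup_prog = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[None, 16], dtype='float32')
    out = paddle.static.nn.fc(x, size=4)

amp_list = paddle.static.amp.fp16_lists.AutoMixedPrecisionLists()

# AMP path: insert casts op-by-op according to the white/black/gray lists
amp_prog = main_prog.clone()
paddle.static.amp.fp16_utils.rewrite_program(amp_prog, amp_list)

# pure-fp16 path: cast the whole program, keeping listed ops in fp32
fp16_prog = main_prog.clone()
paddle.static.amp.fp16_utils.cast_model_to_fp16(
    fp16_prog, amp_list, use_fp16_guard=False
)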
python/paddle/static/amp/__init__.py

 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -12,14 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ...fluid.contrib.mixed_precision import decorate  # noqa: F401
-from ...fluid.contrib.mixed_precision import CustomOpLists  # noqa: F401
-from ...fluid.contrib.mixed_precision import (
-    AutoMixedPrecisionLists,
-)  # noqa: F401
-from ...fluid.contrib.mixed_precision import fp16_guard  # noqa: F401
-from ...fluid.contrib.mixed_precision import cast_model_to_fp16  # noqa: F401
-from ...fluid.contrib.mixed_precision import (
-    cast_parameters_to_fp16,
-)  # noqa: F401
-from ...fluid.contrib.mixed_precision import bf16  # noqa: F401
+from . import decorator
+from .decorator import decorate
+from . import fp16_lists
+from .fp16_lists import CustomOpLists, AutoMixedPrecisionLists
+from . import fp16_utils
+from .fp16_utils import fp16_guard, cast_model_to_fp16, cast_parameters_to_fp16
+from . import bf16
+from .bf16 import bf16_guard
+
+__all__ = []
+__all__ += decorator.__all__
+__all__ += fp16_lists.__all__
+__all__ += fp16_utils.__all__
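After this change paddle.static.amp re-exports its own submodules instead of forwarding to fluid.contrib; a sketch of the public surface implied by the new __init__.py (the comments are descriptive, not exhaustive):

from paddle.static.amp import (
    decorate,                  # optimizer wrapper with (dynamic) loss scaling
    CustomOpLists,
    AutoMixedPrecisionLists,   # fp16 white/black/gray op lists
    fp16_guard,
    cast_model_to_fp16,
    cast_parameters_to_fp16,
    bf16,                      # subpackage: decorate_bf16, bf16_guard, ...
)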
python/paddle/fluid/contrib/mixed_precision/amp_nn.py → python/paddle/static/amp/amp_nn.py

@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle.fluid.data_feeder import check_variable_and_dtype, check_type
-from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.framework import Variable, in_dygraph_mode
-from paddle.fluid import core
 from paddle import _C_ops
+from paddle.fluid import core
+from paddle.fluid.data_feeder import check_type, check_variable_and_dtype
+from paddle.fluid.framework import Variable, in_dygraph_mode
+from paddle.fluid.layer_helper import LayerHelper

 __all__ = ['check_finite_and_unscale', 'update_loss_scaling']
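amp_nn keeps its two public ops (see __all__ above); a hedged sketch of check_finite_and_unscale under the new path, assuming the (tensor_list, scale) -> (outs, found_inf) calling convention that the unit tests in this commit wrap:

import paddle
import paddle.static.amp.amp_nn as amp_nn  # was paddle.fluid.contrib.mixed_precision.amp_nn

paddle.enable_static()
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    grad = paddle.static.data(name='grad', shape=[8, 8], dtype='float32')
    scale = paddle.static.data(name='scale', shape=[1], dtype='float32')
    # assumed return: the unscaled tensors plus a flag that is True when
    # any input contains inf/nan
    outs, found_inf = amp_nn.check_finite_and_unscale([grad], scale)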
python/paddle/fluid/contrib/mixed_precision/bf16/__init__.py → python/paddle/static/amp/bf16/__init__.py

@@ -13,11 +13,17 @@
 # limitations under the License.

 from . import amp_lists
-from .amp_lists import *
+from .amp_lists import AutoMixedPrecisionListsBF16
 from . import amp_utils
-from .amp_utils import *
+from .amp_utils import (
+    bf16_guard,
+    rewrite_program_bf16,
+    cast_model_to_bf16,
+    cast_parameters_to_bf16,
+    convert_float_to_uint16,
+)
 from . import decorator
-from .decorator import *
+from .decorator import decorate_bf16

 __all__ = []
 __all__ += decorator.__all__
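The bf16 entry points keep their names under paddle.static.amp.bf16; a hedged sketch of decorating an optimizer for bf16, assuming decorate_bf16 accepts the op lists via an amp_lists keyword in the same way the fp16 decorate() does (the optimizer itself is illustrative):

import paddle
import paddle.fluid as fluid

paddle.enable_static()
sgd = fluid.optimizer.SGD(learning_rate=0.001)
# ops in custom_fp32_list are kept in fp32 (name taken from the docstring below)
bf16_list = paddle.static.amp.bf16.AutoMixedPrecisionListsBF16(
    custom_fp32_list={'lstm'}
)
# the returned optimizer's minimize() rewrites the program for bf16
# where the lists allow it
sgd = paddle.static.amp.bf16.decorate_bf16(sgd, amp_lists=bf16_list)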
python/paddle/fluid/contrib/mixed_precision/bf16/amp_lists.py → python/paddle/static/amp/bf16/amp_lists.py

@@ -13,13 +13,12 @@
 # limitations under the License.

 import copy

 from paddle.fluid import core
-from ..fp16_lists import (
-    white_list as white_list_fp16,
-    black_list as black_list_fp16,
-    gray_list as gray_list_fp16,
-)
+from ..fp16_lists import black_list as black_list_fp16
+from ..fp16_lists import gray_list as gray_list_fp16
+from ..fp16_lists import white_list as white_list_fp16

 __all__ = ["AutoMixedPrecisionListsBF16"]

@@ -40,7 +39,7 @@ class AutoMixedPrecisionListsBF16:
         import paddle
         paddle.enable_static()
         with paddle.static.amp.bf16_guard():
-            paddle.static.amp.AutoMixedPrecisionListsBF16(custom_fp32_list={'lstm'})
+            paddle.static.amp.bf16.AutoMixedPrecisionListsBF16(custom_fp32_list={'lstm'})
     """

     def __init__(
python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py → python/paddle/static/amp/bf16/amp_utils.py

@@ -13,24 +13,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .... import core
-from .... import framework
-from .... import global_scope
-from ....log_helper import get_logger
-from ....wrapped_decorator import signature_safe_contextmanager
-from .amp_lists import AutoMixedPrecisionListsBF16
+import collections
+import logging
+import struct
+
+import numpy as np
+
+from paddle.fluid import core, framework, global_scope
+from paddle.fluid.log_helper import get_logger
+from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
+
 from ..fp16_utils import (
-    find_true_prev_op,
-    find_true_post_op,
     _rename_arg,
-    find_op_index,
     _rename_op_input,
+    find_op_index,
+    find_true_post_op,
+    find_true_prev_op,
 )
-import collections
-import struct
-import logging
-import numpy as np
+from .amp_lists import AutoMixedPrecisionListsBF16

 __all__ = [
     "bf16_guard",
python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py → python/paddle/static/amp/bf16/decorator.py

@@ -12,22 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import types
+import warnings
+
 import paddle
-from paddle.fluid import (
-    core,
-    default_main_program,
-    layers,
-    program_guard,
-    unique_name,
-)
+from paddle.fluid import core, default_main_program, program_guard, unique_name
+
+from .amp_lists import AutoMixedPrecisionListsBF16
 from .amp_utils import (
-    rewrite_program_bf16,
     cast_model_to_bf16,
     cast_parameters_to_bf16,
+    rewrite_program_bf16,
 )
-from .amp_lists import AutoMixedPrecisionListsBF16
-import types
-import warnings

 __all__ = ["decorate_bf16"]
python/paddle/fluid/contrib/mixed_precision/decorator.py → python/paddle/static/amp/decorator.py

@@ -12,24 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ... import core
-from ... import default_main_program
-from ... import default_startup_program
-from ... import framework
-from ... import layers
-from ... import program_guard
-from ... import unique_name
-from . import fp16_utils
-from .fp16_utils import rewrite_program
-from .fp16_utils import cast_model_to_fp16
-from .fp16_utils import cast_parameters_to_fp16
-from .fp16_utils import update_role_var_grad
-from .fp16_lists import AutoMixedPrecisionLists
-from .amp_nn import check_finite_and_unscale
-from .amp_nn import update_loss_scaling
+import types
+import warnings
+
+import paddle
+
+from paddle.fluid import (
+    core,
+    default_main_program,
+    default_startup_program,
+    layers,
+    program_guard,
+    unique_name,
+)
+
+from .amp_nn import check_finite_and_unscale, update_loss_scaling
+from .fp16_lists import AutoMixedPrecisionLists
+from .fp16_utils import (
+    cast_model_to_fp16,
+    cast_parameters_to_fp16,
+    rewrite_program,
+    update_role_var_grad,
+)

 __all__ = ["decorate"]
python/paddle/fluid/contrib/mixed_precision/fp16_lists.py → python/paddle/static/amp/fp16_lists.py

@@ -13,7 +13,8 @@
 # limitations under the License.

 import copy

-from ... import core
+from paddle.fluid import core

 __all__ = ["CustomOpLists", "AutoMixedPrecisionLists"]
python/paddle/fluid/contrib/mixed_precision/fp16_utils.py → python/paddle/static/amp/fp16_utils.py

@@ -12,17 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from ... import core
-from ... import framework
-from ... import layers
-from ... import global_scope
-from ...log_helper import get_logger
-from ...wrapped_decorator import signature_safe_contextmanager
-from .fp16_lists import AutoMixedPrecisionLists
 import collections
 import logging
+
 import numpy as np
+
+from paddle.fluid import core, framework, global_scope
+from paddle.fluid.log_helper import get_logger
+from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
+
 from .fp16_lists import AutoMixedPrecisionLists

 __all__ = ["fp16_guard", "cast_model_to_fp16", "cast_parameters_to_fp16"]

 _logger = get_logger(
python/setup.py.in

@@ -337,8 +337,6 @@ packages=['paddle',
              'paddle.fluid.dataloader',
              'paddle.fluid.contrib',
              'paddle.fluid.contrib.extend_optimizer',
-             'paddle.fluid.contrib.mixed_precision',
-             'paddle.fluid.contrib.mixed_precision.bf16',
              'paddle.fluid.contrib.layers',
              'paddle.fluid.transpiler',
              'paddle.fluid.transpiler.details',

@@ -400,6 +398,7 @@ packages=['paddle',
              'paddle.static',
              'paddle.static.nn',
              'paddle.static.amp',
+             'paddle.static.amp.bf16',
              'paddle.static.quantization',
              'paddle.quantization',
              'paddle.quantization.imperative',
setup.py

@@ -1236,8 +1236,6 @@ def get_setup_parameters():
         'paddle.fluid.dataloader',
         'paddle.fluid.contrib',
         'paddle.fluid.contrib.extend_optimizer',
-        'paddle.fluid.contrib.mixed_precision',
-        'paddle.fluid.contrib.mixed_precision.bf16',
         'paddle.fluid.contrib.layers',
         'paddle.fluid.transpiler',
         'paddle.fluid.transpiler.details',

@@ -1299,6 +1297,7 @@ def get_setup_parameters():
         'paddle.static',
         'paddle.static.nn',
         'paddle.static.amp',
+        'paddle.static.amp.bf16',
         'paddle.static.quantization',
         'paddle.quantization',
         'paddle.quantization.imperative',