BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit da3e9d66 (unverified)
Authored Jan 05, 2023 by zhangkaihuo; committed via GitHub on Jan 05, 2023
move fuild.dygraph.amp to paddle.amp (#49193)
Parent commit: 343bff7b
Showing 12 changed files with 1221 additions and 1315 deletions (+1221, -1315).
python/paddle/amp/__init__.py  (+12, -1)
python/paddle/amp/auto_cast.py  (+631, -2)
python/paddle/amp/grad_scaler.py  (+557, -2)
python/paddle/fluid/dygraph/__init__.py  (+0, -4)
python/paddle/fluid/dygraph/amp/__init__.py  (+0, -23)
python/paddle/fluid/dygraph/amp/auto_cast.py  (+0, -666)
python/paddle/fluid/dygraph/amp/loss_scaler.py  (+0, -589)
python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py  (+19, -21)
python/paddle/fluid/tests/unittests/test_low_precision_list.py  (+1, -1)
python/paddle/jit/dy2static/partial_program.py  (+1, -4)
python/setup.py.in  (+0, -1)
setup.py  (+0, -1)
python/paddle/amp/__init__.py
@@ -13,7 +13,18 @@
 # limitations under the License.
 from .auto_cast import auto_cast  # noqa: F401
-from .grad_scaler import GradScaler  # noqa: F401
 from .auto_cast import decorate  # noqa: F401
+from .auto_cast import amp_guard  # noqa: F401
+from .auto_cast import amp_decorate  # noqa: F401
+from .auto_cast import low_precision_op_list  # noqa: F401
+from .auto_cast import WHITE_LIST  # noqa: F401
+from .auto_cast import BLACK_LIST  # noqa: F401
+from .auto_cast import PURE_FP16_WHITE_LIST  # noqa: F401
+from .auto_cast import PURE_FP16_BLACK_LIST  # noqa: F401
+from . import grad_scaler  # noqa: F401
+from .grad_scaler import GradScaler  # noqa: F401
+from .grad_scaler import AmpScaler  # noqa: F401
+from .grad_scaler import OptimizerState  # noqa: F401

 __all__ = ['auto_cast', 'GradScaler', 'decorate']
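For downstream code, the practical effect of the widened export list above is that the legacy dygraph AMP names now resolve from the public package. A minimal sketch, assuming a Paddle build that contains this commit:

    # All of these imports resolve after #49193; the first line existed before.
    from paddle.amp import auto_cast, GradScaler, decorate
    from paddle.amp import amp_guard, amp_decorate, AmpScaler, OptimizerState
    from paddle.amp import WHITE_LIST, BLACK_LIST
    from paddle.amp import PURE_FP16_WHITE_LIST, PURE_FP16_BLACK_LIST
    from paddle.amp import low_precision_op_list

Note that __all__ still lists only 'auto_cast', 'GradScaler', and 'decorate'; the moved names are re-exported for compatibility but are not added to the public __all__.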
python/paddle/amp/auto_cast.py
This diff is collapsed; click to expand. (Per the commit title, this is where the implementation from python/paddle/fluid/dygraph/amp/auto_cast.py moves.)
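Since the moved auto_cast.py is collapsed above, here is a minimal usage sketch of the relocated amp_guard, based solely on the call patterns visible in the test diff further down (it assumes a CUDA build with fp16 kernel support):

    import numpy as np
    import paddle

    conv = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
    x = paddle.to_tensor(
        np.random.uniform(-1, 1, [1, 3, 32, 32]).astype('float32')
    )

    # O1 auto-cast: white-listed ops (e.g. conv2d) run in fp16, the rest in fp32
    with paddle.amp.amp_guard(True):
        y_o1 = conv(x)

    # O2 pure-fp16 mode, with per-guard custom op lists
    with paddle.amp.amp_guard(
        True,
        custom_white_list=["log"],
        custom_black_list=["conv2d"],  # black-listed ops stay in fp32
        level='O2',
    ):
        y_o2 = conv(x)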
python/paddle/amp/grad_scaler.py
This diff is collapsed; click to expand. (Per the commit title, this is where the implementation from python/paddle/fluid/dygraph/amp/loss_scaler.py, including AmpScaler and OptimizerState, moves.)
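The relocated AmpScaler is likewise collapsed. A minimal training-step sketch assembled from the calls exercised in the test diff below; scale() and init_loss_scaling=1024 are confirmed there, while minimize() follows the legacy fluid AmpScaler API and is an assumption:

    import paddle

    model = paddle.nn.Linear(1024, 10)
    opt = paddle.optimizer.SGD(
        learning_rate=0.01, parameters=model.parameters()
    )
    scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)

    data = paddle.rand([10, 1024])
    with paddle.amp.amp_guard(True):
        loss = model(data).mean()
    scaled = scaler.scale(loss)   # loss * loss_scaling, as the test asserts
    scaled.backward()
    scaler.minimize(opt, scaled)  # legacy-style step; assumed unchanged by the move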
python/paddle/fluid/dygraph/__init__.py
@@ -28,9 +28,6 @@ from .parallel import *
 from . import learning_rate_scheduler
 from .learning_rate_scheduler import *
-from . import amp
-from .amp import *
 from .math_op_patch import monkey_patch_math_varbase

 __all__ = []
@@ -38,4 +35,3 @@ __all__ += layers.__all__
 __all__ += base.__all__
 __all__ += parallel.__all__
 __all__ += learning_rate_scheduler.__all__
-__all__ += amp.__all__
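With this re-export gone, code that still reaches AMP helpers through paddle.fluid.dygraph has to switch to the new location. A hedged sketch of a transitional fallback pattern (assuming a build that includes this commit):

    try:
        # Legacy path, removed by this commit
        from paddle.fluid.dygraph import amp_guard
    except ImportError:
        # New home after #49193
        from paddle.amp import amp_guard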
python/paddle/fluid/dygraph/amp/__init__.py (deleted, file mode 100644 → 0)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import auto_cast
from .auto_cast import *
from . import loss_scaler
from .loss_scaler import *

__all__ = []
__all__ += auto_cast.__all__
__all__ += loss_scaler.__all__
python/paddle/fluid/dygraph/amp/auto_cast.py (deleted, file mode 100644 → 0)
This diff is collapsed; click to expand.
python/paddle/fluid/dygraph/amp/loss_scaler.py (deleted, file mode 100644 → 0)
This diff is collapsed; click to expand.
python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py
@@ -60,10 +60,10 @@ class TestAutoCast(unittest.TestCase):
         with fluid.dygraph.guard():
             conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             data = fluid.dygraph.to_variable(data)
-            with fluid.dygraph.amp_guard(True):
+            with paddle.amp.amp_guard(True):
                 out_fp16 = conv2d(data)
-            with fluid.dygraph.amp_guard(False):
+            with paddle.amp.amp_guard(False):
                 out_fp32 = conv2d(data)
             self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32)
@@ -77,7 +77,7 @@ class TestAutoCast(unittest.TestCase):
         data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32')
         with fluid.dygraph.guard():
             data = fluid.dygraph.to_variable(data)
-            with fluid.dygraph.amp_guard(True):
+            with paddle.amp.amp_guard(True):
                 out_fp32 = paddle.mean(data)
             self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32)
@@ -89,9 +89,9 @@ class TestAutoCast(unittest.TestCase):
     def custom_op_list(self):
         with fluid.dygraph.guard():
             tracer = fluid.framework._dygraph_tracer()
-            base_white_list = fluid.dygraph.amp.auto_cast.WHITE_LIST
-            base_black_list = fluid.dygraph.amp.auto_cast.BLACK_LIST
-            with fluid.dygraph.amp_guard(
+            base_white_list = paddle.amp.WHITE_LIST
+            base_black_list = paddle.amp.BLACK_LIST
+            with paddle.amp.amp_guard(
                 custom_white_list=["log"], custom_black_list=["conv2d"]
             ):
                 white_list, black_list = tracer._get_amp_op_list()
@@ -105,9 +105,9 @@ class TestAutoCast(unittest.TestCase):
                 == (set(base_black_list) - {"log"}) | {"conv2d"}
             )
-            base_white_list = fluid.dygraph.amp.auto_cast.PURE_FP16_WHITE_LIST
-            base_black_list = fluid.dygraph.amp.auto_cast.PURE_FP16_BLACK_LIST
-            with fluid.dygraph.amp_guard(
+            base_white_list = paddle.amp.PURE_FP16_WHITE_LIST
+            base_black_list = paddle.amp.PURE_FP16_BLACK_LIST
+            with paddle.amp.amp_guard(
                 custom_white_list=["log"],
                 custom_black_list=["conv2d"],
                 level='O2',
@@ -138,7 +138,7 @@ class TestAutoCast(unittest.TestCase):
                 stride=2,
                 act='relu',
             )
-            with fluid.dygraph.amp_guard(
+            with paddle.amp.amp_guard(
                 custom_white_list=["conv2d"], custom_black_list=["conv2d"]
             ):
                 inp = fluid.dygraph.to_variable(inp_np)
@@ -154,13 +154,13 @@ class TestAutoCast(unittest.TestCase):
         with fluid.dygraph.guard():
             conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
             data = fluid.dygraph.to_variable(data)
-            with fluid.dygraph.amp_guard(True):
+            with paddle.amp.amp_guard(True):
                 out_amp_fp16 = conv2d(data)
                 out_amp_fp32 = paddle.expand_as(
                     out_amp_fp16, out_amp_fp16
                 )  # expand_as_v2 has no fp16 kernel
-            with fluid.dygraph.amp_guard(True, level='O2'):
+            with paddle.amp.amp_guard(True, level='O2'):
                 out_purefp16_fp16 = conv2d(data)
                 out_purefp16_fp32 = paddle.expand_as(
                     out_purefp16_fp16, out_purefp16_fp16
@@ -184,7 +184,7 @@ class TestAutoCast(unittest.TestCase):
             with fluid.dygraph.guard():
                 conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False)
                 data = fluid.dygraph.to_variable(data)
-                with fluid.dygraph.amp_guard(level='O'):
+                with paddle.amp.amp_guard(level='O'):
                     out = conv2d(data)
         self.assertRaises(ValueError, func)
@@ -197,7 +197,7 @@ class TestAmpScaler(unittest.TestCase):
     def scale(self):
         with fluid.dygraph.guard():
             data = paddle.rand([10, 1024])
-            scaler = paddle.fluid.dygraph.AmpScaler(init_loss_scaling=1024)
+            scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
             scaled_data = scaler.scale(data)
             self.assertEqual(
                 np.array_equal(scaled_data.numpy(), data.numpy() * 1024), True
@@ -223,7 +223,7 @@ class TestAmpScaler(unittest.TestCase):
             optimizer = fluid.optimizer.SGDOptimizer(
                 learning_rate=0.01, parameter_list=model.parameters()
             )
-            scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)
+            scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
             data = fluid.dygraph.to_variable(inp_np)
             out = model(data)
@@ -332,7 +332,7 @@ class TestAmpScaler(unittest.TestCase):
             optimizer = fluid.optimizer.SGDOptimizer(
                 learning_rate=0.01, parameter_list=model.parameters()
            )
-            scaler = fluid.dygraph.AmpScaler(init_loss_scaling=1024)
+            scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
             data = fluid.dygraph.to_variable(inp_np)
             out = model(data)
@@ -1262,12 +1262,12 @@ class TestResnet(unittest.TestCase):
                     dy_param_init_value[param.name] = param.numpy()
             program = None
-            scaler = paddle.fluid.dygraph.AmpScaler(
+            scaler = paddle.amp.AmpScaler(
                 enable=enable_amp, init_loss_scaling=2.0**10
             )
             if enable_amp and (level == 'O2'):
-                resnet, optimizer = paddle.fluid.dygraph.amp_decorate(
-                    resnet,
+                resnet, optimizer = paddle.amp.amp_decorate(
+                    models=resnet,
                     optimizers=optimizer, level='O2'
                 )
@@ -1290,9 +1290,7 @@ class TestResnet(unittest.TestCase):
                 img = fluid.dygraph.to_variable(dy_x_data)
                 label = fluid.dygraph.to_variable(y_data)
                 label.stop_gradient = True
-                with paddle.fluid.dygraph.amp_guard(
-                    enable=enable_amp, level=level
-                ):
+                with paddle.amp.amp_guard(enable=enable_amp, level=level):
                     out = resnet(img)
                     loss = paddle.nn.functional.cross_entropy(
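Taken together, the test edits above are a mechanical rename. A hedged before/after sketch of user code migrating across this commit (the old lines are shown as comments since the fluid paths no longer exist afterwards; O2 decoration assumes a GPU build):

    import paddle

    # Before (fluid paths, removed by this commit):
    #   with fluid.dygraph.amp_guard(True): ...
    #   scaler = paddle.fluid.dygraph.AmpScaler(init_loss_scaling=1024)
    #   model, opt = paddle.fluid.dygraph.amp_decorate(model, ...)

    # After (#49193):
    model = paddle.nn.Linear(8, 8)
    opt = paddle.optimizer.SGD(parameters=model.parameters())
    scaler = paddle.amp.AmpScaler(init_loss_scaling=1024)
    model, opt = paddle.amp.amp_decorate(
        models=model, optimizers=opt, level='O2'
    )
    with paddle.amp.amp_guard(True, level='O2'):
        out = model(paddle.rand([2, 8]))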
python/paddle/fluid/tests/unittests/test_low_precision_list.py
@@ -28,7 +28,7 @@ class TestAMPList(unittest.TestCase):
         with paddle.amp.auto_cast():
             conv = conv2d(data)
             c = a + b
-        paddle.fluid.dygraph.amp.auto_cast.low_precision_op_list()
+        paddle.amp.low_precision_op_list()
         op_list = paddle.fluid.core.get_low_precision_op_list()
         print(conv.dtype)
         if conv.dtype == paddle.float16:
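A minimal sketch of the relocated helper, following the call pattern in the test above. It assumes a GPU build (fp16 kernels), and the output format of the two calls is not shown in this diff:

    import paddle

    conv2d = paddle.nn.Conv2D(3, 2, 3)
    data = paddle.rand([1, 3, 32, 32])

    with paddle.amp.auto_cast():
        conv = conv2d(data)

    paddle.amp.low_precision_op_list()  # new public path for the op report
    op_list = paddle.fluid.core.get_low_precision_op_list()  # raw data from core
    print(conv.dtype, op_list)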
python/paddle/jit/dy2static/partial_program.py
@@ -18,6 +18,7 @@ import numpy as np
 import paddle
 from paddle import _legacy_C_ops
+from paddle.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard
 from paddle.fluid import backward, core, framework, program_guard
 from paddle.fluid.compiler import BuildStrategy
 from paddle.fluid.contrib.mixed_precision.decorator import (
@@ -28,10 +29,6 @@ from paddle.fluid.contrib.mixed_precision.fp16_utils import (
     rewrite_program,
 )
 from paddle.fluid.dygraph import layers
-from paddle.fluid.dygraph.amp.auto_cast import (
-    _in_amp_guard,
-    _in_pure_fp16_guard,
-)
 from paddle.fluid.dygraph.base import switch_to_static_graph
 from paddle.fluid.executor import (
     _is_dy2st_enable_standalone_executor,
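The only change here is the import path for two private guard probes. A tiny sketch of the new path, shown purely to illustrate the relocation (these are internal helpers, so the no-argument call signature is an assumption from the surrounding usage):

    import paddle
    from paddle.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard

    print(_in_amp_guard(), _in_pure_fp16_guard())  # expected False outside any guard
    with paddle.amp.amp_guard(True):
        print(_in_amp_guard())  # expected True inside an O1 guard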
python/setup.py.in
@@ -331,7 +331,6 @@ packages=['paddle',
           'paddle.inference.contrib.utils',
           'paddle.fluid',
           'paddle.fluid.dygraph',
-          'paddle.fluid.dygraph.amp',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
           'paddle.fluid.distributed',
setup.py
@@ -1202,7 +1202,6 @@ def get_setup_parameters():
         'paddle.inference.contrib.utils',
         'paddle.fluid',
         'paddle.fluid.dygraph',
-        'paddle.fluid.dygraph.amp',
         'paddle.fluid.proto',
         'paddle.fluid.proto.profiler',
         'paddle.fluid.distributed',