BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit f9b90dda (unverified)
Authored Sep 20, 2022 by YuanRisheng; committed by GitHub on Sep 20, 2022

[PHI] Add yaml and unittest for update_loss_scaling and check_finite_and_unscale (#46130)

* add amp yaml
* fix ci bugs

Parent: 8ff7df8f
Showing 5 changed files with 82 additions and 8 deletions (+82 / -8):

- paddle/phi/api/yaml/generator/wrapped_infermeta_gen.py (+3 / -2)
- paddle/phi/api/yaml/legacy_ops.yaml (+23 / -0)
- python/paddle/fluid/contrib/mixed_precision/amp_nn.py (+14 / -1)
- python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py (+15 / -3)
- python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py (+27 / -2)
paddle/phi/api/yaml/generator/wrapped_infermeta_gen.py

@@ -45,9 +45,10 @@ PD_REGISTER_INFER_META_FN({api.kernel['func'][0]}, phi::{api.infer_meta['func']}
     tensor_type_map = {
         'const Tensor&': 'const MetaTensor&',
-        'const std::vector<Tensor>&': 'const std::vector<MetaTensor>&',
+        'const std::vector<Tensor>&': 'const std::vector<const MetaTensor*>&',
         'Tensor': 'MetaTensor*',
-        'std::vector<Tensor>': 'std::vector<MetaTensor>*',
+        'std::vector<Tensor>': 'std::vector<MetaTensor*>',
+        'const paddle::optional<Tensor>&': 'const MetaTensor&'
     }
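Note: tensor_type_map drives how the yaml generator rewrites a kernel's C++ argument types into the matching MetaTensor types when emitting wrapped InferMeta registrations; this commit switches the vector mappings to pointer-based containers and adds a mapping for optional tensors. A minimal Python sketch of how such a lookup is typically applied — the map below is copied from the new code, but map_arg_type is a hypothetical helper for illustration, not the generator's actual function:

# Map keys/values copied from the diff above; the helper itself is
# illustrative only, not part of wrapped_infermeta_gen.py.
tensor_type_map = {
    'const Tensor&': 'const MetaTensor&',
    'const std::vector<Tensor>&': 'const std::vector<const MetaTensor*>&',
    'Tensor': 'MetaTensor*',
    'std::vector<Tensor>': 'std::vector<MetaTensor*>',
    'const paddle::optional<Tensor>&': 'const MetaTensor&',
}


def map_arg_type(cpp_type):
    # Non-tensor arguments (int, float, Scalar, ...) pass through unchanged.
    return tensor_type_map.get(cpp_type, cpp_type)


print(map_arg_type('const std::vector<Tensor>&'))
# -> const std::vector<const MetaTensor*>&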
paddle/phi/api/yaml/legacy_ops.yaml

@@ -463,6 +463,18 @@
     func : celu
   backward : celu_grad

+- op : check_finite_and_unscale_
+  args : (Tensor[] x, Tensor scale, Tensor input_found_infinite)
+  output : Tensor[](out){x.size()}, Tensor(output_found_infinite)
+  infer_meta :
+    func : CheckFiniteAndUnscaleInferMeta
+    param : [x, scale]
+  kernel :
+    func : check_finite_and_unscale
+    param : [x, scale]
+    data_type : x
+  inplace : (x -> out), (input_found_infinite -> output_found_infinite)
+
 - op : class_center_sample
   args : (Tensor label, int num_classes, int num_samples, int ring_id, int rank, int nranks, bool fix_seed, int seed)
   output : Tensor(remapped_label), Tensor(sampled_local_class_center)

@@ -2763,6 +2775,17 @@
     backend : place
     data_type : dtype

+- op : update_loss_scaling_
+  args : (Tensor[] x, Tensor found_infinite, Tensor prev_loss_scaling, Tensor in_good_steps, Tensor in_bad_steps, int incr_every_n_steps, int decr_every_n_nan_or_inf, float incr_ratio, float decr_ratio, Scalar stop_update)
+  output : Tensor[](out){x.size()}, Tensor(loss_scaling), Tensor(out_good_steps), Tensor(out_bad_steps)
+  infer_meta :
+    func : UpdateLossScalingInferMeta
+    param : [x, found_infinite, prev_loss_scaling, in_good_steps, in_bad_steps]
+  kernel :
+    func : update_loss_scaling
+    data_type : x
+  inplace : (x -> out), (prev_loss_scaling -> loss_scaling), (in_good_steps -> out_good_steps), (in_bad_steps -> out_bad_steps)
+
 - op : unbind
   args : (Tensor input, int axis)
   output : Tensor[] {axis<0 ? input.dims()[input.dims().size()+axis]:input.dims()[axis]}
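For orientation: each yaml entry above generates an eager _C_ops binding whose Python signature follows the args list; the trailing underscore plus the inplace map mean the op writes results back into the listed inputs, and Tensor[](out){x.size()} makes the output list the same length as x. A hedged sketch of calling the generated binding directly, mirroring how amp_nn.py uses it in the next file (assumes a Paddle build that contains this commit):

import numpy as np
import paddle
from paddle import _C_ops

# One list entry per tensor to be unscaled; outputs have the same length.
xs = [paddle.to_tensor(np.ones((2, 2), dtype='float32'))]
scale = paddle.to_tensor(np.array([1024.0], dtype='float32'))
found_inf = paddle.to_tensor(np.array([False]))

# Inplace op: xs is unscaled in place and found_inf is overwritten
# (input_found_infinite -> output_found_infinite).
_C_ops.check_finite_and_unscale_(xs, scale, found_inf)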
python/paddle/fluid/contrib/mixed_precision/amp_nn.py

@@ -14,8 +14,9 @@
 from paddle.fluid.data_feeder import check_variable_and_dtype, check_type
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.framework import Variable
+from paddle.fluid.framework import Variable, in_dygraph_mode
 from paddle.fluid import core
+from paddle import _C_ops

 __all__ = ['check_finite_and_unscale', 'update_loss_scaling']

@@ -42,8 +43,13 @@ def check_finite_and_unscale(x, scale, name=None, float_status=None):
                              'check_finite_and_unscale')

     helper = LayerHelper("check_finite_and_unscale", **locals())
     found_inf = helper.create_variable_for_type_inference(dtype='bool')
+
+    if in_dygraph_mode():
+        _C_ops.check_finite_and_unscale_(x, scale, found_inf)
+        return x, found_inf
+
     inputs = {'X': x, 'Scale': scale}
     if core.is_compiled_with_npu():
         check_variable_and_dtype(float_status, "float_status",

@@ -108,6 +114,13 @@ def update_loss_scaling(x,
     else:
         assert prev_loss_scaling.dtype == e.dtype, \
             "The dtype of prev_loss_scaling should be equal to the dtype of x."

+    if in_dygraph_mode():
+        _C_ops.update_loss_scaling_(x, found_inf, prev_loss_scaling,
+                                    num_good_steps, num_bad_steps,
+                                    incr_every_n_steps, decr_every_n_nan_or_inf,
+                                    incr_ratio, decr_ratio, stop_update)
+        return x
+
     helper = LayerHelper("update_loss_scaling", **locals())
     inputs = {
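With the two dygraph branches above in place, the contrib API dispatches straight to the new PHI eager ops instead of appending static-graph ops. A small end-to-end sketch under eager mode (values are arbitrary; assumes this commit's build):

import numpy as np
import paddle
import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn

x = paddle.to_tensor(np.random.random((8, 8)).astype('float32'))
scale = paddle.to_tensor(np.array([2.0 ** 10], dtype='float32'))

# In dygraph this now calls _C_ops.check_finite_and_unscale_ directly.
outs, found_inf = amp_nn.check_finite_and_unscale([x], scale)
print(found_inf)  # [False] for finite inputs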
python/paddle/fluid/tests/unittests/test_amp_check_finite_and_scale_op.py

@@ -16,12 +16,20 @@ import unittest
 import numpy as np
 from op_test import OpTest, skip_check_grad_ci
 import paddle.fluid as fluid
 import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn


+def check_finite_and_unscale_wrapper(x, scale):
+    _, found_inf = amp_nn.check_finite_and_unscale([x], scale)
+    return x, found_inf
+
+
 class TestCheckFiniteAndUnscaleOp(OpTest):

     def setUp(self):
         self.op_type = "check_finite_and_unscale"
+        self.python_api = check_finite_and_unscale_wrapper
+        self.python_out_sig = ["out0", "FoundInfinite"]
         self.init_dtype()
         x = np.random.random((1024, 1024)).astype(self.dtype)
         scale = np.random.random((1)).astype(self.dtype)

@@ -36,7 +44,7 @@ class TestCheckFiniteAndUnscaleOp(OpTest):
         self.dtype = np.float32

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestCheckFiniteAndUnscaleOpWithNan(OpTest):

@@ -44,6 +52,8 @@ class TestCheckFiniteAndUnscaleOpWithNan(OpTest):
     def setUp(self):
         self.op_type = "check_finite_and_unscale"
         self.init_dtype()
+        self.python_api = check_finite_and_unscale_wrapper
+        self.python_out_sig = ["out0", "FoundInfinite"]
         x = np.random.random((1024, 1024)).astype(self.dtype)
         x[128][128] = np.nan
         scale = np.random.random((1)).astype(self.dtype)

@@ -60,7 +70,7 @@ class TestCheckFiniteAndUnscaleOpWithNan(OpTest):
     def test_check_output(self):
         # When input contains nan, do not check the output,
         # since the output may be nondeterministic and will be discarded.
-        self.check_output(no_check_set=['Out'])
+        self.check_output(no_check_set=['Out'], check_eager=True)


 class TestCheckFiniteAndUnscaleOpWithInf(OpTest):

@@ -68,6 +78,8 @@ class TestCheckFiniteAndUnscaleOpWithInf(OpTest):
     def setUp(self):
         self.op_type = "check_finite_and_unscale"
         self.init_dtype()
+        self.python_api = check_finite_and_unscale_wrapper
+        self.python_out_sig = ["out0", "FoundInfinite"]
         x = np.random.random((1024, 1024)).astype(self.dtype)
         x[128][128] = np.inf
         scale = np.random.random((1)).astype(self.dtype)

@@ -84,7 +96,7 @@ class TestCheckFiniteAndUnscaleOpWithInf(OpTest):
     def test_check_output(self):
         # When input contains inf, do not check the output,
         # since the output may be nondeterministic and will be discarded.
-        self.check_output(no_check_set=['Out'])
+        self.check_output(no_check_set=['Out'], check_eager=True)


 if __name__ == '__main__':
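The pattern here: python_api points OpTest at a Python wrapper matching the op's eager signature, python_out_sig names the outputs the wrapper returns, and check_eager=True then runs each check through the new PHI path as well. A quick hedged sanity sketch of the behavior the NaN/Inf cases assert (assumes this commit's build):

import numpy as np
import paddle
import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn

x = paddle.to_tensor(np.random.random((4, 4)).astype('float32'))
x[0, 0] = float('nan')  # poison one element
scale = paddle.to_tensor(np.array([2.0], dtype='float32'))

_, found_inf = amp_nn.check_finite_and_unscale([x], scale)
print(found_inf)  # [True]; the unscaled outputs are then discarded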
python/paddle/fluid/tests/unittests/test_update_loss_scaling_op.py

@@ -19,11 +19,32 @@ import paddle.fluid as fluid
 import paddle.fluid.contrib.mixed_precision.amp_nn as amp_nn


+def update_loss_scaling_wrapper(x,
+                                found_inf,
+                                prev_loss_scaling,
+                                num_good_steps,
+                                num_bad_steps,
+                                incr_every_n_steps,
+                                decr_every_n_nan_or_inf,
+                                incr_ratio,
+                                decr_ratio,
+                                stop_update=False):
+    amp_nn.update_loss_scaling([x], found_inf, prev_loss_scaling,
+                               num_good_steps, num_bad_steps,
+                               incr_every_n_steps, decr_every_n_nan_or_inf,
+                               incr_ratio, decr_ratio, stop_update)
+    return x, prev_loss_scaling, num_good_steps, num_bad_steps
+
+
 class TestUpdateLossScalingOp(OpTest):

     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
+        self.python_api = update_loss_scaling_wrapper
+        self.python_out_sig = [
+            "out0", "LossScaling", "OutGoodSteps", "OutBadSteps"
+        ]
         found_inf = np.array([False], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)

@@ -59,7 +80,7 @@ class TestUpdateLossScalingOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output(no_check_set=['Out'])
+        self.check_output(no_check_set=['Out'], check_eager=True)


 class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):

@@ -67,6 +88,10 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
     def setUp(self):
         self.op_type = "update_loss_scaling"
         self.init()
+        self.python_api = update_loss_scaling_wrapper
+        self.python_out_sig = [
+            "out0", "LossScaling", "OutGoodSteps", "OutBadSteps"
+        ]
         found_inf = np.array([True], dtype=np.bool_)
         x = np.random.random((1024, 1024)).astype(self.dtype)
         i = np.random.randint(0, 1024, 1)

@@ -90,7 +115,7 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
         }

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestUpdateLossScalingLayer(unittest.TestCase):
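For readers new to dynamic loss scaling, a pure-Python restatement of the update rule these tests exercise. This is a hedged reference sketch of the standard scheme, not Paddle's kernel code; the floor at 1.0 is my reading of the kernel's behavior and should be treated as an assumption:

def update_loss_scaling_ref(found_inf, scaling, good, bad,
                            incr_every_n_steps, decr_every_n_nan_or_inf,
                            incr_ratio, decr_ratio):
    """Reference sketch of dynamic loss scaling (assumption, not Paddle code)."""
    if found_inf:
        good, bad = 0, bad + 1
        if bad == decr_every_n_nan_or_inf:
            # Shrink the scale after too many overflowed steps
            # (floor at 1.0 is assumed).
            scaling, bad = max(scaling * decr_ratio, 1.0), 0
    else:
        good, bad = good + 1, 0
        if good == incr_every_n_steps:
            # Grow the scale after a long run of clean steps.
            scaling, good = scaling * incr_ratio, 0
    return scaling, good, bad


print(update_loss_scaling_ref(False, 2.0 ** 15, 999, 0, 1000, 2, 2.0, 0.5))
# -> (65536.0, 0, 0): the 1000th consecutive good step doubles the scale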