Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
42b0748a
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
42b0748a
编写于
1月 19, 2018
作者:
F
fengjiayi
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add unittest
上级
974183b4
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
93 additions
and
3 deletions
+93
-3
python/paddle/v2/fluid/clip.py
python/paddle/v2/fluid/clip.py
+11
-3
python/paddle/v2/fluid/tests/test_error_clip.py
python/paddle/v2/fluid/tests/test_error_clip.py
+0
-0
python/paddle/v2/fluid/tests/test_gradient_clip.py
python/paddle/v2/fluid/tests/test_gradient_clip.py
+82
-0
未找到文件。
python/paddle/v2/fluid/clip.py
浏览文件 @
42b0748a
...
...
@@ -113,6 +113,7 @@ class GradientClipByNorm(BaseGradientClipAttr):
class
GradientClipByGlobalNorm
(
BaseGradientClipAttr
):
global_norm_var
=
None
local_norm_var
=
None
clip_norm_var
=
None
scale_var
=
None
...
...
@@ -123,12 +124,18 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
cls
.
global_norm_var
=
layers
.
fill_constant
(
shape
=
[
1
],
dtype
=
"float32"
,
value
=
0.0
)
cls
.
local_norm_var
=
framework
.
default_main_program
().
current_block
(
).
create_var
(
name
=
framework
.
unique_name
(
"local_norm"
),
dtype
=
"float32"
,
persistable
=
False
)
cls
.
clip_norm_var
=
layers
.
fill_constant
(
shape
=
[
1
],
dtype
=
"float32"
,
value
=
clip_norm
)
@
classmethod
def
check_init
(
cls
):
if
not
(
isinstance
(
cls
.
global_norm_var
,
framework
.
Variable
)
and
isinstance
(
cls
.
local_norm_var
,
framework
.
Variable
)
and
isinstance
(
cls
.
clip_norm_var
,
framework
.
Variable
)):
raise
ValueError
(
"Class 'GradientClipByGlobalNorm' has not been properly initialized.
\
...
...
@@ -138,9 +145,10 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
cls
=
self
.
__class__
cls
.
check_init
()
local_norm_var
=
layers
.
reduce_sum
(
input
=
layers
.
pow
(
x
=
grad
,
factor
=
2.0
))
cls
.
local_norm_var
=
layers
.
reduce_sum
(
input
=
layers
.
pow
(
x
=
grad
,
factor
=
2.0
))
layers
.
sums
(
input
=
[
local_norm_var
,
cls
.
global_norm_var
],
input
=
[
cls
.
local_norm_var
,
cls
.
global_norm_var
],
out
=
[
cls
.
global_norm_var
])
def
create_operators
(
self
,
param
,
grad
):
...
...
@@ -148,7 +156,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
cls
.
check_init
()
if
cls
.
scale_var
is
None
:
cls
.
global_norm_var
=
layers
.
sqrt
(
x
=
cls
.
global_norm_var
)
layers
.
sqrt
(
x
=
cls
.
global_norm_var
,
out
=
cls
.
global_norm_var
)
cls
.
scale_var
=
layers
.
elementwise_div
(
x
=
cls
.
clip_norm_var
,
y
=
layers
.
elementwise_max
(
...
...
python/paddle/v2/fluid/tests/test_clip.py
→
python/paddle/v2/fluid/tests/test_
error_
clip.py
浏览文件 @
42b0748a
文件已移动
python/paddle/v2/fluid/tests/test_gradient_clip.py
0 → 100644
浏览文件 @
42b0748a
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys

import numpy as np

import paddle.v2 as paddle
import paddle.v2.fluid as fluid
def _get_global_param_norm_(params_grads):
    """Build graph ops computing the global L2 norm of all gradients.

    Appends, to the current fluid program, operators that accumulate
    sum(grad ** 2) over every gradient in *params_grads* and take the
    square root of the total, i.e. sqrt(sum_i ||g_i||^2).

    Args:
        params_grads: iterable of (parameter, gradient) variable pairs;
            only the gradient of each pair is read.

    Returns:
        A scalar (shape ``[1]``, float32) fluid variable holding the
        global norm once the program is executed.
    """
    # Scalar accumulator, initialized to zero in the graph.
    accum = fluid.layers.fill_constant(shape=[1], dtype="float32", value=0.0)
    for _unused_param, grad_var in params_grads:
        # Squared L2 norm of this single gradient tensor.
        sq_norm = fluid.layers.reduce_sum(
            input=fluid.layers.pow(x=grad_var, factor=2.0))
        # In-place accumulation: accum <- sq_norm + accum.
        fluid.layers.sums(input=[sq_norm, accum], out=[accum])
    # In-place square root of the accumulated total.
    fluid.layers.sqrt(x=accum, out=accum)
    return accum
# --- Test script: gradient clipping by global norm ----------------------
# Builds a small MNIST MLP twice (with and without gradient clipping),
# runs a few mini-batches, and checks that the clipped global gradient
# norm equals min(unclipped_norm, CLIP).
# Fixes over the original: misspelled locals `gloabl_norm*` renamed to
# `global_norm*`, and `sys.exit` used instead of the site-module `exit`.

BATCH_SIZE = 128
CLIP = 0.5  # clip_norm threshold handed to gradient_clip_by_global_norm

# Forward network: 784 -> 128 -> 64 -> 10 softmax classifier.
prog = fluid.framework.Program()
with fluid.program_guard(main_program=prog):
    image = fluid.layers.data(name='x', shape=[784], dtype='float32')
    hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
    hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
    predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
    label = fluid.layers.data(name='y', shape=[1], dtype='int64')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(x=cost)

# Clone the program BEFORE adding backward ops so both variants share the
# identical forward graph; look the loss var up by name in the clone.
prog_clip = prog.clone()
avg_cost_clip = prog_clip.block(0).var(avg_cost.name)

p_g = fluid.backward.append_backward(loss=avg_cost)
p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)

# Unclipped program: just measure the global gradient norm.
with fluid.program_guard(main_program=prog):
    global_norm = _get_global_param_norm_(p_g)

# Clipped program: register the global-norm clip attribute, append the
# clip ops, then measure the norm of the CLIPPED gradients.
with fluid.program_guard(main_program=prog_clip):
    fluid.clip.gradient_clip_by_global_norm(clip_norm=CLIP)
    p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip)
    global_norm_clip = _get_global_param_norm_(p_g_clip)

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.mnist.train(), buf_size=8192),
    batch_size=BATCH_SIZE)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
exe.run(fluid.default_startup_program())

count = 0
for data in train_reader():
    count += 1
    if count > 5:
        # Five batches are enough to exercise the clipping path.
        break
    out, = exe.run(prog, feed=feeder.feed(data), fetch_list=[global_norm])
    out_clip, = exe.run(prog_clip,
                        feed=feeder.feed(data),
                        fetch_list=[global_norm_clip])
    # Global-norm clipping guarantees: clipped_norm == min(norm, CLIP).
    if not np.allclose(out_clip, np.minimum(out, np.array([CLIP]))):
        sys.exit(1)

sys.exit(0)
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录