BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle)
Commit 223c01fd (unverified)
Authored Aug 26, 2021 by zhouweiwei2014; committed via GitHub on Aug 26, 2021

fix iscan python bug (#35148)

Parent: 289e1818
Showing 2 changed files with 16 additions and 25 deletions (+16 -25):

  python/paddle/fluid/tests/unittests/test_gradient_clip.py   +11 -20
  python/paddle/tensor/to_string.py                            +5  -5
python/paddle/fluid/tests/unittests/test_gradient_clip.py

@@ -56,6 +56,7 @@ class TestGradientClip(unittest.TestCase):
         self.BATCH_SIZE = 2
         reader = fake_imdb_reader(self.word_dict_len, self.BATCH_SIZE * 100)
         self.train_data = paddle.batch(reader, batch_size=self.BATCH_SIZE)
+        self.clip_gradient = lambda x: None
         self.init()

     def init(self):
@@ -67,9 +68,6 @@ class TestGradientClip(unittest.TestCase):
             places.append(fluid.CUDAPlace(0))
         return places

-    def clip_gradient(self, params_grads):
-        pass
-
     def check_clip_result(self, out, out_clip):
         pass
@@ -132,7 +130,6 @@ class TestGradientClip(unittest.TestCase):
         data = next(self.train_data())
         val = exe.run(prog, feed=feeder.feed(data), fetch_list=[cost])[0]
         self.assertEqual((1, ), val.shape)
-        print(val)
         self.assertFalse(np.isnan(val))

     def backward_and_optimize(self, cost):
@@ -143,11 +140,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip):
     def init(self):
         self.clip_norm = 0.2

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         global_norm = 0
         for v in out:
@@ -179,7 +171,6 @@ class TestGradientClipByGlobalNorm(TestGradientClip):
     def test_new_gradient_clip(self):
         def func(params_grads):
             clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=self.clip_norm)
-            print(clip)
             return clip(params_grads)

         self.clip_gradient = func
@@ -236,11 +227,6 @@ class TestGradientClipByNorm(TestGradientClip):
     def init(self):
         self.clip_norm = 0.2

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         for u, v in zip(out, out_clip):
             norm = np.sqrt(np.sum(np.power(u, 2)))
@@ -253,6 +239,11 @@ class TestGradientClipByNorm(TestGradientClip):
     # test whether the ouput is right when use grad_clip
     def test_gradient_clip(self):
+        def func(params_grads):
+            clip = fluid.clip.GradientClipByNorm(clip_norm=self.clip_norm)
+            return clip(params_grads)
+
+        self.clip_gradient = func
         self.check_gradient_clip(fluid.CPUPlace())

     # if grad is None or not need clip
@@ -280,11 +271,6 @@ class TestGradientClipByValue(TestGradientClip):
         self.max = 0.2
         self.min = 0.1

-    def clip_gradient(self, params_grads):
-        clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)
-        print(clip)
-        return clip(params_grads)
-
     def check_clip_result(self, out, out_clip):
         for i, v in enumerate(out):
             out[i] = np.clip(v, self.min, self.max)
@@ -297,6 +283,11 @@ class TestGradientClipByValue(TestGradientClip):
     # test whether the ouput is right when use grad_clip
     def test_gradient_clip(self):
+        def func(params_grads):
+            clip = fluid.clip.GradientClipByValue(max=self.max, min=self.min)
+            return clip(params_grads)
+
+        self.clip_gradient = func
         self.check_gradient_clip(fluid.CPUPlace())

     # if grad is None or not need clip
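Taken together, the test-file hunks replace the clip_gradient methods that each subclass used to override (along with their print calls) with a local func closure assigned to self.clip_gradient inside the test itself; the base class keeps calling the same attribute. A minimal sketch of that pattern follows, using hypothetical names (Base, ByValue, run_case are illustrative, not Paddle code); only the assign-a-closure idea mirrors the diff.

# Minimal sketch of the refactor above, with hypothetical names.
import unittest


class Base(unittest.TestCase):
    def setUp(self):
        # Default no-op, like `self.clip_gradient = lambda x: None` above.
        self.clip_gradient = lambda params_grads: None

    def run_case(self, params_grads):
        # The base class calls whatever callable the test installed.
        return self.clip_gradient(params_grads)


class ByValue(Base):
    def test_clip(self):
        def func(params_grads):
            # Stand-in for fluid.clip.GradientClipByValue(max=0.2, min=0.1).
            return [(p, min(max(g, 0.1), 0.2)) for p, g in params_grads]

        self.clip_gradient = func
        self.assertEqual([("w", 0.2)], self.run_case([("w", 0.5)]))


if __name__ == "__main__":
    unittest.main()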
python/paddle/tensor/to_string.py

@@ -101,14 +101,14 @@ def _to_sumary(var):
         return var
     elif len(var.shape) == 1:
         if var.shape[0] > 2 * edgeitems:
-            return np.concatenate([var[:edgeitems], var[-edgeitems:]])
+            return np.concatenate([var[:edgeitems], var[(-1 * edgeitems):]])
         else:
             return var
     else:
         # recursively handle all dimensions
         if var.shape[0] > 2 * edgeitems:
             begin = [x for x in var[:edgeitems]]
-            end = [x for x in var[-edgeitems:]]
+            end = [x for x in var[(-1 * edgeitems):]]
             return np.stack([_to_sumary(x) for x in (begin + end)])
         else:
             return np.stack([_to_sumary(x) for x in var])
@@ -162,10 +162,10 @@ def _format_tensor(var, sumary, indent=0, max_width=0, signed=False):
     if sumary and var.shape[0] > 2 * edgeitems:
         items = [
             _format_item(item, max_width, signed)
-            for item in list(var)[:DEFAULT_PRINT_OPTIONS.edgeitems]
+            for item in list(var)[:edgeitems]
         ] + ['...'] + [
             _format_item(item, max_width, signed)
-            for item in list(var)[-DEFAULT_PRINT_OPTIONS.edgeitems:]
+            for item in list(var)[(-1 * edgeitems):]
         ]
     else:
         items = [
@@ -181,7 +181,7 @@ def _format_tensor(var, sumary, indent=0, max_width=0, signed=False):
             for x in var[:edgeitems]
         ] + ['...'] + [
             _format_tensor(x, sumary, indent + 1, max_width, signed)
-            for x in var[-edgeitems:]
+            for x in var[(-1 * edgeitems):]
        ]
     else:
         vars = [
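In to_string.py the commit mostly respells negative slices: var[-edgeitems:] and var[(-1 * edgeitems):] select the same trailing elements, so behavior is unchanged (presumably the parenthesized form is what the iscan checker accepts; the one substantive tweak is using the local edgeitems instead of DEFAULT_PRINT_OPTIONS.edgeitems). A quick self-contained check of the slice equivalence, assuming only NumPy:

# Sanity check that both slice spellings pick the same trailing elements.
import numpy as np

edgeitems = 3
var = np.arange(10)

old = var[-edgeitems:]           # original spelling
new = var[(-1 * edgeitems):]     # spelling introduced by this commit

assert np.array_equal(old, new)  # both give [7 8 9]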