PaddlePaddle / Paddle

Commit 6737226f (unverified)
Authored March 14, 2023 by zhouweiwei2014; committed via GitHub on March 14, 2023.
[Zero-Dim] correct some code to adapt to 0D Tensor (#51562)
Parent: bb9eb20f
Showing 18 changed files with 93 additions and 91 deletions.
Changed files:

python/paddle/fluid/dygraph/learning_rate_scheduler.py  +8  -13
python/paddle/fluid/layers/control_flow.py  +2  -2
python/paddle/fluid/optimizer.py  +3  -3
python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py  +11  -11
python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py  +16  -16
python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py  +2  -2
python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py  +1  -1
python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py  +2  -2
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py  +2  -2
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py  +1  -1
python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py  +9  -9
python/paddle/fluid/tests/unittests/gradient_checker.py  +1  -4
python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py  +18  -2
python/paddle/nn/functional/pooling.py  +1  -1
python/paddle/optimizer/lr.py  +10  -13
python/paddle/optimizer/optimizer.py  +2  -2
python/paddle/tensor/math.py  +3  -6
python/paddle/vision/transforms/functional_tensor.py  +1  -1
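Every file below applies the same migration: reductions such as paddle.mean now return a 0D Tensor (shape [] rather than [1]), and a 0D numpy array cannot be indexed with [0]. Scalar extraction therefore moves to float(tensor), int(tensor), or tensor.numpy().item(), each of which accepts both 0D and shape-[1] values. A minimal sketch of the behavior difference, not part of the commit itself (assuming a Paddle build with 0D Tensor support, e.g. 2.5+):

import paddle

loss = paddle.mean(paddle.rand([4, 4]))  # reduction yields a 0D Tensor, shape []
print(loss.shape)                        # []

# Old pattern: indexing the 0D numpy view raises IndexError
# loss.numpy()[0]                        # IndexError: too many indices for array

# New patterns used throughout this commit; both also accept shape [1]
print(float(loss))                       # Python float
print(loss.numpy().item())               # Python scalar via numpy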
python/paddle/fluid/dygraph/learning_rate_scheduler.py

@@ -92,12 +92,10 @@ class LearningRateDecay:
                 continue
             value = self.__dict__[key]
             if isinstance(value, Variable):
-                assert value.shape == [
-                    1
-                ], "shape of Variable in state_dict must be [1] {}".format(
-                    value.shape
-                )
-                value = value.numpy()[0]
+                assert (
+                    value.size == 1
+                ), "size of Variable in state_dict must be 1"
+                value = float(value)
             state_dict[key] = value
         return state_dict

@@ -857,7 +855,7 @@ class ReduceLROnPlateau(LearningRateDecay):
                     # adjust learning rate according to avg_loss
                     reduce_lr.step(avg_loss)
                     lr = adam.current_step_lr()
-                    print("current avg_loss is %s, current lr is %s" % (avg_loss.numpy()[0], lr))
+                    print("current avg_loss is %s, current lr is %s" % (float(avg_loss), lr))
        """

@@ -979,14 +977,11 @@ class ReduceLROnPlateau(LearningRateDecay):
             )
             if self.learning_rate - new_lr > self.eps:
                 if self.verbose:
-                    old_lr = (
-                        self.learning_rate.numpy()[0]
-                        if isinstance(self.learning_rate, Variable)
-                        else self.learning_rate
-                    )
                     print(
                         'Epoch {}: reducing learning rate from {} to {}.'.format(
-                            self.epoch_num, old_lr, new_lr.numpy()[0]
+                            self.epoch_num,
+                            float(self.learning_rate),
+                            float(new_lr),
                         )
                     )
             self.learning_rate = new_lr
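For the state_dict change above, value.size == 1 accepts both a 0D Variable (shape []) and the legacy shape-[1] layout, and float(value) extracts the scalar either way. A standalone sketch of that invariant (illustrative only, assuming 0D support; not the scheduler code itself):

import paddle

for shape in ([], [1]):
    value = paddle.full(shape=shape, fill_value=3.0)
    assert value.size == 1, "size of Variable in state_dict must be 1"
    print(shape, float(value))  # both shapes yield 3.0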
python/paddle/fluid/layers/control_flow.py

@@ -1150,7 +1150,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
         )
     if in_dygraph_mode():
-        now_cond = pre_cond.numpy()[0]
+        now_cond = pre_cond.numpy().item()
         while now_cond:
             output_vars = body(*loop_vars)
             if not isinstance(output_vars, (list, tuple)):

@@ -1160,7 +1160,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
                     "body in while_loop should return the same arity "
                     "(length and structure) and types as loop_vars"
                 )
-            now_cond = cond(*output_vars).numpy()[0]
+            now_cond = cond(*output_vars).numpy().item()
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars
     else:
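The while_loop condition may now be a 0D boolean Tensor, and .numpy().item() is the shape-agnostic way to get a Python bool out of it. A plain-numpy sketch of what the change relies on:

import numpy as np

cond_0d = np.array(True)     # numpy view of a 0D bool Tensor
cond_1d = np.array([True])   # the old shape-[1] layout

print(cond_0d.item(), cond_1d.item())  # .item() works for both: True True
# cond_0d[0] would raise IndexError; only the 1-D array supports [0]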
python/paddle/fluid/optimizer.py

@@ -596,19 +596,19 @@ class Optimizer:
         """
         current_lr = self._global_learning_rate()
         if isinstance(current_lr, framework.Variable):
-            return self._global_learning_rate().numpy()[0]
+            return float(current_lr)

         if isinstance(self._learning_rate, float):
             return self._learning_rate
         elif isinstance(self._learning_rate, _LearningRateEpochDecay):
             step_lr = self._learning_rate()
-            return step_lr.numpy()[0]
+            return float(step_lr)
         else:
             step_lr = self._learning_rate.step()
             if isinstance(step_lr, (float, int)):
                 return step_lr
             else:
-                return step_lr.numpy()[0]
+                return float(step_lr)

     def _global_learning_rate(self, program=None):
         """
python/paddle/fluid/tests/unittests/dygraph_to_static/ifelse_simple_func.py

@@ -41,7 +41,7 @@ def dyfunc_empty_nonlocal(x):
 def dyfunc_with_if_else(x_v, label=None):
-    if paddle.mean(x_v).numpy()[0] > 5:
+    if paddle.mean(x_v).numpy() > 5:
         x_v = x_v - 1
     else:
         x_v = x_v + 1

@@ -61,7 +61,7 @@ def dyfunc_with_if_else2(x, col=100):
         # `x` is Tensor, `col` is not Tensor, and `col` is the return value of `true_fn` after transformed.
         # col = -1
         col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64")
-    if paddle.mean(x).numpy()[0] > x.numpy()[row][col]:
+    if paddle.mean(x).numpy() > x.numpy()[row][col]:
         y = paddle.nn.functional.relu(x)
     else:
         x_pow = paddle.pow(x, 2)

@@ -89,14 +89,14 @@ def dyfunc_with_if_else3(x):
             m = x + 2
             n = x + 3
             return q, x, y, z
-        q, x, y, z = paddle.static.nn.cond(paddle.mean(x)[0] < 5, lambda :
+        q, x, y, z = paddle.static.nn.cond(paddle.mean(x) < 5, lambda :
         paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y),
         lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q,
         x, y))
     """
     y = x + 1
     # NOTE: x_v[0] < 5 is True
-    if paddle.mean(x).numpy()[0] < 5:
+    if paddle.mean(x).numpy() < 5:
         x = x + 1
         z = x + 2
         q = x + 3

@@ -164,7 +164,7 @@ def nested_if_else(x_v):
     if y.numpy()[0] < 10:
         tmp = y * w
         y = paddle.nn.functional.relu(tmp)
-        if paddle.mean(y).numpy()[0] < batch_size:
+        if paddle.mean(y).numpy() < batch_size:
             y = paddle.abs(y)
         else:
             tmp = fluid.layers.fill_constant(

@@ -264,7 +264,7 @@ class NetWithControlFlowIf(fluid.dygraph.Layer):
         )
         # Control flow `if` statement
         fc_out = self.fc(input)
-        if paddle.mean(fc_out).numpy()[0] < 0:
+        if paddle.mean(fc_out).numpy() < 0:
             y = fc_out + self.constant_vars['bias']
             self.constant_vars['w'] = fluid.layers.fill_constant(
                 [5], dtype='float32', value=10

@@ -297,7 +297,7 @@ def if_with_and_or(x_v, label=None):
     batch_size = paddle.shape(x_v)
     if (
         x_v is not None
-        and (paddle.mean(x_v).numpy()[0] > 0 or label is not None)
+        and (paddle.mean(x_v).numpy() > 0 or label is not None)
         and batch_size[0] > 1
         and True
     ):

@@ -338,10 +338,10 @@ def if_with_and_or_3(x, y=None):
         x is not None
         and batch_size[0] > 1
         and y is not None
-        and mean_res.numpy()[0] > 0
+        and mean_res.numpy() > 0
     ):
         x = x + 1
-    if mean_res.numpy()[0] > 0 and (x is not None and batch_size[0] > 1) and y:
+    if mean_res.numpy() > 0 and (x is not None and batch_size[0] > 1) and y:
         x = x - 1
     return x

@@ -350,11 +350,11 @@ def if_with_and_or_4(x, y=None):
     batch_size = paddle.shape(x)
     mean_res = paddle.mean(x)
     if (x is not None and batch_size[0] > 1) or (
-        y is not None and mean_res.numpy()[0] > 0
+        y is not None and mean_res.numpy() > 0
     ):
         x = x + 1
     if (x is not None or batch_size[0] > 1) and (
-        y is not None or mean_res.numpy()[0] > 0
+        y is not None or mean_res.numpy() > 0
     ):
         x = x - 1
     return x
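These test functions drop the [0] because comparing a 0D numpy array against a scalar already produces a single numpy bool that `if` can branch on directly. A small sketch of that dygraph behavior (assuming 0D reductions):

import paddle

x_v = paddle.ones([3, 3]) * 10
flag = paddle.mean(x_v).numpy() > 5  # 0D ndarray vs scalar -> numpy.bool_
if flag:                             # truthy like a plain Python bool
    x_v = x_v - 1
print(bool(flag), float(x_v[0][0]))  # True 9.0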
python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py

@@ -620,19 +620,19 @@ def val_bmn(model, args):
         avg_loss = paddle.mean(loss)

         loss_data += [
-            avg_loss.numpy()[0],
-            tem_loss.numpy()[0],
-            pem_reg_loss.numpy()[0],
-            pem_cls_loss.numpy()[0],
+            float(avg_loss),
+            float(tem_loss),
+            float(pem_reg_loss),
+            float(pem_cls_loss),
         ]

         print(
             '[VALID] iter {} '.format(batch_id)
             + '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format(
-                '%f' % avg_loss.numpy()[0],
-                '%f' % tem_loss.numpy()[0],
-                '%f' % pem_reg_loss.numpy()[0],
-                '%f' % pem_cls_loss.numpy()[0],
+                '%f' % float(avg_loss),
+                '%f' % float(tem_loss),
+                '%f' % float(pem_reg_loss),
+                '%f' % float(pem_cls_loss),
             )
         )

@@ -716,10 +716,10 @@ class TestTrain(unittest.TestCase):
                 bmn.clear_gradients()
                 # log loss data to verify correctness
                 loss_data += [
-                    avg_loss.numpy()[0],
-                    tem_loss.numpy()[0],
-                    pem_reg_loss.numpy()[0],
-                    pem_cls_loss.numpy()[0],
+                    float(avg_loss),
+                    float(tem_loss),
+                    float(pem_reg_loss),
+                    float(pem_cls_loss),
                 ]
                 if args.log_interval > 0 and (

@@ -728,10 +728,10 @@ class TestTrain(unittest.TestCase):
                     print(
                         '[TRAIN] Epoch {}, iter {} '.format(epoch, batch_id)
                         + '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format(
-                            '%f' % avg_loss.numpy()[0],
-                            '%f' % tem_loss.numpy()[0],
-                            '%f' % pem_reg_loss.numpy()[0],
-                            '%f' % pem_cls_loss.numpy()[0],
+                            '%f' % float(avg_loss),
+                            '%f' % float(tem_loss),
+                            '%f' % float(pem_reg_loss),
+                            '%f' % float(pem_cls_loss),
                         )
                     )
python/paddle/fluid/tests/unittests/dygraph_to_static/test_convert_call.py

@@ -32,7 +32,7 @@ np.random.seed(SEED)
 # Use a decorator to test exception
 @paddle.jit.to_static
 def dyfunc_with_if(x_v):
-    if paddle.mean(x_v).numpy()[0] > 5:
+    if paddle.mean(x_v).numpy() > 5:
         x_v = x_v - 1
     else:
         x_v = x_v + 1

@@ -53,7 +53,7 @@ def nested_func(x_v):
 @paddle.jit.to_static
 def dyfunc_with_third_library_logging(x_v):
     logging.info('test dyfunc_with_third_library_logging')
-    if paddle.mean(x_v).numpy()[0] > 5:
+    if paddle.mean(x_v).numpy() > 5:
         x_v = x_v - 1
     else:
         x_v = x_v + 1
python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py

@@ -669,7 +669,7 @@ def train(args, to_static):
                     cyc_B_loss,
                     idt_loss_B,
                 ]
-                cur_batch_loss = [x.numpy()[0] for x in cur_batch_loss]
+                cur_batch_loss = [float(x) for x in cur_batch_loss]
                 batch_time = time.time() - s_time
                 t_time += batch_time
python/paddle/fluid/tests/unittests/dygraph_to_static/test_layer_hook.py

@@ -75,12 +75,12 @@ class TestNestLayerHook(unittest.TestCase):
         if to_static:
             paddle.jit.save(net, self.path)
-        return out.numpy()[0]
+        return float(out)

     def load_train(self):
         net = paddle.jit.load(self.path)
         out = net(self.x)
-        return out.numpy()[0]
+        return float(out)

     def test_hook(self):
         dy_out = self.train_net(to_static=False)
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist.py

@@ -219,7 +219,7 @@ class TestMNISTWithToStatic(TestMNIST):
                     avg_loss.backward()
                     adam.minimize(avg_loss)
-                    loss_data.append(avg_loss.numpy()[0])
+                    loss_data.append(float(avg_loss))
                     # save checkpoint
                     mnist.clear_gradients()
                     if batch_id % 10 == 0:

@@ -236,7 +236,7 @@ class TestMNISTWithToStatic(TestMNIST):
                     if batch_id == 50:
                         mnist.eval()
                         prediction, acc, avg_loss = mnist(img, label)
-                        loss_data.append(avg_loss.numpy()[0])
+                        loss_data.append(float(avg_loss))
                         # new save load check
                         self.check_jit_save_load(
                             mnist, [dy_x_data], [img], to_static, prediction
python/paddle/fluid/tests/unittests/dygraph_to_static/test_mnist_amp.py

@@ -86,7 +86,7 @@ class TestAMP(TestMNIST):
                     scaled.backward()
                     scaler.minimize(adam, scaled)
-                    loss_data.append(avg_loss.numpy()[0])
+                    loss_data.append(float(avg_loss))
                     # save checkpoint
                     mnist.clear_gradients()
                     if batch_id % 10 == 0:
python/paddle/fluid/tests/unittests/dygraph_to_static/test_tsm.py

@@ -346,25 +346,25 @@ def train(args, fake_data_reader, to_static):
                 optimizer.minimize(avg_loss)
                 video_model.clear_gradients()

-                total_loss += avg_loss.numpy()[0]
-                total_acc1 += acc_top1.numpy()[0]
-                total_acc5 += acc_top5.numpy()[0]
+                total_loss += float(avg_loss)
+                total_acc1 += float(acc_top1)
+                total_acc5 += float(acc_top5)
                 total_sample += 1

                 print(
                     'TRAIN Epoch {}, iter {}, loss = {}, acc1 {}, acc5 {}'.format(
                         epoch,
                         batch_id,
-                        avg_loss.numpy()[0],
-                        acc_top1.numpy()[0],
-                        acc_top5.numpy()[0],
+                        float(avg_loss),
+                        float(acc_top1),
+                        float(acc_top5),
                     )
                 )
                 ret.extend(
                     [
-                        avg_loss.numpy()[0],
-                        acc_top1.numpy()[0],
-                        acc_top5.numpy()[0],
+                        float(avg_loss),
+                        float(acc_top1),
+                        float(acc_top5),
                     ]
                 )
python/paddle/fluid/tests/unittests/gradient_checker.py

@@ -25,10 +25,7 @@ from paddle.fluid.backward import _append_grad_suffix_, _as_list
 def _product(t):
-    if isinstance(t, int):
-        return t
-    else:
-        return np.product(t)
+    return int(np.product(t))

 def dtype_to_np_dtype(dtype):
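The collapsed _product works because np.product (an alias of np.prod) maps an empty shape to 1, which is exactly the element count of a 0D tensor, and it still handles ints and non-empty shapes without the old special case. A quick check of that assumption in plain numpy:

import numpy as np

print(int(np.product(())))      # 1  -> a 0D tensor holds one element
print(int(np.product((3, 4))))  # 12
print(int(np.product(5)))       # 5  -> a bare int still works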
python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py

@@ -1546,7 +1546,19 @@ class TestSundryAPI(unittest.TestCase):
         self.assertEqual(x2.grad.numpy(), 0)

     def test_lerp(self):
-        # 0D + 0D
+        # 0D + 0D, weight is float scalar
+        x = paddle.rand([])
+        y = paddle.rand([])
+        x.stop_gradient = False
+        y.stop_gradient = False
+        out = paddle.lerp(x, y, 0.5)
+        out.backward()
+        self.assertEqual(out.shape, [])
+        self.assertEqual(x.grad.shape, [])
+        self.assertEqual(y.grad.shape, [])
+
+        # 0D + 0D, weight is 0D
         x0 = paddle.rand([])
         y0 = paddle.rand([])
         w0 = paddle.rand([])

@@ -2896,11 +2908,15 @@ class TestSundryAPIStatic(unittest.TestCase):
             [(), (), (), ()],
             [(), (64, 64), (), (64, 64)],
             [(64, 64), (), (), (64, 64)],
+            [(64, 64), (), 0.5, (64, 64)],
         ]
         for shape in shapes:
             x = paddle.rand(shape[0])
             y = paddle.rand(shape[1])
-            w = paddle.rand(shape[2])
+            if isinstance(shape[2], float):
+                w = shape[2]
+            else:
+                w = paddle.rand(shape[2])
             x.stop_gradient = False
             y.stop_gradient = False
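The new test case covers paddle.lerp with 0D endpoints and a Python-float weight; the result and the gradients stay 0D. An illustrative run outside the test harness (assuming 0D support):

import paddle

x = paddle.to_tensor(1.0)      # 0D
y = paddle.to_tensor(3.0)      # 0D
out = paddle.lerp(x, y, 0.25)  # lerp(x, y, w) = x + w * (y - x)
print(float(out))              # 1.5
print(out.shape)               # [] -> the result stays 0D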
python/paddle/nn/functional/pooling.py

@@ -706,7 +706,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
     else:
         for i, var in enumerate(output_size):
             if isinstance(var, Variable):
-                output_size[i] = var.numpy()[0]
+                output_size[i] = var.numpy().item()

         if len(output_size) == len(kernel_size) + 2:
             output_size = output_size[2:]
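Entries of output_size may be ints or size-1 Tensors of either shape, so .item() is the uniform extraction. A plain-numpy sketch of the normalization this line performs (values are hypothetical):

import numpy as np

output_size = [7, np.array(14), np.array([28])]  # int, 0D, shape [1]
output_size = [v.item() if isinstance(v, np.ndarray) else v for v in output_size]
print(output_size)  # [7, 14, 28]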
python/paddle/optimizer/lr.py

@@ -156,12 +156,10 @@ class LRScheduler:
                 continue
             value = self.__dict__[key]
             if isinstance(value, Tensor):
-                assert value.shape == [
-                    1
-                ], "shape of Tensor in state_dict must be [1] {}".format(
-                    value.shape
-                )
-                value = value.numpy()[0]
+                assert (
+                    value.size == 1
+                ), "numel of Tensor in state_dict must be 1"
+                value = float(value)
             state_dict[key] = value
         return state_dict

@@ -1236,7 +1234,7 @@ class ReduceOnPlateau(LRScheduler):
     Reduce learning rate when ``metrics`` has stopped descending. Models often benefit from reducing the learning rate
     by 2 to 10 times once model performance has no longer improvement.

-    The ``metrics`` is the one which has been pass into ``step`` , it must be 1-D Tensor with shape [1]. When ``metrics``
+    The ``metrics`` is the one which has been pass into ``step`` , it's shape must [] or [1]. When ``metrics``
     stop descending for a ``patience`` number of epochs, the learning rate will be reduced to ``learning_rate * factor`` .
     (Specially, ``mode`` can also be set to ``'max`` , in this case, when ``metrics`` stop ascending for a ``patience``
     number of epochs, the learning rate will be reduced.)

@@ -1390,7 +1388,7 @@ class ReduceOnPlateau(LRScheduler):
         Args:
             metrics (Tensor|numpy.ndarray|float): Which will be monitored to determine whether the learning rate will reduce.
                 If it stop descending for a ``patience`` number of epochs, the learning rate will reduce. If it's 'Tensor' or
-                'numpy.ndarray', its shape must be [1].
+                'numpy.ndarray', its numel must be 1.
             epoch (int, None): specify current epoch. Default: None. Auto-increment from last_epoch=-1.

         Returns:

@@ -1404,13 +1402,12 @@ class ReduceOnPlateau(LRScheduler):
         else:
             self.last_epoch = epoch

-        # loss must be float, numpy.ndarray or 1-D Tensor with shape [1]
+        # loss must be float, numpy.ndarray or 1-D Tensor with numel 1
         if isinstance(metrics, (core.eager.Tensor, numpy.ndarray)):
-            assert len(metrics.shape) == 1 and metrics.shape[0] == 1, (
-                "the metrics.shape "
-                "should be (1L,), but the current metrics.shape is {}. Maybe that "
+            assert metrics.size == 1, (
+                "the size of metrics must be 1, but the current metrics.size is {}. Maybe that "
                 "you should call paddle.mean to process it first.".format(
-                    metrics.shape
+                    metrics.size
                 )
             )
         elif not isinstance(
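Relaxing the assert to metrics.size == 1 lets ReduceOnPlateau.step accept a float, a size-1 ndarray, or a Tensor of shape [] as well as [1]. A hedged usage sketch (argument values are illustrative only; assumes 0D reductions):

import paddle

scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=0.1, patience=2)
loss = paddle.mean(paddle.rand([8]))  # 0D Tensor after the reduction
scheduler.step(loss)                  # previously required shape (1,)
print(scheduler.get_lr())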
python/paddle/optimizer/optimizer.py

@@ -437,7 +437,7 @@ class Optimizer:
                 self._learning_rate._var_name = lr_name
                 lr_var = self.helper.create_global_variable(
                     name=lr_name,
-                    shape=[1],
+                    shape=[],
                     persistable=True,
                     stop_gradient=True,
                     dtype=_lr_dtype,

@@ -465,7 +465,7 @@ class Optimizer:
                 framework.default_main_program()
             ] = paddle.static.create_global_var(
                 name=unique_name.generate("learning_rate"),
-                shape=[1],
+                shape=[],
                 value=float(self._learning_rate),
                 dtype=_lr_dtype,
                 persistable=True,
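With shape=[], the global learning-rate variable is itself created as a 0D Tensor, matching the scalar it represents. A sketch of the static-graph effect (the variable name is illustrative; assumes a build where this commit has landed):

import paddle

paddle.enable_static()
lr = paddle.static.create_global_var(
    name="learning_rate_0d", shape=[], value=0.01,
    dtype="float32", persistable=True,
)
print(lr.shape)  # expected () once the LR variable is 0D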
python/paddle/tensor/math.py

@@ -4200,15 +4200,12 @@ def lerp(x, y, weight, name=None):
             # out: [5.5, 6., 6.5, 7.]

     """
+    if isinstance(weight, float):
+        weight = paddle.full(shape=[], fill_value=weight, dtype=x.dtype)
+
     if in_dygraph_mode():
-        if isinstance(weight, float):
-            weight = paddle.to_tensor(weight, dtype=x.dtype)
-
         return _C_ops.lerp(x, y, weight)
     else:
-        if isinstance(weight, float):
-            weight = paddle.full(shape=[1], fill_value=weight, dtype=x.dtype)
-
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
         check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
         check_variable_and_dtype(
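Hoisting the float-to-Tensor conversion above the dygraph/static branch means both paths now receive the same 0D weight, built with paddle.full(shape=[]) instead of paddle.to_tensor or a shape-[1] fill. A sketch of the normalization (assuming 0D support):

import paddle

x = paddle.rand([4])
weight = 0.5
if isinstance(weight, float):
    weight = paddle.full(shape=[], fill_value=weight, dtype=x.dtype)
print(weight.shape)                                    # [] rather than [1]
print(paddle.lerp(x, paddle.rand([4]), weight).shape)  # [4]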
python/paddle/vision/transforms/functional_tensor.py

@@ -457,7 +457,7 @@ def rotate(
     nh = npos[0][1]

     if paddle.in_dynamic_mode():
-        ow, oh = int(nw.numpy()[0]), int(nh.numpy()[0])
+        ow, oh = int(nw), int(nh)
     else:
         ow, oh = nw.astype("int32"), nh.astype("int32")
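int(tensor) performs the same conversion as int(tensor.numpy()[0]) but also accepts a 0D tensor, which is why the dynamic-mode branch simplifies. A small check (assuming 0D support; values are illustrative):

import paddle

nw = paddle.to_tensor(224.0)    # 0D
nh = paddle.to_tensor([224.0])  # shape [1]
print(int(nw), int(nh))         # int() handles both: 224 224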