PaddlePaddle/PaddleDetection · commit 45c9f2a6
Authored on Mar 11, 2019 by minqiyang
Parent: a424ab49

Fix bugs in piecewise decay

test=develop
Showing 5 changed files with 184 additions and 99 deletions (+184 −99)
python/paddle/fluid/imperative/__init__.py                          +4   −0
python/paddle/fluid/imperative/learning_rate_scheduler.py           +14  −15
python/paddle/fluid/optimizer.py                                    +15  −4
python/paddle/fluid/tests/unittests/test_imperative_mnist.py        +133 −69
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py    +18  −11
python/paddle/fluid/imperative/__init__.py

```diff
@@ -26,8 +26,12 @@ from .nn import *
 from . import tracer
 from .tracer import *
 
+from . import learning_rate_scheduler
+from .learning_rate_scheduler import *
+
 __all__ = []
 __all__ += layers.__all__
 __all__ += base.__all__
 __all__ += nn.__all__
 __all__ += tracer.__all__
+__all__ += learning_rate_scheduler.__all__
```
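With the scheduler module re-exported at package level, `PiecewiseDecay` (the only name left in the scheduler's `__all__` after the next file's change) becomes reachable from the `fluid.imperative` namespace. A hedged sketch of the resulting user-facing spelling, with illustrative boundary/value numbers:

```python
import paddle.fluid as fluid

# Sketch only: the positional signature (boundaries, values, begin, step=1)
# is taken from the learning_rate_scheduler.py diff below.
lr_decay = fluid.imperative.PiecewiseDecay(
    boundaries=[100, 200], values=[0.1, 0.05, 0.01], begin=0)
```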
python/paddle/fluid/imperative/learning_rate_scheduler.py

```diff
@@ -14,13 +14,9 @@
 from __future__ import print_function
 
-from .. import layers
 from .. import unique_name
 
-__all__ = [
-    'ExponentialDecay', 'NaturalExpDecay', 'InverseTimeDecay',
-    'PolynomialDecay', 'PiecewiseDecay', 'NoamDecay'
-]
+__all__ = ['PiecewiseDecay']
 
 
 class LearningRateDecay(object):
@@ -28,32 +24,35 @@ class LearningRateDecay(object):
     Base class of learning rate decay
     """
 
-    def __init__(self, step, dtype='float32'):
-        self.step = step
+    def __init__(self, begin=0, step=1, dtype='float32'):
+        self.step_num = begin
+        self.step_size = step
         self.dtype = dtype
 
     def __call__(self):
         lr = self.step()
         if isinstance(lr, float):
             lr = self._create_lr_var(lr)
-        self.step += 1
+        self.step_num += self.step_size
         return lr
 
-    def create_lr_var(lr):
+    def create_lr_var(self, lr):
+        from .. import layers
         lr = layers.create_global_var(
             name=unique_name.generate("learning_rate"),
             shape=[1],
             value=float(lr),
             dtype=self.dtype,
             persistable=True)
+        return lr
 
     def step(self):
         raise NotImplementedError()
 
 
-class PiecewiseDecay(object):
-    def __init__(self, boundaries, values, step, dtype='float32'):
-        super(PiecewiseDecay, self).__init__(step, dtype)
+class PiecewiseDecay(LearningRateDecay):
+    def __init__(self, boundaries, values, begin, step=1, dtype='float32'):
+        super(PiecewiseDecay, self).__init__(begin, step, dtype)
         self.boundaries = boundaries
         self.values = values
@@ -62,7 +61,7 @@ class PiecewiseDecay(object):
         self.vars.append(self.create_lr_var(value))
 
     def step(self):
-        for i in range(len(boundaries)):
-            if self.step <= boundaries[i]:
+        for i in range(len(self.boundaries)):
+            if self.step_num < self.boundaries[i]:
                 return self.vars[i]
-        return self.vars[len(values) - 1]
+        return self.vars[len(self.values) - 1]
```
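The scheduler changes fix several distinct bugs at once: the counter attribute `self.step` shadowed the `step()` method (so `self.step()` raised `TypeError: 'int' object is not callable` on the first `__call__`), `create_lr_var` was missing both its `self` parameter and its `return`, `PiecewiseDecay` subclassed `object` instead of `LearningRateDecay` (so its `super().__init__` call could not resolve), and the lookup read the undefined bare names `boundaries` and `values`. A Paddle-free sketch of the corrected control flow (illustrative classes returning plain floats instead of framework variables):

```python
class LearningRateDecaySketch(object):
    def __init__(self, begin=0, step=1):
        self.step_num = begin   # renamed counter: no longer shadows step()
        self.step_size = step

    def __call__(self):
        lr = self.step()
        self.step_num += self.step_size
        return lr

    def step(self):
        raise NotImplementedError()


class PiecewiseDecaySketch(LearningRateDecaySketch):
    def __init__(self, boundaries, values, begin, step=1):
        super(PiecewiseDecaySketch, self).__init__(begin, step)
        self.boundaries = boundaries
        self.values = values

    def step(self):
        # Fixed lookup: self.boundaries / self.values instead of undefined
        # bare names, and a strict < so step_num == boundary already moves
        # on to the next value.
        for i in range(len(self.boundaries)):
            if self.step_num < self.boundaries[i]:
                return self.values[i]
        return self.values[len(self.values) - 1]


decay = PiecewiseDecaySketch([3, 6], [0.1, 0.01, 0.001], begin=0)
print([decay() for _ in range(8)])
# [0.1, 0.1, 0.1, 0.01, 0.01, 0.01, 0.001, 0.001]
```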
python/paddle/fluid/optimizer.py

```diff
@@ -31,6 +31,7 @@ from .layer_helper import LayerHelper
 from .layers import ops
 from .regularizer import append_regularization_ops
 from .imperative import base as imperative_base
+from .imperative.learning_rate_scheduler import LearningRateDecay
 
 __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',
@@ -50,9 +51,19 @@ class Optimizer(object):
     """
 
     def __init__(self, learning_rate, regularization=None, name=None):
-        if not isinstance(learning_rate, float) and \
-                not isinstance(learning_rate, framework.Variable):
-            raise TypeError("learning rate should be float or Variable")
+        if framework._in_imperative_mode():
+            if not isinstance(learning_rate, float) and \
+                    not isinstance(learning_rate, LearningRateDecay):
+                raise TypeError(
+                    "learning rate should be float or LearningRateDecay, got %s here"
+                    % type(learning_rate))
+        else:
+            if not isinstance(learning_rate, float) and \
+                    not isinstance(learning_rate, framework.Variable):
+                raise TypeError(
+                    "learning rate should be float or Variable, got %s here" %
+                    type(learning_rate))
         self._name = name
         self.regularization = regularization
         self._learning_rate = learning_rate
@@ -83,7 +94,7 @@ class Optimizer(object):
             dtype='float32' if self._dtype is None else self._dtype,
             persistable=True)
         # get learning rate Variable from LearningRateDecay
-        elif isinstance(self._learning_rate, imperative.LearningRateDecay):
+        elif isinstance(self._learning_rate, LearningRateDecay):
             self._learning_rate_map[framework.default_main_program(
             )] = self._learning_rate()
         else:
```
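The optimizer now routes its learning-rate type check by execution mode: imperative programs may pass a float or a `LearningRateDecay`, static-graph programs a float or a `Variable`, and each branch reports the offending type. How the pieces are meant to combine in imperative mode, as a hedged sketch grounded in this commit's API (module paths as shown in the diffs; not verified against later Paddle releases):

```python
import paddle.fluid as fluid
from paddle.fluid.optimizer import SGDOptimizer

with fluid.imperative.guard():
    # A LearningRateDecay is now a legal learning_rate in imperative mode;
    # the schedule advances by step_size each time the optimizer evaluates it.
    decay = fluid.imperative.PiecewiseDecay(
        boundaries=[100, 200], values=[0.1, 0.05, 0.01], begin=0)
    sgd = SGDOptimizer(learning_rate=decay)

    # Passing a framework Variable here would now raise:
    # TypeError: learning rate should be float or LearningRateDecay, ...
```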
python/paddle/fluid/tests/unittests/test_imperative_mnist.py

```diff
@@ -23,70 +23,130 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.nn import FC
+from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
 from paddle.fluid.imperative.base import to_variable
 from test_imperative_base import new_program_scope
 
 
-class MLP(fluid.imperative.Layer):
-    def __init__(self, param_attr=None, bias_attr=None):
-        self._fc1 = FC(10)
-        self._fc2 = FC(10)
+class SimpleImgConvPool(fluid.imperative.Layer):
+    def __init__(self,
+                 name_scope,
+                 num_channels,
+                 num_filters,
+                 filter_size,
+                 pool_size,
+                 pool_stride,
+                 pool_padding=0,
+                 pool_type='max',
+                 global_pooling=False,
+                 conv_stride=1,
+                 conv_padding=0,
+                 conv_dilation=1,
+                 conv_groups=1,
+                 act=None,
+                 use_cudnn=False,
+                 param_attr=None,
+                 bias_attr=None):
+        super(SimpleImgConvPool, self).__init__(name_scope)
+
+        self._conv2d = Conv2D(
+            self.full_name(),
+            num_channels=num_channels,
+            num_filters=num_filters,
+            filter_size=filter_size,
+            stride=conv_stride,
+            padding=conv_padding,
+            dilation=conv_dilation,
+            groups=conv_groups,
+            param_attr=None,
+            bias_attr=None,
+            use_cudnn=use_cudnn)
+
+        self._pool2d = Pool2D(
+            self.full_name(),
+            pool_size=pool_size,
+            pool_type=pool_type,
+            pool_stride=pool_stride,
+            pool_padding=pool_padding,
+            global_pooling=global_pooling,
+            use_cudnn=use_cudnn)
 
     def forward(self, inputs):
-        y = self._fc1(inputs)
-        y = self._fc2(y)
-        return y
+        x = self._conv2d(inputs)
+        x = self._pool2d(x)
+        return x
 
 
-class TestImperativeOptimizerBase(unittest.TestCase):
-    def setUp(self):
-        self.batch_num = 2
+class MNIST(fluid.imperative.Layer):
+    def __init__(self, name_scope):
+        super(MNIST, self).__init__(name_scope)
 
-    def get_optimizer(self):
-        self.optimizer = SGDOptimizer(learning_rate=1e-3)
+        self._simple_img_conv_pool_1 = SimpleImgConvPool(
+            self.full_name(), 1, 20, 5, 2, 2, act="relu")
 
-    def test_optimizer_float32(self):
-        seed = 90
+        self._simple_img_conv_pool_2 = SimpleImgConvPool(
+            self.full_name(), 20, 50, 5, 2, 2, act="relu")
+
+        pool_2_shape = 50 * 4 * 4
+        SIZE = 10
+        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
+        self._fc = FC(self.full_name(),
+                      10,
+                      param_attr=fluid.param_attr.ParamAttr(
+                          initializer=fluid.initializer.NormalInitializer(
+                              loc=0.0, scale=scale)),
+                      act="softmax")
+
+    def forward(self, inputs):
+        x = self._simple_img_conv_pool_1(inputs)
+        x = self._simple_img_conv_pool_2(x)
+        x = self._fc(x)
+        return x
+
+
+class TestImperativeMnist(unittest.TestCase):
+    def test_mnist_float32(self):
+        seed = 90
+        epoch_num = 1
 
         with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
-            mlp = MLP()
-            self.get_optimizer()
+            mnist = MNIST("mnist")
+            sgd = SGDOptimizer(learning_rate=1e-3)
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
 
             dy_param_init_value = {}
-            for batch_id, data in enumerate(train_reader()):
-                if batch_id >= self.batch_num:
-                    break
-
-                dy_x_data = np.array([x[0].reshape(1, 28, 28)
-                                      for x in data]).astype('float32')
-                y_data = np.array(
-                    [x[1] for x in data]).astype('int64').reshape(128, 1)
-
-                img = to_variable(dy_x_data)
-                label = to_variable(y_data)
-                label._stop_gradient = True
-
-                cost = mlp(img)
-                avg_loss = fluid.layers.reduce_mean(cost)
-                dy_out = avg_loss._numpy()
-
-                if batch_id == 0:
-                    for param in fluid.default_main_program().global_block(
-                    ).all_parameters():
-                        dy_param_init_value[param.name] = param._numpy()
-
-                avg_loss._backward()
-                self.optimizer.minimize(avg_loss)
-                mlp.clear_gradients()
-                dy_param_value = {}
-                for param in fluid.default_main_program().global_block(
-                ).all_parameters():
-                    dy_param_value[param.name] = param._numpy()
+            for epoch in range(epoch_num):
+                for batch_id, data in enumerate(train_reader()):
+                    dy_x_data = np.array(
+                        [x[0].reshape(1, 28, 28)
+                         for x in data]).astype('float32')
+                    y_data = np.array(
+                        [x[1] for x in data]).astype('int64').reshape(128, 1)
 
+                    img = to_variable(dy_x_data)
+                    label = to_variable(y_data)
+                    label._stop_gradient = True
+
+                    cost = mnist(img)
+                    loss = fluid.layers.cross_entropy(cost, label)
+                    avg_loss = fluid.layers.mean(loss)
+
+                    dy_out = avg_loss._numpy()
+
+                    if epoch == 0 and batch_id == 0:
+                        for param in mnist.parameters():
+                            dy_param_init_value[param.name] = param._numpy()
+
+                    avg_loss._backward()
+                    sgd.minimize(avg_loss)
+                    mnist.clear_gradients()
+
+                    dy_param_value = {}
+                    for param in mnist.parameters():
+                        dy_param_value[param.name] = param._numpy()
 
         with new_program_scope():
             fluid.default_startup_program().random_seed = seed
@@ -95,8 +155,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
-            mnist = MNIST()
-            self.get_optimizer()
+            mnist = MNIST("mnist")
+            sgd = SGDOptimizer(learning_rate=1e-3)
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
 
@@ -104,8 +164,9 @@ class TestImperativeOptimizerBase(unittest.TestCase):
                 name='pixel', shape=[1, 28, 28], dtype='float32')
             label = fluid.layers.data(name='label', shape=[1], dtype='int64')
             cost = mnist(img)
-            avg_loss = fluid.layers.reduce_mean(cost)
-            self.optimizer.minimize(avg_loss)
+            loss = fluid.layers.cross_entropy(cost, label)
+            avg_loss = fluid.layers.mean(loss)
+            sgd.minimize(avg_loss)
 
             # initialize params and fetch them
             static_param_init_value = {}
@@ -119,26 +180,29 @@ class TestImperativeOptimizerBase(unittest.TestCase):
             for i in range(len(static_param_name_list)):
                 static_param_init_value[static_param_name_list[i]] = out[i]
 
-            for batch_id, data in enumerate(train_reader()):
-                if batch_id >= self.batch_num:
-                    break
-
-                static_x_data = np.array([x[0].reshape(1, 28, 28)
-                                          for x in data]).astype('float32')
-                y_data = np.array(
-                    [x[1] for x in data]).astype('int64').reshape([128, 1])
-
-                fetch_list = [avg_loss.name]
-                fetch_list.extend(static_param_name_list)
-                out = exe.run(fluid.default_main_program(),
-                              feed={"pixel": static_x_data,
-                                    "label": y_data},
-                              fetch_list=fetch_list)
-
-                static_param_value = {}
-                static_out = out[0]
-                for i in range(1, len(out)):
-                    static_param_value[static_param_name_list[i - 1]] = out[i]
+            for epoch in range(epoch_num):
+                for batch_id, data in enumerate(train_reader()):
+                    static_x_data = np.array(
+                        [x[0].reshape(1, 28, 28)
+                         for x in data]).astype('float32')
+                    y_data = np.array(
+                        [x[1] for x in data]).astype('int64').reshape([128, 1])
+
+                    fetch_list = [avg_loss.name]
+                    fetch_list.extend(static_param_name_list)
+                    out = exe.run(
+                        fluid.default_main_program(),
+                        feed={"pixel": static_x_data,
+                              "label": y_data},
+                        fetch_list=fetch_list)
+
+                    static_param_value = {}
+                    static_out = out[0]
+                    for i in range(1, len(out)):
+                        static_param_value[static_param_name_list[i - 1]] = out[i]
 
+        self.assertTrue(np.allclose(dy_x_data.all(), static_x_data.all()))
+
         for key, value in six.iteritems(static_param_init_value):
             self.assertTrue(np.allclose(value, dy_param_init_value[key]))
```
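One non-obvious constant in the rewritten test is `pool_2_shape = 50 * 4 * 4`. The 4×4 follows from the layer parameters passed to `SimpleImgConvPool`; a quick check, assuming 28×28 MNIST inputs, unpadded 5×5 convolutions, and 2×2 pools with stride 2 (the values configured above):

```python
# Spatial size after an unpadded convolution; pools here halve the size.
def conv_out(size, filter_size, stride=1, padding=0):
    return (size + 2 * padding - filter_size) // stride + 1

size = 28                      # MNIST input
size = conv_out(size, 5) // 2  # conv1 (5x5) -> 24, pool1 -> 12
size = conv_out(size, 5) // 2  # conv2 (5x5) -> 8,  pool2 -> 4
print(50 * size * size)        # 800 == 50 * 4 * 4, the FC layer's input width
```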
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py

```diff
@@ -29,9 +29,11 @@ from test_imperative_base import new_program_scope
 
 
 class MLP(fluid.imperative.Layer):
-    def __init__(self, param_attr=None, bias_attr=None):
-        self._fc1 = FC(10)
-        self._fc2 = FC(10)
+    def __init__(self, name_scope, param_attr=None, bias_attr=None):
+        super(MLP, self).__init__(name_scope)
+        self._fc1 = FC(self.full_name(), 10)
+        self._fc2 = FC(self.full_name(), 10)
 
     def forward(self, inputs):
         y = self._fc1(inputs)
@@ -41,10 +43,15 @@ class MLP(fluid.imperative.Layer):
 
 class TestImperativeOptimizerBase(unittest.TestCase):
     def setUp(self):
-        self.batch_num = 2
+        self.batch_num = 10
 
     def get_optimizer(self):
-        self.optimizer = SGDOptimizer(learning_rate=1e-3)
+        bd = [3, 6, 9]
+        self.optimizer = SGDOptimizer(
+            learning_rate=fluid.layers.piecewise_decay(
+                boundaries=bd,
+                values=[0.1 * (0.1**i) for i in range(len(bd) + 1)]))
+        return self.optimizer
 
     def test_optimizer_float32(self):
         seed = 90
@@ -52,8 +59,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
-            mlp = MLP()
-            self.get_optimizer()
+            mlp = MLP('mlp')
+            optimizer = self.get_optimizer()
 
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
@@ -81,7 +88,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
                     dy_param_init_value[param.name] = param._numpy()
 
                 avg_loss._backward()
-                self.optimizer.minimize(avg_loss)
+                optimizer.minimize(avg_loss)
                 mlp.clear_gradients()
                 dy_param_value = {}
                 for param in fluid.default_main_program().global_block(
@@ -95,8 +102,8 @@ class TestImperativeOptimizerBase(unittest.TestCase):
             exe = fluid.Executor(fluid.CPUPlace(
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
-            mnist = MNIST()
-            self.get_optimizer()
+            mnist = MLP('mlp')
+            optimizer = self.get_optimizer()
 
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128, drop_last=True)
@@ -105,7 +112,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
             label = fluid.layers.data(name='label', shape=[1], dtype='int64')
             cost = mnist(img)
             avg_loss = fluid.layers.reduce_mean(cost)
-            self.optimizer.minimize(avg_loss)
+            optimizer.minimize(avg_loss)
 
             # initialize params and fetch them
             static_param_init_value = {}
```
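For reference, the schedule that `get_optimizer()` now builds: with `bd = [3, 6, 9]` the comprehension produces `len(bd) + 1` values, one per interval, which is presumably why `setUp()` raises `batch_num` to 10 so the run crosses every boundary at least once.

```python
bd = [3, 6, 9]
values = [0.1 * (0.1**i) for i in range(len(bd) + 1)]
print(values)  # approximately [0.1, 0.01, 0.001, 0.0001]
# steps 0-2 use 0.1, 3-5 use 0.01, 6-8 use 0.001, and 9+ use 0.0001
```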