Commit 0ec53f98
Authored on Jan 29, 2019 by minqiyang

Support imperative learning rate decay in optimizer

Parent: fd286f35
Showing 4 changed files with 291 additions and 115 deletions:

  python/paddle/fluid/layers/learning_rate_scheduler.py              +28   -23
  python/paddle/fluid/optimizer.py                                   +31   -12
  python/paddle/fluid/tests/unittests/test_imperative_mnist.py      +207    -0
  python/paddle/fluid/tests/unittests/test_imperative_optimizer.py   +25   -80
python/paddle/fluid/layers/learning_rate_scheduler.py

```diff
@@ -28,6 +28,7 @@ from . import ops
 from . import tensor
 from ..initializer import init_on_cpu
 from ..framework import default_main_program, Parameter, unique_name, name_scope
+from ..imperative import base as imperative_base
 
 __all__ = [
     'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
@@ -277,34 +278,38 @@ def piecewise_decay(boundaries, values):
     if len(values) - len(boundaries) != 1:
         raise ValueError("len(values) - len(boundaries) should be 1")
 
-    global_step = _decay_step_counter()
+    if imperative_base.enabled():
+        decay = imperative.PiecewiseDecay(boundaries, values, 0)
+        return decay
+    else:
+        global_step = _decay_step_counter()
 
-    lr = tensor.create_global_var(
-        shape=[1],
-        value=0.0,
-        dtype='float32',
-        persistable=True,
-        name="learning_rate")
+        lr = tensor.create_global_var(
+            shape=[1],
+            value=0.0,
+            dtype='float32',
+            persistable=True,
+            name="learning_rate")
 
-    with control_flow.Switch() as switch:
-        for i in range(len(boundaries)):
-            boundary_val = tensor.fill_constant(
-                shape=[1],
-                dtype='float32',
-                value=float(boundaries[i]),
-                force_cpu=True)
-            value_var = tensor.fill_constant(
-                shape=[1], dtype='float32', value=float(values[i]))
-            with switch.case(global_step < boundary_val):
-                tensor.assign(value_var, lr)
-        last_value_var = tensor.fill_constant(
-            shape=[1],
-            dtype='float32',
-            value=float(values[len(values) - 1]))
-        with switch.default():
-            tensor.assign(last_value_var, lr)
+        with control_flow.Switch() as switch:
+            for i in range(len(boundaries)):
+                boundary_val = tensor.fill_constant(
+                    shape=[1],
+                    dtype='float32',
+                    value=float(boundaries[i]),
+                    force_cpu=True)
+                value_var = tensor.fill_constant(
+                    shape=[1], dtype='float32', value=float(values[i]))
+                with switch.case(global_step < boundary_val):
+                    tensor.assign(value_var, lr)
+            last_value_var = tensor.fill_constant(
+                shape=[1],
+                dtype='float32',
+                value=float(values[len(values) - 1]))
+            with switch.default():
+                tensor.assign(last_value_var, lr)
 
-    return lr
+        return lr
 
 
 def append_LARS(params_grads, learning_rate, weight_decay):
```
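With this change, `piecewise_decay` branches on the execution mode: under imperative mode it returns an `imperative.PiecewiseDecay` object, while graph mode still builds the `Switch`-based learning-rate Variable. A minimal usage sketch of the two paths (standard fluid API plus the behavior shown in the diff above; not part of the commit itself):

```python
import paddle.fluid as fluid

boundaries = [10000, 20000]
values = [1.0, 0.5, 0.1]  # len(values) - len(boundaries) must be 1

# Graph (declarative) mode: builds a Switch block and returns a
# persistable "learning_rate" Variable updated by the step counter.
lr_var = fluid.layers.piecewise_decay(boundaries, values)

# Imperative mode: the same call now returns a PiecewiseDecay object
# (constructed as PiecewiseDecay(boundaries, values, 0) per the diff),
# which the optimizer invokes each step to fetch the current rate.
with fluid.imperative.guard():
    lr_decay = fluid.layers.piecewise_decay(boundaries, values)
```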
python/paddle/fluid/optimizer.py

```diff
@@ -72,24 +72,43 @@ class Optimizer(object):
         self.helper = None
 
     def _create_global_learning_rate(self):
-        lr = self._global_learning_rate()
+        if imperative_base.enabled():
+            # create learning rate Variable
+            if isinstance(self._learning_rate, float):
+                self._learning_rate_map[framework.default_main_program(
+                )] = layers.create_global_var(
+                    name=unique_name.generate("learning_rate"),
+                    shape=[1],
+                    value=float(self._learning_rate),
+                    dtype='float32' if self._dtype is None else self._dtype,
+                    persistable=True)
+            # get learning rate Variable from LearningRateDecay
+            elif isinstance(self._learning_rate, imperative.LearningRateDecay):
+                self._learning_rate_map[framework.default_main_program(
+                )] = self._learning_rate()
+            else:
+                raise TypeError(
+                    "optimizer's learning rate must be float or LearningRateDecay"
+                )
+        else:
+            lr = self._global_learning_rate()
 
-        if isinstance(lr, framework.Variable):
-            return
-        else:
-            if not isinstance(self._learning_rate, float):
-                raise TypeError(
-                    "learning rate variable is create outside optimizer,"
-                    "can not create new learning rate variable for new program")
+            if isinstance(lr, framework.Variable):
+                return
+            else:
+                if not isinstance(self._learning_rate, float):
+                    raise TypeError(
+                        "learning rate variable is create outside optimizer,"
+                        "can not create new learning rate variable for new program"
+                    )
 
-        # create learning rate in the current main program
-        self._learning_rate_map[framework.default_main_program(
-        )] = layers.create_global_var(
-            name=unique_name.generate("learning_rate"),
-            shape=[1],
-            value=float(self._learning_rate),
-            dtype='float32' if self._dtype is None else self._dtype,
-            persistable=True)
+            # create learning rate in the current main program
+            self._learning_rate_map[framework.default_main_program(
+            )] = layers.create_global_var(
+                name=unique_name.generate("learning_rate"),
+                shape=[1],
+                value=float(self._learning_rate),
+                dtype='float32' if self._dtype is None else self._dtype,
+                persistable=True)
 
     def _global_learning_rate(self, program=None):
         """
```
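On the optimizer side, `_create_global_learning_rate` now accepts two forms of `self._learning_rate` in imperative mode and rejects everything else with a `TypeError`. A hedged sketch of what that means for callers (assuming, as the diff implies, that the optimizer constructor passes a decay object through unchanged):

```python
import paddle.fluid as fluid
from paddle.fluid.optimizer import SGDOptimizer

with fluid.imperative.guard():
    # Form 1: a plain float. The optimizer wraps it into a persistable
    # global "learning_rate" Variable via layers.create_global_var().
    sgd_const = SGDOptimizer(learning_rate=1e-3)

    # Form 2: a LearningRateDecay object, e.g. the PiecewiseDecay returned
    # by fluid.layers.piecewise_decay() in imperative mode. The optimizer
    # calls it (self._learning_rate()) to obtain the per-step rate Variable.
    decay = fluid.layers.piecewise_decay([100, 200], [1e-2, 1e-3, 1e-4])
    sgd_decay = SGDOptimizer(learning_rate=decay)
```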
python/paddle/fluid/tests/unittests/test_imperative_mnist.py (new file, mode 100644)

```python
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import unittest
import numpy as np
import six

import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
from paddle.fluid.imperative.base import to_variable
from test_imperative_base import new_program_scope


class SimpleImgConvPool(fluid.imperative.Layer):
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 pool_size,
                 pool_stride,
                 pool_padding=0,
                 pool_type='max',
                 global_pooling=False,
                 conv_stride=1,
                 conv_padding=0,
                 conv_dilation=1,
                 conv_groups=1,
                 act=None,
                 use_cudnn=False,
                 param_attr=None,
                 bias_attr=None):
        super(SimpleImgConvPool, self).__init__()

        self._conv2d = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=conv_stride,
            padding=conv_padding,
            dilation=conv_dilation,
            groups=conv_groups,
            param_attr=None,
            bias_attr=None,
            use_cudnn=use_cudnn)

        self._pool2d = Pool2D(
            pool_size=pool_size,
            pool_type=pool_type,
            pool_stride=pool_stride,
            pool_padding=pool_padding,
            global_pooling=global_pooling,
            use_cudnn=use_cudnn)

    def forward(self, inputs):
        x = self._conv2d(inputs)
        x = self._pool2d(x)
        return x


class MNIST(fluid.imperative.Layer):
    def __init__(self, param_attr=None, bias_attr=None):
        super(MNIST, self).__init__()

        self._simple_img_conv_pool_1 = SimpleImgConvPool(
            1, 20, 5, 2, 2, act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(
            20, 50, 5, 2, 2, act="relu")

        pool_2_shape = 50 * 8 * 8
        SIZE = 10
        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
        self._fc = FC(10,
                      param_attr=fluid.param_attr.ParamAttr(
                          initializer=fluid.initializer.NormalInitializer(
                              loc=0.0, scale=scale)))

    def forward(self, inputs):
        x = self._simple_img_conv_pool_1(inputs)
        x = self._simple_img_conv_pool_2(x)
        x = self._fc(x)
        return x


class TestImperativeMnist(unittest.TestCase):
    def test_mnist_cpu_float32(self):
        seed = 90

        with fluid.imperative.guard():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed

            mnist = MNIST()
            sgd = SGDOptimizer(learning_rate=1e-3)
            train_reader = paddle.batch(
                paddle.dataset.mnist.train(), batch_size=128)

            dy_param_init_value = {}
            for batch_id, data in enumerate(train_reader()):
                if batch_id >= 2:
                    break

                x_data = np.array(
                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
                    128, 1)

                img = to_variable(x_data)
                label = to_variable(y_data)
                label._stop_gradient = True

                cost = mnist(img)
                loss = fluid.layers.cross_entropy(cost, label)
                avg_loss = fluid.layers.mean(loss)
                dy_out = avg_loss._numpy()

                if batch_id == 0:
                    for param in fluid.default_main_program().global_block(
                    ).all_parameters():
                        dy_param_init_value[param.name] = param._numpy()

                avg_loss._backward()
                sgd.minimize(avg_loss)
                dy_param_value = {}
                for param in fluid.default_main_program().global_block(
                ).all_parameters():
                    dy_param_value[param.name] = param._numpy()

        with new_program_scope():
            fluid.default_startup_program().random_seed = seed
            fluid.default_main_program().random_seed = seed

            exe = fluid.Executor(fluid.CPUPlace(
            ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))

            mnist = MNIST()
            sgd = SGDOptimizer(learning_rate=1e-3)
            train_reader = paddle.batch(
                paddle.dataset.mnist.train(), batch_size=128)

            img = fluid.layers.data(
                name='pixel', shape=[1, 28, 28], dtype='float32')
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
            cost = mnist(img)
            loss = fluid.layers.cross_entropy(cost, label)
            avg_loss = fluid.layers.mean(loss)
            sgd.minimize(avg_loss)

            # initialize params and fetch them
            static_param_init_value = {}
            static_param_name_list = []
            for param in fluid.default_startup_program().global_block(
            ).all_parameters():
                static_param_name_list.append(param.name)

            out = exe.run(fluid.default_startup_program(),
                          fetch_list=static_param_name_list)

            for i in range(len(static_param_name_list)):
                static_param_init_value[static_param_name_list[i]] = out[i]

            for batch_id, data in enumerate(train_reader()):
                if batch_id >= 2:
                    break

                x_data = np.array(
                    [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data]).astype('int64').reshape(
                    [128, 1])

                fetch_list = [avg_loss.name]
                fetch_list.extend(static_param_name_list)
                out = exe.run(fluid.default_main_program(),
                              feed={"pixel": x_data,
                                    "label": y_data},
                              fetch_list=fetch_list)

                static_param_value = {}
                static_out = out[0]
                for i in range(1, len(out)):
                    static_param_value[static_param_name_list[i - 1]] = out[i]

        for key, value in six.iteritems(static_param_init_value):
            self.assertTrue(
                np.allclose(value.all(), dy_param_init_value[key].all()))

        self.assertTrue(np.allclose(static_out.all(), dy_out.all()))

        for key, value in six.iteritems(static_param_value):
            self.assertTrue(np.allclose(value.all(), dy_param_value[key].all()))


if __name__ == '__main__':
    unittest.main()
```
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py

```diff
@@ -21,98 +21,44 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
+from paddle.fluid.imperative.nn import FC
 from paddle.fluid.imperative.base import to_variable
 from test_imperative_base import new_program_scope
 
 
-class SimpleImgConvPool(fluid.imperative.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 pool_size,
-                 pool_stride,
-                 pool_padding=0,
-                 pool_type='max',
-                 global_pooling=False,
-                 conv_stride=1,
-                 conv_padding=0,
-                 conv_dilation=1,
-                 conv_groups=1,
-                 act=None,
-                 use_cudnn=False,
-                 param_attr=None,
-                 bias_attr=None):
-        super(SimpleImgConvPool, self).__init__()
-
-        self._conv2d = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
-            stride=conv_stride,
-            padding=conv_padding,
-            dilation=conv_dilation,
-            groups=conv_groups,
-            param_attr=None,
-            bias_attr=None,
-            use_cudnn=use_cudnn)
-
-        self._pool2d = Pool2D(
-            pool_size=pool_size,
-            pool_type=pool_type,
-            pool_stride=pool_stride,
-            pool_padding=pool_padding,
-            global_pooling=global_pooling,
-            use_cudnn=use_cudnn)
-
-    def forward(self, inputs):
-        x = self._conv2d(inputs)
-        x = self._pool2d(x)
-        return x
-
-
-class MNIST(fluid.imperative.Layer):
+class MLP(fluid.imperative.Layer):
     def __init__(self, param_attr=None, bias_attr=None):
-        super(MNIST, self).__init__()
-
-        self._simple_img_conv_pool_1 = SimpleImgConvPool(
-            1, 20, 5, 2, 2, act="relu")
-
-        self._simple_img_conv_pool_2 = SimpleImgConvPool(
-            20, 50, 5, 2, 2, act="relu")
-
-        pool_2_shape = 50 * 8 * 8
-        SIZE = 10
-        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
-        self._fc = FC(10,
-                      param_attr=fluid.param_attr.ParamAttr(
-                          initializer=fluid.initializer.NormalInitializer(
-                              loc=0.0, scale=scale)))
+        super(MLP, self).__init__()
+        self._fc1 = FC(10)
+        self._fc2 = FC(10)
 
     def forward(self, inputs):
-        x = self._simple_img_conv_pool_1(inputs)
-        x = self._simple_img_conv_pool_2(x)
-        x = self._fc(x)
-        return x
+        y = self._fc1(inputs)
+        y = self._fc2(y)
+        return y
 
 
-class TestImperativeMnist(unittest.TestCase):
-    def test_mnist_cpu_float32(self):
+class TestImperativeOptimizerBase(unittest.TestCase):
+    def setUp(self):
+        self.batch_num = 2
+
+    def get_optimizer(self):
+        self.optimizer = SGDOptimizer(learning_rate=1e-3)
+
+    def test_optimizer_float32(self):
         seed = 90
         with fluid.imperative.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
-            mnist = MNIST()
-            sgd = SGDOptimizer(learning_rate=1e-3)
+            mlp = MLP()
+            self.get_optimizer()
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128)
 
             dy_param_init_value = {}
             for batch_id, data in enumerate(train_reader()):
-                if batch_id >= 2:
+                if batch_id >= self.batch_num:
                     break
 
                 x_data = np.array(
@@ -124,9 +70,8 @@ class TestImperativeMnist(unittest.TestCase):
                 label = to_variable(y_data)
                 label._stop_gradient = True
 
-                cost = mnist(img)
-                loss = fluid.layers.cross_entropy(cost, label)
-                avg_loss = fluid.layers.mean(loss)
+                cost = mlp(img)
+                avg_loss = fluid.layers.reduce_mean(cost)
                 dy_out = avg_loss._numpy()
 
                 if batch_id == 0:
@@ -135,7 +80,8 @@ class TestImperativeMnist(unittest.TestCase):
                     dy_param_init_value[param.name] = param._numpy()
 
                 avg_loss._backward()
-                sgd.minimize(avg_loss)
+                self.optimizer.minimize(avg_loss)
+
                 dy_param_value = {}
                 for param in fluid.default_main_program().global_block(
                 ).all_parameters():
@@ -149,7 +95,7 @@ class TestImperativeMnist(unittest.TestCase):
             ) if not core.is_compiled_with_cuda() else fluid.CUDAPlace(0))
 
             mnist = MNIST()
-            sgd = SGDOptimizer(learning_rate=1e-3)
+            self.get_optimizer()
             train_reader = paddle.batch(
                 paddle.dataset.mnist.train(), batch_size=128)
 
@@ -157,9 +103,8 @@ class TestImperativeMnist(unittest.TestCase):
                 name='pixel', shape=[1, 28, 28], dtype='float32')
             label = fluid.layers.data(name='label', shape=[1], dtype='int64')
             cost = mnist(img)
-            loss = fluid.layers.cross_entropy(cost, label)
-            avg_loss = fluid.layers.mean(loss)
-            sgd.minimize(avg_loss)
+            avg_loss = fluid.layers.reduce_mean(cost)
+            self.optimizer.minimize(avg_loss)
 
             # initialize params and fetch them
             static_param_init_value = {}
@@ -175,7 +120,7 @@ class TestImperativeMnist(unittest.TestCase):
             static_param_init_value[static_param_name_list[i]] = out[i]
 
             for batch_id, data in enumerate(train_reader()):
-                if batch_id >= 2:
+                if batch_id >= self.batch_num:
                     break
 
                 x_data = np.array(
```
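Neither `imperative.PiecewiseDecay` nor `imperative.LearningRateDecay` is defined in this diff; the call sites above only show that a decay object is constructed from `(boundaries, values, begin_step)` and later invoked as a callable by the optimizer. A hypothetical sketch of the contract those call sites imply (names and bodies are illustrative, not the committed implementation):

```python
class LearningRateDecay(object):
    """Hypothetical base class inferred from the call sites above."""

    def __init__(self, begin=0):
        self.step_num = begin  # current global step

    def __call__(self):
        # Optimizer._create_global_learning_rate() invokes the decay
        # object; the real implementation would wrap the result in a
        # learning-rate Variable rather than return a raw float.
        return self.step()

    def step(self):
        raise NotImplementedError()


class PiecewiseDecay(LearningRateDecay):
    """Mirrors layers.piecewise_decay: values[i] applies while
    step_num < boundaries[i]; the last value applies afterwards."""

    def __init__(self, boundaries, values, begin):
        super(PiecewiseDecay, self).__init__(begin)
        self.boundaries = boundaries
        self.values = values

    def step(self):
        for i, boundary in enumerate(self.boundaries):
            if self.step_num < boundary:
                return self.values[i]
        return self.values[-1]
```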