PaddlePaddle / models
Commit b0239e3a (unverified)
Authored May 28, 2020 by Chen Weihang; committed via GitHub on May 28, 2020
change some model using data loader (#4595)
Parent: edf1a872
Showing 7 changed files with 585 additions and 599 deletions (+585, -599).
dygraph/mnist/train.py                +36   -25
dygraph/mobilenet/reader.py            +1    -1
dygraph/mobilenet/train.py             +2    -6
dygraph/mobilenet/utils/utility.py     +2   -20
dygraph/ptb_lm/ptb_dy.py             +474  -461
dygraph/resnet/train.py               +38   -47
dygraph/se_resnet/train.py            +32   -39
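The change is the same in each training script: stop assembling batches by hand with numpy and to_variable, and instead feed a decorated sample reader through fluid.io.DataLoader. Below is a minimal, self-contained sketch of that pattern for MNIST, assuming the PaddlePaddle 1.8-era fluid APIs this repository targets; `place` and `BATCH_SIZE` mirror the names used in the scripts.

import numpy as np
import paddle
import paddle.fluid as fluid

BATCH_SIZE = 64


def reader_decorator(reader):
    # Convert each raw sample into fixed-shape (image, label) numpy arrays.
    def __reader__():
        for item in reader():
            img = np.array(item[0]).astype('float32').reshape(1, 28, 28)
            label = np.array(item[1]).astype('int64').reshape(1)
            yield img, label

    return __reader__


place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
    # Batch the decorated reader; drop_last avoids a short final batch.
    train_reader = paddle.batch(
        reader_decorator(paddle.dataset.mnist.train()),
        batch_size=BATCH_SIZE,
        drop_last=True)

    # The DataLoader prefetches batches and, in dygraph mode, yields
    # variables that can be unpacked directly as `img, label = data`.
    train_loader = fluid.io.DataLoader.from_generator(capacity=10)
    train_loader.set_sample_list_generator(train_reader, places=place)

    for batch_id, data in enumerate(train_loader()):
        img, label = data
        print(img.shape, label.shape)  # [64, 1, 28, 28], [64, 1]
        break

This reader_decorator / DataLoader pairing is exactly what the diffs below add to the MNIST, ResNet, and SE-ResNeXt scripts.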
dygraph/mnist/train.py

@@ -99,11 +99,13 @@ class MNIST(fluid.dygraph.Layer):
         self.pool_2_shape = 50 * 4 * 4
         SIZE = 10
         scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5
         self._fc = Linear(
             self.pool_2_shape,
             10,
             param_attr=fluid.param_attr.ParamAttr(
                 initializer=fluid.initializer.NormalInitializer(
                     loc=0.0, scale=scale)),
             act="softmax")

     def forward(self, inputs, label=None):
         x = self._simple_img_conv_pool_1(inputs)

@@ -117,17 +119,21 @@ class MNIST(fluid.dygraph.Layer):
         return x


+def reader_decorator(reader):
+    def __reader__():
+        for item in reader():
+            img = np.array(item[0]).astype('float32').reshape(1, 28, 28)
+            label = np.array(item[1]).astype('int64').reshape(1)
+            yield img, label
+
+    return __reader__
+
+
 def test_mnist(reader, model, batch_size):
     acc_set = []
     avg_loss_set = []
     for batch_id, data in enumerate(reader()):
-        dy_x_data = np.array(
-            [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
-        y_data = np.array(
-            [x[1] for x in data]).astype('int64').reshape(batch_size, 1)
-        img = to_variable(dy_x_data)
-        label = to_variable(y_data)
+        img, label = data
         label.stop_gradient = True
         prediction, acc = model(img, label)
         loss = fluid.layers.cross_entropy(input=prediction, label=label)

@@ -187,28 +193,33 @@ def train_mnist(args):
     if args.use_data_parallel:
         strategy = fluid.dygraph.parallel.prepare_context()
     mnist = MNIST()
     adam = AdamOptimizer(
         learning_rate=0.001, parameter_list=mnist.parameters())
     if args.use_data_parallel:
         mnist = fluid.dygraph.parallel.DataParallel(mnist, strategy)

     train_reader = paddle.batch(
-        paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True)
+        reader_decorator(paddle.dataset.mnist.train()),
+        batch_size=BATCH_SIZE,
+        drop_last=True)
     if args.use_data_parallel:
         train_reader = fluid.contrib.reader.distributed_batch_reader(
             train_reader)
     test_reader = paddle.batch(
-        paddle.dataset.mnist.test(), batch_size=BATCH_SIZE, drop_last=True)
+        reader_decorator(paddle.dataset.mnist.test()),
+        batch_size=BATCH_SIZE,
+        drop_last=True)

+    train_loader = fluid.io.DataLoader.from_generator(capacity=10)
+    train_loader.set_sample_list_generator(train_reader, places=place)
+
+    test_loader = fluid.io.DataLoader.from_generator(capacity=10)
+    test_loader.set_sample_list_generator(test_reader, places=place)
+
     for epoch in range(epoch_num):
-        for batch_id, data in enumerate(train_reader()):
-            dy_x_data = np.array(
-                [x[0].reshape(1, 28, 28) for x in data]).astype('float32')
-            y_data = np.array(
-                [x[1] for x in data]).astype('int64').reshape(-1, 1)
-
-            img = to_variable(dy_x_data)
-            label = to_variable(y_data)
+        for batch_id, data in enumerate(train_loader()):
+            img, label = data
             label.stop_gradient = True

             cost, acc = mnist(img, label)

@@ -231,7 +242,7 @@ def train_mnist(args):
                           epoch, batch_id, avg_loss.numpy()))

         mnist.eval()
-        test_cost, test_acc = test_mnist(test_reader, mnist, BATCH_SIZE)
+        test_cost, test_acc = test_mnist(test_loader, mnist, BATCH_SIZE)
         mnist.train()
         if args.ce:
             print("kpis\ttest_acc\t%s" % test_acc)

@@ -244,7 +255,7 @@ def train_mnist(args):
             fluid.dygraph.parallel.Env().local_rank == 0)
         if save_parameters:
             fluid.save_dygraph(mnist.state_dict(), "save_temp")
             print("checkpoint saved")
     inference_mnist()
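test_mnist now receives the DataLoader instead of the raw batch reader, so every item it yields is already a pair of dygraph variables on `place`. A trimmed sketch of the updated evaluation loop (the metric bookkeeping here is abbreviated relative to dygraph/mnist/train.py):

import numpy as np
import paddle.fluid as fluid


def test_mnist(reader, model, batch_size):
    acc_set = []
    avg_loss_set = []
    for batch_id, data in enumerate(reader()):
        img, label = data  # already tensors; no np.array()/to_variable() needed
        label.stop_gradient = True
        prediction, acc = model(img, label)
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        avg_loss = fluid.layers.mean(loss)
        acc_set.append(float(acc.numpy()))
        avg_loss_set.append(float(avg_loss.numpy()))
    # Mean loss and accuracy over the evaluation set.
    return np.array(avg_loss_set).mean(), np.array(acc_set).mean()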
dygraph/mobilenet/reader.py

@@ -239,7 +239,7 @@ def process_image(sample, settings, mode, color_jitter, rotate):
     img /= img_std

     if mode == 'train' or mode == 'val':
-        return (img, sample[1])
+        return (img, [sample[1]])
     elif mode == 'test':
         return (img, )
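This one-line reader change wraps the train/val label in a single-element list. Presumably this is so that, once set_sample_list_generator batches the samples, the label field comes out with shape [batch_size, 1] rather than [batch_size], which is what the label reshaping removed from dygraph/mobilenet/train.py used to do by hand. A tiny illustration of the difference:

import numpy as np

# return (img, sample[1])   -> scalar label per sample
labels_old = [7, 3, 1]
# return (img, [sample[1]]) -> length-1 list per sample
labels_new = [[7], [3], [1]]

print(np.array(labels_old).shape)  # (3,)   -> batched label shape [batch_size]
print(np.array(labels_new).shape)  # (3, 1) -> batched label shape [batch_size, 1]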
dygraph/mobilenet/train.py

@@ -116,10 +116,8 @@ def train_mobilenet():
             optimizer.set_dict(opti_dict)

     # 3. reader
-    train_data_loader, train_data = utility.create_data_loader(
-        is_train=True, args=args)
-    test_data_loader, test_data = utility.create_data_loader(
-        is_train=False, args=args)
+    train_data_loader = utility.create_data_loader(is_train=True, args=args)
+    test_data_loader = utility.create_data_loader(is_train=False, args=args)
     num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
     imagenet_reader = reader.ImageNetReader(seed=0, place_num=place_num)
     train_reader = imagenet_reader.train(settings=args)

@@ -145,8 +143,6 @@ def train_mobilenet():
                 t1 = time.time()

             if args.max_iter and total_batch_num == args.max_iter:
                 return
-            label = to_variable(label.numpy().astype('int64').reshape(
-                int(args.batch_size // place_num), 1))
             t_start = time.time()

             # 4.1.1 call net()
dygraph/mobilenet/utils/utility.py

@@ -309,32 +309,14 @@ def create_data_loader(is_train, args):
     Returns:
         data_loader and the input data of net,
     """
-    image_shape = [int(m) for m in args.image_shape.split(",")]
-
-    feed_image = fluid.data(
-        name="feed_image",
-        shape=[None] + image_shape,
-        dtype="float32",
-        lod_level=0)
-
-    feed_label = fluid.data(
-        name="feed_label", shape=[None, 1], dtype="int64", lod_level=0)
-    feed_y_a = fluid.data(
-        name="feed_y_a", shape=[None, 1], dtype="int64", lod_level=0)
-
     if is_train and args.use_mixup:
-        feed_y_b = fluid.data(
-            name="feed_y_b", shape=[None, 1], dtype="int64", lod_level=0)
-        feed_lam = fluid.data(
-            name="feed_lam", shape=[None, 1], dtype="float32", lod_level=0)
-
         data_loader = fluid.io.DataLoader.from_generator(
             capacity=64,
             use_double_buffer=True,
             iterable=True,
             return_list=True)
-        return data_loader, [feed_image, feed_y_a, feed_y_b, feed_lam]
+        return data_loader
     else:
         data_loader = fluid.io.DataLoader.from_generator(
             capacity=64,

@@ -342,7 +324,7 @@ def create_data_loader(is_train, args):
             iterable=True,
             return_list=True)
-        return data_loader, [feed_image, feed_label]
+        return data_loader


 def print_info(pass_id, batch_id, print_step, metrics, time_info, info_mode):
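With return_list=True the loader already yields list-format batches that the dygraph loop can consume directly, so create_data_loader no longer needs to declare fluid.data feed variables or return them; its callers in dygraph/mobilenet/train.py now receive only the loader. A minimal sketch of the simplified helper under that assumption (the repository keeps separate, otherwise identical, branches for the mixup and non-mixup cases):

import paddle.fluid as fluid


def create_data_loader(is_train, args):
    """Return a DataLoader for training or evaluation (sketch)."""
    # is_train/args select the mixup branch in the real helper; omitted here.
    # return_list=True makes the loader yield [img, label] batches,
    # which is the format the dygraph training loop unpacks.
    data_loader = fluid.io.DataLoader.from_generator(
        capacity=64,
        use_double_buffer=True,
        iterable=True,
        return_list=True)
    return data_loader


# Call sites, as in dygraph/mobilenet/train.py after this commit:
# train_data_loader = utility.create_data_loader(is_train=True, args=args)
# test_data_loader = utility.create_data_loader(is_train=False, args=args)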
dygraph/ptb_lm/ptb_dy.py

(This diff is collapsed and not shown: +474, -461.)
dygraph/resnet/train.py

@@ -81,7 +81,6 @@ def optimizer_setting(parameter_list=None):
             boundaries=bd, values=lr),
         momentum=momentum_rate,
         regularization=fluid.regularizer.L2Decay(l2_decay))
     return optimizer

@@ -116,11 +115,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
 class BottleneckBlock(fluid.dygraph.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 stride,
-                 shortcut=True):
+    def __init__(self, num_channels, num_filters, stride, shortcut=True):
         super(BottleneckBlock, self).__init__()

         self.conv0 = ConvBNLayer(

@@ -186,16 +181,9 @@ class ResNet(fluid.dygraph.Layer):
         num_filters = [64, 128, 256, 512]

         self.conv = ConvBNLayer(
             num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
         self.pool2d_max = Pool2D(
             pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

         self.bottleneck_block_list = []
         for block in range(len(depth)):

@@ -220,11 +208,12 @@ class ResNet(fluid.dygraph.Layer):
         import math
         stdv = 1.0 / math.sqrt(2048 * 1.0)

         self.out = Linear(
             self.pool2d_avg_output,
             class_dim,
             act='softmax',
             param_attr=fluid.param_attr.ParamAttr(
                 initializer=fluid.initializer.Uniform(-stdv, stdv)))

     def forward(self, inputs):
         y = self.conv(inputs)

@@ -237,6 +226,16 @@ class ResNet(fluid.dygraph.Layer):
         return y


+def reader_decorator(reader):
+    def __reader__():
+        for item in reader():
+            img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
+            label = np.array(item[1]).astype('int64').reshape(1)
+            yield img, label
+
+    return __reader__
+
+
 def eval(model, data):
     model.eval()

@@ -245,15 +244,8 @@ def eval(model, data):
     total_acc5 = 0.0
     total_sample = 0
     for batch_id, data in enumerate(data()):
-        dy_x_data = np.array(
-            [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-        if len(np.array([x[1] for x in data]).astype('int64')) != batch_size:
-            continue
-        y_data = np.array(
-            [x[1] for x in data]).astype('int64').reshape(batch_size, 1)
-
-        img = to_variable(dy_x_data)
-        label = to_variable(y_data)
+        img = data[0]
+        label = data[1]
         label.stop_gradient = True

         out = model(img)

@@ -303,13 +295,24 @@ def train_resnet():
             resnet = fluid.dygraph.parallel.DataParallel(resnet, strategy)

     train_reader = paddle.batch(
-        paddle.dataset.flowers.train(use_xmap=False), batch_size=batch_size)
+        reader_decorator(paddle.dataset.flowers.train(use_xmap=True)),
+        batch_size=batch_size,
+        drop_last=True)
     if args.use_data_parallel:
         train_reader = fluid.contrib.reader.distributed_batch_reader(
             train_reader)

     test_reader = paddle.batch(
-        paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size)
+        reader_decorator(paddle.dataset.flowers.test(use_xmap=True)),
+        batch_size=batch_size,
+        drop_last=True)

+    train_loader = fluid.io.DataLoader.from_generator(capacity=10)
+    train_loader.set_sample_list_generator(train_reader, places=place)
+
+    test_loader = fluid.io.DataLoader.from_generator(capacity=10)
+    test_loader.set_sample_list_generator(test_reader, places=place)
+
     #file_name = './model/epoch_0.npz'
     #model_data = np.load( file_name )

@@ -331,23 +334,13 @@ def train_resnet():
             print("load finished")

-        for batch_id, data in enumerate(train_reader()):
+        for batch_id, data in enumerate(train_loader()):
             #NOTE: used in benchmark
             if args.max_iter and total_batch_num == args.max_iter:
                 return
             batch_start = time.time()
-            dy_x_data = np.array(
-                [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-            if len(np.array([x[1] for x in data]).astype('int64')) != batch_size:
-                continue
-            y_data = np.array(
-                [x[1] for x in data]).astype('int64').reshape(-1, 1)
-
-            img = to_variable(dy_x_data)
-            label = to_variable(y_data)
+            img, label = data
             label.stop_gradient = True

             out = resnet(img)

@@ -390,16 +383,14 @@ def train_resnet():
                   (eop, batch_id, total_loss / total_sample, \
                    total_acc1 / total_sample, total_acc5 / total_sample))

         resnet.eval()
-        eval(resnet, test_reader)
+        eval(resnet, test_loader)

         save_parameters = (not args.use_data_parallel) or (
             args.use_data_parallel and
             fluid.dygraph.parallel.Env().local_rank == 0)
         if save_parameters:
             fluid.save_dygraph(resnet.state_dict(), 'resnet_params')


 if __name__ == '__main__':
     train_resnet()
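The old ResNet and SE-ResNeXt loops guarded against a short final batch by comparing the label count with batch_size and calling continue; passing drop_last=True to paddle.batch discards the incomplete batch before it ever reaches the DataLoader, so that check and the fixed-size reshape can be removed. A small sketch of the behavior, assuming the paddle.batch helper from PaddlePaddle 1.x:

import paddle


def sample_reader():
    # Ten dummy samples; with batch_size=4 the last batch would hold only two.
    for i in range(10):
        yield i


batched = paddle.batch(sample_reader, batch_size=4, drop_last=True)
for batch in batched():
    print(len(batch))  # prints 4 twice; the incomplete batch (8, 9) is dropped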
dygraph/se_resnet/train.py

@@ -169,8 +169,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
             act=None)

         self.scale = SqueezeExcitation(
             num_channels=num_filters * 2, reduction_ratio=reduction_ratio)

         if not shortcut:
             self.short = ConvBNLayer(

@@ -219,10 +218,7 @@ class SeResNeXt(fluid.dygraph.Layer):
                 stride=2,
                 act='relu')
             self.pool = Pool2D(
                 pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
         elif layers == 101:
             cardinality = 32
             reduction_ratio = 16

@@ -235,10 +231,7 @@ class SeResNeXt(fluid.dygraph.Layer):
                 stride=2,
                 act='relu')
             self.pool = Pool2D(
                 pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')
         elif layers == 152:
             cardinality = 64
             reduction_ratio = 16

@@ -263,10 +256,7 @@ class SeResNeXt(fluid.dygraph.Layer):
                 stride=1,
                 act='relu')
             self.pool = Pool2D(
                 pool_size=3, pool_stride=2, pool_padding=1, pool_type='max')

         self.bottleneck_block_list = []
         num_channels = 64

@@ -294,10 +284,11 @@ class SeResNeXt(fluid.dygraph.Layer):
         self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 2 * 1 * 1

         self.out = Linear(
             self.pool2d_avg_output,
             class_dim,
             param_attr=fluid.param_attr.ParamAttr(
                 initializer=fluid.initializer.Uniform(-stdv, stdv)))

     def forward(self, inputs):
         if self.layers == 50 or self.layers == 101:

@@ -318,6 +309,16 @@ class SeResNeXt(fluid.dygraph.Layer):
         return y


+def reader_decorator(reader):
+    def __reader__():
+        for item in reader():
+            img = np.array(item[0]).astype('float32').reshape(3, 224, 224)
+            label = np.array(item[1]).astype('int64').reshape(1)
+            yield img, label
+
+    return __reader__
+
+
 def eval(model, data):
     model.eval()

@@ -327,15 +328,7 @@ def eval(model, data):
     total_acc5 = 0.0
     total_sample = 0
     for batch_id, data in enumerate(data()):
-        dy_x_data = np.array(
-            [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-        if len(np.array([x[1] for x in data]).astype('int64')) != batch_size:
-            continue
-        y_data = np.array(
-            [x[1] for x in data]).astype('int64').reshape(batch_size, 1)
-
-        img = to_variable(dy_x_data)
-        label = to_variable(y_data)
+        img, label = data
         label.stop_gradient = True

         out = model(img)

@@ -389,29 +382,29 @@ def train():
             se_resnext = fluid.dygraph.parallel.DataParallel(se_resnext,
                                                              strategy)

     train_reader = paddle.batch(
-        paddle.dataset.flowers.train(use_xmap=False),
+        reader_decorator(paddle.dataset.flowers.train(use_xmap=False)),
         batch_size=batch_size,
         drop_last=True)
     if args.use_data_parallel:
         train_reader = fluid.contrib.reader.distributed_batch_reader(
             train_reader)

     test_reader = paddle.batch(
-        paddle.dataset.flowers.test(use_xmap=False), batch_size=32)
+        reader_decorator(paddle.dataset.flowers.test(use_xmap=False)),
+        batch_size=32)

+    train_loader = fluid.io.DataLoader.from_generator(capacity=10)
+    train_loader.set_sample_list_generator(train_reader, places=place)
+
+    test_loader = fluid.io.DataLoader.from_generator(capacity=10)
+    test_loader.set_sample_list_generator(test_reader, places=place)
+
     for epoch_id in range(epoch_num):
         total_loss = 0.0
         total_acc1 = 0.0
         total_acc5 = 0.0
         total_sample = 0
-        for batch_id, data in enumerate(train_reader()):
-            dy_x_data = np.array(
-                [x[0].reshape(3, 224, 224) for x in data]).astype('float32')
-            y_data = np.array(
-                [x[1] for x in data]).astype('int64').reshape(batch_size, 1)
-
-            img = to_variable(dy_x_data)
-            label = to_variable(y_data)
+        for batch_id, data in enumerate(train_loader()):
+            img, label = data
             label.stop_gradient = True

             out = se_resnext(img)

@@ -454,7 +447,7 @@ def train():
                   (epoch_id, batch_id, total_loss / total_sample, \
                    total_acc1 / total_sample, total_acc5 / total_sample))

         se_resnext.eval()
-        eval(se_resnext, test_reader)
+        eval(se_resnext, test_loader)
         se_resnext.train()