PaddlePaddle / PaddleSeg

Commit 86fb87ac
Authored Aug 25, 2020 by chenguowei01
Parent: f087abe1

change to iters

Showing 2 changed files with 33 additions and 33 deletions (+33, -33):

dygraph/benchmark/deeplabv3p.py  (+17, -17)
dygraph/benchmark/hrnet.py       (+16, -16)

dygraph/benchmark/deeplabv3p.py
@@ -61,11 +61,11 @@ def parse_args():
         default=[512, 512],
         type=int)
     parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
+        '--iters',
+        dest='iters',
+        help='iters for training',
         type=int,
-        default=100)
+        default=10000)
     parser.add_argument(
         '--batch_size',
         dest='batch_size',
@@ -91,9 +91,9 @@ def parse_args():
         type=str,
         default=None)
     parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
+        '--save_interval_iters',
+        dest='save_interval_iters',
+        help='The interval iters for save a model snapshot',
         type=int,
         default=5)
     parser.add_argument(
@@ -114,9 +114,9 @@ def parse_args():
         help='Eval while training',
         action='store_true')
     parser.add_argument(
-        '--log_steps',
-        dest='log_steps',
-        help='Display logging information at every log_steps',
+        '--log_iters',
+        dest='log_iters',
+        help='Display logging information at every log_iters',
         default=10,
         type=int)
     parser.add_argument(
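For reference, the three renamed command-line options read as follows after this commit. This is a minimal sketch assembled from the hunks above; the parser description and the scripts' other options are omitted.

# Sketch of the iters-related options introduced by this commit; only these
# three arguments come from the diff, everything else is trimmed away.
import argparse

parser = argparse.ArgumentParser(description='benchmark options (sketch)')
parser.add_argument(
    '--iters', dest='iters', help='iters for training', type=int, default=10000)
parser.add_argument(
    '--save_interval_iters', dest='save_interval_iters',
    help='The interval iters for save a model snapshot', type=int, default=5)
parser.add_argument(
    '--log_iters', dest='log_iters',
    help='Display logging information at every log_iters', default=10, type=int)
args = parser.parse_args(['--iters', '1000'])  # args.iters == 1000, others keep defaults
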
@@ -134,6 +134,7 @@ def main(args):
     info = '\n'.join(['\n', format('Environment Information', '-^48s')] + info +
                      ['-' * 48])
     logger.info(info)
     places = fluid.CUDAPlace(ParallelEnv().dev_id) \
         if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
         else fluid.CPUPlace()
@@ -160,7 +161,7 @@ def main(args):
     eval_dataset = None
     if args.do_eval:
         eval_transforms = T.Compose(
-            [T.Padding((2049, 1025)),
+            [T.Resize(args.input_size),
              T.Normalize()])
         eval_dataset = dataset(
             dataset_root=args.dataset_root,
@@ -175,11 +176,10 @@ def main(args):
     # Creat optimizer
     # todo, may less one than len(loader)
-    num_steps_each_epoch = len(train_dataset) // (
+    num_iters_each_epoch = len(train_dataset) // (
         args.batch_size * ParallelEnv().nranks)
-    decay_step = args.num_epochs * num_steps_each_epoch
     lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
+        args.learning_rate, args.iters, end_learning_rate=0, power=0.9)
     optimizer = fluid.optimizer.Momentum(
         lr_decay,
         momentum=0.9,
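The learning-rate schedule now decays over args.iters directly instead of over num_epochs * num_steps_each_epoch. Below is a plain-Python sketch of the resulting schedule, assuming the usual non-cyclic polynomial-decay formula that fluid.layers.polynomial_decay computes; the base learning rate is a hypothetical value, not taken from the scripts.

# decayed_lr = (lr - end_lr) * (1 - step / decay_steps) ** power + end_lr,
# with the step clamped to decay_steps (non-cyclic mode).
def poly_decay(step, base_lr, decay_steps, end_lr=0.0, power=0.9):
    step = min(step, decay_steps)
    return (base_lr - end_lr) * (1.0 - step / decay_steps) ** power + end_lr

base_lr = 0.01   # hypothetical --learning_rate
iters = 10000    # the new --iters default in this commit
for step in (0, 2500, 5000, 7500, 10000):
    print(step, round(poly_decay(step, base_lr, iters), 6))  # decays from 0.01 down to 0
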
@@ -193,12 +193,12 @@ def main(args):
         eval_dataset=eval_dataset,
         optimizer=optimizer,
         save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
+        iters=args.iters,
         batch_size=args.batch_size,
         pretrained_model=args.pretrained_model,
         resume_model=args.resume_model,
-        save_interval_epochs=args.save_interval_epochs,
-        log_steps=args.log_steps,
+        save_interval_iters=args.save_interval_iters,
+        log_iters=args.log_iters,
         num_classes=train_dataset.num_classes,
         num_workers=args.num_workers,
         use_vdl=args.use_vdl)
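The train(...) call now receives iters, save_interval_iters and log_iters. The sketch below is not PaddleSeg's train() implementation; it only illustrates the iteration-driven loop these three parameters describe: run a fixed number of iterations, log every log_iters, and snapshot every save_interval_iters.

# Minimal illustration only -- not the PaddleSeg trainer. `loader` and `step_fn`
# stand in for a data loader and a single optimization step.
def run_iters(loader, step_fn, iters, log_iters=10, save_interval_iters=5,
              save_fn=None):
    it = 0
    while it < iters:                 # the budget is counted in iterations,
        for batch in loader:          # not in passes over the dataset
            it += 1
            loss = step_fn(batch)
            if it % log_iters == 0:
                print('iter {}/{}, loss={:.4f}'.format(it, iters, loss))
            if save_fn is not None and it % save_interval_iters == 0:
                save_fn(it)           # periodic model snapshot
            if it >= iters:
                return
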
dygraph/benchmark/hrnet.py
@@ -61,11 +61,11 @@ def parse_args():
         default=[512, 512],
         type=int)
     parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
+        '--iters',
+        dest='iters',
+        help='iters for training',
         type=int,
-        default=100)
+        default=10000)
     parser.add_argument(
         '--batch_size',
         dest='batch_size',
@@ -91,9 +91,9 @@ def parse_args():
         type=str,
         default=None)
     parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
+        '--save_interval_iters',
+        dest='save_interval_iters',
+        help='The interval iters for save a model snapshot',
         type=int,
         default=5)
     parser.add_argument(
@@ -114,9 +114,9 @@ def parse_args():
         help='Eval while training',
         action='store_true')
     parser.add_argument(
-        '--log_steps',
-        dest='log_steps',
-        help='Display logging information at every log_steps',
+        '--log_iters',
+        dest='log_iters',
+        help='Display logging information at every log_iters',
         default=10,
         type=int)
     parser.add_argument(
@@ -134,6 +134,7 @@ def main(args):
     info = '\n'.join(['\n', format('Environment Information', '-^48s')] + info +
                      ['-' * 48])
     logger.info(info)
     places = fluid.CUDAPlace(ParallelEnv().dev_id) \
         if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
         else fluid.CPUPlace()
@@ -173,11 +174,10 @@ def main(args):
     # Creat optimizer
     # todo, may less one than len(loader)
-    num_steps_each_epoch = len(train_dataset) // (
+    num_iters_each_epoch = len(train_dataset) // (
         args.batch_size * ParallelEnv().nranks)
-    decay_step = args.num_epochs * num_steps_each_epoch
     lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
+        args.learning_rate, args.iters, end_learning_rate=0, power=0.9)
     optimizer = fluid.optimizer.Momentum(
         lr_decay,
         momentum=0.9,
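hrnet.py drops the same decay_step = args.num_epochs * num_steps_each_epoch computation as deeplabv3p.py. If a benchmark was previously tuned in epochs, the equivalent --iters value is exactly that product, as in the sketch below; the dataset size, batch size and rank count are hypothetical placeholders.

# Converting an epoch budget into --iters with the formula the removed code used.
num_epochs = 100                  # old --num_epochs default
train_dataset_len = 2975          # placeholder dataset size
batch_size, nranks = 2, 4         # placeholder per-card batch size / card count
num_iters_each_epoch = train_dataset_len // (batch_size * nranks)
iters = num_epochs * num_iters_each_epoch  # what decay_step used to be
print(iters)                               # pass this value as --iters
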
@@ -191,12 +191,12 @@ def main(args):
         eval_dataset=eval_dataset,
         optimizer=optimizer,
         save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
+        iters=args.iters,
         batch_size=args.batch_size,
         pretrained_model=args.pretrained_model,
         resume_model=args.resume_model,
-        save_interval_epochs=args.save_interval_epochs,
-        log_steps=args.log_steps,
+        save_interval_iters=args.save_interval_iters,
+        log_iters=args.log_iters,
         num_classes=train_dataset.num_classes,
         num_workers=args.num_workers,
         use_vdl=args.use_vdl)