Commit 86fb87ac authored by chenguowei01

change to iters

Parent f087abe1
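This commit switches the training entry points from an epoch-based schedule (`num_epochs`, `save_interval_epochs`, `log_steps`) to an iteration-based one (`iters`, `save_interval_iters`, `log_iters`), and drops the `decay_step = num_epochs * num_steps_each_epoch` conversion in favor of passing `args.iters` to the learning-rate schedule directly. For readers migrating existing commands, here is a minimal sketch of the old epoch-to-iteration conversion, reconstructed from the removed lines (the dataset size and batch size below are illustrative, not from the commit):

# Sketch of the conversion this commit removes: under the old flags,
# the decay horizon was num_epochs * (iterations per epoch).
def epochs_to_iters(num_epochs, dataset_size, batch_size, nranks=1):
    # Integer division may drop a partial final batch, as noted by the
    # "todo, may less one than len(loader)" comment in the source.
    iters_each_epoch = dataset_size // (batch_size * nranks)
    return num_epochs * iters_each_epoch

# Illustrative numbers only: the old default of 100 epochs on a
# 2975-image dataset with batch_size=2 on a single device.
print(epochs_to_iters(100, 2975, 2))  # 148700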
@@ -61,11 +61,11 @@ def parse_args():
         default=[512, 512],
         type=int)
     parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
+        '--iters',
+        dest='iters',
+        help='iters for training',
         type=int,
-        default=100)
+        default=10000)
     parser.add_argument(
         '--batch_size',
         dest='batch_size',
@@ -91,9 +91,9 @@ def parse_args():
         type=str,
         default=None)
     parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
+        '--save_interval_iters',
+        dest='save_interval_iters',
+        help='The interval iters for save a model snapshot',
         type=int,
         default=5)
     parser.add_argument(
@@ -114,9 +114,9 @@ def parse_args():
         help='Eval while training',
         action='store_true')
     parser.add_argument(
-        '--log_steps',
-        dest='log_steps',
-        help='Display logging information at every log_steps',
+        '--log_iters',
+        dest='log_iters',
+        help='Display logging information at every log_iters',
         default=10,
         type=int)
     parser.add_argument(
@@ -134,6 +134,7 @@ def main(args):
     info = '\n'.join(['\n', format('Environment Information', '-^48s')] + info +
                      ['-' * 48])
     logger.info(info)
+
     places = fluid.CUDAPlace(ParallelEnv().dev_id) \
         if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
         else fluid.CPUPlace()
@@ -160,7 +161,7 @@ def main(args):
     eval_dataset = None
     if args.do_eval:
         eval_transforms = T.Compose(
-            [T.Padding((2049, 1025)),
+            [T.Resize(args.input_size),
              T.Normalize()])
         eval_dataset = dataset(
             dataset_root=args.dataset_root,
@@ -175,11 +176,10 @@ def main(args):
     # Creat optimizer
     # todo, may less one than len(loader)
-    num_steps_each_epoch = len(train_dataset) // (
+    num_iters_each_epoch = len(train_dataset) // (
         args.batch_size * ParallelEnv().nranks)
-    decay_step = args.num_epochs * num_steps_each_epoch
     lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
+        args.learning_rate, args.iters, end_learning_rate=0, power=0.9)
     optimizer = fluid.optimizer.Momentum(
         lr_decay,
         momentum=0.9,
@@ -193,12 +193,12 @@ def main(args):
         eval_dataset=eval_dataset,
         optimizer=optimizer,
         save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
+        iters=args.iters,
         batch_size=args.batch_size,
         pretrained_model=args.pretrained_model,
         resume_model=args.resume_model,
-        save_interval_epochs=args.save_interval_epochs,
-        log_steps=args.log_steps,
+        save_interval_iters=args.save_interval_iters,
+        log_iters=args.log_iters,
         num_classes=train_dataset.num_classes,
         num_workers=args.num_workers,
         use_vdl=args.use_vdl)
...
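Note that after this change, `fluid.layers.polynomial_decay` anneals over `args.iters` directly, so the schedule length no longer depends on dataset size. As a reference, here is a minimal pure-Python sketch of the standard polynomial-decay formula this call implements (ignoring Paddle's optional `cycle` mode; this is not the Paddle source):

def polynomial_decay(lr0, step, decay_steps, end_lr=0.0, power=0.9):
    """Standard polynomial decay: lr0 at step 0, end_lr at decay_steps."""
    step = min(step, decay_steps)  # clamp once the horizon is reached
    return (lr0 - end_lr) * (1 - step / decay_steps) ** power + end_lr

# With the new default --iters 10000 and, say, an initial LR of 0.01:
print(polynomial_decay(0.01, 0, 10000))      # 0.01
print(polynomial_decay(0.01, 5000, 10000))   # ~0.00536
print(polynomial_decay(0.01, 10000, 10000))  # 0.0

The commit applies the same renaming to a second training script (its file path is not preserved in this capture); that diff follows.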
@@ -61,11 +61,11 @@ def parse_args():
         default=[512, 512],
         type=int)
     parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
+        '--iters',
+        dest='iters',
+        help='iters for training',
         type=int,
-        default=100)
+        default=10000)
     parser.add_argument(
         '--batch_size',
         dest='batch_size',
@@ -91,9 +91,9 @@ def parse_args():
         type=str,
         default=None)
     parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
+        '--save_interval_iters',
+        dest='save_interval_iters',
+        help='The interval iters for save a model snapshot',
         type=int,
         default=5)
     parser.add_argument(
@@ -114,9 +114,9 @@ def parse_args():
         help='Eval while training',
         action='store_true')
     parser.add_argument(
-        '--log_steps',
-        dest='log_steps',
-        help='Display logging information at every log_steps',
+        '--log_iters',
+        dest='log_iters',
+        help='Display logging information at every log_iters',
         default=10,
         type=int)
     parser.add_argument(
@@ -134,6 +134,7 @@ def main(args):
     info = '\n'.join(['\n', format('Environment Information', '-^48s')] + info +
                      ['-' * 48])
     logger.info(info)
+
     places = fluid.CUDAPlace(ParallelEnv().dev_id) \
         if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
         else fluid.CPUPlace()
@@ -173,11 +174,10 @@ def main(args):
     # Creat optimizer
     # todo, may less one than len(loader)
-    num_steps_each_epoch = len(train_dataset) // (
+    num_iters_each_epoch = len(train_dataset) // (
         args.batch_size * ParallelEnv().nranks)
-    decay_step = args.num_epochs * num_steps_each_epoch
     lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
+        args.learning_rate, args.iters, end_learning_rate=0, power=0.9)
     optimizer = fluid.optimizer.Momentum(
         lr_decay,
         momentum=0.9,
@@ -191,12 +191,12 @@ def main(args):
         eval_dataset=eval_dataset,
         optimizer=optimizer,
         save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
+        iters=args.iters,
         batch_size=args.batch_size,
         pretrained_model=args.pretrained_model,
         resume_model=args.resume_model,
-        save_interval_epochs=args.save_interval_epochs,
-        log_steps=args.log_steps,
+        save_interval_iters=args.save_interval_iters,
+        log_iters=args.log_iters,
         num_classes=train_dataset.num_classes,
         num_workers=args.num_workers,
         use_vdl=args.use_vdl)
...
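To illustrate the new command-line surface, here is a self-contained argparse sketch restricted to the three renamed flags, with their defaults taken from the diff (the other flags of train.py are omitted, and the sample argv is hypothetical):

import argparse

parser = argparse.ArgumentParser(description='iteration-based training flags')
parser.add_argument('--iters', dest='iters',
                    help='iters for training', type=int, default=10000)
parser.add_argument('--save_interval_iters', dest='save_interval_iters',
                    help='The interval iters for save a model snapshot',
                    type=int, default=5)
parser.add_argument('--log_iters', dest='log_iters',
                    help='Display logging information at every log_iters',
                    type=int, default=10)

# Hypothetical invocation: override only the training length.
args = parser.parse_args(['--iters', '20000'])
print(args.iters, args.save_interval_iters, args.log_iters)  # 20000 5 10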