diff --git a/dygraph/benchmark/deeplabv3p.py b/dygraph/benchmark/deeplabv3p.py
index 0ce683fe7aca51832be3e1aef521e99c4137cabe..92c7e8ba00ef67a51be09b877e816eab118ff7f9 100644
--- a/dygraph/benchmark/deeplabv3p.py
+++ b/dygraph/benchmark/deeplabv3p.py
@@ -61,11 +61,11 @@ def parse_args():
         default=[512, 512],
         type=int)
     parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
+        '--iters',
+        dest='iters',
+        help='iters for training',
         type=int,
-        default=100)
+        default=10000)
     parser.add_argument(
         '--batch_size',
         dest='batch_size',
@@ -91,9 +91,9 @@ def parse_args():
         type=str,
         default=None)
     parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
+        '--save_interval_iters',
+        dest='save_interval_iters',
+        help='The interval iters for save a model snapshot',
         type=int,
         default=5)
     parser.add_argument(
@@ -114,9 +114,9 @@ def parse_args():
         help='Eval while training',
         action='store_true')
     parser.add_argument(
-        '--log_steps',
-        dest='log_steps',
-        help='Display logging information at every log_steps',
+        '--log_iters',
+        dest='log_iters',
+        help='Display logging information at every log_iters',
         default=10,
         type=int)
     parser.add_argument(
@@ -134,6 +134,7 @@ def main(args):
     info = '\n'.join(['\n', format('Environment Information', '-^48s')] +
                      info + ['-' * 48])
     logger.info(info)
+
     places = fluid.CUDAPlace(ParallelEnv().dev_id) \
         if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
         else fluid.CPUPlace()
@@ -160,7 +161,7 @@ def main(args):
     eval_dataset = None
     if args.do_eval:
         eval_transforms = T.Compose(
-            [T.Padding((2049, 1025)),
+            [T.Resize(args.input_size),
              T.Normalize()])
         eval_dataset = dataset(
             dataset_root=args.dataset_root,
@@ -175,11 +176,10 @@ def main(args):
 
     # Creat optimizer
     # todo, may less one than len(loader)
-    num_steps_each_epoch = len(train_dataset) // (
+    num_iters_each_epoch = len(train_dataset) // (
         args.batch_size * ParallelEnv().nranks)
-    decay_step = args.num_epochs * num_steps_each_epoch
     lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
+        args.learning_rate, args.iters, end_learning_rate=0, power=0.9)
     optimizer = fluid.optimizer.Momentum(
         lr_decay,
         momentum=0.9,
@@ -193,12 +193,12 @@ def main(args):
         eval_dataset=eval_dataset,
         optimizer=optimizer,
         save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
+        iters=args.iters,
         batch_size=args.batch_size,
         pretrained_model=args.pretrained_model,
         resume_model=args.resume_model,
-        save_interval_epochs=args.save_interval_epochs,
-        log_steps=args.log_steps,
+        save_interval_iters=args.save_interval_iters,
+        log_iters=args.log_iters,
         num_classes=train_dataset.num_classes,
         num_workers=args.num_workers,
         use_vdl=args.use_vdl)
diff --git a/dygraph/benchmark/hrnet.py b/dygraph/benchmark/hrnet.py
index 793bb8ae9060a9f8f9247f82535daacfa8100fc2..4de9b06f0135b971a7795f6b3713599e26e798a5 100644
--- a/dygraph/benchmark/hrnet.py
+++ b/dygraph/benchmark/hrnet.py
@@ -61,11 +61,11 @@ def parse_args():
         default=[512, 512],
         type=int)
     parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
+        '--iters',
+        dest='iters',
+        help='iters for training',
         type=int,
-        default=100)
+        default=10000)
     parser.add_argument(
         '--batch_size',
         dest='batch_size',
@@ -91,9 +91,9 @@ def parse_args():
         type=str,
         default=None)
     parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
+        '--save_interval_iters',
+        dest='save_interval_iters',
+        help='The interval iters for save a model snapshot',
         type=int,
         default=5)
     parser.add_argument(
@@ -114,9 +114,9 @@ def parse_args():
         help='Eval while training',
         action='store_true')
     parser.add_argument(
-        '--log_steps',
-        dest='log_steps',
-        help='Display logging information at every log_steps',
+        '--log_iters',
+        dest='log_iters',
+        help='Display logging information at every log_iters',
         default=10,
         type=int)
     parser.add_argument(
@@ -134,6 +134,7 @@ def main(args):
     info = '\n'.join(['\n', format('Environment Information', '-^48s')] +
                      info + ['-' * 48])
     logger.info(info)
+
     places = fluid.CUDAPlace(ParallelEnv().dev_id) \
         if env_info['Paddle compiled with cuda'] and env_info['GPUs used'] \
         else fluid.CPUPlace()
@@ -173,11 +174,10 @@ def main(args):
 
     # Creat optimizer
     # todo, may less one than len(loader)
-    num_steps_each_epoch = len(train_dataset) // (
+    num_iters_each_epoch = len(train_dataset) // (
         args.batch_size * ParallelEnv().nranks)
-    decay_step = args.num_epochs * num_steps_each_epoch
     lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
+        args.learning_rate, args.iters, end_learning_rate=0, power=0.9)
     optimizer = fluid.optimizer.Momentum(
         lr_decay,
         momentum=0.9,
@@ -191,12 +191,12 @@ def main(args):
         eval_dataset=eval_dataset,
         optimizer=optimizer,
         save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
+        iters=args.iters,
         batch_size=args.batch_size,
         pretrained_model=args.pretrained_model,
         resume_model=args.resume_model,
-        save_interval_epochs=args.save_interval_epochs,
-        log_steps=args.log_steps,
+        save_interval_iters=args.save_interval_iters,
+        log_iters=args.log_iters,
         num_classes=train_dataset.num_classes,
         num_workers=args.num_workers,
         use_vdl=args.use_vdl)
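
For reference: with this change both scripts pass `args.iters` straight to `fluid.layers.polynomial_decay` as the decay horizon, rather than deriving `decay_step` from `num_epochs` and the per-epoch step count. Below is a minimal plain-Python sketch of the schedule this produces, assuming Paddle's documented non-cyclic polynomial decay; the function and argument names are illustrative, not Paddle's API.

```python
def polynomial_decay_lr(base_lr, step, decay_steps, end_lr=0.0, power=0.9):
    """Sketch of fluid.layers.polynomial_decay with cycle=False (the default).

    The learning rate falls from base_lr toward end_lr over decay_steps
    iterations along a (1 - t) ** power curve, then stays at end_lr.
    """
    step = min(step, decay_steps)  # clamp once the horizon is reached
    t = step / decay_steps
    return (base_lr - end_lr) * (1.0 - t) ** power + end_lr

# With the patched scripts the horizon is simply args.iters (default 10000),
# so e.g. at the halfway point: polynomial_decay_lr(0.01, 5000, 10000)
# ~= 0.01 * 0.5 ** 0.9 ~= 0.0054, independent of dataset or batch size.
```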