diff --git a/dygraph/benchmark/deeplabv3p.py b/dygraph/benchmark/deeplabv3p.py
index 66a2c3f2217aeb4ff7f25bbdae30c8ab0d34e7a8..9c1bc66c36feba4b8660941e81e7a70c9eef4050 100644
--- a/dygraph/benchmark/deeplabv3p.py
+++ b/dygraph/benchmark/deeplabv3p.py
@@ -13,23 +13,15 @@
 # limitations under the License.
 
 import argparse
-import os
-import sys
 
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.io import DataLoader
-from paddle.incubate.hapi.distributed import DistributedBatchSampler
 
-from datasets import OpticDiscSeg, Cityscapes
-import transforms as T
-from models import MODELS
-import utils.logging as logging
-from utils import get_environ_info
-from utils import load_pretrained_model
-from utils import resume
-from utils import Timer, calculate_eta
-from core import train
+from dygraph.datasets import DATASETS
+import dygraph.transforms as T
+from dygraph.models import MODELS
+from dygraph.utils import get_environ_info
+from dygraph.core import train
 
 
 def parse_args():
@@ -48,10 +40,16 @@ def parse_args():
     parser.add_argument(
         '--dataset',
         dest='dataset',
-        help=
-        "The dataset you want to train, which is one of ('OpticDiscSeg', 'Cityscapes')",
+        help="The dataset you want to train, which is one of {}".format(
+            str(list(DATASETS.keys()))),
         type=str,
         default='OpticDiscSeg')
+    parser.add_argument(
+        '--dataset_root',
+        dest='dataset_root',
+        help="dataset root directory",
+        type=str,
+        default=None)
 
     # params of training
     parser.add_argument(
@@ -135,36 +133,38 @@ def main(args):
     if env_info['place'] == 'cuda' and fluid.is_compiled_with_cuda() \
         else fluid.CPUPlace()
 
-    if args.dataset.lower() == 'opticdiscseg':
-        dataset = OpticDiscSeg
-    elif args.dataset.lower() == 'cityscapes':
-        dataset = Cityscapes
-    else:
-        raise Exception(
-            "The --dataset set wrong. It should be one of ('OpticDiscSeg', 'Cityscapes')"
-        )
+    if args.dataset not in DATASETS:
+        raise Exception('`--dataset` is invalid. it should be one of {}'.format(
+            str(list(DATASETS.keys()))))
+    dataset = DATASETS[args.dataset]
 
     with fluid.dygraph.guard(places):
         # Creat dataset reader
         train_transforms = T.Compose([
+            T.RandomHorizontalFlip(0.5),
             T.ResizeStepScaling(0.5, 2.0, 0.25),
             T.RandomPaddingCrop(args.input_size),
-            T.RandomHorizontalFlip(),
-            T.Normalize()
+            T.RandomDistort(),
+            T.Normalize(),
         ])
-        train_dataset = dataset(transforms=train_transforms, mode='train')
+        train_dataset = dataset(
+            dataset_root=args.dataset_root,
+            transforms=train_transforms,
+            mode='train')
 
         eval_dataset = None
         if args.do_eval:
             eval_transforms = T.Compose(
                 [T.Padding((2049, 1025)),
-                 T.Normalize()]
-            )
-            eval_dataset = dataset(transforms=eval_transforms, mode='eval')
+                 T.Normalize()])
+            eval_dataset = dataset(
+                dataset_root=args.dataset_root,
+                transforms=eval_transforms,
+                mode='val')
 
         if args.model_name not in MODELS:
             raise Exception(
-                '--model_name is invalid. it should be one of {}'.format(
+                '`--model_name` is invalid. it should be one of {}'.format(
                     str(list(MODELS.keys()))))
         model = MODELS[args.model_name](num_classes=train_dataset.num_classes)
 
@@ -174,16 +174,12 @@ def main(args):
         args.batch_size * ParallelEnv().nranks)
     decay_step = args.num_epochs * num_steps_each_epoch
     lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0.00001, power=0.9)
-
-
+        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
     optimizer = fluid.optimizer.Momentum(
         lr_decay,
         momentum=0.9,
         parameter_list=model.parameters(),
-        #parameter_list=filter(lambda p: p.trainable, model.parameters()),
         regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
-
 
     train(
         model,
diff --git a/dygraph/benchmark/hrnet_w18_benchmark.py b/dygraph/benchmark/hrnet.py
similarity index 81%
rename from dygraph/benchmark/hrnet_w18_benchmark.py
rename to dygraph/benchmark/hrnet.py
index 6a6ce872686bbbec8c4f51ead028830b248512f3..3f64aa79739d2caca601f3a0574fd987ba49fba3 100644
--- a/dygraph/benchmark/hrnet_w18_benchmark.py
+++ b/dygraph/benchmark/hrnet.py
@@ -13,22 +13,15 @@
 # limitations under the License.
 
 import argparse
-import os
 
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.io import DataLoader
-from paddle.incubate.hapi.distributed import DistributedBatchSampler
 
-from datasets import OpticDiscSeg, Cityscapes
-import transforms as T
-from models import MODELS
-import utils.logging as logging
-from utils import get_environ_info
-from utils import load_pretrained_model
-from utils import resume
-from utils import Timer, calculate_eta
-from core import train, evaluate
+from dygraph.datasets import DATASETS
+import dygraph.transforms as T
+from dygraph.models import MODELS
+from dygraph.utils import get_environ_info
+from dygraph.core import train
 
 
 def parse_args():
@@ -47,10 +40,16 @@ def parse_args():
     parser.add_argument(
         '--dataset',
         dest='dataset',
-        help=
-        "The dataset you want to train, which is one of ('OpticDiscSeg', 'Cityscapes')",
+        help="The dataset you want to train, which is one of {}".format(
+            str(list(DATASETS.keys()))),
         type=str,
-        default='Cityscapes')
+        default='OpticDiscSeg')
+    parser.add_argument(
+        '--dataset_root',
+        dest='dataset_root',
+        help="dataset root directory",
+        type=str,
+        default=None)
 
     # params of training
     parser.add_argument(
@@ -58,14 +57,14 @@ def parse_args():
         dest="input_size",
         help="The image size for net inputs.",
         nargs=2,
-        default=[1024, 512],
+        default=[512, 512],
         type=int)
     parser.add_argument(
         '--num_epochs',
         dest='num_epochs',
         help='Number epochs for training',
         type=int,
-        default=500)
+        default=100)
     parser.add_argument(
         '--batch_size',
         dest='batch_size',
@@ -107,7 +106,7 @@ def parse_args():
         dest='num_workers',
         help='Num workers for data loader',
         type=int,
-        default=2)
+        default=0)
     parser.add_argument(
         '--do_eval',
         dest='do_eval',
@@ -134,14 +133,10 @@ def main(args):
     if env_info['place'] == 'cuda' and fluid.is_compiled_with_cuda() \
         else fluid.CPUPlace()
 
-    if args.dataset.lower() == 'opticdiscseg':
-        dataset = OpticDiscSeg
-    elif args.dataset.lower() == 'cityscapes':
-        dataset = Cityscapes
-    else:
-        raise Exception(
-            "The --dataset set wrong. It should be one of ('OpticDiscSeg', 'Cityscapes')"
-        )
+    if args.dataset not in DATASETS:
+        raise Exception('`--dataset` is invalid. it should be one of {}'.format(
+            str(list(DATASETS.keys()))))
+    dataset = DATASETS[args.dataset]
 
     with fluid.dygraph.guard(places):
         # Creat dataset reader
@@ -152,16 +147,22 @@ def main(args):
            T.RandomPaddingCrop(args.input_size),
            T.RandomDistort(),
            T.Normalize(),
        ])
-        train_dataset = dataset(transforms=train_transforms, mode='train')
+        train_dataset = dataset(
+            dataset_root=args.dataset_root,
+            transforms=train_transforms,
+            mode='train')
 
         eval_dataset = None
         if args.do_eval:
             eval_transforms = T.Compose([T.Normalize()])
-            eval_dataset = dataset(transforms=eval_transforms, mode='eval')
+            eval_dataset = dataset(
+                dataset_root=args.dataset_root,
+                transforms=eval_transforms,
+                mode='val')
 
         if args.model_name not in MODELS:
             raise Exception(
-                '--model_name is invalid. it should be one of {}'.format(
+                '`--model_name` is invalid. it should be one of {}'.format(
                     str(list(MODELS.keys()))))
         model = MODELS[args.model_name](num_classes=train_dataset.num_classes)
@@ -176,7 +177,8 @@ def main(args):
         lr_decay,
         momentum=0.9,
         parameter_list=model.parameters(),
-        regularization=fluid.regularizer.L2Decay(regularization_coeff=5e-4))
+        regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
+
     train(
         model,
         train_dataset,
diff --git a/dygraph/core/infer.py b/dygraph/core/infer.py
index fa39062599670cccd485b59040e2fe1b530c2218..23890e8e1b7c869c36f37aa53d481b22317c1a2c 100644
--- a/dygraph/core/infer.py
+++ b/dygraph/core/infer.py
@@ -20,8 +20,8 @@ import paddle.fluid as fluid
 import cv2
 import tqdm
 
-import utils
-import utils.logging as logging
+from dygraph import utils
+import dygraph.utils.logging as logging
 
 
 def mkdir(path):
diff --git a/dygraph/core/train.py b/dygraph/core/train.py
index 7e84d2049b817e97a813b59bf1a6d51df8ff1379..a823265f316951b0f43e4449212f709301e87574 100644
--- a/dygraph/core/train.py
+++ b/dygraph/core/train.py
@@ -19,10 +19,10 @@
 from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.fluid.io import DataLoader
 from paddle.incubate.hapi.distributed import DistributedBatchSampler
 
-import utils.logging as logging
-from utils import load_pretrained_model
-from utils import resume
-from utils import Timer, calculate_eta
+import dygraph.utils.logging as logging
+from dygraph.utils import load_pretrained_model
+from dygraph.utils import resume
+from dygraph.utils import Timer, calculate_eta
 
 from .val import evaluate
diff --git a/dygraph/core/val.py b/dygraph/core/val.py
index 3245be5a449e86eae7eff366ec7e1d13555e6eba..0623b61772e221da8ccdf73aebad2217cbbd06de 100644
--- a/dygraph/core/val.py
+++ b/dygraph/core/val.py
@@ -20,9 +20,9 @@ import cv2
 from paddle.fluid.dygraph.base import to_variable
 import paddle.fluid as fluid
 
-import utils.logging as logging
-from utils import ConfusionMatrix
-from utils import Timer, calculate_eta
+import dygraph.utils.logging as logging
+from dygraph.utils import ConfusionMatrix
+from dygraph.utils import Timer, calculate_eta
 
 
 def evaluate(model,
diff --git a/dygraph/datasets/ade.py b/dygraph/datasets/ade.py
index def88a51dfe8f9bd2870e9b433a57ad60b659575..1c9065e38f677290d81bb0d8be5224b2a54c0adf 100644
--- a/dygraph/datasets/ade.py
+++ b/dygraph/datasets/ade.py
@@ -18,7 +18,7 @@ import numpy as np
 from PIL import Image
 
 from .dataset import Dataset
-from utils.download import download_file_and_uncompress
+from dygraph.utils.download import download_file_and_uncompress
 
 DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
 URL = "http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip"
diff --git a/dygraph/datasets/optic_disc_seg.py b/dygraph/datasets/optic_disc_seg.py
index a59ac847dccb5a1b17738dcd67abc315ef076e30..82d18e8c5e51ec12b487a252b1c4ac1dc77838d1 100644
--- a/dygraph/datasets/optic_disc_seg.py
+++ b/dygraph/datasets/optic_disc_seg.py
@@ -15,7 +15,7 @@
 import os
 
 from .dataset import Dataset
-from utils.download import download_file_and_uncompress
+from dygraph.utils.download import download_file_and_uncompress
 
 DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
 URL = "https://paddleseg.bj.bcebos.com/dataset/optic_disc_seg.zip"
diff --git a/dygraph/datasets/voc.py b/dygraph/datasets/voc.py
index ceb33b9ab2478ff56e6d27921406a644a5e0e7c7..d11f4c9e7e2d39577e75e0f5705814ce3a189c53 100644
--- a/dygraph/datasets/voc.py
+++ b/dygraph/datasets/voc.py
@@ -14,7 +14,7 @@
 import os
 
 from .dataset import Dataset
-from utils.download import download_file_and_uncompress
+from dygraph.utils.download import download_file_and_uncompress
 
 DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
 URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"
diff --git a/dygraph/infer.py b/dygraph/infer.py
index 8111ff01a07e56aa3479a6ac08e49fb325968397..76cdee7cacf33307d630f71e5b47737d37e9363c 100644
--- a/dygraph/infer.py
+++ b/dygraph/infer.py
@@ -17,11 +17,11 @@ import argparse
 
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from datasets import DATASETS
-import transforms as T
-from models import MODELS
-from utils import get_environ_info
-from core import infer
+from dygraph.datasets import DATASETS
+import dygraph.transforms as T
+from dygraph.models import MODELS
+from dygraph.utils import get_environ_info
+from dygraph.core import infer
 
 
 def parse_args():
diff --git a/dygraph/tools/voc_augment.py b/dygraph/tools/voc_augment.py
index c4be6ad4e466faf54b52b6accf320c7ec03530bb..b86f99bf36061608cd51ce7d8d478ad6a10e8b04 100644
--- a/dygraph/tools/voc_augment.py
+++ b/dygraph/tools/voc_augment.py
@@ -27,7 +27,7 @@ import numpy as np
 from scipy.io import loadmat
 import tqdm
 
-from utils.download import download_file_and_uncompress
+from dygraph.utils.download import download_file_and_uncompress
 
 DATA_HOME = os.path.expanduser('~/.cache/paddle/dataset')
 URL = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz'
diff --git a/dygraph/train.py b/dygraph/train.py
index 8cd979654cc3ef8d0222c3668a3a7e0b26ce6656..5a66f6073ce842944d777054e45435ca058c135a 100644
--- a/dygraph/train.py
+++ b/dygraph/train.py
@@ -17,11 +17,11 @@ import argparse
 
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from datasets import DATASETS
-import transforms as T
-from models import MODELS
-from utils import get_environ_info
-from core import train
+from dygraph.datasets import DATASETS
+import dygraph.transforms as T
+from dygraph.models import MODELS
+from dygraph.utils import get_environ_info
+from dygraph.core import train
 
 
 def parse_args():
diff --git a/dygraph/val.py b/dygraph/val.py
index d388fccf81d39b928323412dbc08145c5401d184..9550cc837f871d80e9a0eb0ca47c96ba1703b99e 100644
--- a/dygraph/val.py
+++ b/dygraph/val.py
@@ -17,11 +17,11 @@ import argparse
 
 import paddle.fluid as fluid
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from datasets import DATASETS
-import transforms as T
-from models import MODELS
-from utils import get_environ_info
-from core import evaluate
+from dygraph.datasets import DATASETS
+import dygraph.transforms as T
+from dygraph.models import MODELS
+from dygraph.utils import get_environ_info
+from dygraph.core import evaluate
 
 
 def parse_args():