Commit 2f1b1af5 authored by chenguowei01

update according to review

Parent b4ac4e82
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os.path as osp
+import os
 from threading import Thread
 import multiprocessing
 import collections
@@ -170,8 +170,8 @@ def multiprocess_reader(mapper,
         index = i % num_workers
         total_samples[index].append(sample)
     for i in range(num_workers):
-        p = multiprocessing.Process(
-            target=_read_into_queue, args=(total_samples[i], mapper, queue))
+        p = multiprocessing.Process(target=_read_into_queue,
+                                    args=(total_samples[i], mapper, queue))
         p.start()
     finish_num = 0
@@ -230,18 +230,18 @@ class Dataset:
             items = line.strip().split()
             if not is_pic(items[0]):
                 continue
-            full_path_im = osp.join(data_dir, items[0])
-            full_path_label = osp.join(data_dir, items[1])
-            if not osp.exists(full_path_im):
+            full_path_im = os.path.join(data_dir, items[0])
+            full_path_label = os.path.join(data_dir, items[1])
+            if not os.path.exists(full_path_im):
                 raise IOError(
                     'The image file {} is not exist!'.format(full_path_im))
-            if not osp.exists(full_path_label):
+            if not os.path.exists(full_path_label):
                 raise IOError('The image file {} is not exist!'.format(
                     full_path_label))
             self.file_list.append([full_path_im, full_path_label])
         self.num_samples = len(self.file_list)
-        logging.info("{} samples in file {}".format(
-            len(self.file_list), file_list))
+        logging.info("{} samples in file {}".format(len(self.file_list),
+                                                    file_list))

     def iterator(self):
         self._epoch += 1
@@ -266,10 +266,9 @@ class Dataset:
             )
         else:
             parallel_reader = multiprocess_reader
-        return parallel_reader(
-            self.transforms,
-            self.iterator,
-            num_workers=self.num_workers,
-            buffer_size=self.buffer_size,
-            batch_size=batch_size,
-            drop_last=drop_last)
+        return parallel_reader(self.transforms,
+                               self.iterator,
+                               num_workers=self.num_workers,
+                               buffer_size=self.buffer_size,
+                               batch_size=batch_size,
+                               drop_last=drop_last)
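Note that the hunks above only restyle the worker fan-out in multiprocess_reader; the logic is unchanged: samples are dealt round-robin into one bucket per worker, and each worker process pushes mapped results into a shared queue. A minimal standalone sketch of that pattern (the sentinel value and the body of _read_into_queue are assumptions, since the diff does not show them):

import multiprocessing


def _read_into_queue(samples, mapper, queue):
    # Each worker maps its share of samples, then signals completion.
    for sample in samples:
        queue.put(mapper(sample))
    queue.put(None)  # assumed sentinel marking this worker as finished


def double(sample):
    return sample * 2


def multiprocess_reader(mapper, samples, num_workers=2, buffer_size=100):
    queue = multiprocessing.Queue(maxsize=buffer_size)
    # Deal samples round-robin into one bucket per worker, as in the hunk.
    total_samples = [[] for _ in range(num_workers)]
    for i, sample in enumerate(samples):
        total_samples[i % num_workers].append(sample)
    for i in range(num_workers):
        p = multiprocessing.Process(target=_read_into_queue,
                                    args=(total_samples[i], mapper, queue))
        p.start()
    finish_num = 0
    while finish_num < num_workers:
        item = queue.get()
        if item is None:
            finish_num += 1
        else:
            yield item


if __name__ == '__main__':
    print(sorted(multiprocess_reader(double, range(10))))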
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
@@ -33,84 +32,76 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for traing, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for traing, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--test_list',
-        dest='test_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--test_list',
+                        dest='test_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of prediction
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--model_dir',
-        dest='model_dir',
-        help='The path of model for evaluation',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--save_dir',
-        dest='save_dir',
-        help='The directory for saving the inference results',
-        type=str,
-        default='./output/result')
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--model_dir',
+                        dest='model_dir',
+                        help='The path of model for evaluation',
+                        type=str,
+                        default=None)
+    parser.add_argument('--save_dir',
+                        dest='save_dir',
+                        help='The directory for saving the inference results',
+                        type=str,
+                        default='./output/result')
     return parser.parse_args()


 def mkdir(path):
-    sub_dir = osp.dirname(path)
-    if not osp.exists(sub_dir):
+    sub_dir = os.path.dirname(path)
+    if not os.path.exists(sub_dir):
         os.makedirs(sub_dir)


 def infer(model, data_dir=None, test_list=None, model_dir=None,
           transforms=None):
-    ckpt_path = osp.join(model_dir, 'model')
+    ckpt_path = os.path.join(model_dir, 'model')
     para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
     model.set_dict(para_state_dict)
     model.eval()
-    added_saved_dir = osp.join(args.save_dir, 'added')
-    pred_saved_dir = osp.join(args.save_dir, 'prediction')
+    added_saved_dir = os.path.join(args.save_dir, 'added')
+    pred_saved_dir = os.path.join(args.save_dir, 'prediction')
     logging.info("Start to predict...")
     with open(test_list, 'r') as f:
         files = f.readlines()
     for file in tqdm.tqdm(files):
         file = file.strip()
-        im_file = osp.join(data_dir, file)
+        im_file = os.path.join(data_dir, file)
         im, im_info, _ = transforms(im_file)
         im = np.expand_dims(im, axis=0)
         im = to_variable(im)
@@ -129,29 +120,29 @@ def infer(model, data_dir=None, test_list=None, model_dir=None,
         # save added image
         added_image = utils.visualize(im_file, pred, weight=0.6)
-        added_image_path = osp.join(added_saved_dir, file)
+        added_image_path = os.path.join(added_saved_dir, file)
         mkdir(added_image_path)
         cv2.imwrite(added_image_path, added_image)
         # save prediction
         pred_im = utils.visualize(im_file, pred, weight=0.0)
-        pred_saved_path = osp.join(pred_saved_dir, file)
+        pred_saved_path = os.path.join(pred_saved_dir, file)
         mkdir(pred_saved_path)
         cv2.imwrite(pred_saved_path, pred_im)


 def main(args):
-    test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-    if args.model_name == 'UNet':
-        model = models.UNet(num_classes=args.num_classes)
-    infer(
-        model,
-        data_dir=args.data_dir,
-        test_list=args.test_list,
-        model_dir=args.model_dir,
-        transforms=test_transforms)
+    with fluid.dygraph.guard(places):
+        test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes)
+        infer(model,
+              data_dir=args.data_dir,
+              test_list=args.test_list,
+              model_dir=args.model_dir,
+              transforms=test_transforms)


 if __name__ == '__main__':
@@ -161,5 +152,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
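The substantive change in this file is that fluid.dygraph.guard now wraps the body of main() instead of the call site in __main__. Because places is a module-level global assigned in the __main__ block before main(args) runs, main() can still see it. A reduced sketch of the new control flow (PaddlePaddle 1.x dygraph API; the tensor construction is a stand-in for the real model code):

import argparse

import numpy as np
import paddle.fluid as fluid

places = fluid.CPUPlace()  # or fluid.CUDAPlace(0), as chosen in __main__


def main(args):
    # The dygraph context now lives inside main(), so everything that
    # builds or runs the model executes in imperative (dygraph) mode.
    with fluid.dygraph.guard(places):
        x = fluid.dygraph.to_variable(
            np.zeros([1, 3] + args.input_size, dtype='float32'))
        print(x.shape)  # [1, 3, 512, 512]


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_size', nargs=2, type=int, default=[512, 512])
    main(parser.parse_args())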
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
@@ -33,88 +32,69 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for traing, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for traing, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--train_list',
-        dest='train_list',
-        help='Train list file of dataset',
-        type=str)
-    parser.add_argument(
-        '--val_list',
-        dest='val_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--ingore_index',
-        dest='ignore_index',
-        help=
-        'The pixel equaling ignore_index will not be computed during training',
-        type=int,
-        default=255)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--train_list',
+                        dest='train_list',
+                        help='Train list file of dataset',
+                        type=str)
+    parser.add_argument('--val_list',
+                        dest='val_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of training
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
-        type=int,
-        default=100)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--learning_rate',
-        dest='learning_rate',
-        help='Learning rate',
-        type=float,
-        default=0.01)
-    parser.add_argument(
-        '--pretrained_model',
-        dest='pretrained_model',
-        help='The path of pretrianed weight',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
-        type=int,
-        default=5)
-    parser.add_argument(
-        '--save_dir',
-        dest='save_dir',
-        help='The directory for saving the model snapshot',
-        type=str,
-        default='./output')
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--num_epochs',
+                        dest='num_epochs',
+                        help='Number epochs for training',
+                        type=int,
+                        default=100)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--learning_rate',
+                        dest='learning_rate',
+                        help='Learning rate',
+                        type=float,
+                        default=0.01)
+    parser.add_argument('--pretrained_model',
+                        dest='pretrained_model',
+                        help='The path of pretrianed weight',
+                        type=str,
+                        default=None)
+    parser.add_argument('--save_interval_epochs',
+                        dest='save_interval_epochs',
+                        help='The interval epochs for save a model snapshot',
+                        type=int,
+                        default=5)
+    parser.add_argument('--save_dir',
+                        dest='save_dir',
+                        help='The directory for saving the model snapshot',
+                        type=str,
+                        default='./output')
     return parser.parse_args()
@@ -129,15 +109,15 @@ def train(model,
           pretrained_model=None,
           save_interval_epochs=1,
           num_classes=None):
-    if not osp.isdir(save_dir):
-        if osp.exists(save_dir):
+    if not os.path.isdir(save_dir):
+        if os.path.exists(save_dir):
             os.remove(save_dir)
         os.makedirs(save_dir)

     load_pretrained_model(model, pretrained_model)

-    data_generator = train_dataset.generator(
-        batch_size=batch_size, drop_last=True)
+    data_generator = train_dataset.generator(batch_size=batch_size,
+                                             drop_last=True)
     num_steps_each_epoch = train_dataset.num_samples // args.batch_size

     for epoch in range(num_epochs):
@@ -156,76 +136,78 @@ def train(model,
         if (
                 epoch + 1
         ) % save_interval_epochs == 0 or num_steps_each_epoch == num_epochs - 1:
-            current_save_dir = osp.join(save_dir, "epoch_{}".format(epoch + 1))
-            if not osp.isdir(current_save_dir):
+            current_save_dir = os.path.join(save_dir,
+                                            "epoch_{}".format(epoch + 1))
+            if not os.path.isdir(current_save_dir):
                 os.makedirs(current_save_dir)
             fluid.save_dygraph(model.state_dict(),
-                               osp.join(current_save_dir, 'model'))
+                               os.path.join(current_save_dir, 'model'))
             if eval_dataset is not None:
                 model.eval()
-                evaluate(
-                    model,
-                    eval_dataset,
-                    model_dir=current_save_dir,
-                    num_classes=num_classes,
-                    batch_size=batch_size,
-                    ignore_index=model.ignore_index,
-                    epoch_id=epoch + 1)
+                evaluate(model,
+                         eval_dataset,
+                         model_dir=current_save_dir,
+                         num_classes=num_classes,
+                         batch_size=batch_size,
+                         ignore_index=model.ignore_index,
+                         epoch_id=epoch + 1)
                 model.train()


 def main(args):
-    # Creat dataset reader
-    train_transforms = T.Compose(
-        [T.Resize(args.input_size),
-         T.RandomHorizontalFlip(),
-         T.Normalize()])
-    train_dataset = Dataset(
-        data_dir=args.data_dir,
-        file_list=args.train_list,
-        transforms=train_transforms,
-        num_workers='auto',
-        buffer_size=100,
-        parallel_method='thread',
-        shuffle=True)
-    if args.val_list is not None:
-        eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-        eval_dataset = Dataset(
-            data_dir=args.data_dir,
-            file_list=args.val_list,
-            transforms=eval_transforms,
-            num_workers='auto',
-            buffer_size=100,
-            parallel_method='thread',
-            shuffle=False)
-
-    if args.model_name == 'UNet':
-        model = models.UNet(
-            num_classes=args.num_classes, ignore_index=args.ignore_index)
-
-    # Creat optimizer
-    num_steps_each_epoch = train_dataset.num_samples // args.batch_size
-    decay_step = args.num_epochs * num_steps_each_epoch
-    lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
-    optimizer = fluid.optimizer.Momentum(
-        lr_decay,
-        momentum=0.9,
-        parameter_list=model.parameters(),
-        regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
-
-    train(
-        model,
-        train_dataset,
-        eval_dataset,
-        optimizer,
-        save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
-        batch_size=args.batch_size,
-        pretrained_model=args.pretrained_model,
-        save_interval_epochs=args.save_interval_epochs,
-        num_classes=args.num_classes)
+    with fluid.dygraph.guard(places):
+        # Creat dataset reader
+        train_transforms = T.Compose([
+            T.Resize(args.input_size),
+            T.RandomHorizontalFlip(),
+            T.Normalize()
+        ])
+        train_dataset = Dataset(data_dir=args.data_dir,
+                                file_list=args.train_list,
+                                transforms=train_transforms,
+                                num_workers='auto',
+                                buffer_size=100,
+                                parallel_method='thread',
+                                shuffle=True)
+        if args.val_list is not None:
+            eval_transforms = T.Compose(
+                [T.Resize(args.input_size),
+                 T.Normalize()])
+            eval_dataset = Dataset(data_dir=args.data_dir,
+                                   file_list=args.val_list,
+                                   transforms=eval_transforms,
+                                   num_workers='auto',
+                                   buffer_size=100,
+                                   parallel_method='thread',
+                                   shuffle=False)
+
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes, ignore_index=255)
+
+        # Creat optimizer
+        num_steps_each_epoch = train_dataset.num_samples // args.batch_size
+        decay_step = args.num_epochs * num_steps_each_epoch
+        lr_decay = fluid.layers.polynomial_decay(args.learning_rate,
+                                                 decay_step,
+                                                 end_learning_rate=0,
+                                                 power=0.9)
+        optimizer = fluid.optimizer.Momentum(
+            lr_decay,
+            momentum=0.9,
+            parameter_list=model.parameters(),
+            regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
+
+        train(model,
+              train_dataset,
+              eval_dataset,
+              optimizer,
+              save_dir=args.save_dir,
+              num_epochs=args.num_epochs,
+              batch_size=args.batch_size,
+              pretrained_model=args.pretrained_model,
+              save_interval_epochs=args.save_interval_epochs,
+              num_classes=args.num_classes)


 if __name__ == '__main__':
@@ -235,5 +217,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
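One detail worth noting in the optimizer hunk above: polynomial_decay(args.learning_rate, decay_step, end_learning_rate=0, power=0.9) decays the learning rate to zero over exactly num_epochs * num_steps_each_epoch steps. A small numeric sketch of that schedule, using the formula fluid.layers.polynomial_decay documents for its default cycle=False (the step counts below are made-up example values):

def polynomial_decay(lr, step, decay_steps, end_lr=0.0, power=0.9):
    # decayed_lr = (lr - end_lr) * (1 - step / decay_steps)^power + end_lr
    step = min(step, decay_steps)
    return (lr - end_lr) * (1 - step / decay_steps) ** power + end_lr


base_lr = 0.01
decay_steps = 100 * 500  # num_epochs * num_steps_each_epoch (example values)
for step in (0, 12500, 25000, 37500, 50000):
    print(step, round(polynomial_decay(base_lr, step, decay_steps), 6))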
@@ -13,7 +13,6 @@
 # limitations under the License.
 import os
-import os.path as osp
 import numpy as np
 import math
 import cv2
@@ -59,8 +58,8 @@ def get_environ_info():
 def load_pretrained_model(model, pretrained_model):
     logging.info('Load pretrained model!')
     if pretrained_model is not None:
-        if osp.exists(pretrained_model):
-            ckpt_path = osp.join(pretrained_model, 'model')
+        if os.path.exists(pretrained_model):
+            ckpt_path = os.path.join(pretrained_model, 'model')
             para_state_dict, _ = fluid.load_dygraph(ckpt_path)
             model_state_dict = model.state_dict()
             keys = model_state_dict.keys()
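The hunk cuts off right after keys = model_state_dict.keys(). The usual continuation of this pattern (this is an assumption about the collapsed lines, not a quote of them) is to copy only the pretrained parameters whose names and shapes match the model, so that mismatched heads fall back to their initial values:

def match_state_dict(model_state_dict, para_state_dict):
    # Keep a pretrained parameter only if the model has a parameter with
    # the same name and shape (hypothetical helper, not this repo's code).
    matched = {}
    for key in model_state_dict.keys():
        if key in para_state_dict and \
                tuple(para_state_dict[key].shape) == \
                tuple(model_state_dict[key].shape):
            matched[key] = para_state_dict[key]
    return matched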
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 import math
 from paddle.fluid.dygraph.base import to_variable
@@ -33,59 +32,45 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for traing, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for traing, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--val_list',
-        dest='val_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--ingore_index',
-        dest='ignore_index',
-        help=
-        'The pixel equaling ignore_index will not be computed during evaluation',
-        type=int,
-        default=255)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--val_list',
+                        dest='val_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of evaluate
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--model_dir',
-        dest='model_dir',
-        help='The path of model for evaluation',
-        type=str,
-        default=None)
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--model_dir',
+                        dest='model_dir',
+                        help='The path of model for evaluation',
+                        type=str,
+                        default=None)
     return parser.parse_args()
@@ -97,13 +82,13 @@ def evaluate(model,
              batch_size=2,
              ignore_index=255,
              epoch_id=None):
-    ckpt_path = osp.join(model_dir, 'model')
+    ckpt_path = os.path.join(model_dir, 'model')
     para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
     model.set_dict(para_state_dict)
     model.eval()

-    data_generator = eval_dataset.generator(
-        batch_size=batch_size, drop_last=True)
+    data_generator = eval_dataset.generator(batch_size=batch_size,
+                                            drop_last=True)
     total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
     conf_mat = ConfusionMatrix(num_classes, streaming=True)
@@ -135,27 +120,24 @@ def evaluate(model,
 def main(args):
-    eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-    eval_dataset = Dataset(
-        data_dir=args.data_dir,
-        file_list=args.val_list,
-        transforms=eval_transforms,
-        num_workers='auto',
-        buffer_size=100,
-        parallel_method='thread',
-        shuffle=False)
-
-    if args.model_name == 'UNet':
-        model = models.UNet(num_classes=args.num_classes)
-
-    evaluate(
-        model,
-        eval_dataset,
-        model_dir=args.model_dir,
-        num_classes=args.num_classes,
-        batch_size=args.batch_size,
-        ignore_index=args.ignore_index,
-    )
+    with fluid.dygraph.guard(places):
+        eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
+        eval_dataset = Dataset(data_dir=args.data_dir,
+                               file_list=args.val_list,
+                               transforms=eval_transforms,
+                               num_workers='auto',
+                               buffer_size=100,
+                               parallel_method='thread',
+                               shuffle=False)
+
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes)
+
+        evaluate(model,
+                 eval_dataset,
+                 model_dir=args.model_dir,
+                 num_classes=args.num_classes,
+                 batch_size=args.batch_size)


 if __name__ == '__main__':
@@ -165,5 +147,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
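For background on the evaluate() hunk: results are accumulated in ConfusionMatrix(num_classes, streaming=True), a class from this repo whose implementation is not shown in the diff. As a rough standalone sketch of what streaming confusion-matrix accumulation and mIoU computation typically look like (the class name and methods below are hypothetical, not this repo's API):

import numpy as np


class StreamingConfusionMatrix:
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.mat = np.zeros((num_classes, num_classes), dtype=np.int64)

    def update(self, pred, label, ignore_index=255):
        # Accumulate one batch: rows are ground truth, columns predictions;
        # pixels equal to ignore_index are masked out, as in the scripts.
        mask = label != ignore_index
        idx = self.num_classes * label[mask] + pred[mask]
        self.mat += np.bincount(idx, minlength=self.num_classes**2) \
            .reshape(self.num_classes, self.num_classes)

    def mean_iou(self):
        inter = np.diag(self.mat)
        union = self.mat.sum(axis=0) + self.mat.sum(axis=1) - inter
        return float((inter / np.maximum(union, 1)).mean())


cm = StreamingConfusionMatrix(num_classes=2)
cm.update(pred=np.array([0, 1, 1, 0]), label=np.array([0, 1, 0, 255]))
print(cm.mean_iou())  # 0.5 for this toy batch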