Commit 2f1b1af5 authored by chenguowei01

update according to review: switch the osp alias to plain os.path calls, move the
fluid.dygraph.guard context from the call sites into main(), drop the misspelled
--ingore_index flag in favor of a fixed ignore_index=255, and reflow argument lists.

Parent b4ac4e82
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os.path as osp
+import os
 from threading import Thread
 import multiprocessing
 import collections
@@ -170,8 +170,8 @@ def multiprocess_reader(mapper,
         index = i % num_workers
         total_samples[index].append(sample)
     for i in range(num_workers):
-        p = multiprocessing.Process(
-            target=_read_into_queue, args=(total_samples[i], mapper, queue))
+        p = multiprocessing.Process(target=_read_into_queue,
+                                    args=(total_samples[i], mapper, queue))
         p.start()

     finish_num = 0
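For readers skimming the hunk above: the reformat only changes how the multiprocessing.Process(...) call is wrapped. The pattern itself is the usual queue-fed worker pool; a minimal, self-contained sketch (with a stand-in mapper, since _read_into_queue's body is not shown in this diff) looks like:

import multiprocessing

def _read_into_queue(samples, mapper, queue):
    # Each worker maps its shard and pushes results; None marks completion.
    for sample in samples:
        queue.put(mapper(sample))
    queue.put(None)

def square(x):  # stand-in mapper for the sketch
    return x * x

if __name__ == '__main__':
    num_workers = 2
    queue = multiprocessing.Queue(maxsize=32)
    shards = [[1, 3, 5], [2, 4, 6]]  # total_samples split round-robin above
    for i in range(num_workers):
        p = multiprocessing.Process(target=_read_into_queue,
                                    args=(shards[i], square, queue))
        p.start()
    finish_num = 0
    while finish_num < num_workers:  # mirrors the finish_num counter above
        item = queue.get()
        if item is None:
            finish_num += 1
        else:
            print(item)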
@@ -230,18 +230,18 @@ class Dataset:
             items = line.strip().split()
             if not is_pic(items[0]):
                 continue
-            full_path_im = osp.join(data_dir, items[0])
-            full_path_label = osp.join(data_dir, items[1])
-            if not osp.exists(full_path_im):
+            full_path_im = os.path.join(data_dir, items[0])
+            full_path_label = os.path.join(data_dir, items[1])
+            if not os.path.exists(full_path_im):
                 raise IOError(
                     'The image file {} is not exist!'.format(full_path_im))
-            if not osp.exists(full_path_label):
+            if not os.path.exists(full_path_label):
                 raise IOError('The image file {} is not exist!'.format(
                     full_path_label))
             self.file_list.append([full_path_im, full_path_label])
         self.num_samples = len(self.file_list)
-        logging.info("{} samples in file {}".format(
-            len(self.file_list), file_list))
+        logging.info("{} samples in file {}".format(len(self.file_list),
                                                     file_list))

     def iterator(self):
         self._epoch += 1
@@ -266,8 +266,7 @@ class Dataset:
             )
         else:
             parallel_reader = multiprocess_reader
-        return parallel_reader(
-            self.transforms,
-            self.iterator,
-            num_workers=self.num_workers,
-            buffer_size=self.buffer_size,
+        return parallel_reader(self.transforms,
+                               self.iterator,
+                               num_workers=self.num_workers,
+                               buffer_size=self.buffer_size,
......
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
@@ -33,54 +32,46 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for traing, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for traing, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--test_list',
-        dest='test_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--test_list',
+                        dest='test_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of prediction
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--model_dir',
-        dest='model_dir',
-        help='The path of model for evaluation',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--save_dir',
-        dest='save_dir',
-        help='The directory for saving the inference results',
-        type=str,
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--model_dir',
+                        dest='model_dir',
+                        help='The path of model for evaluation',
+                        type=str,
+                        default=None)
+    parser.add_argument('--save_dir',
+                        dest='save_dir',
+                        help='The directory for saving the inference results',
+                        type=str,
@@ -90,27 +81,27 @@ def parse_args():

 def mkdir(path):
-    sub_dir = osp.dirname(path)
-    if not osp.exists(sub_dir):
+    sub_dir = os.path.dirname(path)
+    if not os.path.exists(sub_dir):
         os.makedirs(sub_dir)


 def infer(model, data_dir=None, test_list=None, model_dir=None,
           transforms=None):
-    ckpt_path = osp.join(model_dir, 'model')
+    ckpt_path = os.path.join(model_dir, 'model')
     para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
     model.set_dict(para_state_dict)
     model.eval()

-    added_saved_dir = osp.join(args.save_dir, 'added')
-    pred_saved_dir = osp.join(args.save_dir, 'prediction')
+    added_saved_dir = os.path.join(args.save_dir, 'added')
+    pred_saved_dir = os.path.join(args.save_dir, 'prediction')
     logging.info("Start to predict...")
     with open(test_list, 'r') as f:
         files = f.readlines()
         for file in tqdm.tqdm(files):
             file = file.strip()
-            im_file = osp.join(data_dir, file)
+            im_file = os.path.join(data_dir, file)
             im, im_info, _ = transforms(im_file)
             im = np.expand_dims(im, axis=0)
             im = to_variable(im)
@@ -129,25 +120,25 @@ def infer(model, data_dir=None, test_list=None, model_dir=None,
             # save added image
             added_image = utils.visualize(im_file, pred, weight=0.6)
-            added_image_path = osp.join(added_saved_dir, file)
+            added_image_path = os.path.join(added_saved_dir, file)
             mkdir(added_image_path)
             cv2.imwrite(added_image_path, added_image)
             # save prediction
             pred_im = utils.visualize(im_file, pred, weight=0.0)
-            pred_saved_path = osp.join(pred_saved_dir, file)
+            pred_saved_path = os.path.join(pred_saved_dir, file)
             mkdir(pred_saved_path)
             cv2.imwrite(pred_saved_path, pred_im)


 def main(args):
-    test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-    if args.model_name == 'UNet':
-        model = models.UNet(num_classes=args.num_classes)
-    infer(
-        model,
-        data_dir=args.data_dir,
-        test_list=args.test_list,
-        model_dir=args.model_dir,
+    with fluid.dygraph.guard(places):
+        test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes)
+        infer(model,
+              data_dir=args.data_dir,
+              test_list=args.test_list,
+              model_dir=args.model_dir,
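utils.visualize is not part of this commit; judging only by the two call sites above, weight=0.6 overlays the prediction on the source image and weight=0.0 returns the bare color mask. A hypothetical sketch of such a blend (function body, names, and color map are assumptions, not the repo's code):

import cv2
import numpy as np

def visualize(im_file, pred, weight=0.6):
    # Hypothetical blend: weight scales the original image, (1 - weight)
    # scales a colorized label map. Assumes pred matches the image's HxW.
    im = cv2.imread(im_file)
    rng = np.random.RandomState(0)
    color_map = rng.randint(0, 256, (256, 3), dtype=np.uint8)
    colored = color_map[pred.astype(np.uint8)]
    return cv2.addWeighted(im, weight, colored, 1 - weight, 0)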
@@ -161,5 +152,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
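The structural change in this file is easy to miss among the reflows: fluid.dygraph.guard used to wrap the main(args) call at module level and now wraps the body of main() itself, so model construction, weight loading, and inference all run inside the dygraph context. A minimal sketch of the new shape (Paddle 1.x fluid API; the body is elided):

import paddle.fluid as fluid

def main(args):
    # everything that touches the model now lives inside the guard
    with fluid.dygraph.guard(places):
        pass  # build model, load weights, run inference

if __name__ == '__main__':
    args = None                # placeholder for parse_args() in the real script
    places = fluid.CPUPlace()  # or fluid.CUDAPlace(0) on GPU
    main(args)                 # the call site no longer opens the guard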
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
@@ -33,84 +32,65 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for traing, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for traing, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--train_list',
-        dest='train_list',
-        help='Train list file of dataset',
-        type=str)
-    parser.add_argument(
-        '--val_list',
-        dest='val_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--ingore_index',
-        dest='ignore_index',
-        help=
-        'The pixel equaling ignore_index will not be computed during training',
-        type=int,
-        default=255)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--train_list',
+                        dest='train_list',
+                        help='Train list file of dataset',
+                        type=str)
+    parser.add_argument('--val_list',
+                        dest='val_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of training
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number epochs for training',
-        type=int,
-        default=100)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--learning_rate',
-        dest='learning_rate',
-        help='Learning rate',
-        type=float,
-        default=0.01)
-    parser.add_argument(
-        '--pretrained_model',
-        dest='pretrained_model',
-        help='The path of pretrianed weight',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for save a model snapshot',
-        type=int,
-        default=5)
-    parser.add_argument(
-        '--save_dir',
-        dest='save_dir',
-        help='The directory for saving the model snapshot',
-        type=str,
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--num_epochs',
+                        dest='num_epochs',
+                        help='Number epochs for training',
+                        type=int,
+                        default=100)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--learning_rate',
+                        dest='learning_rate',
+                        help='Learning rate',
+                        type=float,
+                        default=0.01)
+    parser.add_argument('--pretrained_model',
+                        dest='pretrained_model',
+                        help='The path of pretrianed weight',
+                        type=str,
+                        default=None)
+    parser.add_argument('--save_interval_epochs',
+                        dest='save_interval_epochs',
+                        help='The interval epochs for save a model snapshot',
+                        type=int,
+                        default=5)
+    parser.add_argument('--save_dir',
+                        dest='save_dir',
+                        help='The directory for saving the model snapshot',
+                        type=str,
@@ -129,15 +109,15 @@ def train(model,
           pretrained_model=None,
           save_interval_epochs=1,
           num_classes=None):
-    if not osp.isdir(save_dir):
-        if osp.exists(save_dir):
+    if not os.path.isdir(save_dir):
+        if os.path.exists(save_dir):
             os.remove(save_dir)
         os.makedirs(save_dir)

     load_pretrained_model(model, pretrained_model)

-    data_generator = train_dataset.generator(
-        batch_size=batch_size, drop_last=True)
+    data_generator = train_dataset.generator(batch_size=batch_size,
+                                             drop_last=True)
     num_steps_each_epoch = train_dataset.num_samples // args.batch_size
     for epoch in range(num_epochs):
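A quick check on why drop_last=True pairs with the floor division above: the generator yields exactly as many batches as the step count expects.

num_samples, batch_size = 103, 4
num_steps_each_epoch = num_samples // batch_size   # 25
# the leftover 103 - 25 * 4 = 3 samples form a partial batch,
# which drop_last=True discards, so steps and batches agree
assert num_steps_each_epoch == 25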
@@ -156,16 +136,16 @@ def train(model,
         if (
                 epoch + 1
         ) % save_interval_epochs == 0 or num_steps_each_epoch == num_epochs - 1:
-            current_save_dir = osp.join(save_dir, "epoch_{}".format(epoch + 1))
-            if not osp.isdir(current_save_dir):
+            current_save_dir = os.path.join(save_dir,
+                                            "epoch_{}".format(epoch + 1))
+            if not os.path.isdir(current_save_dir):
                 os.makedirs(current_save_dir)
             fluid.save_dygraph(model.state_dict(),
-                               osp.join(current_save_dir, 'model'))
+                               os.path.join(current_save_dir, 'model'))

             if eval_dataset is not None:
                 model.eval()
-                evaluate(
-                    model,
-                    eval_dataset,
-                    model_dir=current_save_dir,
-                    num_classes=num_classes,
+                evaluate(model,
+                         eval_dataset,
+                         model_dir=current_save_dir,
+                         num_classes=num_classes,
@@ -176,13 +156,14 @@ def train(model,


 def main(args):
-    # Creat dataset reader
-    train_transforms = T.Compose(
-        [T.Resize(args.input_size),
-         T.RandomHorizontalFlip(),
-         T.Normalize()])
-    train_dataset = Dataset(
-        data_dir=args.data_dir,
-        file_list=args.train_list,
-        transforms=train_transforms,
-        num_workers='auto',
+    with fluid.dygraph.guard(places):
+        # Creat dataset reader
+        train_transforms = T.Compose([
+            T.Resize(args.input_size),
+            T.RandomHorizontalFlip(),
+            T.Normalize()
+        ])
+        train_dataset = Dataset(data_dir=args.data_dir,
+                                file_list=args.train_list,
+                                transforms=train_transforms,
+                                num_workers='auto',
@@ -190,9 +171,10 @@ def main(args):
-        parallel_method='thread',
-        shuffle=True)
-    if args.val_list is not None:
-        eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-        eval_dataset = Dataset(
-            data_dir=args.data_dir,
-            file_list=args.val_list,
-            transforms=eval_transforms,
-            num_workers='auto',
+                                parallel_method='thread',
+                                shuffle=True)
+        if args.val_list is not None:
+            eval_transforms = T.Compose(
+                [T.Resize(args.input_size),
+                 T.Normalize()])
+            eval_dataset = Dataset(data_dir=args.data_dir,
+                                   file_list=args.val_list,
+                                   transforms=eval_transforms,
+                                   num_workers='auto',
@@ -201,22 +183,22 @@ def main(args):
-            shuffle=False)
-    if args.model_name == 'UNet':
-        model = models.UNet(
-            num_classes=args.num_classes, ignore_index=args.ignore_index)
-    # Creat optimizer
-    num_steps_each_epoch = train_dataset.num_samples // args.batch_size
-    decay_step = args.num_epochs * num_steps_each_epoch
-    lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
-    optimizer = fluid.optimizer.Momentum(
-        lr_decay,
-        momentum=0.9,
-        parameter_list=model.parameters(),
-        regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
-    train(
-        model,
-        train_dataset,
-        eval_dataset,
-        optimizer,
+                                   shuffle=False)
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes, ignore_index=255)
+        # Creat optimizer
+        num_steps_each_epoch = train_dataset.num_samples // args.batch_size
+        decay_step = args.num_epochs * num_steps_each_epoch
+        lr_decay = fluid.layers.polynomial_decay(args.learning_rate,
+                                                 decay_step,
+                                                 end_learning_rate=0,
+                                                 power=0.9)
+        optimizer = fluid.optimizer.Momentum(
+            lr_decay,
+            momentum=0.9,
+            parameter_list=model.parameters(),
+            regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
+        train(model,
+              train_dataset,
+              eval_dataset,
+              optimizer,
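For reference, fluid.layers.polynomial_decay (as documented for Paddle 1.x, with the default cycle=False) anneals the rate as lr(t) = (lr0 - end_lr) * (1 - min(t, decay_step)/decay_step)^power + end_lr, so with the values above it falls from 0.01 to 0 over exactly num_epochs * steps_per_epoch steps. A plain-Python rendering (decay_step=10000 is an illustrative value, not from this commit):

def poly_lr(step, base_lr=0.01, decay_step=10000, end_lr=0.0, power=0.9):
    # documented behavior of fluid.layers.polynomial_decay with cycle=False
    step = min(step, decay_step)
    return (base_lr - end_lr) * (1 - step / decay_step) ** power + end_lr

print(poly_lr(0))        # 0.01 at the first step
print(poly_lr(5000))     # ~0.0054 at the midpoint of the schedule
print(poly_lr(10000))    # 0.0 at the final step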
@@ -235,5 +217,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
@@ -13,7 +13,6 @@
 # limitations under the License.
 import os
-import os.path as osp
 import numpy as np
 import math
 import cv2
@@ -59,8 +58,8 @@ def get_environ_info():

 def load_pretrained_model(model, pretrained_model):
     logging.info('Load pretrained model!')
     if pretrained_model is not None:
-        if osp.exists(pretrained_model):
-            ckpt_path = osp.join(pretrained_model, 'model')
+        if os.path.exists(pretrained_model):
+            ckpt_path = os.path.join(pretrained_model, 'model')
             para_state_dict, _ = fluid.load_dygraph(ckpt_path)
             model_state_dict = model.state_dict()
             keys = model_state_dict.keys()
......
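The remainder of load_pretrained_model is collapsed in this diff. A common shape for the elided part (an assumption for illustration, not the repo's verbatim code) filters the loaded dict down to keys whose shapes match the model before calling set_dict:

def filter_matching(para_state_dict, model_state_dict):
    # keep only parameters that exist in the model with the same shape
    # (hypothetical helper; the real function body is not shown above)
    matched = {}
    for k in model_state_dict.keys():
        if k in para_state_dict and \
                tuple(para_state_dict[k].shape) == tuple(model_state_dict[k].shape):
            matched[k] = para_state_dict[k]
    return matched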
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 import math
 from paddle.fluid.dygraph.base import to_variable
@@ -33,55 +32,41 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for traing, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for traing, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--val_list',
-        dest='val_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--ingore_index',
-        dest='ignore_index',
-        help=
-        'The pixel equaling ignore_index will not be computed during evaluation',
-        type=int,
-        default=255)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--val_list',
+                        dest='val_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of evaluate
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--model_dir',
-        dest='model_dir',
-        help='The path of model for evaluation',
-        type=str,
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--model_dir',
+                        dest='model_dir',
+                        help='The path of model for evaluation',
+                        type=str,
@@ -97,13 +82,13 @@ def evaluate(model,
              batch_size=2,
              ignore_index=255,
              epoch_id=None):
-    ckpt_path = osp.join(model_dir, 'model')
+    ckpt_path = os.path.join(model_dir, 'model')
     para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
     model.set_dict(para_state_dict)
     model.eval()

-    data_generator = eval_dataset.generator(
-        batch_size=batch_size, drop_last=True)
+    data_generator = eval_dataset.generator(batch_size=batch_size,
+                                            drop_last=True)
     total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
     conf_mat = ConfusionMatrix(num_classes, streaming=True)
@@ -135,9 +120,9 @@ def evaluate(model,


 def main(args):
-    eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-    eval_dataset = Dataset(
-        data_dir=args.data_dir,
-        file_list=args.val_list,
-        transforms=eval_transforms,
-        num_workers='auto',
+    with fluid.dygraph.guard(places):
+        eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
+        eval_dataset = Dataset(data_dir=args.data_dir,
+                               file_list=args.val_list,
+                               transforms=eval_transforms,
+                               num_workers='auto',
@@ -148,14 +133,11 @@ def main(args):
-    if args.model_name == 'UNet':
-        model = models.UNet(num_classes=args.num_classes)
-    evaluate(
-        model,
-        eval_dataset,
-        model_dir=args.model_dir,
-        num_classes=args.num_classes,
-        batch_size=args.batch_size,
-        ignore_index=args.ignore_index,
-    )
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes)
+        evaluate(model,
+                 eval_dataset,
+                 model_dir=args.model_dir,
+                 num_classes=args.num_classes,
+                 batch_size=args.batch_size)


 if __name__ == '__main__':
@@ -165,5 +147,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
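ConfusionMatrix(num_classes, streaming=True) comes from the repo's utils and is not shown in this commit. A hedged sketch of what a streaming confusion matrix for segmentation typically accumulates (class name and methods here are illustrative, not the repo's API):

import numpy as np

class StreamingConfusionMatrix:
    def __init__(self, num_classes):
        self.n = num_classes
        self.mat = np.zeros((num_classes, num_classes), dtype=np.int64)

    def update(self, pred, label, ignore_index=255):
        # rows index the ground-truth class, columns the predicted class;
        # pixels labeled ignore_index are excluded from the counts
        mask = label != ignore_index
        idx = self.n * label[mask].astype(int) + pred[mask].astype(int)
        self.mat += np.bincount(idx, minlength=self.n ** 2).reshape(self.n, self.n)

    def miou(self):
        inter = np.diag(self.mat)
        union = self.mat.sum(0) + self.mat.sum(1) - inter
        iou = inter / np.where(union == 0, np.nan, union)
        return np.nanmean(iou)

cm = StreamingConfusionMatrix(num_classes=2)
cm.update(np.array([[0, 1], [1, 1]]), np.array([[0, 1], [255, 1]]))
print(cm.miou())  # 1.0: every non-ignored pixel is classified correctly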