Commit 2f1b1af5 authored by chenguowei01

update according to review comments

Parent b4ac4e82
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os.path as osp
+import os
 from threading import Thread
 import multiprocessing
 import collections
@@ -170,8 +170,8 @@ def multiprocess_reader(mapper,
         index = i % num_workers
         total_samples[index].append(sample)
     for i in range(num_workers):
-        p = multiprocessing.Process(
-            target=_read_into_queue, args=(total_samples[i], mapper, queue))
+        p = multiprocessing.Process(target=_read_into_queue,
+                                    args=(total_samples[i], mapper, queue))
         p.start()

     finish_num = 0
@@ -230,18 +230,18 @@ class Dataset:
                 items = line.strip().split()
                 if not is_pic(items[0]):
                     continue
-                full_path_im = osp.join(data_dir, items[0])
-                full_path_label = osp.join(data_dir, items[1])
-                if not osp.exists(full_path_im):
+                full_path_im = os.path.join(data_dir, items[0])
+                full_path_label = os.path.join(data_dir, items[1])
+                if not os.path.exists(full_path_im):
                     raise IOError(
                         'The image file {} does not exist!'.format(full_path_im))
-                if not osp.exists(full_path_label):
+                if not os.path.exists(full_path_label):
                     raise IOError('The image file {} does not exist!'.format(
                         full_path_label))
                 self.file_list.append([full_path_im, full_path_label])
         self.num_samples = len(self.file_list)
-        logging.info("{} samples in file {}".format(
-            len(self.file_list), file_list))
+        logging.info("{} samples in file {}".format(len(self.file_list),
+                                                    file_list))

     def iterator(self):
         self._epoch += 1
@@ -266,10 +266,9 @@ class Dataset:
             )
         else:
             parallel_reader = multiprocess_reader
-        return parallel_reader(
-            self.transforms,
-            self.iterator,
-            num_workers=self.num_workers,
-            buffer_size=self.buffer_size,
-            batch_size=batch_size,
-            drop_last=drop_last)
+        return parallel_reader(self.transforms,
+                               self.iterator,
+                               num_workers=self.num_workers,
+                               buffer_size=self.buffer_size,
+                               batch_size=batch_size,
+                               drop_last=drop_last)
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
@@ -33,84 +32,76 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for training, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for training, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--test_list',
-        dest='test_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--test_list',
+                        dest='test_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of prediction
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--model_dir',
-        dest='model_dir',
-        help='The path of model for evaluation',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--save_dir',
-        dest='save_dir',
-        help='The directory for saving the inference results',
-        type=str,
-        default='./output/result')
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--model_dir',
+                        dest='model_dir',
+                        help='The path of model for evaluation',
+                        type=str,
+                        default=None)
+    parser.add_argument('--save_dir',
+                        dest='save_dir',
+                        help='The directory for saving the inference results',
+                        type=str,
+                        default='./output/result')
     return parser.parse_args()


 def mkdir(path):
-    sub_dir = osp.dirname(path)
-    if not osp.exists(sub_dir):
+    sub_dir = os.path.dirname(path)
+    if not os.path.exists(sub_dir):
         os.makedirs(sub_dir)


 def infer(model, data_dir=None, test_list=None, model_dir=None,
           transforms=None):
-    ckpt_path = osp.join(model_dir, 'model')
+    ckpt_path = os.path.join(model_dir, 'model')
     para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
     model.set_dict(para_state_dict)
     model.eval()

-    added_saved_dir = osp.join(args.save_dir, 'added')
-    pred_saved_dir = osp.join(args.save_dir, 'prediction')
+    added_saved_dir = os.path.join(args.save_dir, 'added')
+    pred_saved_dir = os.path.join(args.save_dir, 'prediction')
     logging.info("Start to predict...")
     with open(test_list, 'r') as f:
         files = f.readlines()
         for file in tqdm.tqdm(files):
             file = file.strip()
-            im_file = osp.join(data_dir, file)
+            im_file = os.path.join(data_dir, file)
             im, im_info, _ = transforms(im_file)
             im = np.expand_dims(im, axis=0)
             im = to_variable(im)
@@ -129,29 +120,29 @@ def infer(model, data_dir=None, test_list=None, model_dir=None,
             # save added image
             added_image = utils.visualize(im_file, pred, weight=0.6)
-            added_image_path = osp.join(added_saved_dir, file)
+            added_image_path = os.path.join(added_saved_dir, file)
             mkdir(added_image_path)
             cv2.imwrite(added_image_path, added_image)

             # save prediction
             pred_im = utils.visualize(im_file, pred, weight=0.0)
-            pred_saved_path = osp.join(pred_saved_dir, file)
+            pred_saved_path = os.path.join(pred_saved_dir, file)
             mkdir(pred_saved_path)
             cv2.imwrite(pred_saved_path, pred_im)


 def main(args):
-    test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
+    with fluid.dygraph.guard(places):
+        test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-    if args.model_name == 'UNet':
-        model = models.UNet(num_classes=args.num_classes)
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes)
-    infer(
-        model,
-        data_dir=args.data_dir,
-        test_list=args.test_list,
-        model_dir=args.model_dir,
-        transforms=test_transforms)
+        infer(model,
+              data_dir=args.data_dir,
+              test_list=args.test_list,
+              model_dir=args.model_dir,
+              transforms=test_transforms)


 if __name__ == '__main__':
@@ -161,5 +152,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 from paddle.fluid.dygraph.base import to_variable
 import numpy as np
@@ -33,88 +32,69 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for training, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for training, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--train_list',
-        dest='train_list',
-        help='Train list file of dataset',
-        type=str)
-    parser.add_argument(
-        '--val_list',
-        dest='val_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--ingore_index',
-        dest='ignore_index',
-        help=
-        'The pixel equaling ignore_index will not be computed during training',
-        type=int,
-        default=255)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--train_list',
+                        dest='train_list',
+                        help='Train list file of dataset',
+                        type=str)
+    parser.add_argument('--val_list',
+                        dest='val_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of training
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--num_epochs',
-        dest='num_epochs',
-        help='Number of epochs for training',
-        type=int,
-        default=100)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--learning_rate',
-        dest='learning_rate',
-        help='Learning rate',
-        type=float,
-        default=0.01)
-    parser.add_argument(
-        '--pretrained_model',
-        dest='pretrained_model',
-        help='The path of pretrained weight',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--save_interval_epochs',
-        dest='save_interval_epochs',
-        help='The interval epochs for saving a model snapshot',
-        type=int,
-        default=5)
-    parser.add_argument(
-        '--save_dir',
-        dest='save_dir',
-        help='The directory for saving the model snapshot',
-        type=str,
-        default='./output')
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--num_epochs',
+                        dest='num_epochs',
+                        help='Number of epochs for training',
+                        type=int,
+                        default=100)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--learning_rate',
+                        dest='learning_rate',
+                        help='Learning rate',
+                        type=float,
+                        default=0.01)
+    parser.add_argument('--pretrained_model',
+                        dest='pretrained_model',
+                        help='The path of pretrained weight',
+                        type=str,
+                        default=None)
+    parser.add_argument('--save_interval_epochs',
+                        dest='save_interval_epochs',
+                        help='The interval epochs for saving a model snapshot',
+                        type=int,
+                        default=5)
+    parser.add_argument('--save_dir',
+                        dest='save_dir',
+                        help='The directory for saving the model snapshot',
+                        type=str,
+                        default='./output')
     return parser.parse_args()
@@ -129,15 +109,15 @@ def train(model,
           pretrained_model=None,
           save_interval_epochs=1,
           num_classes=None):
-    if not osp.isdir(save_dir):
-        if osp.exists(save_dir):
+    if not os.path.isdir(save_dir):
+        if os.path.exists(save_dir):
             os.remove(save_dir)
         os.makedirs(save_dir)

     load_pretrained_model(model, pretrained_model)

-    data_generator = train_dataset.generator(
-        batch_size=batch_size, drop_last=True)
+    data_generator = train_dataset.generator(batch_size=batch_size,
+                                             drop_last=True)
     num_steps_each_epoch = train_dataset.num_samples // args.batch_size

     for epoch in range(num_epochs):
@@ -156,76 +136,78 @@ def train(model,
         if (
                 epoch + 1
         ) % save_interval_epochs == 0 or num_steps_each_epoch == num_epochs - 1:
-            current_save_dir = osp.join(save_dir, "epoch_{}".format(epoch + 1))
-            if not osp.isdir(current_save_dir):
+            current_save_dir = os.path.join(save_dir,
+                                            "epoch_{}".format(epoch + 1))
+            if not os.path.isdir(current_save_dir):
                 os.makedirs(current_save_dir)
             fluid.save_dygraph(model.state_dict(),
-                               osp.join(current_save_dir, 'model'))
+                               os.path.join(current_save_dir, 'model'))
             if eval_dataset is not None:
                 model.eval()
-                evaluate(
-                    model,
-                    eval_dataset,
-                    model_dir=current_save_dir,
-                    num_classes=num_classes,
-                    batch_size=batch_size,
-                    ignore_index=model.ignore_index,
-                    epoch_id=epoch + 1)
+                evaluate(model,
+                         eval_dataset,
+                         model_dir=current_save_dir,
+                         num_classes=num_classes,
+                         batch_size=batch_size,
+                         ignore_index=model.ignore_index,
+                         epoch_id=epoch + 1)
                 model.train()


 def main(args):
-    # Create dataset reader
-    train_transforms = T.Compose(
-        [T.Resize(args.input_size),
-         T.RandomHorizontalFlip(),
-         T.Normalize()])
-    train_dataset = Dataset(
-        data_dir=args.data_dir,
-        file_list=args.train_list,
-        transforms=train_transforms,
-        num_workers='auto',
-        buffer_size=100,
-        parallel_method='thread',
-        shuffle=True)
-    if args.val_list is not None:
-        eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-        eval_dataset = Dataset(
-            data_dir=args.data_dir,
-            file_list=args.val_list,
-            transforms=eval_transforms,
-            num_workers='auto',
-            buffer_size=100,
-            parallel_method='thread',
-            shuffle=False)
-    if args.model_name == 'UNet':
-        model = models.UNet(
-            num_classes=args.num_classes, ignore_index=args.ignore_index)
-    # Create optimizer
-    num_steps_each_epoch = train_dataset.num_samples // args.batch_size
-    decay_step = args.num_epochs * num_steps_each_epoch
-    lr_decay = fluid.layers.polynomial_decay(
-        args.learning_rate, decay_step, end_learning_rate=0, power=0.9)
-    optimizer = fluid.optimizer.Momentum(
-        lr_decay,
-        momentum=0.9,
-        parameter_list=model.parameters(),
-        regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
-    train(
-        model,
-        train_dataset,
-        eval_dataset,
-        optimizer,
-        save_dir=args.save_dir,
-        num_epochs=args.num_epochs,
-        batch_size=args.batch_size,
-        pretrained_model=args.pretrained_model,
-        save_interval_epochs=args.save_interval_epochs,
-        num_classes=args.num_classes)
+    with fluid.dygraph.guard(places):
+        # Create dataset reader
+        train_transforms = T.Compose([
+            T.Resize(args.input_size),
+            T.RandomHorizontalFlip(),
+            T.Normalize()
+        ])
+        train_dataset = Dataset(data_dir=args.data_dir,
+                                file_list=args.train_list,
+                                transforms=train_transforms,
+                                num_workers='auto',
+                                buffer_size=100,
+                                parallel_method='thread',
+                                shuffle=True)
+        if args.val_list is not None:
+            eval_transforms = T.Compose(
+                [T.Resize(args.input_size),
+                 T.Normalize()])
+            eval_dataset = Dataset(data_dir=args.data_dir,
+                                   file_list=args.val_list,
+                                   transforms=eval_transforms,
+                                   num_workers='auto',
+                                   buffer_size=100,
+                                   parallel_method='thread',
+                                   shuffle=False)
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes, ignore_index=255)
+        # Create optimizer
+        num_steps_each_epoch = train_dataset.num_samples // args.batch_size
+        decay_step = args.num_epochs * num_steps_each_epoch
+        lr_decay = fluid.layers.polynomial_decay(args.learning_rate,
+                                                 decay_step,
+                                                 end_learning_rate=0,
+                                                 power=0.9)
+        optimizer = fluid.optimizer.Momentum(
+            lr_decay,
+            momentum=0.9,
+            parameter_list=model.parameters(),
+            regularization=fluid.regularizer.L2Decay(regularization_coeff=4e-5))
+        train(model,
+              train_dataset,
+              eval_dataset,
+              optimizer,
+              save_dir=args.save_dir,
+              num_epochs=args.num_epochs,
+              batch_size=args.batch_size,
+              pretrained_model=args.pretrained_model,
+              save_interval_epochs=args.save_interval_epochs,
+              num_classes=args.num_classes)


 if __name__ == '__main__':
@@ -235,5 +217,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)
@@ -13,7 +13,6 @@
 # limitations under the License.
 import os
-import os.path as osp
 import numpy as np
 import math
 import cv2
@@ -59,8 +58,8 @@ def get_environ_info():
 def load_pretrained_model(model, pretrained_model):
     logging.info('Load pretrained model!')
     if pretrained_model is not None:
-        if osp.exists(pretrained_model):
-            ckpt_path = osp.join(pretrained_model, 'model')
+        if os.path.exists(pretrained_model):
+            ckpt_path = os.path.join(pretrained_model, 'model')
             para_state_dict, _ = fluid.load_dygraph(ckpt_path)
             model_state_dict = model.state_dict()
             keys = model_state_dict.keys()
@@ -14,7 +14,6 @@
 import argparse
 import os
-import os.path as osp
 import math
 from paddle.fluid.dygraph.base import to_variable
@@ -33,59 +32,45 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Model training')
     # params of model
-    parser.add_argument(
-        '--model_name',
-        dest='model_name',
-        help="Model type for training, which is one of ('UNet')",
-        type=str,
-        default='UNet')
+    parser.add_argument('--model_name',
+                        dest='model_name',
+                        help="Model type for training, which is one of ('UNet')",
+                        type=str,
+                        default='UNet')
     # params of dataset
-    parser.add_argument(
-        '--data_dir',
-        dest='data_dir',
-        help='The root directory of dataset',
-        type=str)
-    parser.add_argument(
-        '--val_list',
-        dest='val_list',
-        help='Val list file of dataset',
-        type=str,
-        default=None)
-    parser.add_argument(
-        '--num_classes',
-        dest='num_classes',
-        help='Number of classes',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--ingore_index',
-        dest='ignore_index',
-        help=
-        'The pixel equaling ignore_index will not be computed during evaluation',
-        type=int,
-        default=255)
+    parser.add_argument('--data_dir',
+                        dest='data_dir',
+                        help='The root directory of dataset',
+                        type=str)
+    parser.add_argument('--val_list',
+                        dest='val_list',
+                        help='Val list file of dataset',
+                        type=str,
+                        default=None)
+    parser.add_argument('--num_classes',
+                        dest='num_classes',
+                        help='Number of classes',
+                        type=int,
+                        default=2)
     # params of evaluate
-    parser.add_argument(
-        "--input_size",
-        dest="input_size",
-        help="The image size for net inputs.",
-        nargs=2,
-        default=[512, 512],
-        type=int)
-    parser.add_argument(
-        '--batch_size',
-        dest='batch_size',
-        help='Mini batch size',
-        type=int,
-        default=2)
-    parser.add_argument(
-        '--model_dir',
-        dest='model_dir',
-        help='The path of model for evaluation',
-        type=str,
-        default=None)
+    parser.add_argument("--input_size",
+                        dest="input_size",
+                        help="The image size for net inputs.",
+                        nargs=2,
+                        default=[512, 512],
+                        type=int)
+    parser.add_argument('--batch_size',
+                        dest='batch_size',
+                        help='Mini batch size',
+                        type=int,
+                        default=2)
+    parser.add_argument('--model_dir',
+                        dest='model_dir',
+                        help='The path of model for evaluation',
+                        type=str,
+                        default=None)
     return parser.parse_args()
@@ -97,13 +82,13 @@ def evaluate(model,
              batch_size=2,
              ignore_index=255,
              epoch_id=None):
-    ckpt_path = osp.join(model_dir, 'model')
+    ckpt_path = os.path.join(model_dir, 'model')
     para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
     model.set_dict(para_state_dict)
     model.eval()

-    data_generator = eval_dataset.generator(
-        batch_size=batch_size, drop_last=True)
+    data_generator = eval_dataset.generator(batch_size=batch_size,
+                                            drop_last=True)
     total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
     conf_mat = ConfusionMatrix(num_classes, streaming=True)
@@ -135,27 +120,24 @@

 def main(args):
-    eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-    eval_dataset = Dataset(
-        data_dir=args.data_dir,
-        file_list=args.val_list,
-        transforms=eval_transforms,
-        num_workers='auto',
-        buffer_size=100,
-        parallel_method='thread',
-        shuffle=False)
-    if args.model_name == 'UNet':
-        model = models.UNet(num_classes=args.num_classes)
-    evaluate(
-        model,
-        eval_dataset,
-        model_dir=args.model_dir,
-        num_classes=args.num_classes,
-        batch_size=args.batch_size,
-        ignore_index=args.ignore_index,
-    )
+    with fluid.dygraph.guard(places):
+        eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
+        eval_dataset = Dataset(data_dir=args.data_dir,
+                               file_list=args.val_list,
+                               transforms=eval_transforms,
+                               num_workers='auto',
+                               buffer_size=100,
+                               parallel_method='thread',
+                               shuffle=False)
+        if args.model_name == 'UNet':
+            model = models.UNet(num_classes=args.num_classes)
+        evaluate(model,
+                 eval_dataset,
+                 model_dir=args.model_dir,
+                 num_classes=args.num_classes,
+                 batch_size=args.batch_size)


 if __name__ == '__main__':
@@ -165,5 +147,4 @@ if __name__ == '__main__':
         places = fluid.CPUPlace()
     else:
         places = fluid.CUDAPlace(0)
-    with fluid.dygraph.guard(places):
-        main(args)
+    main(args)