提交 e5cabbb1 编写于 作者: chenguowei01

update some

上级 30089840
......@@ -91,7 +91,7 @@ class OpticDiscSeg(Dataset):
elif self.mode == 'eval':
return im, label
if self.mode == 'test':
return im, im_info
return im, im_info, image_path
def __len__(self):
    """Return the number of samples in the dataset.

    Length is the number of entries in ``self.file_list`` (one image/label
    record per entry, built by the dataset constructor outside this view).
    """
    return len(self.file_list)
......@@ -18,9 +18,11 @@ import os
from paddle.fluid.dygraph.base import to_variable
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv
import cv2
import tqdm
from datasets import OpticDiscSeg
import transforms as T
import models
import utils
......@@ -32,50 +34,58 @@ def parse_args():
parser = argparse.ArgumentParser(description='Model training')
# params of model
parser.add_argument('--model_name',
dest='model_name',
help="Model type for traing, which is one of ('UNet')",
type=str,
default='UNet')
parser.add_argument(
'--model_name',
dest='model_name',
help="Model type for traing, which is one of ('UNet')",
type=str,
default='UNet')
# params of dataset
parser.add_argument('--data_dir',
dest='data_dir',
help='The root directory of dataset',
type=str)
parser.add_argument('--test_list',
dest='test_list',
help='Val list file of dataset',
type=str,
default=None)
parser.add_argument('--num_classes',
dest='num_classes',
help='Number of classes',
type=int,
default=2)
parser.add_argument(
'--data_dir',
dest='data_dir',
help='The root directory of dataset',
type=str)
parser.add_argument(
'--test_list',
dest='test_list',
help='Val list file of dataset',
type=str,
default=None)
parser.add_argument(
'--num_classes',
dest='num_classes',
help='Number of classes',
type=int,
default=2)
# params of prediction
parser.add_argument("--input_size",
dest="input_size",
help="The image size for net inputs.",
nargs=2,
default=[512, 512],
type=int)
parser.add_argument('--batch_size',
dest='batch_size',
help='Mini batch size',
type=int,
default=2)
parser.add_argument('--model_dir',
dest='model_dir',
help='The path of model for evaluation',
type=str,
default=None)
parser.add_argument('--save_dir',
dest='save_dir',
help='The directory for saving the inference results',
type=str,
default='./output/result')
parser.add_argument(
"--input_size",
dest="input_size",
help="The image size for net inputs.",
nargs=2,
default=[512, 512],
type=int)
parser.add_argument(
'--batch_size',
dest='batch_size',
help='Mini batch size',
type=int,
default=2)
parser.add_argument(
'--model_dir',
dest='model_dir',
help='The path of model for evaluation',
type=str,
default=None)
parser.add_argument(
'--save_dir',
dest='save_dir',
help='The directory for saving the inference results',
type=str,
default='./output/result')
return parser.parse_args()
......@@ -86,26 +96,19 @@ def mkdir(path):
os.makedirs(sub_dir)
def infer(model, data_dir=None, test_list=None, model_dir=None,
transforms=None):
def infer(model, test_dataset=None, model_dir=None, save_dir='output'):
ckpt_path = os.path.join(model_dir, 'model')
para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
model.set_dict(para_state_dict)
model.eval()
added_saved_dir = os.path.join(args.save_dir, 'added')
pred_saved_dir = os.path.join(args.save_dir, 'prediction')
added_saved_dir = os.path.join(save_dir, 'added')
pred_saved_dir = os.path.join(save_dir, 'prediction')
logging.info("Start to predict...")
with open(test_list, 'r') as f:
files = f.readlines()
for file in tqdm.tqdm(files):
file = file.strip()
im_file = os.path.join(data_dir, file)
im, im_info, _ = transforms(im_file)
im = np.expand_dims(im, axis=0)
for im, im_info, im_path in tqdm.tqdm(test_dataset):
im = im[np.newaxis, ...]
im = to_variable(im)
pred, _ = model(im, mode='test')
pred = pred.numpy()
pred = np.squeeze(pred).astype('uint8')
......@@ -118,38 +121,41 @@ def infer(model, data_dir=None, test_list=None, model_dir=None,
h, w = im_info[k][0], im_info[k][1]
pred = pred[0:h, 0:w]
im_file = im_path.replace(test_dataset.data_dir, '')
if im_file[0] == '/':
im_file = im_file[1:]
# save added image
added_image = utils.visualize(im_file, pred, weight=0.6)
added_image_path = os.path.join(added_saved_dir, file)
added_image = utils.visualize(im_path, pred, weight=0.6)
added_image_path = os.path.join(added_saved_dir, im_file)
mkdir(added_image_path)
cv2.imwrite(added_image_path, added_image)
# save prediction
pred_im = utils.visualize(im_file, pred, weight=0.0)
pred_saved_path = os.path.join(pred_saved_dir, file)
pred_im = utils.visualize(im_path, pred, weight=0.0)
pred_saved_path = os.path.join(pred_saved_dir, im_file)
mkdir(pred_saved_path)
cv2.imwrite(pred_saved_path, pred_im)
def main(args):
env_info = get_environ_info()
places = fluid.CUDAPlace(ParallelEnv().dev_id) \
if env_info['place'] == 'cuda' and fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
with fluid.dygraph.guard(places):
test_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
test_dataset = OpticDiscSeg(transforms=test_transforms, mode='test')
if args.model_name == 'UNet':
model = models.UNet(num_classes=args.num_classes)
infer(model,
data_dir=args.data_dir,
test_list=args.test_list,
model_dir=args.model_dir,
transforms=test_transforms)
infer(
model,
model_dir=args.model_dir,
test_dataset=test_dataset,
save_dir=args.save_dir)
if __name__ == '__main__':
args = parse_args()
env_info = get_environ_info()
if env_info['place'] == 'cpu':
places = fluid.CPUPlace()
else:
places = fluid.CUDAPlace(0)
main(args)
......@@ -116,6 +116,11 @@ def parse_args():
help='Num workers for data loader',
type=int,
default=0)
parser.add_argument(
'--do_eval',
dest='do_eval',
help='Eval while training',
action='store_true')
return parser.parse_args()
......@@ -183,7 +188,7 @@ def train(model,
"epoch_{}".format(epoch + 1))
if not os.path.isdir(current_save_dir):
os.makedirs(current_save_dir)
fluid.save_dygraph(model_parallel.state_dict(),
fluid.save_dygraph(model.state_dict(),
os.path.join(current_save_dir, 'model'))
if eval_dataset is not None:
......@@ -215,12 +220,11 @@ def main(args):
train_dataset = OpticDiscSeg(transforms=train_transforms, mode='train')
eval_dataset = None
if args.val_list is not None:
if args.do_eval:
eval_transforms = T.Compose(
[T.Resize(args.input_size),
T.Normalize()])
eval_dataset = OpticDiscSeg(
transforms=train_transforms, mode='eval')
eval_dataset = OpticDiscSeg(transforms=eval_transforms, mode='eval')
if args.model_name == 'UNet':
model = models.UNet(num_classes=args.num_classes, ignore_index=255)
......
......@@ -19,25 +19,26 @@ import math
from paddle.fluid.dygraph.base import to_variable
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.io import DataLoader
from paddle.fluid.dataloader import BatchSampler
from datasets import Dataset
from datasets import OpticDiscSeg
import transforms as T
import models
import utils.logging as logging
from utils import get_environ_info
from utils import ConfusionMatrix
from utils import DistributedBatchSampler
def parse_args():
parser = argparse.ArgumentParser(description='Model training')
parser = argparse.ArgumentParser(description='Model evaluation')
# params of model
parser.add_argument(
'--model_name',
dest='model_name',
help="Model type for traing, which is one of ('UNet')",
help="Model type for evaluation, which is one of ('UNet')",
type=str,
default='UNet')
......@@ -97,28 +98,28 @@ def evaluate(model,
model.set_dict(para_state_dict)
model.eval()
batch_sampler = DistributedBatchSampler(
eval_dataset, batch_size=batch_size, shuffle=True, drop_last=False)
batch_sampler = BatchSampler(
eval_dataset, batch_size=batch_size, shuffle=False, drop_last=False)
loader = DataLoader(
eval_dataset,
batch_sampler=batch_sampler,
places=places,
return_list=True,
)
total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
total_steps = math.ceil(len(eval_dataset) * 1.0 / batch_size)
conf_mat = ConfusionMatrix(num_classes, streaming=True)
logging.info(
"Start to evaluating(total_samples={}, total_steps={})...".format(
eval_dataset.num_samples, total_steps))
len(eval_dataset), total_steps))
for step, data in enumerate(loader):
images = data[0]
labels = data[1].astype('int64')
pred, _ = model(images, labels, mode='eval')
pred, _ = model(images, mode='eval')
pred = pred.numpy()
labels = labels.numpy()
mask = labels != ignore_index
conf_mat.calculate(pred=pred, label=labels, ignore=mask)
_, iou = conf_mat.mean_iou()
......@@ -128,7 +129,7 @@ def evaluate(model,
category_iou, miou = conf_mat.mean_iou()
category_acc, macc = conf_mat.accuracy()
logging.info("[EVAL] #image={} acc={:.4f} IoU={:.4f}".format(
eval_dataset.num_samples, macc, miou))
len(eval_dataset), macc, miou))
logging.info("[EVAL] Category IoU: " + str(category_iou))
logging.info("[EVAL] Category Acc: " + str(category_acc))
logging.info("[EVAL] Kappa:{:.4f} ".format(conf_mat.kappa()))
......@@ -136,20 +137,12 @@ def evaluate(model,
def main(args):
env_info = get_environ_info()
if env_info['place'] == 'cpu':
places = fluid.CPUPlace()
else:
places = fluid.CUDAPlace(0)
places = fluid.CUDAPlace(ParallelEnv().dev_id) \
if env_info['place'] == 'cuda' and fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
with fluid.dygraph.guard(places):
eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
eval_dataset = Dataset(
data_dir=args.data_dir,
file_list=args.val_list,
transforms=eval_transforms,
num_workers='auto',
buffer_size=100,
parallel_method='thread',
shuffle=False)
eval_dataset = OpticDiscSeg(transforms=eval_transforms, mode='eval')
if args.model_name == 'UNet':
model = models.UNet(num_classes=args.num_classes)
......@@ -157,6 +150,7 @@ def main(args):
evaluate(
model,
eval_dataset,
places=places,
model_dir=args.model_dir,
num_classes=args.num_classes,
batch_size=args.batch_size)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册