Commit 84426956, authored by chenguowei01

add evaluation

Parent 29a5e832
......@@ -43,18 +43,18 @@ class UNet(fluid.dygraph.Layer):
         self.get_logit = GetLogit(64, num_classes)
         self.ignore_index = ignore_index

-    def forward(self, x, label, mode='train'):
+    def forward(self, x, label=None, mode='train'):
         encode_data, short_cuts = self.encode(x)
         decode_data = self.decode(encode_data, short_cuts)
         logit = self.get_logit(decode_data)
         if mode == 'train':
             return self._get_loss(logit, label)
         else:
-            logit = fluid.layers.softmax(logit, axis=1)
-            logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
-            pred = fluid.layers.argmax(logit, axis=3)
+            score_map = fluid.layers.softmax(logit, axis=1)
+            score_map = fluid.layers.transpose(score_map, [0, 2, 3, 1])
+            pred = fluid.layers.argmax(score_map, axis=3)
             pred = fluid.layers.unsqueeze(pred, axes=[3])
-            return pred, logit
+            return pred, score_map

     def _get_loss(self, logit, label):
         mask = label != self.ignore_index
......
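In eval mode, forward() now returns both the hard prediction and the softmax score map; renaming the softmax output from logit to score_map makes it explicit that the second return value holds per-pixel class probabilities rather than raw logits. A rough NumPy sketch of the shape flow (illustrative only, not the actual Paddle ops):

import numpy as np

# hypothetical logit batch: N=1 image, C=2 classes, H=W=4
logit = np.random.randn(1, 2, 4, 4).astype('float32')

# softmax over the channel axis gives per-pixel class probabilities
e = np.exp(logit - logit.max(axis=1, keepdims=True))
score_map = e / e.sum(axis=1, keepdims=True)  # (1, 2, 4, 4)

# NCHW -> NHWC, mirroring transpose(score_map, [0, 2, 3, 1])
score_map = score_map.transpose(0, 2, 3, 1)   # (1, 4, 4, 2)

# argmax over the last axis picks the predicted label per pixel
pred = score_map.argmax(axis=3)               # (1, 4, 4)

# keep an explicit channel dimension, mirroring unsqueeze(pred, axes=[3])
pred = np.expand_dims(pred, axis=3)           # (1, 4, 4, 1)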
......@@ -25,6 +25,7 @@ import transforms as T
 import models
 import utils.logging as logging
 from utils import get_environ_info
+from val import evaluate


 def parse_args():
......@@ -61,6 +62,13 @@
         help='Number of classes',
         type=int,
         default=2)
+    parser.add_argument(
+        '--ignore_index',
+        dest='ignore_index',
+        help=
+        'Pixels equal to ignore_index are excluded from the loss during training',
+        type=int,
+        default=255)

     # params of training
     parser.add_argument(
......@@ -118,7 +126,8 @@ def train(model,
           num_epochs=100,
           batch_size=2,
           pretrained_model=None,
-          save_interval_epochs=1):
+          save_interval_epochs=1,
+          num_classes=None):
     if not osp.isdir(save_dir):
         if osp.exists(save_dir):
             os.remove(save_dir)
......@@ -150,10 +159,17 @@
             fluid.save_dygraph(model.state_dict(),
                                osp.join(current_save_dir, 'model'))
-            # if eval_dataset is not None:
-            # model.eval()
-            # evaluate(eval_dataset, batch_size=train_batch_size)
-            # model.train()
+            if eval_dataset is not None:
+                model.eval()
+                evaluate(
+                    model,
+                    eval_dataset,
+                    model_dir=current_save_dir,
+                    num_classes=num_classes,
+                    batch_size=batch_size,
+                    ignore_index=model.ignore_index,
+                    epoch_id=epoch + 1)
+                model.train()


 def arrange_transform(transforms, mode='train'):
......@@ -181,7 +197,7 @@ def main(args):
         shuffle=True)
     if args.val_list is not None:
         eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
-        arrange_transform(train_transforms, mode='eval')
+        arrange_transform(eval_transforms, mode='eval')
         eval_dataset = Dataset(
             data_dir=args.data_dir,
             file_list=args.val_list,
......@@ -192,7 +208,8 @@
             shuffle=False)

     if args.model_name == 'UNet':
-        model = models.UNet(num_classes=args.num_classes)
+        model = models.UNet(
+            num_classes=args.num_classes, ignore_index=args.ignore_index)

     # Create optimizer
     num_steps_each_epoch = train_dataset.num_samples // args.batch_size
......@@ -214,7 +231,8 @@
         num_epochs=args.num_epochs,
         batch_size=args.batch_size,
         pretrained_model=args.pretrained_model,
-        save_interval_epochs=args.save_interval_epochs)
+        save_interval_epochs=args.save_interval_epochs,
+        num_classes=args.num_classes)


 if __name__ == '__main__':
......
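With these changes, train.py evaluates the model on the validation set right after each checkpoint is saved. A typical invocation might look like the sketch below; apart from --num_classes and --ignore_index, which appear in the diff, the remaining flag names are assumed to follow the argument destinations used in train.py, and the paths are placeholders.

python train.py \
    --model_name UNet \
    --data_dir /path/to/dataset \
    --val_list val_list.txt \
    --num_classes 2 \
    --ignore_index 255 \
    --batch_size 2 \
    --num_epochs 100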
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import os.path as osp
import math
from paddle.fluid.dygraph.base import to_variable
import numpy as np
import paddle.fluid as fluid
from datasets.dataset import Dataset
import transforms as T
import models
import utils.logging as logging
from utils import get_environ_info
from utils import ConfusionMatrix


def parse_args():
    parser = argparse.ArgumentParser(description='Model evaluation')
    # params of model
    parser.add_argument(
        '--model_name',
        dest='model_name',
        help="Model type for evaluation, which is one of ('UNet')",
        type=str,
        default='UNet')

    # params of dataset
    parser.add_argument(
        '--data_dir',
        dest='data_dir',
        help='The root directory of the dataset',
        type=str)
    parser.add_argument(
        '--val_list',
        dest='val_list',
        help='Val list file of the dataset',
        type=str,
        default=None)
    parser.add_argument(
        '--num_classes',
        dest='num_classes',
        help='Number of classes',
        type=int,
        default=2)
    parser.add_argument(
        '--ignore_index',
        dest='ignore_index',
        help='Pixels equal to ignore_index are excluded from evaluation',
        type=int,
        default=255)

    # params of evaluate
    parser.add_argument(
        '--input_size',
        dest='input_size',
        help='The image size for net inputs.',
        nargs=2,
        default=[512, 512],
        type=int)
    parser.add_argument(
        '--batch_size',
        dest='batch_size',
        help='Mini batch size',
        type=int,
        default=2)
    parser.add_argument(
        '--model_dir',
        dest='model_dir',
        help='The path of the model to evaluate',
        type=str,
        default=None)
    return parser.parse_args()


def evaluate(model,
             eval_dataset=None,
             model_dir=None,
             num_classes=None,
             batch_size=2,
             ignore_index=255,
             epoch_id=None):
    # Load the saved parameters and switch the model to inference mode.
    ckpt_path = osp.join(model_dir, 'model')
    para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
    model.set_dict(para_state_dict)
    model.eval()

    data_generator = eval_dataset.generator(
        batch_size=batch_size, drop_last=True)
    total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
    conf_mat = ConfusionMatrix(num_classes, streaming=True)
    logging.info(
        "Start evaluating (total_samples={}, total_steps={})...".format(
            eval_dataset.num_samples, total_steps))
    for step, data in enumerate(data_generator()):
        images = np.array([d[0] for d in data])
        labels = np.array([d[1] for d in data]).astype('int64')
        images = to_variable(images)
        pred, _ = model(images, labels, mode='eval')
        pred = pred.numpy()

        # Accumulate the streaming confusion matrix, skipping ignored pixels.
        mask = labels != ignore_index
        conf_mat.calculate(pred=pred, label=labels, ignore=mask)
        _, iou = conf_mat.mean_iou()
        logging.info("[EVAL] Epoch={}, Step={}/{}, iou={}".format(
            epoch_id, step + 1, total_steps, iou))

    category_iou, miou = conf_mat.mean_iou()
    category_acc, macc = conf_mat.accuracy()
    logging.info("[EVAL] #image={} acc={:.4f} IoU={:.4f}".format(
        eval_dataset.num_samples, macc, miou))
    logging.info("[EVAL] Category IoU: " + str(category_iou))
    logging.info("[EVAL] Category Acc: " + str(category_acc))
    logging.info("[EVAL] Kappa: {:.4f}".format(conf_mat.kappa()))


def arrange_transform(transforms, mode='train'):
    arrange_transform = T.ArrangeSegmenter
    if type(transforms.transforms[-1]).__name__.startswith('Arrange'):
        transforms.transforms[-1] = arrange_transform(mode=mode)
    else:
        transforms.transforms.append(arrange_transform(mode=mode))


def main(args):
    eval_transforms = T.Compose([T.Resize(args.input_size), T.Normalize()])
    arrange_transform(eval_transforms, mode='eval')
    eval_dataset = Dataset(
        data_dir=args.data_dir,
        file_list=args.val_list,
        transforms=eval_transforms,
        num_workers='auto',
        buffer_size=100,
        parallel_method='thread',
        shuffle=False)

    if args.model_name == 'UNet':
        model = models.UNet(num_classes=args.num_classes)

    evaluate(
        model,
        eval_dataset,
        model_dir=args.model_dir,
        num_classes=args.num_classes,
        batch_size=args.batch_size,
        ignore_index=args.ignore_index)


if __name__ == '__main__':
    args = parse_args()
    env_info = get_environ_info()
    if env_info['place'] == 'cpu':
        places = fluid.CPUPlace()
    else:
        places = fluid.CUDAPlace(0)
    with fluid.dygraph.guard(places):
        main(args)
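The metrics reported by evaluate() come from utils.ConfusionMatrix, which accumulates a streaming confusion matrix over all batches and then derives per-category IoU, mean IoU, accuracy, and kappa. As a rough illustration of where those numbers come from, here is a minimal NumPy sketch; metrics_from_confusion_matrix is a hypothetical helper for illustration, not the actual utils.ConfusionMatrix API:

import numpy as np


def metrics_from_confusion_matrix(cm):
    # cm[i, j] counts pixels whose ground-truth class is i and predicted class is j.
    tp = np.diag(cm).astype('float64')                # correctly classified pixels per class
    fp = cm.sum(axis=0) - tp                          # predicted as the class but actually another
    fn = cm.sum(axis=1) - tp                          # belonging to the class but predicted as another
    category_iou = tp / np.maximum(tp + fp + fn, 1)   # per-class intersection over union
    miou = category_iou.mean()                        # mean IoU over all classes
    category_acc = tp / np.maximum(tp + fn, 1)        # per-class accuracy (recall)
    macc = tp.sum() / max(cm.sum(), 1)                # overall pixel accuracy
    return category_iou, miou, category_acc, macc


# Toy 2-class example: 95 of 100 pixels classified correctly.
cm = np.array([[80, 5],
               [0, 15]], dtype='int64')
print(metrics_from_confusion_matrix(cm))

Standalone evaluation would then be run with something like python val.py --model_name UNet --data_dir /path/to/dataset --val_list val_list.txt --model_dir /path/to/saved/epoch_dir --num_classes 2 --batch_size 2, where the paths are placeholders for the actual dataset and checkpoint directories.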