Commit a65ad557 authored by dengkaipeng

mv COCOMetric to yolov3/coco_metric

Parent 5ec5f453
@@ -16,13 +16,14 @@ from __future__ import absolute_import
 import six
 import abc
+import numpy as np
 import logging

 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
 logging.basicConfig(level=logging.INFO, format=FORMAT)
 logger = logging.getLogger(__name__)

-__all__ = ['Metric']
+__all__ = ['Metric', 'Accuracy']


 @six.add_metaclass(abc.ABCMeta)
@@ -58,3 +59,31 @@ class Metric(object):
         """
         raise NotImplementedError("function 'accumulate' not implemented in {}.".format(self.__class__.__name__))
+
+
+class Accuracy(Metric):
+    """
+    Encapsulates accuracy metric logic
+    """
+
+    def __init__(self, topk=(1, ), *args, **kwargs):
+        super(Accuracy, self).__init__(*args, **kwargs)
+        self.topk = topk
+        self.maxk = max(topk)
+        self.reset()
+
+    def update(self, pred, label, *args, **kwargs):
+        pred = np.argsort(pred[0])[:, ::-1][:, :self.maxk]
+        corr = (pred == np.repeat(label[0], self.maxk, 1))
+        self.correct = np.append(self.correct, corr, axis=0)
+
+    def reset(self):
+        self.correct = np.empty((0, self.maxk), dtype="int32")
+
+    def accumulate(self):
+        res = []
+        num_samples = self.correct.shape[0]
+        for k in self.topk:
+            correct_k = self.correct[:, :k].sum()
+            res.append(round(100.0 * correct_k / num_samples, 2))
+        return res
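For reference, a pure-numpy walkthrough of the same steps the new `Accuracy.update`/`accumulate` pair performs; the scores and labels below are made up for illustration and are not part of this change:

```python
import numpy as np

# Made-up scores for 3 samples over 4 classes, plus their ground-truth labels.
scores = np.array([[0.10, 0.70, 0.15, 0.05],   # top-2 classes: 1, 2
                   [0.30, 0.20, 0.40, 0.10],   # top-2 classes: 2, 0
                   [0.25, 0.25, 0.20, 0.30]])  # top-2 classes: 3, 1
labels = np.array([[1], [2], [1]])

maxk = 2
# Same as Accuracy.update(): rank classes per sample, keep the top maxk,
# and mark where the label appears among them.
topk_idx = np.argsort(scores)[:, ::-1][:, :maxk]
correct = (topk_idx == np.repeat(labels, maxk, 1))
# Same reduction as Accuracy.accumulate(): top-1 uses only column 0, top-2 uses both.
for k in (1, 2):
    print("top-{}: {}%".format(k, round(100.0 * correct[:, :k].sum() / len(scores), 2)))
# -> top-1: 66.67% (2 of 3 correct), top-2: 100.0%
```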
@@ -27,6 +27,7 @@ from paddle.fluid.optimizer import Momentum
 from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear

 from model import Model, CrossEntropy
+from metrics import Accuracy


 class SimpleImgConvPool(fluid.dygraph.Layer):
@@ -143,7 +144,7 @@ def main():
     model = MNIST()
     optim = Momentum(learning_rate=FLAGS.lr, momentum=.9,
                      parameter_list=model.parameters())
-    model.prepare(optim, CrossEntropy())
+    model.prepare(optim, CrossEntropy(), metrics=Accuracy(topk=(1, 2)))

     if FLAGS.resume is not None:
         model.load(FLAGS.resume)
@@ -163,6 +164,10 @@ def main():
             if idx % 10 == 0:
                 print("{:04d}: loss {:0.3f} top1: {:0.3f}%".format(
                     idx, train_loss / (idx + 1), train_acc / (idx + 1)))
+        for metric in model._metrics:
+            res = metric.accumulate()
+            print("train epoch {:03d}: top1: {:0.3f}%, top2: {:0.3f}".format(e, res[0], res[1]))
+            metric.reset()

         print("======== eval epoch {} ========".format(e))
         for idx, batch in enumerate(val_loader()):
@@ -175,6 +180,10 @@ def main():
             if idx % 10 == 0:
                 print("{:04d}: loss {:0.3f} top1: {:0.3f}%".format(
                     idx, val_loss / (idx + 1), val_acc / (idx + 1)))
+        for metric in model._metrics:
+            res = metric.accumulate()
+            print("eval epoch {:03d}: top1: {:0.3f}%, top2: {:0.3f}".format(e, res[0], res[1]))
+            metric.reset()

         model.save('mnist_checkpoints/{:02d}'.format(e))
...
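The per-epoch blocks added above call `accumulate()` to get the reported percentages and then `reset()` so results do not carry over into the next epoch. A minimal sketch of that flow driving the new `Accuracy` metric directly, assuming it runs from this branch's source tree (so `from metrics import Accuracy` resolves, as in the hunk above); the random arrays are only placeholder data:

```python
import numpy as np
from metrics import Accuracy  # the class added in this commit

acc = Accuracy(topk=(1, 2))
for epoch in range(2):
    for _ in range(10):                             # pretend batches
        logits = np.random.rand(32, 10)             # [batch, num_classes] scores
        labels = np.random.randint(0, 10, (32, 1))  # [batch, 1] class ids
        acc.update([logits], [labels])              # update() reads pred[0] / label[0]
    top1, top2 = acc.accumulate()                   # percentages over the whole epoch
    print("epoch {}: top1 {:0.2f}%, top2 {:0.2f}%".format(epoch, top1, top2))
    acc.reset()                                     # mirror the reset() after each epoch
```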
@@ -26,7 +26,7 @@ from paddle.fluid.framework import in_dygraph_mode, Variable
 from paddle.fluid.executor import global_scope
 from paddle.fluid.io import is_belong_to_optimizer
 from paddle.fluid.dygraph.base import to_variable
-from metrics.metric import Metric
+from metrics import Metric

 __all__ = ['shape_hints', 'Model', 'Loss', 'CrossEntropy']
@@ -297,6 +297,8 @@ class StaticGraphAdapter(object):
                 metric.update(outputs, labels)
             return outputs, losses
         else:  # train
+            for metric in self.model._metrics:
+                metric.update(outputs, labels)
             return outputs, losses

     def _make_program(self, inputs):
@@ -311,7 +313,7 @@ class StaticGraphAdapter(object):
             if self.mode != 'test':
                 label_vars = self._infer_label_vars(outputs)
                 self._label_vars[self.mode] = label_vars
-                losses = self.model._loss_function(outputs[0], label_vars)
+                losses = self.model._loss_function(outputs, label_vars)
                 if self.mode == 'train':
                     self._loss_endpoint = fluid.layers.sum(losses)
                     self.model._optimizer.minimize(self._loss_endpoint)
@@ -319,7 +321,7 @@ class StaticGraphAdapter(object):
                 prog = prog.clone(for_test=True)
         self._progs[self.mode] = prog
         self._endpoints[self.mode] = {
-            "output": outputs[1:],
+            "output": outputs,
             "label": label_vars,
             "loss": losses,
         }
@@ -419,12 +421,14 @@ class DynamicGraphAdapter(object):
         self.mode = 'train'
         inputs = to_list(inputs)
         labels = to_list(labels)
-        outputs = self.model.forward(*[to_variable(x) for x in inputs])[0]
+        outputs = self.model.forward(*[to_variable(x) for x in inputs])
         losses = self.model._loss_function(outputs, labels)
         final_loss = fluid.layers.sum(losses)
         final_loss.backward()
         self.model._optimizer.minimize(final_loss)
         self.model.clear_gradients()
+        for metric in self.model._metrics:
+            metric.update([to_numpy(o) for o in to_list(outputs)], labels)
         return [to_numpy(o) for o in to_list(outputs)], \
             [to_numpy(l) for l in losses]
@@ -436,18 +440,18 @@ class DynamicGraphAdapter(object):
         inputs = to_list(inputs)
         labels = to_list(labels)
         outputs = self.model.forward(*[to_variable(x) for x in inputs])
-        losses = self.model._loss_function(outputs[0], labels)
+        losses = self.model._loss_function(outputs, labels)
         for metric in self.model._metrics:
-            metric.update([to_numpy(o) for o in outputs[1:]], labels)
-        return [to_numpy(o) for o in to_list(outputs[0])], \
+            metric.update([to_numpy(o) for o in to_list(outputs)], labels)
+        return [to_numpy(o) for o in to_list(outputs)], \
             [to_numpy(l) for l in losses]

     def test(self, inputs, device='CPU', device_ids=None):
         super(Model, self.model).eval()
         self.mode = 'test'
         inputs = [to_variable(x) for x in to_list(inputs)]
-        outputs = self.model.forward(*inputs)[1:]
-        return [to_numpy(o) for o in to_list(outputs[1:])]
+        outputs = self.model.forward(*inputs)
+        return [to_numpy(o) for o in to_list(outputs)]

     def parameters(self, *args, **kwargs):
         return super(Model, self.model).parameters(*args, **kwargs)
...
...@@ -35,7 +35,6 @@ from paddle.fluid.regularizer import L2Decay ...@@ -35,7 +35,6 @@ from paddle.fluid.regularizer import L2Decay
from model import Model, Loss, shape_hints from model import Model, Loss, shape_hints
from resnet import ResNet, ConvBNLayer from resnet import ResNet, ConvBNLayer
from metrics.coco import COCOMetric
import logging import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s' FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
...@@ -174,7 +173,7 @@ class YOLOv3(Model): ...@@ -174,7 +173,7 @@ class YOLOv3(Model):
route = self.route_blocks[idx](route) route = self.route_blocks[idx](route)
route = fluid.layers.resize_nearest(route, scale=2) route = fluid.layers.resize_nearest(route, scale=2)
if self.mode != 'train': if self.mode == 'test':
anchor_mask = self.anchor_masks[idx] anchor_mask = self.anchor_masks[idx]
mask_anchors = [] mask_anchors = []
for m in anchor_mask: for m in anchor_mask:
...@@ -195,10 +194,10 @@ class YOLOv3(Model): ...@@ -195,10 +194,10 @@ class YOLOv3(Model):
downsample //= 2 downsample //= 2
if self.mode == 'train': if self.mode != 'test':
return [outputs] return outputs
return [outputs, img_id, fluid.layers.multiclass_nms( return [img_id, fluid.layers.multiclass_nms(
bboxes=fluid.layers.concat(boxes, axis=1), bboxes=fluid.layers.concat(boxes, axis=1),
scores=fluid.layers.concat(scores, axis=2), scores=fluid.layers.concat(scores, axis=2),
score_threshold=self.valid_thresh, score_threshold=self.valid_thresh,
...@@ -533,12 +532,15 @@ def main(): ...@@ -533,12 +532,15 @@ def main():
anno_path = os.path.join(FLAGS.data, 'annotations', 'instances_val2017.json') anno_path = os.path.join(FLAGS.data, 'annotations', 'instances_val2017.json')
model.prepare(optim, model.prepare(optim,
YoloLoss(num_classes=NUM_CLASSES), YoloLoss(num_classes=NUM_CLASSES),
metrics=COCOMetric(anno_path, with_background=False)) # For YOLOv3, output variable in train/eval is different,
# which is not supported by metric, add by callback later?
# metrics=COCOMetric(anno_path, with_background=False)
)
for e in range(epoch): for e in range(epoch):
# logger.info("======== train epoch {} ========".format(e)) logger.info("======== train epoch {} ========".format(e))
# run(model, train_loader) run(model, train_loader)
# model.save('yolo_checkpoints/{:02d}'.format(e)) model.save('yolo_checkpoints/{:02d}'.format(e))
logger.info("======== eval epoch {} ========".format(e)) logger.info("======== eval epoch {} ========".format(e))
run(model, val_loader, mode='eval') run(model, val_loader, mode='eval')
# should be called in fit() # should be called in fit()
......
@@ -12,5 +12,3 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-from __future__ import absolute_import
@@ -12,14 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-from __future__ import absolute_import
 import sys
 import json
 from pycocotools.cocoeval import COCOeval
 from pycocotools.coco import COCO

-from .metric import Metric
+from metrics import Metric

 import logging
 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
@@ -32,6 +30,7 @@ __all__ = ['COCOMetric']
 OUTFILE = './bbox.json'


+# considered to change to a callback later
 class COCOMetric(Metric):
     """
     Metrci for MS-COCO dataset, only support update with batch
...