Commit 3a559be9 authored by shippingwang

add metrics

Parent 17966d76
@@ -108,19 +108,35 @@ def test(args):
    test_feeder = fluid.DataFeeder(place=place, feed_list=test_feeds)

    epoch_period = []
    for test_iter, data in enumerate(test_reader()):
        if args.model_name != "TALL":
            cur_time = time.time()
            test_outs = exe.run(fetch_list=test_fetch_list,
                                feed=test_feeder.feed(data))
            period = time.time() - cur_time
            epoch_period.append(period)
            test_metrics.accumulate(test_outs)

            # metric here
            if args.log_interval > 0 and test_iter % args.log_interval == 0:
                info_str = '[EVAL] Batch {}'.format(test_iter)
                test_metrics.calculate_and_log_out(test_outs, info_str)
        else:
            # TALL samples carry extra bookkeeping fields: feed only the first two
            # entries to the network and hand the rest to the metrics accumulator.
            data_feed_in = [items[:2] for items in data]
            cur_time = time.time()
            test_outs = exe.run(fetch_list=test_fetch_list,
                                feed=test_feeder.feed(data_feed_in))
            period = time.time() - cur_time
            epoch_period.append(period)
            calc_info = [items[2:] for items in data]
            test_result_list = [item for item in test_outs] + calc_info
            test_metrics.accumulate(test_result_list)

    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    test_metrics.finalize_and_log_out("[EVAL] eval finished. ", args.save_dir)
......
@@ -430,12 +430,24 @@ class TallMetrics(Metrics):
        self.name = name
        self.mode = mode
        self.calculator = tall_metrics.MetricsCalculator(cfg=cfg, name=self.name, mode=self.mode)
        self.IoU_thresh = [0.1, 0.3, 0.5, 0.7]
        self.all_correct_num_10 = [0.0] * 5
        self.all_correct_num_5 = [0.0] * 5
        self.all_correct_num_1 = [0.0] * 5
        self.all_retrievd = 0.0

    def calculate_and_log_out(self, fetch_list, info=""):
        if self.mode == "train":
            loss = np.array(fetch_list[0])
            logger.info(info + '\tLoss = {}'.format('%.6f' % np.mean(loss)))
        elif self.mode == "valid":
            pass
        elif self.mode == "test":
            pass

    def accumulate(self, fetch_list):
        if self.mode == "test":
            outs = fetch_list[0]
            outputs = np.squeeze(outs)
            start = fetch_list[1]
@@ -459,27 +471,34 @@ class TallMetrics(Metrics):
            clips = [b[0] for b in movie_clip_featmaps]
            sclips = [b[0] for b in movie_clip_sentences]

            for i in range(len(self.IoU_thresh)):
                IoU = self.IoU_thresh[i]
                self.current_correct_num_10 = compute_IoU_recall_top_n_forreg(
                    10, IoU, sentence_image_mat, sentence_image_reg_mat, sclips, clips)
                self.current_correct_num_5 = compute_IoU_recall_top_n_forreg(
                    5, IoU, sentence_image_mat, sentence_image_reg_mat, sclips, clips)
                self.current_correct_num_1 = compute_IoU_recall_top_n_forreg(
                    1, IoU, sentence_image_mat, sentence_image_reg_mat, sclips, clips)
                self.all_correct_num_10[i] += self.current_correct_num_10
                self.all_correct_num_5[i] += self.current_correct_num_5
                self.all_correct_num_1[i] += self.current_correct_num_1
            self.all_retrievd += len(sclips)
        else:
            pass

    def finalize_and_log_out(self, info="", savedir="/"):
        all_retrievd = self.all_retrievd
        for k in range(len(self.IoU_thresh)):
            logger.info(
                " IoU=" + str(self.IoU_thresh[k]) +
                ", R@10: " + str(self.all_correct_num_10[k] / all_retrievd) +
                "; IoU=" + str(self.IoU_thresh[k]) +
                ", R@5: " + str(self.all_correct_num_5[k] / all_retrievd) +
                "; IoU=" + str(self.IoU_thresh[k]) +
                ", R@1: " + str(self.all_correct_num_1[k] / all_retrievd))
        R1_IOU5 = self.all_correct_num_1[2] / all_retrievd
        R5_IOU5 = self.all_correct_num_5[2] / all_retrievd
        logger.info("best_R1_IOU5: %0.3f" % R1_IOU5)
        logger.info("best_R5_IOU5: %0.3f" % R5_IOU5)

    def reset(self):
        self.calculator.reset()


class MetricsZoo(object):
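compute_IoU_recall_top_n_forreg is not part of this diff. As a reference for what the R@n, IoU=m numbers above mean, here is a small self-contained sketch of that standard metric; the argument layout is simplified (ground-truth and candidate segments are passed as (start, end) pairs rather than parsed out of the clip-name strings), so treat it as illustrative rather than the repo's implementation.

import numpy as np

def temporal_iou(pred, gt):
    # IoU of two (start, end) segments on the time axis.
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return inter / union if union > 0 else 0.0

def recall_top_n_at_iou(top_n, iou_thresh, score_mat, reg_mat, gt_segments, clip_segments):
    # Count sentences whose top_n scoring clips (after adding the predicted
    # start/end offsets from reg_mat) contain at least one clip with
    # IoU >= iou_thresh against the ground-truth segment.
    correct = 0
    for s_idx, gt in enumerate(gt_segments):
        ranked = np.argsort(score_mat[s_idx])[::-1][:top_n]
        for c_idx in ranked:
            start = clip_segments[c_idx][0] + reg_mat[s_idx, c_idx, 0]
            end = clip_segments[c_idx][1] + reg_mat[s_idx, c_idx, 1]
            if temporal_iou((start, end), gt) >= iou_thresh:
                correct += 1
                break
    return correct

Dividing the returned count by the number of sentences gives R@n at that IoU; in the class above, accumulate sums these counts per movie and finalize_and_log_out divides by self.all_retrievd, the total number of sentences seen.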
......
@@ -31,8 +31,14 @@ class MetricsCalculator():
    def reset(self):
        logger.info("Resetting {} metrics...".format(self.mode))
        self.IoU_thresh = [0.1, 0.3, 0.5, 0.7]
        self.all_correct_num_10 = [0.0] * 5
        self.all_correct_num_5 = [0.0] * 5
        self.all_correct_num_1 = [0.0] * 5
        self.all_retrievd = 0.0

    def finalize_metrics(self):
        return

    def calculate_metrics(self):
        return
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import cv2
import math
import random
import functools
try:
    import cPickle as pickle
    from cStringIO import StringIO
except ImportError:
    import pickle
    from io import BytesIO
import numpy as np
import paddle
from PIL import Image, ImageEnhance
import logging

from .reader_utils import DataReader

logger = logging.getLogger(__name__)


class TacosReader(DataReader):
    def __init__(self, name, mode, cfg):
        self.name = name
        self.mode = mode
        self.cfg = cfg

    def create_reader(self):
        cfg = self.cfg
        mode = self.mode
        num_reader_threads = cfg[mode.upper()]['num_reader_threads']
        assert num_reader_threads >= 1, \
            "number of reader threads({}) should be a positive integer".format(num_reader_threads)
        if num_reader_threads == 1:
            reader_func = make_reader
        else:
            reader_func = make_multi_reader

        filelist = cfg[mode.upper()]['']

        if self.mode == 'train':
            return reader_func(cfg)
        elif self.mode == 'valid':
            return reader_func(cfg)
        else:
            logger.info("Not implemented")
            raise NotImplementedError


def make_reader(cfg):
    def reader():
        # cPickle is imported under the alias "pickle" above, so use that alias here.
        cs = pickle.load(open(cfg.TRAIN.train_clip_sentvec, 'rb'))
        movie_length_info = pickle.load(open(cfg.TRAIN.movie_length_info, 'rb'))
        # put train() in here
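A side note on the pickle loading that make_reader starts above: TACoS annotation pickles are often produced under Python 2, so a small helper like the sketch below (illustrative only, not part of this commit; the path is whatever cfg.TRAIN.train_clip_sentvec points to) keeps the load working under both Python 2 and 3.

import pickle

def load_pickle(path):
    # Open in binary mode; retry with latin1 encoding so a Python-2 pickle
    # (e.g. the TACoS clip-sentence vectors) also loads under Python 3.
    with open(path, 'rb') as f:
        try:
            return pickle.load(f)
        except UnicodeDecodeError:
            f.seek(0)
            return pickle.load(f, encoding='latin1')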
@@ -22,7 +22,6 @@
import paddle
import paddle.fluid as fluid
import functools
-import pdb

random.seed(0)
@@ -35,17 +34,18 @@ class TallReader(DataReader):
    def __init__(self, name, mode, cfg):
        self.name = name
        self.mode = mode
        self.cfg = cfg

    def create_reader(self):
        cfg = self.cfg
        mode = self.mode
        if self.mode == 'train':
            train_batch_size = cfg.TRAIN.batch_size
            return paddle.batch(train(cfg), batch_size=train_batch_size, drop_last=True)
        elif self.mode == 'valid':
            test_batch_size = cfg.VALID.batch_size
            return paddle.batch(test(cfg), batch_size=test_batch_size, drop_last=True)
        else:
            logger.info("Not implemented")
            raise NotImplementedError
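Since create_reader now returns paddle.batch(...) for both 'train' and 'valid', the caller iterates over batches rather than single samples. A rough, self-contained usage sketch of that wrapping, with a dummy sample reader standing in for the one train(cfg) returns and an assumed batch size:

import paddle

def dummy_reader():
    # Stands in for the sample-level reader; yields one sample per iteration.
    for i in range(10):
        yield [i]

# paddle.batch wraps a sample reader into a batch reader; drop_last=True discards
# the final partial batch, matching the create_reader() calls above.
batched = paddle.batch(dummy_reader, batch_size=4, drop_last=True)
for batch in batched():
    print(len(batch))  # -> 4, 4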
@@ -201,13 +201,13 @@ def train(cfg):
            clip_sentence_pairs_iou.append((clip_sentence[0], clip_sentence[1], clip_name, start_offset, end_offset))
            # count += 1
            # if count > 200:
            #     break

    num_samples_iou = len(clip_sentence_pairs_iou)
    print("TRAIN: " + str(len(clip_sentence_pairs_iou)) + " iou clip-sentence pairs are read")

    return make_train_reader(cfg, clip_sentence_pairs_iou, shuffle=True, is_train=True)


class TACoS_Test_dataset(object):
    '''
    '''
    def __init__(self, cfg):
@@ -304,3 +304,35 @@ class TACoS_Test_dataset(object):
            comb_feat = np.hstack((left_context_feat, feature_data, right_context_feat))
            movie_clip_featmap.append((self.sliding_clip_names[k], comb_feat))
        return movie_clip_featmap, movie_clip_sentences


def test(cfg):
    test_dataset = TACoS_Test_dataset(cfg)
    all_number = len(test_dataset.movie_names)
    idx = 0
    for movie_name in test_dataset.movie_names:
        idx += 1
        print("%d/%d" % (idx, all_number))
        movie_clip_featmaps, movie_clip_sentences = test_dataset.load_movie_slidingclip(movie_name, 16)
        print("sentences: " + str(len(movie_clip_sentences)))
        print("clips: " + str(len(movie_clip_featmaps)))  # candidate clips
        sentence_image_mat = np.zeros([len(movie_clip_sentences), len(movie_clip_featmaps)])
        sentence_image_reg_mat = np.zeros([len(movie_clip_sentences), len(movie_clip_featmaps), 2])
        for k in range(len(movie_clip_sentences)):
            sent_vec = movie_clip_sentences[k][1]
            sent_vec = np.reshape(sent_vec, [1, sent_vec.shape[0]])  # 1, 4800
            for t in range(len(movie_clip_featmaps)):
                featmap = movie_clip_featmaps[t][1]
                visual_clip_name = movie_clip_featmaps[t][0]
                start = float(visual_clip_name.split("_")[1])
                end = float(visual_clip_name.split("_")[2].split("_")[0])
                featmap = np.reshape(featmap, [1, featmap.shape[0]])
                feed_data = [[featmap, sent_vec, start, end, k, t, movie_clip_sentences, movie_clip_featmaps]]
                yield feed_data