Commit 54638fd6 authored by wuzewu

Update detection task

Parent 5b54a7a5
# -*- coding:utf8 -*-
import argparse
import os
import ast
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.reader.cv_reader import ObjectDetectionReader
from paddlehub.dataset.base_cv_dataset import ObjectDetectionDataset
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--use_gpu", type=ast.literal_eval, default=False, help="Whether use GPU for predict.")
parser.add_argument("--checkpoint_dir", type=str, default="paddlehub_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=2, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="ssd", help="Module used as a feature extractor.")
parser.add_argument("--dataset", type=str, default="coco10", help="Dataset to finetune.")
parser.add_argument("--use_gpu", type=ast.literal_eval, default=True, help="Whether use GPU for fine-tuning.")
parser.add_argument("--checkpoint_dir", type=str, default="faster_rcnn_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=8, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="faster_rcnn_resnet50_coco2017", help="Module used as feature extractor.")
parser.add_argument("--dataset", type=str, default="coco_10", help="Dataset to finetune.")
# yapf: enable.
def predict(args):
module = hub.Module(name=args.module)
dataset = hub.dataset.Coco10('rcnn')
print("dataset.num_labels:", dataset.num_labels)
# define batch reader
data_reader = ObjectDetectionReader(dataset=dataset, model_type='rcnn')
input_dict, output_dict, program = module.context(trainable=True)
pred_input_dict, pred_output_dict, pred_program = module.context(
trainable=False, phase='predict')
feed_list = [
input_dict["image"].name, input_dict["im_info"].name,
input_dict['gt_bbox'].name, input_dict['gt_class'].name,
input_dict['is_crowd'].name
]
pred_feed_list = [
pred_input_dict['image'].name, pred_input_dict['im_info'].name,
pred_input_dict['im_shape'].name
]
feature = [
output_dict['head_feat'], output_dict['rpn_cls_loss'],
output_dict['rpn_reg_loss'], output_dict['generate_proposal_labels']
]
pred_feature = [pred_output_dict['head_feat'], pred_output_dict['rois']]
config = hub.RunConfig(
use_data_parallel=False,
@@ -57,40 +59,24 @@ def predict(args):
checkpoint_dir=args.checkpoint_dir,
strategy=hub.finetune.strategy.DefaultFinetuneStrategy())
task = hub.FasterRCNNTask(
data_reader=data_reader,
num_classes=dataset.num_labels,
feed_list=feed_list,
feature=feature,
predict_feed_list=pred_feed_list,
predict_feature=pred_feature,
config=config)
data = [
"./test/test_img_bird.jpg",
"./test/test_img_cat.jpg",
]
label_map = dataset.label_dict()
results = task.predict(data=data, return_result=True, accelerate_mode=False)
print(results)
if __name__ == "__main__":
args = parser.parse_args()
predict(args)
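With return_result=True, task.predict now returns post-processed detections directly, replacing the manual bbox2out conversion this commit removes. A minimal, hypothetical consumer of that output (the per-image layout of results is an assumption for illustration, not something this commit pins down; names follow the script above):

# Hypothetical: pair each input image with its detections.
for image_path, detections in zip(data, results):
    # The layout of `detections` depends on the task's postprocessing step.
    print(image_path, detections)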
# -*- coding:utf8 -*-
import argparse
import os
import ast
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.reader.cv_reader import ObjectDetectionReader
from paddlehub.dataset.base_cv_dataset import ObjectDetectionDataset
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--use_gpu", type=ast.literal_eval, default=True, help="Whether use GPU for fine-tuning.")
parser.add_argument("--checkpoint_dir", type=str, default="ssd_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=8, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="ssd_vgg16_512_coco2017", help="Module used as feature extractor.")
parser.add_argument("--dataset", type=str, default="coco_10", help="Dataset to finetune.")
# yapf: enable.
def predict(args):
module = hub.Module(name=args.module)
dataset = hub.dataset.Coco10('ssd')
print("dataset.num_labels:", dataset.num_labels)
# define batch reader
data_reader = ObjectDetectionReader(dataset=dataset, model_type='ssd')
input_dict, output_dict, program = module.context(trainable=True)
feed_list = [input_dict["image"].name, input_dict["im_size"].name]
feature = output_dict['body_features']
config = hub.RunConfig(
use_data_parallel=False,
use_pyreader=True,
use_cuda=args.use_gpu,
batch_size=args.batch_size,
enable_memory_optim=False,
checkpoint_dir=args.checkpoint_dir,
strategy=hub.finetune.strategy.DefaultFinetuneStrategy())
task = hub.SSDTask(
data_reader=data_reader,
num_classes=dataset.num_labels,
feed_list=feed_list,
feature=feature,
multi_box_head_config=module.multi_box_head_config,
config=config)
data = [
"./test/test_img_bird.jpg",
"./test/test_img_cat.jpg",
]
label_map = dataset.label_dict()
results = task.predict(data=data, return_result=True, accelerate_mode=False)
print(results)
if __name__ == "__main__":
args = parser.parse_args()
predict(args)
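label_map is built above but never consumed; a hedged sketch of using it to attach class names to the predictions (the detection layout, with an integer class id in the first field, is an assumption):

# Assumption: each detection row starts with an integer class id.
for per_image in results:
    for det in per_image:
        cls_id = int(det[0])
        print(label_map.get(cls_id, "unknown"), det[1:])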
IMAGE_PATH
./resources/test/test_img_bird.jpg

input_data:
  image:
    type : IMAGE
    key : IMAGE_PATH
config:
  top_only : True
@@ -3,65 +3,54 @@ import argparse
import os
import ast
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.reader.cv_reader import ObjectDetectionReader
from paddlehub.dataset.base_cv_dataset import ObjectDetectionDataset
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=50, help="Number of epoches for fine-tuning.")
parser.add_argument("--use_gpu", type=ast.literal_eval, default=False, help="Whether use GPU for fine-tuning.")
parser.add_argument("--checkpoint_dir", type=str, default="paddlehub_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=8, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="ssd", help="Module used as feature extractor.")
parser.add_argument("--use_gpu", type=ast.literal_eval, default=True, help="Whether use GPU for fine-tuning.")
parser.add_argument("--checkpoint_dir", type=str, default="faster_rcnn_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=1, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="faster_rcnn_resnet50_coco2017", help="Module used as feature extractor.")
parser.add_argument("--dataset", type=str, default="coco_10", help="Dataset to finetune.")
parser.add_argument("--use_data_parallel", type=ast.literal_eval, default=False, help="Whether use data parallel.")
# yapf: enable.
def finetune(args):
module = hub.Module(name=args.module)
dataset = hub.dataset.Coco10('rcnn')
print("dataset.num_labels:", dataset.num_labels)
# define batch reader
data_reader = ObjectDetectionReader(dataset=dataset, model_type='rcnn')
input_dict, output_dict, program = module.context(trainable=True, phase='train')
pred_input_dict, pred_output_dict, pred_program = module.context(
trainable=False, phase='predict')
print("input_dict keys", input_dict.keys())
print("output_dict keys", output_dict.keys())
feed_list, pred_feed_list = get_feed_list(module_name, input_dict, input_dict_pred)
print("output_dict length:", len(output_dict))
print(output_dict.keys())
if output_dict_pred is not None:
print(output_dict_pred.keys())
feature, pred_feature = get_mid_feature(module_name, output_dict, output_dict_pred)
feed_list = [
input_dict["image"].name, input_dict["im_info"].name,
input_dict['gt_bbox'].name, input_dict['gt_class'].name,
input_dict['is_crowd'].name
]
pred_feed_list = [
pred_input_dict['image'].name, pred_input_dict['im_info'].name,
pred_input_dict['im_shape'].name
]
feature = [
output_dict['head_feat'], output_dict['rpn_cls_loss'],
output_dict['rpn_reg_loss'], output_dict['generate_proposal_labels']
]
pred_feature = [pred_output_dict['head_feat'], pred_output_dict['rois']]
config = hub.RunConfig(
log_interval=10,
@@ -73,25 +62,20 @@ def finetune(args):
batch_size=args.batch_size,
enable_memory_optim=False,
checkpoint_dir=args.checkpoint_dir,
strategy=hub.finetune.strategy.DefaultFinetuneStrategy(
learning_rate=0.00025, optimizer_name="adam"))
task = hub.FasterRCNNTask(
data_reader=data_reader,
num_classes=dataset.num_labels,
feed_list=feed_list,
feature=feature,
predict_feed_list=pred_feed_list,
predict_feature=pred_feature,
config=config)
task.finetune_and_eval()
if __name__ == "__main__":
args = parser.parse_args()
finetune(args)
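For reference, the feed/feature contract wired into FasterRCNNTask above, summarized as comments (all names are taken from the lists in the script):

# feed_list         : image, im_info, gt_bbox, gt_class, is_crowd  (train program)
# predict_feed_list : image, im_info, im_shape                     (predict program)
# feature           : head_feat, rpn_cls_loss, rpn_reg_loss,
#                     generate_proposal_labels                     (train outputs)
# predict_feature   : head_feat, rois                              (predict outputs)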
# -*- coding:utf8 -*-
import argparse
import os
import ast
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.reader.cv_reader import ObjectDetectionReader
from paddlehub.dataset.base_cv_dataset import ObjectDetectionDataset
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=50, help="Number of epoches for fine-tuning.")
parser.add_argument("--use_gpu", type=ast.literal_eval, default=True, help="Whether use GPU for fine-tuning.")
parser.add_argument("--checkpoint_dir", type=str, default="ssd_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=8, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="ssd_vgg16_512_coco2017", help="Module used as feature extractor.")
parser.add_argument("--dataset", type=str, default="coco_10", help="Dataset to finetune.")
parser.add_argument("--use_data_parallel", type=ast.literal_eval, default=False, help="Whether use data parallel.")
# yapf: enable.
def finetune(args):
module = hub.Module(name=args.module)
dataset = hub.dataset.Coco10('ssd')
print("dataset.num_labels:", dataset.num_labels)
# define batch reader
data_reader = ObjectDetectionReader(dataset=dataset, model_type='ssd')
input_dict, output_dict, program = module.context(trainable=True)
feed_list = [input_dict["image"].name, input_dict["im_size"].name]
feature = output_dict['body_features']
config = hub.RunConfig(
log_interval=10,
eval_interval=100,
use_data_parallel=args.use_data_parallel,
use_pyreader=True,
use_cuda=args.use_gpu,
num_epoch=args.num_epoch,
batch_size=args.batch_size,
enable_memory_optim=False,
checkpoint_dir=args.checkpoint_dir,
strategy=hub.finetune.strategy.DefaultFinetuneStrategy(
learning_rate=0.00025, optimizer_name="adam"))
task = hub.SSDTask(
data_reader=data_reader,
num_classes=dataset.num_labels,
feed_list=feed_list,
feature=feature,
multi_box_head_config=module.multi_box_head_config,
config=config)
task.finetune_and_eval()
if __name__ == "__main__":
args = parser.parse_args()
finetune(args)
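A hypothetical launch of this script (the file name is assumed; the flags come from the parser above):

# python finetune_ssd.py --use_gpu True --num_epoch 50 --batch_size 8 \
#     --checkpoint_dir ssd_finetune_ckpt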
# -*- coding:utf8 -*-
import argparse
import os
import ast
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.reader.cv_reader import ObjectDetectionReader
from paddlehub.dataset.base_cv_dataset import ObjectDetectionDataset
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=50, help="Number of epoches for fine-tuning.")
parser.add_argument("--use_gpu", type=ast.literal_eval, default=True, help="Whether use GPU for fine-tuning.")
parser.add_argument("--checkpoint_dir", type=str, default="yolo_finetune_ckpt", help="Path to save log data.")
parser.add_argument("--batch_size", type=int, default=8, help="Total examples' number in batch for training.")
parser.add_argument("--module", type=str, default="yolov3_darknet53_coco2017", help="Module used as feature extractor.")
parser.add_argument("--dataset", type=str, default="coco_10", help="Dataset to finetune.")
parser.add_argument("--use_data_parallel", type=ast.literal_eval, default=False, help="Whether use data parallel.")
# yapf: enable.
def finetune(args):
module = hub.Module(name=args.module)
dataset = hub.dataset.Coco10('yolo')
print("dataset.num_labels:", dataset.num_labels)
# define batch reader
data_reader = ObjectDetectionReader(dataset=dataset, model_type='yolo')
input_dict, output_dict, program = module.context(trainable=True)
feed_list = [input_dict["image"].name, input_dict["im_size"].name]
feature = output_dict['head_features']
config = hub.RunConfig(
log_interval=10,
eval_interval=100,
use_data_parallel=args.use_data_parallel,
use_pyreader=True,
use_cuda=args.use_gpu,
num_epoch=args.num_epoch,
batch_size=args.batch_size,
enable_memory_optim=False,
checkpoint_dir=args.checkpoint_dir,
strategy=hub.finetune.strategy.DefaultFinetuneStrategy(
learning_rate=0.00025, optimizer_name="adam"))
task = hub.YOLOTask(
data_reader=data_reader,
num_classes=dataset.num_labels,
feed_list=feed_list,
feature=feature,
config=config)
task.finetune_and_eval()
if __name__ == "__main__":
args = parser.parse_args()
finetune(args)
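The RunConfig knobs shared by these finetune scripts, in brief:

# log_interval=10            # print training metrics every 10 steps
# eval_interval=100          # run evaluation on the dev set every 100 steps
# use_pyreader=True          # feed batches through a PyReader
# enable_memory_optim=False  # leave graph memory optimization disabled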
@@ -48,7 +48,9 @@ from .io.type import DataType
from .finetune.task import BaseTask
from .finetune.task import ClassifierTask
from .finetune.task import DetectionTask
from .finetune.task import SSDTask
from .finetune.task import YOLOTask
from .finetune.task import FasterRCNNTask
from .finetune.task import TextClassifierTask
from .finetune.task import ImageClassifierTask
from .finetune.task import SequenceLabelTask
@@ -15,7 +15,9 @@
from .base_task import BaseTask, RunEnv, RunState
from .classifier_task import ClassifierTask, ImageClassifierTask, TextClassifierTask, MultiLabelClassifierTask
from .detection_task import DetectionTask
from .ssd_task import SSDTask
from .yolo_task import YOLOTask
from .faster_rcnn_task import FasterRCNNTask
from .reading_comprehension_task import ReadingComprehensionTask
from .regression_task import RegressionTask
from .sequence_task import SequenceLabelTask
@@ -403,6 +403,8 @@ class BaseTask(object):
with fluid.program_guard(self.env.main_program,
self._base_startup_program):
with fluid.unique_name.guard(self.env.UNG):
if self.is_train_phase or self.is_test_phase:
self.env.labels = self._add_label()
self.env.outputs = self._build_net()
if self.is_train_phase or self.is_test_phase:
self.env.labels = self._add_label()
@@ -557,9 +559,6 @@ class BaseTask(object):
@property
def labels(self):
if not self.env.is_inititalized:
self._build_env()
return self.env.labels
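Note: the predict-phase RuntimeError guard is removed here because the new detection tasks register an im_id label even at predict time (see the _add_label overrides in the task files below), so labels must now be readable in every phase.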
#coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.initializer import Normal
from paddlehub.common.paddle_helper import clone_program
from paddlehub.finetune.task.detection_task import DetectionTask
class FasterRCNNTask(DetectionTask):
def __init__(self,
data_reader,
num_classes,
feed_list,
feature,
predict_feed_list=None,
predict_feature=None,
startup_program=None,
config=None,
metrics_choices="default"):
super(FasterRCNNTask, self).__init__(
data_reader=data_reader,
num_classes=num_classes,
feed_list=feed_list,
feature=feature,
model_type='rcnn',
startup_program=startup_program,
config=config,
metrics_choices=metrics_choices)
self._base_feed_list = feed_list
self._base_predict_feed_list = predict_feed_list
self.feature = feature
self.predict_feature = predict_feature
self.num_classes = num_classes
if predict_feature:
self._base_predict_main_program = clone_program(
predict_feature[0].block.program, for_test=False)
else:
self._base_predict_main_program = None
def _build_net(self):
if self.is_train_phase:
head_feat = self.feature[0]
else:
if self.is_predict_phase:
self.env.labels = self._add_label()
head_feat = self.main_program.global_block().vars[
self.predict_feature[0].name]
# Rename the following layers to avoid: ValueError: Variable cls_score_w has been created before.
# The previous shape is (2048, 81); the new shape is (100352, 81).
# They are not matched.
cls_score = fluid.layers.fc(
input=head_feat,
size=self.num_classes,
act=None,
name='my_cls_score',
param_attr=ParamAttr(
name='my_cls_score_w', initializer=Normal(loc=0.0, scale=0.01)),
bias_attr=ParamAttr(
name='my_cls_score_b',
learning_rate=2.,
regularizer=L2Decay(0.)))
bbox_pred = fluid.layers.fc(
input=head_feat,
size=4 * self.num_classes,
act=None,
name='my_bbox_pred',
param_attr=ParamAttr(
name='my_bbox_pred_w', initializer=Normal(loc=0.0,
scale=0.001)),
bias_attr=ParamAttr(
name='my_bbox_pred_b',
learning_rate=2.,
regularizer=L2Decay(0.)))
if self.is_train_phase:
rpn_cls_loss, rpn_reg_loss, outs = self.feature[1:]
labels_int32 = outs[1]
bbox_targets = outs[2]
bbox_inside_weights = outs[3]
bbox_outside_weights = outs[4]
labels_int64 = fluid.layers.cast(x=labels_int32, dtype='int64')
labels_int64.stop_gradient = True
loss_cls = fluid.layers.softmax_with_cross_entropy(
logits=cls_score, label=labels_int64, numeric_stable_mode=True)
loss_cls = fluid.layers.reduce_mean(loss_cls)
loss_bbox = fluid.layers.smooth_l1(
x=bbox_pred,
y=bbox_targets,
inside_weight=bbox_inside_weights,
outside_weight=bbox_outside_weights,
sigma=1.0)
loss_bbox = fluid.layers.reduce_mean(loss_bbox)
total_loss = fluid.layers.sum(
[loss_bbox, loss_cls, rpn_cls_loss, rpn_reg_loss])
return [total_loss]
else:
rois = self.main_program.global_block().vars[
self.predict_feature[1].name]
im_info = self.feed_var_list[1]
im_shape = self.feed_var_list[3]
im_scale = fluid.layers.slice(im_info, [1], starts=[2], ends=[3])
im_scale = fluid.layers.sequence_expand(im_scale, rois)
boxes = rois / im_scale
cls_prob = fluid.layers.softmax(cls_score, use_cudnn=False)
bbox_pred = fluid.layers.reshape(bbox_pred,
(-1, self.num_classes, 4))
# decoded_box = self.box_coder(prior_box=boxes, target_box=bbox_pred)
decoded_box = fluid.layers.box_coder(
prior_box=boxes,
prior_box_var=[0.1, 0.1, 0.2, 0.2],
target_box=bbox_pred,
code_type='decode_center_size',
box_normalized=False,
axis=1)
cliped_box = fluid.layers.box_clip(
input=decoded_box, im_info=im_shape)
# pred_result = self.nms(bboxes=cliped_box, scores=cls_prob)
pred_result = fluid.layers.multiclass_nms(
bboxes=decoded_box,
scores=cls_prob,
score_threshold=.05,
nms_top_k=-1,
keep_top_k=100,
nms_threshold=.5,
normalized=False,
nms_eta=1.0,
background_label=0)
return [pred_result]
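The rois / im_scale step above maps proposals from the network's resized input back to original image coordinates. A standalone numpy sketch of that arithmetic (values illustrative; im_info holds [resized_h, resized_w, scale] in the Paddle detection convention):

import numpy as np

rois = np.array([[100., 150., 300., 450.]])  # a proposal in the resized image
im_scale = 2.0                               # the scale entry of im_info
print(rois / im_scale)                       # [[ 50.  75. 150. 225.]] in original coords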
def _add_label(self):
if self.is_train_phase:
# 'im_id'
idx_list = [2]
elif self.is_test_phase:
# 'im_id', 'gt_box', 'gt_label', 'is_difficult'
idx_list = [2, 4, 5, 6]
else: # predict
idx_list = [2]
return self._add_label_by_fields(idx_list)
def _add_loss(self):
if self.is_train_phase:
loss = self.env.outputs[-1]
else:
loss = fluid.layers.fill_constant(
shape=[1], value=-1, dtype='float32')
return loss
def _feed_list(self, for_export=False):
if self.is_train_phase:
feed_list = [varname for varname in self._base_feed_list]
else:
feed_list = [varname for varname in self._base_predict_feed_list]
if self.is_train_phase:
# feed_list is ['image', 'im_info', 'gt_bbox', 'gt_class', 'is_crowd']
return feed_list[:2] + [self.labels[0].name] + feed_list[2:]
elif self.is_test_phase:
# feed list is ['image', 'im_info', 'im_shape']
return feed_list[:2] + [self.labels[0].name] + feed_list[2:] + \
[label.name for label in self.labels[1:]]
if for_export:
# skip im_id
return feed_list[:2] + feed_list[3:]
else:
return feed_list[:2] + [self.labels[0].name] + feed_list[2:]
def _fetch_list(self, for_export=False):
# ensure 'im_shape', 'im_id', 'bbox' are fetched as the first three elements in the test phase
if self.is_train_phase:
return [self.loss.name]
elif self.is_test_phase:
# im_shape, im_id, bbox
return [
self.feed_list[2], self.labels[0].name, self.outputs[0].name,
self.loss.name
]
# im_shape, im_id, bbox
if for_export:
return [self.outputs[0].name]
else:
return [
self.feed_list[2], self.labels[0].name, self.outputs[0].name
]
@property
def base_main_program(self):
if self.is_train_phase:
return self._base_main_program
return self._base_predict_main_program
#coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddlehub.finetune.task.detection_task import DetectionTask
class SSDTask(DetectionTask):
def __init__(self,
data_reader,
num_classes,
feed_list,
feature,
multi_box_head_config,
startup_program=None,
config=None,
metrics_choices="default"):
super(SSDTask, self).__init__(
data_reader=data_reader,
num_classes=num_classes,
feed_list=feed_list,
feature=feature,
model_type='ssd',
startup_program=startup_program,
config=config,
metrics_choices=metrics_choices)
self._base_feed_list = feed_list
self.feature = feature
self.num_classes = num_classes
self.multi_box_head_config = multi_box_head_config
def _build_net(self):
if self.is_predict_phase: # add im_id
self.env.labels = self._add_label()
feature_list = self.feature
image = self.feed_var_list[0]
# fix input size according to its module
mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
inputs=feature_list,
image=image,
num_classes=self.num_classes,
**self.multi_box_head_config)
self.env.mid_vars = [mbox_locs, mbox_confs, box, box_var]
nmsed_out = fluid.layers.detection_output(
mbox_locs,
mbox_confs,
box,
box_var,
background_label=0,
nms_threshold=0.45,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0)
return [nmsed_out]
def _add_label(self):
if self.is_train_phase:
# 'gt_box', 'gt_label'
idx_list = [1, 2]
elif self.is_test_phase:
# 'im_id', 'gt_box', 'gt_label', 'is_difficult'
idx_list = [2, 3, 4, 5]
else:
# im_id
idx_list = [1]
return self._add_label_by_fields(idx_list)
def _add_loss(self):
if self.is_train_phase:
gt_box = self.labels[0]
gt_label = self.labels[1]
else: # xTodo: update here when using new module
gt_box = self.labels[1]
gt_label = self.labels[2]
mbox_locs, mbox_confs, box, box_var = self.env.mid_vars
loss = fluid.layers.ssd_loss(
location=mbox_locs,
confidence=mbox_confs,
gt_box=gt_box,
gt_label=gt_label,
prior_box=box,
prior_box_var=box_var)
loss = fluid.layers.reduce_sum(loss)
loss.persistable = True
return loss
def _feed_list(self, for_export=False):
# todo: update when using new module
feed_list = [varname for varname in self._base_feed_list]
if self.is_train_phase:
feed_list = feed_list[:1] + [label.name for label in self.labels]
elif self.is_test_phase:
feed_list = feed_list + [label.name for label in self.labels]
else: # self.is_predict_phase:
if for_export:
feed_list = [feed_list[0]]
else:
# 'image', 'im_id', 'im_shape'
feed_list = [feed_list[0], self.labels[0].name, feed_list[1]]
return feed_list
def _fetch_list(self, for_export=False):
# ensure 'im_shape', 'im_id', 'bbox' are fetched as the first three elements in the test phase
if self.is_train_phase:
return [self.loss.name]
elif self.is_test_phase:
# xTodo: update when using new module
# im_id, bbox, dets, loss
return [
self._base_feed_list[1], self.labels[0].name,
self.outputs[0].name, self.loss.name
]
# im_shape, im_id, bbox
if for_export:
return [self.outputs[0].name]
else:
return [
self._base_feed_list[1], self.labels[0].name,
self.outputs[0].name
]
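multi_box_head_config is forwarded verbatim into fluid.layers.multi_box_head above. An illustrative config of that shape (keys follow the multi_box_head signature; the concrete values are assumptions, not read from ssd_vgg16_512_coco2017):

multi_box_head_config = {
    "base_size": 512,   # input resolution the priors are defined against
    "min_ratio": 15,    # smallest prior scale, as a percentage of base_size
    "max_ratio": 90,    # largest prior scale
    "aspect_ratios": [[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
    "steps": [8, 16, 32, 64, 128, 256, 512],  # effective stride per feature map
}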
#coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddlehub.finetune.task.detection_task import DetectionTask
class YOLOTask(DetectionTask):
def __init__(self,
data_reader,
num_classes,
feed_list,
feature,
startup_program=None,
config=None,
metrics_choices="default"):
super(YOLOTask, self).__init__(
data_reader=data_reader,
num_classes=num_classes,
feed_list=feed_list,
feature=feature,
model_type='yolo',
startup_program=startup_program,
config=config,
metrics_choices=metrics_choices)
self._base_feed_list = feed_list
self.feature = feature
self.num_classes = num_classes
def _parse_anchors(self, anchors):
"""
Check ANCHORS/ANCHOR_MASKS in config and parse mask_anchors
"""
self.anchors = []
self.mask_anchors = []
assert len(anchors) > 0, "ANCHORS not set."
assert len(self.anchor_masks) > 0, "ANCHOR_MASKS not set."
for anchor in anchors:
assert len(anchor) == 2, "anchor {} len should be 2".format(anchor)
self.anchors.extend(anchor)
anchor_num = len(anchors)
for masks in self.anchor_masks:
self.mask_anchors.append([])
for mask in masks:
assert mask < anchor_num, "anchor mask index overflow"
self.mask_anchors[-1].extend(anchors[mask])
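A worked example of _parse_anchors with the anchors and anchor_masks hard-coded in _build_net below:

# anchors      = [[10,13],[16,30],[33,23],[30,61],[62,45],[59,119],
#                 [116,90],[156,198],[373,326]]
# anchor_masks = [[6,7,8],[3,4,5],[0,1,2]]
# After parsing:
#   self.anchors      == [10, 13, 16, 30, 33, 23, 30, 61, 62, 45,
#                         59, 119, 116, 90, 156, 198, 373, 326]
#   self.mask_anchors == [[116, 90, 156, 198, 373, 326],  # coarsest head
#                         [30, 61, 62, 45, 59, 119],
#                         [10, 13, 16, 30, 33, 23]]       # finest head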
def _build_net(self):
if self.is_predict_phase:
self.env.labels = self._add_label()
self.anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
anchors = [[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119],
[116, 90], [156, 198], [373, 326]]
self._parse_anchors(anchors)
tip_list = self.feature
outputs = []
for i, tip in enumerate(tip_list):
# out channel number = mask_num * (5 + class_num)
num_filters = len(self.anchor_masks[i]) * (self.num_classes + 5)
block_out = fluid.layers.conv2d(
input=tip,
num_filters=num_filters,
filter_size=1,
stride=1,
padding=0,
act=None,
# Rename for: conflict with module pretrain weights
param_attr=ParamAttr(
name="ft_yolo_output.{}.conv.weights".format(i)),
bias_attr=ParamAttr(
regularizer=L2Decay(0.),
name="ft_yolo_output.{}.conv.bias".format(i)))
outputs.append(block_out)
if self.is_train_phase:
return outputs
im_size = self.feed_var_list[1]
boxes = []
scores = []
downsample = 32
for i, output in enumerate(outputs):
box, score = fluid.layers.yolo_box(
x=output,
img_size=im_size,
anchors=self.mask_anchors[i],
class_num=self.num_classes,
conf_thresh=0.01,
downsample_ratio=downsample,
name="yolo_box" + str(i))
boxes.append(box)
scores.append(fluid.layers.transpose(score, perm=[0, 2, 1]))
downsample //= 2
yolo_boxes = fluid.layers.concat(boxes, axis=1)
yolo_scores = fluid.layers.concat(scores, axis=2)
# pred = self.nms(bboxes=yolo_boxes, scores=yolo_scores)
pred = fluid.layers.multiclass_nms(
bboxes=yolo_boxes,
scores=yolo_scores,
score_threshold=.01,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
normalized=False,
nms_eta=1.0,
background_label=-1)
return [pred]
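Scale bookkeeping for the decode loop above, as comments:

# head 0: downsample_ratio 32, anchors mask_anchors[0]  (coarse grid)
# head 1: downsample_ratio 16, anchors mask_anchors[1]
# head 2: downsample_ratio  8, anchors mask_anchors[2]  (fine grid)
# Per-scale boxes are concatenated along axis=1 and scores along axis=2
# (after the transpose), then one multiclass_nms yields the final detections.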
def _add_label(self):
if self.is_train_phase:
idx_list = [1, 2, 3] # 'gt_box', 'gt_label', 'gt_score'
elif self.is_test_phase:
idx_list = [2, 3, 4,
5] # 'im_id', 'gt_box', 'gt_label', 'is_difficult'
else: # predict
idx_list = [2]
return self._add_label_by_fields(idx_list)
def _add_loss(self):
if self.is_train_phase:
gt_box, gt_label, gt_score = self.labels
outputs = self.outputs
losses = []
downsample = 32
for i, output in enumerate(outputs):
anchor_mask = self.anchor_masks[i]
loss = fluid.layers.yolov3_loss(
x=output,
gt_box=gt_box,
gt_label=gt_label,
gt_score=gt_score,
anchors=self.anchors,
anchor_mask=anchor_mask,
class_num=self.num_classes,
ignore_thresh=0.7,
downsample_ratio=downsample,
use_label_smooth=True,
name="yolo_loss" + str(i))
losses.append(fluid.layers.reduce_mean(loss))
downsample //= 2
loss = sum(losses)
else:
loss = fluid.layers.fill_constant(
shape=[1], value=-1, dtype='float32')
return loss
def _feed_list(self, for_export=False):
feed_list = [varname for varname in self._base_feed_list]
if self.is_train_phase:
return [feed_list[0]] + [label.name for label in self.labels]
elif self.is_test_phase:
return feed_list + [label.name for label in self.labels]
if for_export:
return feed_list[:2]
else:
return feed_list + [self.labels[0].name]
def _fetch_list(self, for_export=False):
# ensure 'im_shape', 'im_id', 'bbox' are fetched as the first three elements in the test phase
if self.is_train_phase:
return [self.loss.name]
elif self.is_test_phase:
# im_shape, im_id, bbox
return [
self.feed_list[1], self.labels[0].name, self.outputs[0].name,
self.loss.name
]
# im_shape, im_id, bbox
if for_export:
return [self.outputs[0].name]
else:
return [
self.feed_list[1], self.labels[0].name, self.outputs[0].name
]