Unverified · commit 141234c8 · authored by Kaipeng Deng, committed by GitHub

Installable (#2034)

* ppdet installable
Parent 7042801c
@@ -20,7 +20,7 @@ addons:
 before_install:
   - sudo pip install -U virtualenv pre-commit pip
   - docker pull paddlepaddle/paddle:latest
-  - git pull https://github.com/PaddlePaddle/PaddleDetection master -r
+  - git pull https://github.com/PaddlePaddle/PaddleDetection master
 script:
   - exit_code=0
...
 metric: VOC
+map_type: 11point
 num_classes: 20

 TrainDataset:
...
@@ -41,16 +41,6 @@

 ## PaddleDetection

-**Install Python dependencies:**
-
-The Python dependencies are listed in [requirements.txt](../../../requirements.txt) and can be installed with:
-
-```
-pip install -r requirements.txt
-```
-
-**Note: `llvmlite` must be version `0.33` and `numba` must be version `0.50`.**
-
 **Clone the PaddleDetection repository:**

 You can clone PaddleDetection with the following command:
@@ -65,3 +55,10 @@ git clone https://github.com/PaddlePaddle/PaddleDetection.git
 cd <path/to/clone/PaddleDetection>
 git clone https://gitee.com/paddlepaddle/PaddleDetection
 ```
+
+**Install the PaddleDetection package:**
+
+```
+cd PaddleDetection
+python setup.py install
+```
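After installation, a quick sanity check can confirm the package is importable; a minimal sketch (`list_model` comes from the `ppdet.model_zoo` module added in this commit):

```python
# Verify the install: importing ppdet and listing zoo models should work.
import ppdet
from ppdet.model_zoo import list_model

list_model()  # logs the model names shipped in the MODEL_ZOO file
```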
@@ -11,3 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+from . import (core, data, engine, modeling, model_zoo, optimizer, metrics,
+               py_op, utils)
@@ -12,6 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import ppdet.modeling
-import ppdet.optimizer
-import ppdet.data
+from . import config
@@ -126,6 +126,7 @@ def load_config(file_path):
     # load config from file and merge into global config
     cfg = _load_config_with_base(file_path)
+    cfg['filename'] = os.path.splitext(os.path.split(file_path)[-1])[0]
     merge_config(cfg)

     # parse config from merged config
...
@@ -12,6 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from . import source
+from . import transform
+from . import reader
+
 from .source import *
 from .transform import *
 from .reader import *
@@ -144,7 +144,7 @@ class ImageFolder(DetDataset):
         for image in images:
             assert image != '' and os.path.isfile(image), \
                     "Image {} not found".format(image)
-            if self.sample_num > 0 and ct >= self.sample_num:
+            if self.sample_num and self.sample_num > 0 and ct >= self.sample_num:
                 break
             rec = {'im_id': np.array([ct]), 'im_file': image}
             self._imid2path[ct] = image
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import trainer
from .trainer import *
from . import callbacks
from .callbacks import *
from . import env
from .env import *
__all__ = trainer.__all__ \
+ callbacks.__all__ \
+ env.__all__
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import datetime
import paddle
from paddle.distributed import ParallelEnv
from ppdet.utils.checkpoint import save_model
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['Callback', 'ComposeCallback', 'LogPrinter', 'Checkpointer']
class Callback(object):
def __init__(self, model):
self.model = model
def on_step_begin(self, status):
pass
def on_step_end(self, status):
pass
def on_epoch_begin(self, status):
pass
def on_epoch_end(self, status):
pass
class ComposeCallback(object):
def __init__(self, callbacks):
callbacks = [h for h in list(callbacks) if h is not None]
for h in callbacks:
            assert isinstance(h,
                              Callback), "hook should be a subclass of Callback"
self._callbacks = callbacks
def on_step_begin(self, status):
for h in self._callbacks:
h.on_step_begin(status)
def on_step_end(self, status):
for h in self._callbacks:
h.on_step_end(status)
def on_epoch_begin(self, status):
for h in self._callbacks:
h.on_epoch_begin(status)
def on_epoch_end(self, status):
for h in self._callbacks:
h.on_epoch_end(status)
class LogPrinter(Callback):
def __init__(self, model):
super(LogPrinter, self).__init__(model)
def on_step_end(self, status):
if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
if self.model.mode == 'train':
epoch_id = status['epoch_id']
step_id = status['step_id']
steps_per_epoch = status['steps_per_epoch']
training_staus = status['training_staus']
batch_time = status['batch_time']
data_time = status['data_time']
epoches = self.model.cfg.epoch
batch_size = self.model.cfg['{}Reader'.format(
self.model.mode.capitalize())]['batch_size']
logs = training_staus.log()
space_fmt = ':' + str(len(str(steps_per_epoch))) + 'd'
if step_id % self.model.cfg.log_iter == 0:
eta_steps = (epoches - epoch_id) * steps_per_epoch - step_id
eta_sec = eta_steps * batch_time.global_avg
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
ips = float(batch_size) / batch_time.avg
fmt = ' '.join([
'Epoch: [{}]',
'[{' + space_fmt + '}/{}]',
'learning_rate: {lr:.6f}',
'{meters}',
'eta: {eta}',
'batch_cost: {btime}',
'data_cost: {dtime}',
'ips: {ips:.4f} images/s',
])
fmt = fmt.format(
epoch_id,
step_id,
steps_per_epoch,
lr=status['learning_rate'],
meters=logs,
eta=eta_str,
btime=str(batch_time),
dtime=str(data_time),
ips=ips)
logger.info(fmt)
if self.model.mode == 'eval':
step_id = status['step_id']
if step_id % 100 == 0:
logger.info("Eval iter: {}".format(step_id))
def on_epoch_end(self, status):
if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
if self.model.mode == 'eval':
sample_num = status['sample_num']
cost_time = status['cost_time']
                logger.info('Total sample number: {}, average FPS: {}'.format(
                    sample_num, sample_num / cost_time))
class Checkpointer(Callback):
def __init__(self, model):
super(Checkpointer, self).__init__(model)
def on_epoch_end(self, status):
assert self.model.mode == 'train', \
"Checkpointer can only be set during training"
if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
epoch_id = status['epoch_id']
end_epoch = self.model.cfg.epoch
if epoch_id % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:
save_dir = os.path.join(self.model.cfg.save_dir,
self.model.cfg.filename)
save_name = str(
epoch_id) if epoch_id != end_epoch - 1 else "model_final"
save_model(self.model.model, self.model.optimizer, save_dir,
save_name, epoch_id + 1)
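For reference, a minimal sketch of a user-defined hook built on the `Callback` interface above; the `EpochTimer` name is hypothetical, and such a callback would be registered through `Trainer.register_callbacks` (shown later in this commit):

```python
import time

class EpochTimer(Callback):
    # Hypothetical example: log the wall-clock duration of each epoch.
    def __init__(self, model):
        super(EpochTimer, self).__init__(model)
        self._tic = None

    def on_epoch_begin(self, status):
        self._tic = time.time()

    def on_epoch_end(self, status):
        logger.info('Epoch {} finished in {:.1f}s'.format(
            status['epoch_id'], time.time() - self._tic))
```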
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import numpy as np
import paddle
from paddle.distributed import ParallelEnv
__all__ = ['init_parallel_env', 'set_random_seed']
def init_parallel_env():
env = os.environ
dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
if dist:
trainer_id = int(env['PADDLE_TRAINER_ID'])
local_seed = (99 + trainer_id)
random.seed(local_seed)
np.random.seed(local_seed)
if ParallelEnv().nranks > 1:
paddle.distributed.init_parallel_env()
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
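A minimal sketch of how a launcher script might use these helpers (assumes it is started via `paddle.distributed.launch`, so the `PADDLE_*` environment variables are set for the distributed branch):

```python
# Hypothetical training entry point using the helpers above.
if __name__ == '__main__':
    set_random_seed(42)   # seed Python and NumPy RNGs globally
    init_parallel_env()   # per-trainer seeding plus Paddle parallel env init
    # ... build the model/loader and start training here ...
```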
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0 # http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software # Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, # distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import absolute_import from __future__ import absolute_import
@@ -18,13 +18,12 @@ from __future__ import print_function

 import os
 import yaml
-import numpy as np
 from collections import OrderedDict

+from ppdet.metrics import get_categories
 from ppdet.utils.logger import setup_logger
-logger = setup_logger('export_utils')
-
-__all__ = ['dump_infer_config']
+logger = setup_logger(__name__)

 # Global dictionary
 TRT_MIN_SUBGRAPH = {
@@ -40,22 +39,13 @@ TRT_MIN_SUBGRAPH = {
 }


-def parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
+def _parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):
     preprocess_list = []

     anno_file = dataset_cfg.get_anno()

     with_background = reader_cfg['with_background']
-    use_default_label = dataset_cfg.use_default_label

-    if metric == 'COCO':
-        from ppdet.utils.coco_eval import get_category_info
-    elif metric == 'VOC':
-        from ppdet.utils.voc_eval import get_category_info
-    else:
-        raise ValueError("metric only supports COCO, but received {}".format(
-            metric))
-    clsid2catid, catid2name = get_category_info(anno_file, with_background,
-                                                use_default_label)
+    clsid2catid, catid2name = get_categories(metric, anno_file, with_background)

     label_list = [str(cat) for cat in catid2name.values()]

@@ -86,7 +76,7 @@ def parse_reader(reader_cfg, dataset_cfg, metric, arch, image_shape):

     return with_background, preprocess_list, label_list, image_shape


-def dump_infer_config(config, path, image_shape, model):
+def _dump_infer_config(config, path, image_shape, model):
     arch_state = False
     from ppdet.core.config.yaml_helpers import setup_orderdict
     setup_orderdict()
@@ -112,7 +102,7 @@ def dump_infer_config(config, path, image_shape, model):
     if getattr(model.__dict__, 'mask_post_process', None):
         infer_cfg['mask_resolution'] = model.mask_post_process.mask_resolution
     infer_cfg['with_background'], infer_cfg['Preprocess'], infer_cfg[
-        'label_list'], image_shape = parse_reader(
+        'label_list'], image_shape = _parse_reader(
             config['TestReader'], config['TestDataset'], config['metric'],
             infer_cfg['arch'], image_shape)
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import random
import datetime
import numpy as np
from PIL import Image
import paddle
from paddle.distributed import ParallelEnv
from paddle.static import InputSpec
from ppdet.core.workspace import create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.utils.visualizer import visualize_results
from ppdet.metrics import Metric, COCOMetric, VOCMetric, get_categories, get_infer_results
import ppdet.utils.stats as stats
from .callbacks import Callback, ComposeCallback, LogPrinter, Checkpointer
from .export_utils import _dump_infer_config
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['Trainer']
class Trainer(object):
def __init__(self, cfg, mode='train'):
self.cfg = cfg
assert mode.lower() in ['train', 'eval', 'test'], \
"mode should be 'train', 'eval' or 'test'"
self.mode = mode.lower()
# build model
self.model = create(cfg.architecture)
if ParallelEnv().nranks > 1:
self.model = paddle.DataParallel(self.model)
# build data loader
self.dataset = cfg['{}Dataset'.format(self.mode.capitalize())]
# TestDataset build after user set images, skip loader creation here
if self.mode != 'test':
self.loader = create('{}Reader'.format(self.mode.capitalize()))(
self.dataset, cfg.worker_num)
# build optimizer in train mode
self.optimizer = None
if self.mode == 'train':
steps_per_epoch = len(self.loader)
self.lr = create('LearningRate')(steps_per_epoch)
self.optimizer = create('OptimizerBuilder')(self.lr,
self.model.parameters())
self.status = {}
self.start_epoch = 0
self.end_epoch = cfg.epoch
self._weights_loaded = False
# initial default callbacks
self._init_callbacks()
# initial default metrics
self._init_metrics()
self._reset_metrics()
def _init_callbacks(self):
if self.mode == 'train':
self._callbacks = [LogPrinter(self), Checkpointer(self)]
self._compose_callback = ComposeCallback(self._callbacks)
elif self.mode == 'eval':
self._callbacks = [LogPrinter(self)]
self._compose_callback = ComposeCallback(self._callbacks)
else:
self._callbacks = []
self._compose_callback = None
def _init_metrics(self):
if self.mode == 'eval':
if self.cfg.metric == 'COCO':
mask_resolution = self.model.mask_post_process.mask_resolution if hasattr(
self.model, 'mask_post_process') else None
self._metrics = [
COCOMetric(
anno_file=self.dataset.get_anno(),
with_background=self.cfg.with_background,
mask_resolution=mask_resolution)
]
elif self.cfg.metric == 'VOC':
self._metrics = [
VOCMetric(
anno_file=self.dataset.get_anno(),
with_background=self.cfg.with_background,
class_num=self.cfg.num_classes,
map_type=self.cfg.map_type)
]
else:
logger.warn("Metric not support for metric type {}".format(
self.cfg.metric))
self._metrics = []
else:
self._metrics = []
def _reset_metrics(self):
for metric in self._metrics:
metric.reset()
def register_callbacks(self, callbacks):
callbacks = [h for h in list(callbacks) if h is not None]
for c in callbacks:
            assert isinstance(c, Callback), \
                    "callbacks should be instances of a subclass of Callback"
self._callbacks.extend(callbacks)
self._compose_callback = ComposeCallback(self._callbacks)
def register_metrics(self, metrics):
metrics = [m for m in list(metrics) if m is not None]
for m in metrics:
            assert isinstance(m, Metric), \
                    "metrics should be instances of a subclass of Metric"
self._metrics.extend(metrics)
def load_weights(self, weights, weight_type='pretrain'):
assert weight_type in ['pretrain', 'resume', 'finetune'], \
"weight_type can only be 'pretrain', 'resume', 'finetune'"
if weight_type == 'resume':
self.start_epoch = load_weight(self.model, weights, self.optimizer)
logger.debug("Resume weights of epoch {}".format(self.start_epoch))
else:
self.start_epoch = 0
load_pretrain_weight(self.model, weights,
self.cfg.get('load_static_weights', False),
weight_type)
logger.debug("Load {} weights {} to start training".format(
weight_type, weights))
self._weights_loaded = True
def train(self):
assert self.mode == 'train', "Model not in 'train' mode"
# if no given weights loaded, load backbone pretrain weights as default
if not self._weights_loaded:
self.load_weights(self.cfg.pretrain_weights)
self.status.update({
'epoch_id': self.start_epoch,
'step_id': 0,
'steps_per_epoch': len(self.loader)
})
self.status['batch_time'] = stats.SmoothedValue(
self.cfg.log_iter, fmt='{avg:.4f}')
self.status['data_time'] = stats.SmoothedValue(
self.cfg.log_iter, fmt='{avg:.4f}')
self.status['training_staus'] = stats.TrainingStats(self.cfg.log_iter)
for epoch_id in range(self.start_epoch, self.cfg.epoch):
self.status['epoch_id'] = epoch_id
self._compose_callback.on_epoch_begin(self.status)
self.loader.dataset.set_epoch(epoch_id)
iter_tic = time.time()
for step_id, data in enumerate(self.loader):
self.status['data_time'].update(time.time() - iter_tic)
self.status['step_id'] = step_id
self._compose_callback.on_step_begin(self.status)
# model forward
self.model.train()
outputs = self.model(data)
loss = outputs['loss']
# model backward
loss.backward()
self.optimizer.step()
curr_lr = self.optimizer.get_lr()
self.lr.step()
self.optimizer.clear_grad()
self.status['learning_rate'] = curr_lr
if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
self.status['training_staus'].update(outputs)
self.status['batch_time'].update(time.time() - iter_tic)
self._compose_callback.on_step_end(self.status)
self._compose_callback.on_epoch_end(self.status)
def evaluate(self):
sample_num = 0
tic = time.time()
self._compose_callback.on_epoch_begin(self.status)
for step_id, data in enumerate(self.loader):
self.status['step_id'] = step_id
self._compose_callback.on_step_begin(self.status)
# forward
self.model.eval()
outs = self.model(data)
# update metrics
for metric in self._metrics:
metric.update(data, outs)
sample_num += data['im_id'].numpy().shape[0]
self._compose_callback.on_step_end(self.status)
self.status['sample_num'] = sample_num
self.status['cost_time'] = time.time() - tic
self._compose_callback.on_epoch_end(self.status)
# accumulate metric to log out
for metric in self._metrics:
metric.accumulate()
metric.log()
        # reset metric states since evaluate may be called multiple times
self._reset_metrics()
def predict(self, images, draw_threshold=0.5, output_dir='output'):
self.dataset.set_images(images)
loader = create('TestReader')(self.dataset, 0)
imid2path = self.dataset.get_imid2path()
anno_file = self.dataset.get_anno()
with_background = self.cfg.with_background
clsid2catid, catid2name = get_categories(self.cfg.metric, anno_file,
with_background)
# Run Infer
for step_id, data in enumerate(loader):
self.status['step_id'] = step_id
# forward
self.model.eval()
outs = self.model(data)
for key, value in outs.items():
outs[key] = value.numpy()
for key in ['im_shape', 'scale_factor', 'im_id']:
outs[key] = data[key]
            # FIXME: refactor for more elegant coding
if 'mask' in outs and 'bbox' in outs:
mask_resolution = self.model.mask_post_process.mask_resolution
from ppdet.py_op.post_process import mask_post_process
outs['mask'] = mask_post_process(outs, outs['im_shape'],
outs['scale_factor'],
mask_resolution)
batch_res = get_infer_results(outs, clsid2catid)
bbox_num = outs['bbox_num']
start = 0
for i, im_id in enumerate(outs['im_id']):
image_path = imid2path[int(im_id)]
image = Image.open(image_path).convert('RGB')
end = start + bbox_num[i]
bbox_res = batch_res['bbox'][start:end] \
if 'bbox' in batch_res else None
mask_res = batch_res['mask'][start:end] \
if 'mask' in batch_res else None
image = visualize_results(image, bbox_res, mask_res,
                                          int(im_id), catid2name,
draw_threshold)
# save image with detection
save_name = self._get_save_image_name(output_dir, image_path)
logger.info("Detection bbox results save in {}".format(
save_name))
image.save(save_name, quality=95)
start = end
def _get_save_image_name(self, output_dir, image_path):
"""
Get save image name from source image path.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
image_name = os.path.split(image_path)[-1]
name, ext = os.path.splitext(image_name)
return os.path.join(output_dir, "{}".format(name)) + ext
def export(self, output_dir='output_inference'):
model_name = os.path.splitext(os.path.split(self.cfg.filename)[-1])[0]
save_dir = os.path.join(output_dir, model_name)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
image_shape = None
if 'inputs_def' in self.cfg['TestReader']:
inputs_def = self.cfg['TestReader']['inputs_def']
image_shape = inputs_def.get('image_shape', None)
if image_shape is None:
image_shape = [3, None, None]
# Save infer cfg
_dump_infer_config(self.cfg,
os.path.join(save_dir, 'infer_cfg.yml'), image_shape,
self.model)
input_spec = [{
"image": InputSpec(
shape=[None] + image_shape, name='image'),
"im_shape": InputSpec(
shape=[None, 2], name='im_shape'),
"scale_factor": InputSpec(
shape=[None, 2], name='scale_factor')
}]
# dy2st and save model
static_model = paddle.jit.to_static(self.model, input_spec=input_spec)
paddle.jit.save(static_model, os.path.join(save_dir, 'model'))
logger.info("Export model and saved in {}".format(save_dir))
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import metrics
from .metrics import *
from . import category
from .category import *
__all__ = metrics.__all__ \
+ category.__all__
(This diff has been collapsed.)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from ppdet.py_op.post_process import get_det_res, get_seg_res
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
def get_infer_results(outs, catid):
"""
Get result at the stage of inference.
The output format is dictionary containing bbox or mask result.
For example, bbox result is a list and each element contains
image_id, category_id, bbox and score.
"""
if outs is None or len(outs) == 0:
raise ValueError(
            'The number of valid detection results is zero. Please use a reasonable model and check the input data.'
)
im_id = outs['im_id']
im_shape = outs['im_shape']
scale_factor = outs['scale_factor']
infer_res = {}
if 'bbox' in outs:
infer_res['bbox'] = get_det_res(outs['bbox'], outs['bbox_num'], im_id,
catid)
if 'mask' in outs:
# mask post process
infer_res['mask'] = get_seg_res(outs['mask'], outs['bbox_num'], im_id,
catid)
return infer_res
def cocoapi_eval(jsonfile,
style,
coco_gt=None,
anno_file=None,
max_dets=(100, 300, 1000)):
"""
Args:
jsonfile: Evaluation json file, eg: bbox.json, mask.json.
style: COCOeval style, can be `bbox` , `segm` and `proposal`.
coco_gt: Whether to load COCOAPI through anno_file,
eg: coco_gt = COCO(anno_file)
anno_file: COCO annotations file.
max_dets: COCO evaluation maxDets.
"""
    assert coco_gt is not None or anno_file is not None
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    if coco_gt is None:
        coco_gt = COCO(anno_file)
    logger.info("Start evaluation...")
coco_dt = coco_gt.loadRes(jsonfile)
if style == 'proposal':
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.useCats = 0
coco_eval.params.maxDets = list(max_dets)
else:
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats
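A sketch of how these helpers compose into a COCO evaluation loop; `model`, `loader`, and the annotation path are assumed to exist, and `clsid2catid` comes from `get_categories`:

```python
import json
from ppdet.metrics import get_categories, get_infer_results
from ppdet.metrics.coco_utils import cocoapi_eval

anno = 'annotations/instances_val2017.json'  # illustrative path
clsid2catid, _ = get_categories('COCO', anno, True)

dets = []
for data in loader:  # an eval dataloader yielding dict batches
    outs = {k: v.numpy() for k, v in model(data).items()}
    for k in ['im_id', 'im_shape', 'scale_factor']:
        outs[k] = data[k].numpy()
    dets += get_infer_results(outs, clsid2catid)['bbox']

with open('bbox.json', 'w') as f:
    json.dump(dets, f)
cocoapi_eval('bbox.json', 'bbox', anno_file=anno)
```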
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,10 +20,10 @@ from __future__ import unicode_literals

 import sys
 import numpy as np

-from .logger import setup_logger
+from ppdet.utils.logger import setup_logger
 logger = setup_logger(__name__)

-__all__ = ['bbox_area', 'jaccard_overlap', 'DetectionMAP']
+__all__ = ['bbox_area', 'jaccard_overlap', 'prune_zero_padding', 'DetectionMAP']


 def bbox_area(bbox, is_bbox_normalized):
@@ -55,6 +55,17 @@ def jaccard_overlap(pred, gt, is_bbox_normalized=False):
     return overlap


+def prune_zero_padding(gt_box, gt_label, difficult=None):
+    valid_cnt = 0
+    for i in range(len(gt_box)):
+        if gt_box[i, 0] == 0 and gt_box[i, 1] == 0 and \
+                gt_box[i, 2] == 0 and gt_box[i, 3] == 0:
+            break
+        valid_cnt += 1
+    return (gt_box[:valid_cnt], gt_label[:valid_cnt], difficult[:valid_cnt]
+            if difficult is not None else None)
+
+
 class DetectionMAP(object):
     """
     Calculate detection mean average precision.
...
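A quick illustration of `prune_zero_padding` with hypothetical inputs; every row from the first all-zero box onward is treated as padding and dropped:

```python
import numpy as np

gt_box = np.array([[10., 10., 50., 50.],
                   [20., 20., 60., 60.],
                   [0., 0., 0., 0.]])   # padded row
gt_label = np.array([[1], [3], [0]])

box, label, _ = prune_zero_padding(gt_box, gt_label)
print(box.shape, label.shape)           # (2, 4) (2, 1)
```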
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import json
import paddle
import numpy as np
from .category import get_categories
from .map_utils import prune_zero_padding, DetectionMAP
from .coco_utils import get_infer_results, cocoapi_eval
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['Metric', 'COCOMetric', 'VOCMetric', 'get_infer_results']
class Metric(paddle.metric.Metric):
def name(self):
return self.__class__.__name__
    # paddle.metric.Metric defines :meth:`update`, :meth:`accumulate`
    # and :meth:`reset`; in ppdet we also need the following 2 methods:
# abstract method for logging metric results
def log(self):
pass
# abstract method for getting metric results
def get_results(self):
pass
class COCOMetric(Metric):
def __init__(self, anno_file, with_background=True, mask_resolution=None):
assert os.path.isfile(anno_file), \
"anno_file {} not a file".format(anno_file)
self.anno_file = anno_file
self.with_background = with_background
self.mask_resolution = mask_resolution
self.clsid2catid, self.catid2name = get_categories('COCO', anno_file,
with_background)
self.reset()
def reset(self):
# only bbox and mask evaluation support currently
self.results = {'bbox': [], 'mask': []}
self.eval_results = {}
def update(self, inputs, outputs):
outs = {}
# outputs Tensor -> numpy.ndarray
for k, v in outputs.items():
outs[k] = v.numpy() if isinstance(v, paddle.Tensor) else v
# some input fields also needed
for k in ['im_id', 'scale_factor', 'im_shape']:
v = inputs[k]
outs[k] = v.numpy() if isinstance(v, paddle.Tensor) else v
if 'mask' in outs and 'bbox' in outs:
from ppdet.py_op.post_process import mask_post_process
outs['mask'] = mask_post_process(outs, outs['im_shape'],
outs['scale_factor'],
self.mask_resolution)
infer_results = get_infer_results(outs, self.clsid2catid)
self.results['bbox'] += infer_results[
'bbox'] if 'bbox' in infer_results else []
self.results['mask'] += infer_results[
'mask'] if 'mask' in infer_results else []
def accumulate(self):
if len(self.results['bbox']) > 0:
with open("bbox.json", 'w') as f:
json.dump(self.results['bbox'], f)
logger.info('The bbox result is saved to bbox.json.')
bbox_stats = cocoapi_eval(
'bbox.json', 'bbox', anno_file=self.anno_file)
self.eval_results['bbox'] = bbox_stats
sys.stdout.flush()
if len(self.results['mask']) > 0:
with open("mask.json", 'w') as f:
json.dump(self.results['mask'], f)
logger.info('The mask result is saved to mask.json.')
seg_stats = cocoapi_eval(
'mask.json', 'segm', anno_file=self.anno_file)
self.eval_results['mask'] = seg_stats
sys.stdout.flush()
def log(self):
pass
def get_results(self):
return self.eval_results
class VOCMetric(Metric):
def __init__(self,
anno_file,
with_background=True,
class_num=20,
overlap_thresh=0.5,
map_type='11point',
is_bbox_normalized=False,
evaluate_difficult=False):
assert os.path.isfile(anno_file), \
"anno_file {} not a file".format(anno_file)
self.anno_file = anno_file
self.with_background = with_background
self.clsid2catid, self.catid2name = get_categories('VOC', anno_file,
with_background)
self.overlap_thresh = overlap_thresh
self.map_type = map_type
self.evaluate_difficult = evaluate_difficult
self.detection_map = DetectionMAP(
class_num=class_num,
overlap_thresh=overlap_thresh,
map_type=map_type,
is_bbox_normalized=is_bbox_normalized,
evaluate_difficult=evaluate_difficult)
self.reset()
def reset(self):
self.detection_map.reset()
def update(self, inputs, outputs):
bboxes = outputs['bbox'].numpy()
bbox_lengths = outputs['bbox_num'].numpy()
        if bboxes is None or bboxes.shape == (1, 1):
return
gt_boxes = inputs['gt_bbox'].numpy()
gt_labels = inputs['gt_class'].numpy()
difficults = inputs['difficult'].numpy() if not self.evaluate_difficult \
else None
scale_factor = inputs['scale_factor'].numpy(
) if 'scale_factor' in inputs else np.ones(
(gt_boxes.shape[0], 2)).astype('float32')
bbox_idx = 0
for i in range(gt_boxes.shape[0]):
gt_box = gt_boxes[i]
h, w = scale_factor[i]
gt_box = gt_box / np.array([w, h, w, h])
gt_label = gt_labels[i]
difficult = None if difficults is None \
else difficults[i]
bbox_num = bbox_lengths[i]
bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
gt_box, gt_label, difficult = prune_zero_padding(gt_box, gt_label,
difficult)
self.detection_map.update(bbox, gt_box, gt_label, difficult)
bbox_idx += bbox_num
def accumulate(self):
logger.info("Accumulating evaluatation results...")
self.detection_map.accumulate()
def log(self):
map_stat = 100. * self.detection_map.get_map()
logger.info("mAP({:.2f}, {}) = {:.2f}%".format(self.overlap_thresh,
self.map_type, map_stat))
def get_results(self):
        return self.detection_map.get_map()
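For standalone use outside the `Trainer`, the `Metric` API above can be driven directly. A sketch, where the annotation path, `loader`, and `model` are assumed:

```python
metric = VOCMetric(anno_file='dataset/voc/label_list.txt',  # illustrative
                   class_num=20, map_type='11point')
for data in loader:           # eval dataloader yielding dict batches
    outs = model(data)        # model in eval mode
    metric.update(data, outs)
metric.accumulate()
metric.log()                  # e.g. "mAP(0.50, 11point) = 78.12%"
metric.reset()
```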
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import model_zoo
from .model_zoo import *
__all__ = model_zoo.__all__
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import glob
import pkg_resources
try:
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence
from ppdet.core.workspace import load_config, create
from ppdet.utils.checkpoint import load_weight
from ppdet.utils.download import get_config_path
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = [
'list_model', 'get_config_file', 'get_weights_url', 'get_model',
'MODEL_ZOO_FILENAME'
]
MODEL_ZOO_FILENAME = 'MODEL_ZOO'
def list_model(filters=[]):
model_zoo_file = pkg_resources.resource_filename('ppdet.model_zoo',
MODEL_ZOO_FILENAME)
with open(model_zoo_file) as f:
model_names = f.read().splitlines()
# filter model_name
def filt(name):
for f in filters:
if name.find(f) < 0:
return False
return True
if isinstance(filters, str) or not isinstance(filters, Sequence):
filters = [filters]
model_names = [name for name in model_names if filt(name)]
if len(model_names) == 0 and len(filters) > 0:
raise ValueError("no model found, please check filters seeting, "
"filters can be set as following kinds:\n"
"\tDataset: coco, voc ...\n"
"\tArchitecture: yolo, rcnn, ssd ...\n"
"\tBackbone: resnet, vgg, darknet ...\n")
model_str = "Available Models:\n"
for model_name in model_names:
model_str += "\t{}\n".format(model_name)
logger.info(model_str)
# models and configs are saved on bcebos under the dygraph directory
def get_config_file(model_name):
return get_config_path("ppdet://dygraph/configs/{}.yml".format(model_name))
def get_weights_url(model_name):
return "ppdet://dygraph/{}.pdparams".format(model_name)
def get_model(model_name, pretrained=True):
cfg_file = get_config_file(model_name)
cfg = load_config(cfg_file)
model = create(cfg.architecture)
if pretrained:
load_weight(model, get_weights_url(model_name))
return model
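With the package installed, fetching a pretrained detector becomes a two-liner; the model name below is illustrative, and `list_model()` logs the valid names:

```python
from ppdet.model_zoo import get_model

# downloads the config and weights from bcebos on first use
model = get_model('yolov3_darknet53_270e_coco', pretrained=True)
model.eval()
```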
@@ -526,8 +526,10 @@ class SSDBox(object):
         out_h = paddle.exp(box[:, :, 3] * 0.2) * pb_h

         if self.is_normalized:
-            h = im_shape[:, 0] / scale_factor[:, 0]
-            w = im_shape[:, 1] / scale_factor[:, 1]
+            h = paddle.unsqueeze(
+                im_shape[:, 0] / scale_factor[:, 0], axis=-1)
+            w = paddle.unsqueeze(
+                im_shape[:, 1] / scale_factor[:, 1], axis=-1)
             output = paddle.stack(
                 [(out_x - out_w / 2.) * w, (out_y - out_h / 2.) * h,
                  (out_x + out_w / 2.) * w, (out_y + out_h / 2.) * h],
...
@@ -35,10 +35,12 @@ def is_url(path):
     Args:
         path (string): URL string or not.
     """
-    return path.startswith('http://') or path.startswith('https://')
+    return path.startswith('http://') \
+        or path.startswith('https://') \
+        or path.startswith('ppdet://')


-def get_weight_path(path):
+def get_weights_path_dist(path):
     env = os.environ
     if 'PADDLE_TRAINERS_NUM' in env and 'PADDLE_TRAINER_ID' in env:
         trainer_id = int(env['PADDLE_TRAINER_ID'])
@@ -79,7 +81,7 @@ def _strip_postfix(path):

 def load_weight(model, weight, optimizer=None):
     if is_url(weight):
-        weight = get_weight_path(weight)
+        weight = get_weights_path_dist(weight)

     path = _strip_postfix(weight)
     pdparam_path = path + '.pdparams'
@@ -110,7 +112,7 @@ def load_pretrain_weight(model,
                          weight_type='pretrain'):
     assert weight_type in ['pretrain', 'finetune']
     if is_url(pretrain_weight):
-        pretrain_weight = get_weight_path(pretrain_weight)
+        pretrain_weight = get_weights_path_dist(pretrain_weight)

     path = _strip_postfix(pretrain_weight)
     if not (os.path.isdir(path) or os.path.isfile(path) or
...
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import json
import cv2
import numpy as np
from .logger import setup_logger
logger = setup_logger(__name__)
__all__ = [
'bbox_eval',
'mask_eval',
'bbox2out',
'mask2out',
'get_category_info',
'proposal_eval',
'cocoapi_eval',
]
def clip_bbox(bbox, im_size=None):
h = 1. if im_size is None else im_size[0]
w = 1. if im_size is None else im_size[1]
xmin = max(min(bbox[0], w), 0.)
ymin = max(min(bbox[1], h), 0.)
xmax = max(min(bbox[2], w), 0.)
ymax = max(min(bbox[3], h), 0.)
return xmin, ymin, xmax, ymax
def proposal_eval(results, anno_file, outfile, max_dets=(100, 300, 1000)):
assert 'proposal' in results[0]
assert outfile.endswith('.json')
xywh_results = proposal2out(results)
assert len(
        xywh_results) > 0, "The number of valid proposals detected is zero.\n \
        Please use a reasonable model and check the input data."
with open(outfile, 'w') as f:
json.dump(xywh_results, f)
cocoapi_eval(outfile, 'proposal', anno_file=anno_file, max_dets=max_dets)
# flush coco evaluation result
sys.stdout.flush()
def bbox_eval(results,
anno_file,
outfile,
with_background=True,
is_bbox_normalized=False,
save_only=False):
assert 'bbox' in results[0]
assert outfile.endswith('.json')
from pycocotools.coco import COCO
coco_gt = COCO(anno_file)
cat_ids = coco_gt.getCatIds()
# when with_background = True, mapping category to classid, like:
# background:0, first_class:1, second_class:2, ...
clsid2catid = dict(
{i + int(with_background): catid
for i, catid in enumerate(cat_ids)})
xywh_results = bbox2out(
results, clsid2catid, is_bbox_normalized=is_bbox_normalized)
if len(xywh_results) == 0:
logger.warning("The number of valid bbox detected is zero.\n \
Please use reasonable model and check input data.\n \
stop eval!")
return [0.0]
with open(outfile, 'w') as f:
json.dump(xywh_results, f)
if save_only:
        logger.info('The bbox result is saved to {} and the mAP is not '
                    'evaluated.'.format(outfile))
return
map_stats = cocoapi_eval(outfile, 'bbox', coco_gt=coco_gt)
# flush coco evaluation result
sys.stdout.flush()
return map_stats
def mask_eval(results,
anno_file,
outfile,
resolution,
thresh_binarize=0.5,
save_only=False):
assert 'mask' in results[0]
assert outfile.endswith('.json')
from pycocotools.coco import COCO
coco_gt = COCO(anno_file)
clsid2catid = {i + 1: v for i, v in enumerate(coco_gt.getCatIds())}
segm_results = []
for t in results:
im_ids = np.array(t['im_id'][0])
bboxes = t['bbox'][0]
lengths = t['bbox'][1][0]
masks = t['mask']
        if bboxes is None or bboxes.shape == (1, 1):
continue
if len(bboxes.tolist()) == 0:
continue
s = 0
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i][0])
clsid_scores = bboxes[s:s + num][:, 0:2]
mask = masks[s:s + num]
s += num
for j in range(num):
clsid, score = clsid_scores[j].tolist()
catid = int(clsid2catid[clsid])
segm = mask[j]
segm['counts'] = segm['counts'].decode('utf8')
coco_res = {
'image_id': im_id,
'category_id': int(catid),
'segmentation': segm,
'score': score
}
segm_results.append(coco_res)
if len(segm_results) == 0:
logger.warning("The number of valid mask detected is zero.\n \
Please use reasonable model and check input data.")
return
with open(outfile, 'w') as f:
json.dump(segm_results, f)
if save_only:
        logger.info('The mask result is saved to {} and the mAP is not '
                    'evaluated.'.format(outfile))
return
cocoapi_eval(outfile, 'segm', coco_gt=coco_gt)
def cocoapi_eval(jsonfile,
style,
coco_gt=None,
anno_file=None,
max_dets=(100, 300, 1000)):
"""
Args:
jsonfile: Evaluation json file, eg: bbox.json, mask.json.
style: COCOeval style, can be `bbox` , `segm` and `proposal`.
coco_gt: Whether to load COCOAPI through anno_file,
eg: coco_gt = COCO(anno_file)
anno_file: COCO annotations file.
max_dets: COCO evaluation maxDets.
"""
    assert coco_gt is not None or anno_file is not None
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    if coco_gt is None:
        coco_gt = COCO(anno_file)
    logger.info("Start evaluation...")
coco_dt = coco_gt.loadRes(jsonfile)
if style == 'proposal':
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.useCats = 0
coco_eval.params.maxDets = list(max_dets)
else:
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval.stats
def proposal2out(results, is_bbox_normalized=False):
xywh_res = []
for t in results:
bboxes = t['proposal'][0]
lengths = t['proposal'][1][0]
im_ids = np.array(t['im_id'][0]).flatten()
assert len(lengths) == im_ids.size
        if bboxes is None or bboxes.shape == (1, 1):
continue
k = 0
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i])
for j in range(num):
dt = bboxes[k]
xmin, ymin, xmax, ymax = dt.tolist()
if is_bbox_normalized:
xmin, ymin, xmax, ymax = \
clip_bbox([xmin, ymin, xmax, ymax])
w = xmax - xmin
h = ymax - ymin
else:
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': 1,
'bbox': bbox,
'score': 1.0
}
xywh_res.append(coco_res)
k += 1
return xywh_res
def bbox2out(results, clsid2catid, is_bbox_normalized=False):
"""
Args:
results: request a dict, should include: `bbox`, `im_id`,
if is_bbox_normalized=True, also need `im_shape`.
clsid2catid: class id to category id map of COCO2017 dataset.
is_bbox_normalized: whether or not bbox is normalized.
"""
xywh_res = []
for t in results:
bboxes = t['bbox'][0]
if len(t['bbox'][1]) == 0: continue
lengths = t['bbox'][1][0]
im_ids = np.array(t['im_id'][0]).flatten()
        if bboxes is None or bboxes.shape == (1, 1) or len(bboxes) == 0:
continue
k = 0
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i])
for j in range(num):
dt = bboxes[k]
clsid, score, xmin, ymin, xmax, ymax = dt.tolist()
catid = (clsid2catid[int(clsid)])
if is_bbox_normalized:
xmin, ymin, xmax, ymax = \
clip_bbox([xmin, ymin, xmax, ymax])
w = xmax - xmin
h = ymax - ymin
im_shape = t['im_shape'][0][i].tolist()
im_height, im_width = int(im_shape[0]), int(im_shape[1])
xmin *= im_width
ymin *= im_height
w *= im_width
h *= im_height
else:
# for yolov4
# w = xmax - xmin
# h = ymax - ymin
w = xmax - xmin + 1
h = ymax - ymin + 1
bbox = [xmin, ymin, w, h]
coco_res = {
'image_id': im_id,
'category_id': catid,
'bbox': bbox,
'score': score
}
xywh_res.append(coco_res)
k += 1
return xywh_res
def mask2out(results, clsid2catid, resolution, thresh_binarize=0.5):
import pycocotools.mask as mask_util
scale = (resolution + 2.0) / resolution
segm_res = []
# for each batch
for t in results:
bboxes = t['bbox'][0]
lengths = t['bbox'][1][0]
im_ids = np.array(t['im_id'][0])
        if bboxes is None or bboxes.shape == (1, 1):
continue
if len(bboxes.tolist()) == 0:
continue
masks = t['mask'][0]
s = 0
# for each sample
for i in range(len(lengths)):
num = lengths[i]
im_id = int(im_ids[i][0])
im_shape = t['im_shape'][0][i]
bbox = bboxes[s:s + num][:, 2:]
clsid_scores = bboxes[s:s + num][:, 0:2]
mask = masks[s:s + num]
s += num
im_h = int(im_shape[0])
im_w = int(im_shape[1])
expand_bbox = expand_boxes(bbox, scale)
expand_bbox = expand_bbox.astype(np.int32)
padded_mask = np.zeros(
(resolution + 2, resolution + 2), dtype=np.float32)
for j in range(num):
xmin, ymin, xmax, ymax = expand_bbox[j].tolist()
clsid, score = clsid_scores[j].tolist()
clsid = int(clsid)
padded_mask[1:-1, 1:-1] = mask[j, clsid, :, :]
catid = clsid2catid[clsid]
w = xmax - xmin + 1
h = ymax - ymin + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
resized_mask = cv2.resize(padded_mask, (w, h))
resized_mask = np.array(
resized_mask > thresh_binarize, dtype=np.uint8)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
x0 = min(max(xmin, 0), im_w)
x1 = min(max(xmax + 1, 0), im_w)
y0 = min(max(ymin, 0), im_h)
y1 = min(max(ymax + 1, 0), im_h)
im_mask[y0:y1, x0:x1] = resized_mask[(y0 - ymin):(y1 - ymin), (
x0 - xmin):(x1 - xmin)]
segm = mask_util.encode(
np.array(
im_mask[:, :, np.newaxis], order='F'))[0]
catid = clsid2catid[clsid]
segm['counts'] = segm['counts'].decode('utf8')
coco_res = {
'image_id': im_id,
'category_id': catid,
'segmentation': segm,
'score': score
}
segm_res.append(coco_res)
return segm_res
def expand_boxes(boxes, scale):
"""
Expand an array of boxes by a given scale.
"""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
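# Example with hypothetical values: each box is scaled about its center,
# e.g. by the (resolution + 2) / resolution padding ratio used in mask2out:
#   expand_boxes(np.array([[10., 10., 30., 30.]]), 1.5)
#   -> array([[ 5.,  5., 35., 35.]])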
def get_category_info(anno_file=None,
with_background=True,
use_default_label=False):
if use_default_label or anno_file is None \
or not os.path.exists(anno_file):
logger.info("Not found annotation file {}, load "
"coco17 categories.".format(anno_file))
return coco17_category_info(with_background)
else:
logger.info("Load categories from {}".format(anno_file))
return get_category_info_from_anno(anno_file, with_background)
def get_category_info_from_anno(anno_file, with_background=True):
"""
Get class id to category id map and category id
to category name map from annotation file.
Args:
anno_file (str): annotation file path
with_background (bool, default True):
whether load background as class 0.
"""
from pycocotools.coco import COCO
coco = COCO(anno_file)
cats = coco.loadCats(coco.getCatIds())
clsid2catid = {
i + int(with_background): cat['id']
for i, cat in enumerate(cats)
}
catid2name = {cat['id']: cat['name'] for cat in cats}
if with_background:
clsid2catid.update({0: 0})
catid2name.update({0: 'background'})
return clsid2catid, catid2name
def coco17_category_info(with_background=True):
"""
Get class id to category id map and category id
to category name map of COCO2017 dataset
Args:
with_background (bool, default True):
whether load background as class 0.
"""
clsid2catid = {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
12: 13,
13: 14,
14: 15,
15: 16,
16: 17,
17: 18,
18: 19,
19: 20,
20: 21,
21: 22,
22: 23,
23: 24,
24: 25,
25: 27,
26: 28,
27: 31,
28: 32,
29: 33,
30: 34,
31: 35,
32: 36,
33: 37,
34: 38,
35: 39,
36: 40,
37: 41,
38: 42,
39: 43,
40: 44,
41: 46,
42: 47,
43: 48,
44: 49,
45: 50,
46: 51,
47: 52,
48: 53,
49: 54,
50: 55,
51: 56,
52: 57,
53: 58,
54: 59,
55: 60,
56: 61,
57: 62,
58: 63,
59: 64,
60: 65,
61: 67,
62: 70,
63: 72,
64: 73,
65: 74,
66: 75,
67: 76,
68: 77,
69: 78,
70: 79,
71: 80,
72: 81,
73: 82,
74: 84,
75: 85,
76: 86,
77: 87,
78: 88,
79: 89,
80: 90
}
catid2name = {
0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush'
}
if not with_background:
clsid2catid = {k - 1: v for k, v in clsid2catid.items()}
catid2name.pop(0)
else:
clsid2catid.update({0: 0})
return clsid2catid, catid2name
@@ -18,6 +18,7 @@ from __future__ import print_function

 import os
 import os.path as osp
+import yaml
 import shutil
 import requests
 import tqdm
@@ -26,17 +27,19 @@ import tarfile
 import zipfile

 from .voc_utils import create_list
+from ppdet.core.workspace import BASE_KEY

 from .logger import setup_logger
 logger = setup_logger(__name__)

 __all__ = [
-    'get_weights_path', 'get_dataset_path', 'download_dataset',
-    'create_voc_list'
+    'get_weights_path', 'get_dataset_path', 'get_config_path',
+    'download_dataset', 'create_voc_list'
 ]

 WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/weights")
 DATASET_HOME = osp.expanduser("~/.cache/paddle/dataset")
+CONFIGS_HOME = osp.expanduser("~/.cache/paddle/configs")

 # dict of {dataset_name: (download_info, sub_dirs)}
 # download info: [(url, md5sum)]
@@ -83,15 +86,68 @@ DATASETS = {

 DOWNLOAD_RETRY_LIMIT = 3

+PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX = 'https://paddlemodels.bj.bcebos.com/object_detection/'
+
+
+def parse_url(url):
+    url = url.replace("ppdet://", PPDET_WEIGHTS_DOWNLOAD_URL_PREFIX)
+    return url
+

 def get_weights_path(url):
-    """Get weights path from WEIGHT_HOME, if not exists,
+    """Get weights path from WEIGHTS_HOME, if not exists,
     download it from url.
     """
+    url = parse_url(url)
     path, _ = get_path(url, WEIGHTS_HOME)
     return path


+def get_config_path(url):
+    """Get config path from CONFIGS_HOME, if not exists,
+    download it from url.
+    """
+    url = parse_url(url)
+    path, _ = get_path(url, CONFIGS_HOME)
+    _download_config(path, url, CONFIGS_HOME)
+    return path
+
+
+def _download_config(cfg_path, cfg_url, cur_dir):
+    with open(cfg_path) as f:
+        cfg = yaml.load(f, Loader=yaml.Loader)
+
+    # download dependent base ymls
+    if BASE_KEY in cfg:
+        base_ymls = list(cfg[BASE_KEY])
+        for base_yml in base_ymls:
+            if base_yml.startswith("~"):
+                base_yml = os.path.expanduser(base_yml)
+                relpath = osp.relpath(base_yml, cfg_path)
+            if not base_yml.startswith('/'):
+                relpath = base_yml
+                base_yml = os.path.join(os.path.dirname(cfg_path), base_yml)
+
+            if osp.isfile(base_yml):
+                logger.debug("Found _BASE_ config: {}".format(base_yml))
+                continue
+
+            # download to CONFIGS_HOME first
+            base_yml_url = osp.join(osp.split(cfg_url)[0], relpath)
+            path, _ = get_path(base_yml_url, CONFIGS_HOME)
+
+            # move from CONFIGS_HOME to dst_path to restore config directory structure
+            dst_path = osp.join(cur_dir, relpath)
+            dst_dir = osp.split(dst_path)[0]
+            if not osp.isdir(dst_dir):
+                os.makedirs(dst_dir)
+            shutil.move(path, dst_path)
+
+            # perform base yml download recursively
+            _download_config(dst_path, base_yml_url, osp.split(dst_path)[0])
+
+
 def get_dataset_path(path, annotation, image_dir):
     """
     If path exists, return path.
@@ -204,7 +260,7 @@ def get_path(url, root_dir, md5sum=None, check_exist=True):
     # new weights format which postfix is 'pdparams' not
     # need to decompress
-    if osp.splitext(fullname)[-1] != '.pdparams':
+    if osp.splitext(fullname)[-1] not in ['.pdparams', '.yml']:
         _decompress(fullname)

     return fullpath, exist_flag
...
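To illustrate the new `ppdet://` scheme handled above (the config name is illustrative):

```python
from ppdet.utils.download import parse_url, get_config_path

url = 'ppdet://dygraph/configs/yolov3_darknet53_270e_coco.yml'
print(parse_url(url))
# -> https://paddlemodels.bj.bcebos.com/object_detection/dygraph/configs/yolov3_darknet53_270e_coco.yml
cfg_path = get_config_path(url)  # fetches the yml plus its _BASE_ dependencies
```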
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import json
from ppdet.py_op.post_process import get_det_res, get_seg_res
from .logger import setup_logger
logger = setup_logger(__name__)
def json_eval_results(metric, json_directory=None, dataset=None):
"""
    cocoapi evaluation with existing proposal.json, bbox.json or mask.json
"""
assert metric == 'COCO'
from ppdet.utils.coco_eval import cocoapi_eval
anno_file = dataset.get_anno()
json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
if json_directory:
assert os.path.exists(
json_directory), "The json directory:{} does not exist".format(
json_directory)
for k, v in enumerate(json_file_list):
json_file_list[k] = os.path.join(str(json_directory), v)
coco_eval_style = ['proposal', 'bbox', 'segm']
for i, v_json in enumerate(json_file_list):
if os.path.exists(v_json):
cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
else:
logger.info("{} not exists!".format(v_json))
def get_infer_results(outs_res, eval_type, catid):
"""
Get result at the stage of inference.
The output format is a dictionary containing the bbox or mask results.
For example, the bbox result is a list whose elements each contain
image_id, category_id, bbox and score.
"""
if outs_res is None or len(outs_res) == 0:
raise ValueError(
'The number of valid detection results is zero. Please use a reasonable model and check the input data.'
)
infer_res = {k: [] for k in eval_type}
for i, outs in enumerate(outs_res):
im_id = outs['im_id']
im_shape = outs['im_shape']
scale_factor = outs['scale_factor']
if 'bbox' in eval_type:
infer_res['bbox'] += get_det_res(outs['bbox'], outs['bbox_num'],
im_id, catid)
if 'mask' in eval_type:
# mask post process
infer_res['mask'] += get_seg_res(outs['mask'], outs['bbox_num'],
im_id, catid)
return infer_res
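For concreteness, the returned dictionary follows the COCO-style result layout sketched below (field values are illustrative, not from a real run):

# infer_res = get_infer_results(outs_res, ['bbox'], clsid2catid)
# infer_res == {
#     'bbox': [
#         {'image_id': 1, 'category_id': 3,
#          'bbox': [x, y, w, h], 'score': 0.92},
#         ...
#     ]
# }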
def eval_results(res, metric, dataset):
"""
Evaluate the inference results
"""
eval_res = []
if metric == 'COCO':
from ppdet.utils.coco_eval import cocoapi_eval
if 'bbox' in res:
with open("bbox.json", 'w') as f:
json.dump(res['bbox'], f)
logger.info('The bbox result is saved to bbox.json.')
bbox_stats = cocoapi_eval(
'bbox.json', 'bbox', anno_file=dataset.get_anno())
eval_res.append(bbox_stats)
sys.stdout.flush()
if 'mask' in res:
with open("mask.json", 'w') as f:
json.dump(res['mask'], f)
logger.info('The mask result is saved to mask.json.')
seg_stats = cocoapi_eval(
'mask.json', 'segm', anno_file=dataset.get_anno())
eval_res.append(seg_stats)
sys.stdout.flush()
elif metric == 'VOC':
from ppdet.utils.voc_eval import bbox_eval
bbox_stats = bbox_eval(res, 21)
else:
raise NotImplementedError("Only COCO and VOC metrics are supported now.")
return eval_res
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import numpy as np
from ..data.source.voc import pascalvoc_label
from .map_utils import DetectionMAP
from .coco_eval import bbox2out
from .logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['bbox_eval', 'bbox2out', 'get_category_info']
def bbox_eval(results,
class_num,
overlap_thresh=0.5,
map_type='11point',
is_bbox_normalized=False,
evaluate_difficult=False):
"""
Bounding box evaluation for VOC dataset
Args:
results (list): prediction bounding box results.
class_num (int): evaluation class number.
overlap_thresh (float): the positive threshold of
bbox overlap
map_type (string): method for mAP calculation,
can only be '11point' or 'integral'
is_bbox_normalized (bool): whether bbox is normalized
to range [0, 1].
evaluate_difficult (bool): whether to evaluate
difficult gt bbox.
"""
assert 'bbox' in results[0]
logger.info("Start evaluate...")
detection_map = DetectionMAP(
class_num=class_num,
overlap_thresh=overlap_thresh,
map_type=map_type,
is_bbox_normalized=is_bbox_normalized,
evaluate_difficult=evaluate_difficult)
for t in results:
bboxes = t['bbox']
bbox_lengths = t['bbox_num']
if bboxes is None or bboxes.shape == (1, 1):  # check None before touching .shape
continue
gt_boxes = t['gt_bbox']
gt_labels = t['gt_class']
difficults = t['difficult'] if not evaluate_difficult \
else None
scale_factor = t['scale_factor'] if 'scale_factor' in t else np.ones(
(gt_boxes.shape[0], 2)).astype('float32')
bbox_idx = 0
for i in range(gt_boxes.shape[0]):
gt_box = gt_boxes[i]
h, w = scale_factor[i]
gt_box = gt_box / np.array([w, h, w, h])
gt_label = gt_labels[i]
difficult = None if difficults is None \
else difficults[i]
bbox_num = bbox_lengths[i]
bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
gt_box, gt_label, difficult = prune_zero_padding(gt_box, gt_label,
difficult)
detection_map.update(bbox, gt_box, gt_label, difficult)
bbox_idx += bbox_num
logger.info("Accumulating evaluatation results...")
detection_map.accumulate()
map_stat = 100. * detection_map.get_map()
logger.info("mAP({:.2f}, {}) = {:.2f}".format(overlap_thresh, map_type,
map_stat))
return map_stat
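A minimal sketch of driving bbox_eval directly with one dummy sample, assuming the [class, score, x1, y1, x2, y2] row layout for predicted boxes (all numbers are made up):

import numpy as np

results = [{
    'bbox': np.array([[0., 0.98, 12., 20., 110., 220.]], dtype='float32'),
    'bbox_num': np.array([1]),
    'gt_bbox': np.array([[[10., 22., 108., 218.]]], dtype='float32'),
    'gt_class': np.array([[[0]]]),
    'difficult': np.array([[[0]]]),
}]
map_stat = bbox_eval(results, class_num=20)  # logs and returns mAP in percent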
def prune_zero_padding(gt_box, gt_label, difficult=None):
valid_cnt = 0
for i in range(len(gt_box)):
if gt_box[i, 0] == 0 and gt_box[i, 1] == 0 and \
gt_box[i, 2] == 0 and gt_box[i, 3] == 0:
break
valid_cnt += 1
return (gt_box[:valid_cnt], gt_label[:valid_cnt], difficult[:valid_cnt]
if difficult is not None else None)
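A worked example of the pruning: ground-truth arrays are zero-padded to a fixed length, an all-zero row marks the end of the valid boxes, and everything from that row on is dropped.

import numpy as np

gt_box = np.array([[8., 12., 90., 200.], [0., 0., 0., 0.]])
gt_label = np.array([[4], [0]])
box, label, diff = prune_zero_padding(gt_box, gt_label)
# box -> [[8., 12., 90., 200.]], label -> [[4]], diff -> None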
def get_category_info(anno_file=None,
with_background=True,
use_default_label=False):
if use_default_label or anno_file is None \
or not os.path.exists(anno_file):
logger.info("Not found annotation file {}, load "
"voc2012 categories.".format(anno_file))
return vocall_category_info(with_background)
else:
logger.info("Load categories from {}".format(anno_file))
return get_category_info_from_anno(anno_file, with_background)
def get_category_info_from_anno(anno_file, with_background=True):
"""
Get class id to category id map and category id
to category name map from annotation file.
Args:
anno_file (str): annotation file path
with_background (bool, default True):
whether load background as class 0.
"""
cats = []
with open(anno_file) as f:
for line in f.readlines():
cats.append(line.strip())
if cats[0] != 'background' and with_background:
cats.insert(0, 'background')
if cats[0] == 'background' and not with_background:
cats = cats[1:]
clsid2catid = {i: i for i in range(len(cats))}
catid2name = {i: name for i, name in enumerate(cats)}
return clsid2catid, catid2name
def vocall_category_info(with_background=True):
"""
Get class id to category id map and category id
to category name map of the VOC dataset
Args:
with_background (bool, default True):
whether load background as class 0.
"""
label_map = pascalvoc_label(with_background)
label_map = sorted(label_map.items(), key=lambda x: x[1])
cats = [l[0] for l in label_map]
if with_background:
cats.insert(0, 'background')
clsid2catid = {i: i for i in range(len(cats))}
catid2name = {i: name for i, name in enumerate(cats)}
return clsid2catid, catid2name
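For reference, the mapping produced above is the identity over the (optionally background-prefixed) 20 VOC classes:

clsid2catid, catid2name = vocall_category_info(with_background=True)
# clsid2catid -> {0: 0, 1: 1, ..., 20: 20}
# catid2name  -> {0: 'background', 1: 'aeroplane', 2: 'bicycle', ..., 20: 'tvmonitor'}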
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
import glob
import shutil
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
def parse_requirements(fname):
with open(fname, encoding="utf-8-sig") as f:
requirements = f.readlines()
return requirements
def package_model_zoo():
from ppdet.model_zoo import MODEL_ZOO_FILENAME
cur_dir = osp.dirname(osp.realpath(__file__))
cfg_dir = osp.join(cur_dir, "configs")
cfgs = glob.glob(osp.join(cfg_dir, '*.yml'))
model_names = [osp.splitext(osp.split(cfg)[1])[0] for cfg in cfgs]
model_zoo_file = osp.join(cur_dir, 'ppdet', 'model_zoo', MODEL_ZOO_FILENAME)
with open(model_zoo_file, 'w') as wf:
for model_name in model_names:
wf.write("{}\n".format(model_name))
return [model_zoo_file]
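The generated file is just one config name per line. After installation it can be read back from the package data, e.g. in this small sketch:

import os.path as osp
import ppdet.model_zoo
from ppdet.model_zoo import MODEL_ZOO_FILENAME

zoo_file = osp.join(osp.dirname(ppdet.model_zoo.__file__), MODEL_ZOO_FILENAME)
with open(zoo_file) as f:
    model_names = [line.strip() for line in f if line.strip()]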
packages = [
'ppdet',
'ppdet.core',
'ppdet.data',
'ppdet.engine',
'ppdet.modeling',
'ppdet.model_zoo',
'ppdet.py_op',
'ppdet.utils',
]
if __name__ == "__main__":
setup(
name='ppdet',
packages=find_packages(exclude=("configs", "tools", "deploy")),
package_data={'ppdet.model_zoo': package_model_zoo()},
author='PaddlePaddle',
version='2.0-beta',
install_requires=parse_requirements('./requirements.txt'),
description='Object detection and instance segmentation toolkit based on PaddlePaddle',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/PaddlePaddle/PaddleDetection',
download_url='https://github.com/PaddlePaddle/PaddleDetection.git',
keywords=['ppdet paddle ppyolo'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Natural Language :: Chinese (Simplified)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7', 'Topic :: Utilities'
],
license='Apache License 2.0',
ext_modules=[], )
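Once installed (e.g. pip install . from the repo root), a quick sanity check that ppdet resolves to site-packages rather than the source checkout — a minimal sketch:

import ppdet
import ppdet.model_zoo

print(ppdet.__file__)  # should point into site-packages after installation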
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
import os, sys import os, sys
# add python path of PaddleDetection to sys.path # add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2))) parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
...@@ -24,17 +25,14 @@ if parent_path not in sys.path: ...@@ -24,17 +25,14 @@ if parent_path not in sys.path:
# ignore numba warning # ignore numba warning
import warnings import warnings
warnings.filterwarnings('ignore') warnings.filterwarnings('ignore')
import random
import numpy as np
import paddle
import time
import paddle
from paddle.distributed import ParallelEnv from paddle.distributed import ParallelEnv
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.core.workspace import load_config, merge_config
from ppdet.utils.check import check_gpu, check_version, check_config from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser from ppdet.utils.cli import ArgsParser
from ppdet.utils.eval_utils import get_infer_results, eval_results from ppdet.engine import Trainer
from ppdet.utils.checkpoint import load_weight
from ppdet.utils.logger import setup_logger from ppdet.utils.logger import setup_logger
logger = setup_logger('eval') logger = setup_logger('eval')
...@@ -58,74 +56,15 @@ def parse_args(): ...@@ -58,74 +56,15 @@ def parse_args():
return args return args
def run(FLAGS, cfg, place): def run(FLAGS, cfg):
# build trainer
# Model trainer = Trainer(cfg, mode='eval')
main_arch = cfg.architecture
model = create(cfg.architecture) # load weights
trainer.load_weights(cfg.weights, 'resume')
# Init Model
load_weight(model, cfg.weights) # evaluation
trainer.evaluate()
# Data Reader
dataset = cfg.EvalDataset
eval_loader = create('EvalReader')(dataset, cfg['worker_num'])
extra_key = ['im_shape', 'scale_factor', 'im_id']
if cfg.metric == 'VOC':
extra_key += ['gt_bbox', 'gt_class', 'difficult']
# Run Eval
outs_res = []
sample_num = 0
start_time = time.time()
for iter_id, data in enumerate(eval_loader):
# forward
model.eval()
outs = model(data)
for key in extra_key:
outs[key] = data[key]
for key, value in outs.items():
outs[key] = value.numpy()
if 'mask' in outs and 'bbox' in outs:
mask_resolution = model.mask_post_process.mask_resolution
from ppdet.py_op.post_process import mask_post_process
outs['mask'] = mask_post_process(
outs, outs['im_shape'], outs['scale_factor'], mask_resolution)
outs_res.append(outs)
# log
sample_num += outs['im_id'].shape[0]
if iter_id % 100 == 0:
logger.info("Eval iter: {}".format(iter_id))
cost_time = time.time() - start_time
logger.info('Total sample number: {}, average FPS: {}'.format(
sample_num, sample_num / cost_time))
eval_type = []
if 'bbox' in outs:
eval_type.append('bbox')
if 'mask' in outs:
eval_type.append('mask')
# Metric
# TODO: support other metric
with_background = cfg.with_background
use_default_label = dataset.use_default_label
if cfg.metric == 'COCO':
from ppdet.utils.coco_eval import get_category_info
clsid2catid, catid2name = get_category_info(
dataset.get_anno(), with_background, use_default_label)
infer_res = get_infer_results(outs_res, eval_type, clsid2catid)
elif cfg.metric == 'VOC':
from ppdet.utils.voc_eval import get_category_info
clsid2catid, catid2name = get_category_info(
dataset.get_label_list(), with_background, use_default_label)
infer_res = outs_res
eval_results(infer_res, cfg.metric, dataset)
def main(): def main():
...@@ -139,7 +78,8 @@ def main(): ...@@ -139,7 +78,8 @@ def main():
place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu' place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'
place = paddle.set_device(place) place = paddle.set_device(place)
run(FLAGS, cfg, place)
run(FLAGS, cfg)
if __name__ == '__main__': if __name__ == '__main__':
......
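The refactor reduces the whole eval script to the Trainer API. A minimal standalone sketch (the config path is illustrative; the Trainer calls are those used above):

import paddle
from ppdet.core.workspace import load_config
from ppdet.engine import Trainer

paddle.set_device('gpu:0')  # or 'cpu'
cfg = load_config('configs/yolov3_darknet53_270e_voc.yml')  # hypothetical path
trainer = Trainer(cfg, mode='eval')
trainer.load_weights(cfg.weights, 'resume')
trainer.evaluate()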
...@@ -24,23 +24,16 @@ if parent_path not in sys.path: ...@@ -24,23 +24,16 @@ if parent_path not in sys.path:
# ignore numba warning # ignore numba warning
import warnings import warnings
warnings.filterwarnings('ignore') warnings.filterwarnings('ignore')
import glob
import numpy as np
from PIL import Image
import paddle import paddle
import paddle.nn as nn
from paddle.static import InputSpec
from ppdet.core.workspace import load_config, merge_config, create from ppdet.core.workspace import load_config, merge_config
from ppdet.utils.check import check_gpu, check_version, check_config from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser from ppdet.utils.cli import ArgsParser
from ppdet.utils.checkpoint import load_weight from ppdet.engine import Detector
from export_utils import dump_infer_config
from paddle.jit import to_static
from ppdet.utils.logger import setup_logger from ppdet.utils.logger import setup_logger
logger = setup_logger('eval') logger = setup_logger('export_model')
def parse_args(): def parse_args():
...@@ -54,48 +47,15 @@ def parse_args(): ...@@ -54,48 +47,15 @@ def parse_args():
return args return args
def dygraph_to_static(model, save_dir, cfg):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
image_shape = None
if 'inputs_def' in cfg['TestReader']:
inputs_def = cfg['TestReader']['inputs_def']
image_shape = inputs_def.get('image_shape', None)
if image_shape is None:
image_shape = [3, None, None]
# Save infer cfg
dump_infer_config(cfg,
os.path.join(save_dir, 'infer_cfg.yml'), image_shape,
model)
input_spec = [{
"image": InputSpec(
shape=[None] + image_shape, name='image'),
"im_shape": InputSpec(
shape=[None, 2], name='im_shape'),
"scale_factor": InputSpec(
shape=[None, 2], name='scale_factor')
}]
export_model = to_static(model, input_spec=input_spec)
# save Model
paddle.jit.save(export_model, os.path.join(save_dir, 'model'))
def run(FLAGS, cfg): def run(FLAGS, cfg):
# build detector
detector = Detector(cfg, mode='test')
# Model # load weights
main_arch = cfg.architecture detector.load_weights(cfg.weights, 'resume')
model = create(cfg.architecture)
cfg_name = os.path.basename(FLAGS.config).split('.')[0]
save_dir = os.path.join(FLAGS.output_dir, cfg_name)
# Init Model
load_weight(model, cfg.weights)
# export config and model # export model
dygraph_to_static(model, save_dir, cfg) detector.export(FLAGS.output_dir)
logger.info('Export model to {}'.format(save_dir))
def main(): def main():
......
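Export follows the same pattern through the new Detector class; a sketch under the same assumptions as the eval example:

from ppdet.core.workspace import load_config
from ppdet.engine import Detector

cfg = load_config('configs/yolov3_darknet53_270e_voc.yml')  # hypothetical path
detector = Detector(cfg, mode='test')
detector.load_weights(cfg.weights, 'resume')
detector.export('output_inference')  # output directory name is illustrative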
...@@ -25,17 +25,13 @@ if parent_path not in sys.path: ...@@ -25,17 +25,13 @@ if parent_path not in sys.path:
import warnings import warnings
warnings.filterwarnings('ignore') warnings.filterwarnings('ignore')
import glob import glob
import numpy as np
from PIL import Image
import paddle import paddle
from paddle.distributed import ParallelEnv from paddle.distributed import ParallelEnv
from ppdet.core.workspace import load_config, merge_config, create from ppdet.core.workspace import load_config, merge_config
from ppdet.engine import Trainer
from ppdet.utils.check import check_gpu, check_version, check_config from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.visualizer import visualize_results
from ppdet.utils.cli import ArgsParser from ppdet.utils.cli import ArgsParser
from ppdet.utils.checkpoint import load_weight
from ppdet.utils.eval_utils import get_infer_results
from ppdet.utils.logger import setup_logger from ppdet.utils.logger import setup_logger
logger = setup_logger('train') logger = setup_logger('train')
...@@ -77,17 +73,6 @@ def parse_args(): ...@@ -77,17 +73,6 @@ def parse_args():
return args return args
def get_save_image_name(output_dir, image_path):
"""
Get save image name from source image path.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
image_name = os.path.split(image_path)[-1]
name, ext = os.path.splitext(image_name)
return os.path.join(output_dir, "{}".format(name)) + ext
def get_test_images(infer_dir, infer_img): def get_test_images(infer_dir, infer_img):
""" """
Get image path list in TEST mode Get image path list in TEST mode
...@@ -119,101 +104,21 @@ def get_test_images(infer_dir, infer_img): ...@@ -119,101 +104,21 @@ def get_test_images(infer_dir, infer_img):
return images return images
def run(FLAGS, cfg, place): def run(FLAGS, cfg):
# build trainer
# Model trainer = Trainer(cfg, mode='test')
main_arch = cfg.architecture
model = create(cfg.architecture) # load weights
trainer.load_weights(cfg.weights, 'resume')
# data
dataset = cfg.TestDataset # get inference images
test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img) images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
dataset.set_images(test_images)
test_loader = create('TestReader')(dataset, cfg['worker_num']) # inference
extra_key = ['im_shape', 'scale_factor', 'im_id'] trainer.predict(
images,
# TODO: support other metrics draw_threshold=FLAGS.draw_threshold,
imid2path = dataset.get_imid2path() output_dir=FLAGS.output_dir)
anno_file = dataset.get_anno()
with_background = cfg.with_background
use_default_label = dataset.use_default_label
if cfg.metric == 'COCO':
from ppdet.utils.coco_eval import get_category_info
if cfg.metric == 'VOC':
from ppdet.utils.voc_eval import get_category_info
clsid2catid, catid2name = get_category_info(anno_file, with_background,
use_default_label)
# Init Model
load_weight(model, cfg.weights)
# Run Infer
for iter_id, data in enumerate(test_loader):
# forward
model.eval()
outs = model(data)
for key in extra_key:
outs[key] = data[key]
for key, value in outs.items():
outs[key] = value.numpy()
if 'mask' in outs and 'bbox' in outs:
mask_resolution = model.mask_post_process.mask_resolution
from ppdet.py_op.post_process import mask_post_process
outs['mask'] = mask_post_process(
outs, outs['im_shape'], outs['scale_factor'], mask_resolution)
eval_type = []
if 'bbox' in outs:
eval_type.append('bbox')
if 'mask' in outs:
eval_type.append('mask')
batch_res = get_infer_results([outs], eval_type, clsid2catid)
logger.info('Infer iter {}'.format(iter_id))
bbox_res = None
mask_res = None
bbox_num = outs['bbox_num']
start = 0
for i, im_id in enumerate(outs['im_id']):
image_path = imid2path[int(im_id)]
image = Image.open(image_path).convert('RGB')
end = start + bbox_num[i]
# use VisualDL to log original image
if FLAGS.use_vdl:
original_image_np = np.array(image)
vdl_writer.add_image(
"original/frame_{}".format(vdl_image_frame),
original_image_np, vdl_image_step)
if 'bbox' in batch_res:
bbox_res = batch_res['bbox'][start:end]
if 'mask' in batch_res:
mask_res = batch_res['mask'][start:end]
image = visualize_results(image, bbox_res, mask_res,
int(outs['im_id']), catid2name,
FLAGS.draw_threshold)
# use VisualDL to log image with bbox
if FLAGS.use_vdl:
infer_image_np = np.array(image)
vdl_writer.add_image("bbox/frame_{}".format(vdl_image_frame),
infer_image_np, vdl_image_step)
vdl_image_step += 1
if vdl_image_step % 10 == 0:
vdl_image_step = 0
vdl_image_frame += 1
# save image with detection
save_name = get_save_image_name(FLAGS.output_dir, image_path)
logger.info("Detection bbox results save in {}".format(save_name))
image.save(save_name, quality=95)
start = end
def main(): def main():
...@@ -227,7 +132,7 @@ def main(): ...@@ -227,7 +132,7 @@ def main():
place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu' place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'
place = paddle.set_device(place) place = paddle.set_device(place)
run(FLAGS, cfg, place) run(FLAGS, cfg)
if __name__ == '__main__': if __name__ == '__main__':
......
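Inference likewise collapses to three Trainer calls; a sketch (paths and threshold are illustrative):

from ppdet.core.workspace import load_config
from ppdet.engine import Trainer

cfg = load_config('configs/yolov3_darknet53_270e_voc.yml')  # hypothetical path
trainer = Trainer(cfg, mode='test')
trainer.load_weights(cfg.weights, 'resume')
images = get_test_images(None, 'demo/000000014439.jpg')  # hypothetical image
trainer.predict(images, draw_threshold=0.5, output_dir='output')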
...@@ -26,19 +26,17 @@ if parent_path not in sys.path: ...@@ -26,19 +26,17 @@ if parent_path not in sys.path:
import warnings import warnings
warnings.filterwarnings('ignore') warnings.filterwarnings('ignore')
import random import random
import datetime
import time
import numpy as np import numpy as np
import paddle import paddle
from paddle.distributed import ParallelEnv from paddle.distributed import ParallelEnv
from ppdet.core.workspace import load_config, merge_config, create from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight, save_model from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.engine import Trainer, init_parallel_env, set_random_seed
import ppdet.utils.cli as cli import ppdet.utils.cli as cli
import ppdet.utils.check as check import ppdet.utils.check as check
import ppdet.utils.stats as stats
from ppdet.utils.logger import setup_logger from ppdet.utils.logger import setup_logger
logger = setup_logger('train') logger = setup_logger('train')
...@@ -83,131 +81,21 @@ def parse_args(): ...@@ -83,131 +81,21 @@ def parse_args():
return args return args
def run(FLAGS, cfg, place): def run(FLAGS, cfg):
env = os.environ # init parallel environment if nranks > 1
FLAGS.dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env init_parallel_env()
if FLAGS.dist:
trainer_id = int(env['PADDLE_TRAINER_ID'])
local_seed = (99 + trainer_id)
random.seed(local_seed)
np.random.seed(local_seed)
if FLAGS.enable_ce: if FLAGS.enable_ce:
random.seed(0) set_random_seed(0)
np.random.seed(0)
# build trainer
if ParallelEnv().nranks > 1: trainer = Trainer(cfg, mode='train')
paddle.distributed.init_parallel_env()
# load weights
# Data trainer.load_weights(cfg.pretrain_weights, FLAGS.weight_type)
datasets = cfg.TrainDataset
train_loader = create('TrainReader')(datasets, cfg['worker_num']) # training
steps = len(train_loader) trainer.train()
# Model
model = create(cfg.architecture)
# Optimizer
lr = create('LearningRate')(steps)
optimizer = create('OptimizerBuilder')(lr, model.parameters())
# Init Model & Optimzer
start_epoch = 0
if FLAGS.weight_type == 'resume':
start_epoch = load_weight(model, cfg.pretrain_weights, optimizer)
else:
load_pretrain_weight(model, cfg.pretrain_weights,
cfg.get('load_static_weights', False),
FLAGS.weight_type)
if getattr(model.backbone, 'norm_type', None) == 'sync_bn':
assert cfg.use_gpu and ParallelEnv(
).nranks > 1, 'you should use bn rather than sync_bn while using a single gpu'
# sync_bn = (getattr(model.backbone, 'norm_type', None) == 'sync_bn' and
# cfg.use_gpu and ParallelEnv().nranks > 1)
# if sync_bn:
# model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# The parameter filter is temporary fix for training because of #28997
# in Paddle.
def no_grad(param):
if param.name.startswith("conv1_") or param.name.startswith("res2a_") \
or param.name.startswith("res2b_") or param.name.startswith("res2c_"):
return True
for param in filter(no_grad, model.parameters()):
param.stop_gradient = True
# Parallel Model
if ParallelEnv().nranks > 1:
model = paddle.DataParallel(model)
cfg_name = os.path.basename(FLAGS.config).split('.')[0]
save_dir = os.path.join(cfg.save_dir, cfg_name)
# Run Train
end_epoch = int(cfg.epoch)
batch_size = int(cfg['TrainReader']['batch_size'])
total_steps = (end_epoch - start_epoch) * steps
step_id = 0
train_stats = stats.TrainingStats(cfg.log_iter)
batch_time = stats.SmoothedValue(fmt='{avg:.4f}')
data_time = stats.SmoothedValue(fmt='{avg:.4f}')
end_time = time.time()
space_fmt = ':' + str(len(str(steps))) + 'd'
# Run Train
for cur_eid in range(start_epoch, end_epoch):
datasets.set_epoch(cur_eid)
for iter_id, data in enumerate(train_loader):
data_time.update(time.time() - end_time)
# Model Forward
model.train()
outputs = model(data)
loss = outputs['loss']
# Model Backward
loss.backward()
optimizer.step()
curr_lr = optimizer.get_lr()
lr.step()
optimizer.clear_grad()
batch_time.update(time.time() - end_time)
if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
train_stats.update(outputs)
logs = train_stats.log()
if iter_id % cfg.log_iter == 0:
eta_sec = (total_steps - step_id) * batch_time.global_avg
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
ips = float(batch_size) / batch_time.avg
fmt = ' '.join([
'Epoch: [{}]',
'[{' + space_fmt + '}/{}]',
'{meters}',
'eta: {eta}',
'batch_cost: {btime}',
'data_cost: {dtime}',
'ips: {ips:.4f} images/s',
])
fmt = fmt.format(
cur_eid,
iter_id,
steps,
meters=logs,
eta=eta_str,
btime=str(batch_time),
dtime=str(data_time),
ips=ips)
logger.info(fmt)
step_id += 1
end_time = time.time() # after copy outputs to CPU.
# Save Stage
if (ParallelEnv().local_rank == 0 and \
(cur_eid % cfg.snapshot_epoch) == 0) or (cur_eid + 1) == end_epoch:
save_name = str(
cur_eid) if cur_eid + 1 != end_epoch else "model_final"
save_model(model, optimizer, save_dir, save_name, cur_eid + 1)
def main(): def main():
...@@ -222,7 +110,7 @@ def main(): ...@@ -222,7 +110,7 @@ def main():
place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu' place = 'gpu:{}'.format(ParallelEnv().dev_id) if cfg.use_gpu else 'cpu'
place = paddle.set_device(place) place = paddle.set_device(place)
run(FLAGS, cfg, place) run(FLAGS, cfg)
if __name__ == "__main__": if __name__ == "__main__":
......
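And training, with the distributed setup and seeding now folded into ppdet.engine; a sketch (the weight_type value 'pretrain' mirrors the CLI flag and is an assumption):

from ppdet.core.workspace import load_config
from ppdet.engine import Trainer, init_parallel_env, set_random_seed

cfg = load_config('configs/yolov3_darknet53_270e_voc.yml')  # hypothetical path
init_parallel_env()   # no-op unless launched with multiple trainers
set_random_seed(0)    # optional, mirrors the --enable_ce behavior
trainer = Trainer(cfg, mode='train')
trainer.load_weights(cfg.pretrain_weights, 'pretrain')  # weight_type is an assumption
trainer.train()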