Commit 6af0c07a authored by: D dengkaipeng

refine code

Parent 97a365e5
......@@ -29,6 +29,7 @@ from paddle.fluid.executor import global_scope
from paddle.fluid.io import is_belong_to_optimizer
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.layers.utils import flatten
from paddle.fluid.incubate.fleet.collective import fleet, DistributedStrategy
from paddle.fluid.incubate.fleet.base import role_maker
from paddle.fluid.io import DataLoader, Dataset
......@@ -414,13 +415,7 @@ class StaticGraphAdapter(object):
losses = []
metrics = []
with fluid.program_guard(prog, self._startup_prog):
if isinstance(self.model._inputs, dict):
ins = [
self.model._inputs[n]
for n in extract_args(self.model.forward) if n != 'self'
]
else:
ins = self.model._inputs
ins = self.model._inputs
lbls = self.model._labels if self.model._labels else []
inputs = [k.forward() for k in to_list(ins)]
labels = [k.forward() for k in to_list(lbls)]
......@@ -867,8 +862,10 @@ class Model(fluid.dygraph.Layer):
metric.__class__.__name__)
self._metrics = to_list(metrics)
self._inputs = inputs
self._labels = labels
self._inputs = to_list(inputs) if not isinstance(inputs, dict) else [
inputs[n] for n in extract_args(self.forward) if n != 'self'
]
self._labels = to_list(labels)
if not in_dygraph_mode():
self._adapter.prepare()
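For reference, a minimal, self-contained sketch of the dict-to-list normalization above, using plain-Python stand-ins (the forward signature, the dict keys, and the simplified extract_args are illustrative assumptions, not part of this commit):
import inspect
def extract_args(func):
    # simplified stand-in: return the function's argument names in order
    return inspect.getfullargspec(func).args
def forward(self, image, im_shape):
    pass
inputs = {'im_shape': 'shape_spec', 'image': 'image_spec'}
# reorder dict values to match the forward() signature, dropping 'self'
ordered = [inputs[n] for n in extract_args(forward) if n != 'self']
assert ordered == ['image_spec', 'shape_spec']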
......@@ -1127,16 +1124,14 @@ class Model(fluid.dygraph.Layer):
outputs = []
for data in tqdm.tqdm(loader):
if not fluid.in_dygraph_mode():
data = data[0]
assert len(data) == len(self._inputs) + len(self._labels), \
"data fields number mismatch"
inputs_data = data[:len(self._inputs)]
outputs.append(self.test(inputs_data))
# convert sample list to batched data
data = flatten(data)
outputs.append(self.test(data[:len(self._inputs)]))
# NOTE: we do not stack or concatenate here since the output LoDTensor
# may lose its detail info
# convert sample list data to batch data
outputs = list(zip(*outputs))
self._test_dataloader = None
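As a small illustration of the zip(*outputs) step above, which groups per-field outputs without stacking them (toy values, not real predictions):
# each element of outputs holds one batch's fields, e.g. [bbox, score]
outputs = [['bbox_b0', 'score_b0'],   # batch 0
           ['bbox_b1', 'score_b1']]   # batch 1
# transpose to per-field tuples; nothing is concatenated, so the LoD
# detail of each batch's tensors would stay intact
per_field = list(zip(*outputs))
assert per_field == [('bbox_b0', 'bbox_b1'), ('score_b0', 'score_b1')]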
......@@ -1174,22 +1169,30 @@ class Model(fluid.dygraph.Layer):
callbacks.on_epoch_begin(epoch)
for step, data in enumerate(data_loader):
if not fluid.in_dygraph_mode():
data = data[0]
batch_size = data[0].shape()[0]
else:
batch_size = data[0].shape[0]
assert len(data) == len(self._inputs) + len(self._labels), \
"data fields number mismatch"
inputs_data = data[:len(self._inputs)]
labels_data = data[len(self._inputs):]
# data might come from different types of data_loader and have
# different formats, as follows:
# 1. DataLoader in static graph:
# [[input1, input2, ..., label1, label2, ...]]
# 2. DataLoader in dygraph:
# [input1, input2, ..., label1, label2, ...]
# 3. custom iterator yielding concatenated inputs and labels:
# [input1, input2, ..., label1, label2, ...]
# 4. custom iterator yielding separated inputs and labels:
# ([input1, input2, ...], [label1, label2, ...])
# To handle all of these, flatten the (possibly nested) list into a flat list.
data = flatten(data)
# LoDTensor.shape is callable, where LoDTensor comes from
# DataLoader in static graph
batch_size = data[0].shape()[0] if callable(data[
0].shape) else data[0].shape[0]
callbacks.on_batch_begin(mode, step, logs)
if mode == 'train':
outs = self.train(inputs_data, labels_data)
outs = self.train(data[:len(self._inputs)],
data[len(self._inputs):])
else:
outs = self.eval(inputs_data, labels_data)
outs = self.eval(data[:len(self._inputs)],
data[len(self._inputs):])
# losses
loss = outs[0] if self._metrics else outs
......
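To make the comment block above concrete, a short sketch of how flatten normalizes the listed loader formats into one flat list (toy values; this assumes the nested-structure behavior of paddle.fluid.layers.utils.flatten imported at the top of this diff):
from paddle.fluid.layers.utils import flatten
# cases 1/2/3: nested or already-flat batches both end up flat
assert flatten([[1, 2, 3]]) == [1, 2, 3]
assert flatten([1, 2, 3]) == [1, 2, 3]
# case 4: separated ([inputs...], [labels...]) is flattened in order
assert flatten(([1, 2], [3])) == [1, 2, 3]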
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
......@@ -24,11 +24,12 @@ logger = logging.getLogger(__name__)
__all__ = ['COCOMetric']
OUTFILE = './bbox.json'
# may be changed to a callback later
# COCOMetric behavior is different from the Metric defined in the high
# level API: COCOMetric will and can only accumulate at the epoch
# end, so we do not implement COCOMetric as a high level API Metric
class COCOMetric():
"""
Metric for MS-COCO dataset, only supports update with batch
......
......@@ -17,9 +17,17 @@ from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.dygraph.nn import Conv2D, BatchNorm
from paddle.fluid.dygraph.base import to_variable
__all__ = ['DarkNet53', 'ConvBNLayer']
from model import Model
from download import get_weights_path
__all__ = ['DarkNet53', 'ConvBNLayer', 'darknet53']
# {num_layers: (url, md5)}
pretrain_infos = {
53: ('https://paddlemodels.bj.bcebos.com/hapi/darknet53.pdparams',
'2506357a5c31e865785112fc614a487d')
}
class ConvBNLayer(fluid.dygraph.Layer):
......@@ -128,10 +136,13 @@ class LayerWarp(fluid.dygraph.Layer):
DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}
class DarkNet53(fluid.dygraph.Layer):
def __init__(self, ch_in=3):
class DarkNet53(Model):
def __init__(self, num_layers=53, ch_in=3):
super(DarkNet53, self).__init__()
self.stages = DarkNet_cfg[53]
assert num_layers in DarkNet_cfg.keys(), \
"only support num_layers in {} currently" \
.format(DarkNet_cfg.keys())
self.stages = DarkNet_cfg[num_layers]
self.stages = self.stages[0:5]
self.conv0 = ConvBNLayer(
......@@ -175,3 +186,19 @@ class DarkNet53(fluid.dygraph.Layer):
out = self.downsample_list[i](out)
return blocks[-1:-4:-1]
def _darknet(num_layers=53, input_channels=3, pretrained=True):
model = DarkNet53(num_layers, input_channels)
if pretrained:
assert num_layers in pretrain_infos.keys(), \
"DarkNet{} does not have pretrained weights now, " \
"pretrained should be set to False".format(num_layers)
weight_path = get_weights_path(*(pretrain_infos[num_layers]))
assert weight_path.endswith('.pdparams'), \
"suffix of weight must be .pdparams"
model.load(weight_path[:-9])
return model
def darknet53(input_channels=3, pretrained=True):
return _darknet(53, input_channels, pretrained)
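A hedged usage sketch for the new constructor (module path and arguments follow this diff; pretrained=False here only to skip the weight download):
from darknet import darknet53
# build a DarkNet-53 backbone; set pretrained=True to load the weights
# listed in pretrain_infos above
backbone = darknet53(input_channels=3, pretrained=False)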
......@@ -27,10 +27,10 @@ from paddle.fluid.io import DataLoader
from model import Model, Input, set_device
from distributed import DistributedBatchSampler
from yolov3.coco import *
from yolov3.transforms import *
from yolov3.modeling import *
from yolov3.coco_metric import *
from modeling import yolov3_darknet53, YoloLoss
from coco_metric import COCOMetric
from coco import COCODataset
from transforms import *
NUM_MAX_BOXES = 50
......@@ -107,7 +107,7 @@ def main():
with_background=False,
transform=eval_transform)
# batch_size can only be 1 in evaluation for YOLOv3
# prediction bbox is LoDTensor
# prediction bbox is a LoDTensor
batch_sampler = DistributedBatchSampler(dataset,
batch_size=1,
shuffle=False,
......@@ -121,8 +121,11 @@ def main():
return_list=True,
collate_fn=eval_collate_fn)
model = YOLOv3(num_classes=dataset.num_classes,
model_mode='eval' if FLAGS.eval_only else 'train')
pretrained = FLAGS.eval_only and FLAGS.weights is None
model = yolov3_darknet53(num_classes=dataset.num_classes,
model_mode='eval' if FLAGS.eval_only else 'train',
pretrained=pretrained)
if FLAGS.pretrain_weights is not None:
model.load(FLAGS.pretrain_weights, skip_mismatch=True, reset_optimizer=True)
......@@ -143,7 +146,7 @@ def main():
# but only accumulate at the end of an epoch
if FLAGS.eval_only:
if FLAGS.weights is not None:
model.load(FLAGS.weights)
model.load(FLAGS.weights, reset_optimizer=True)
preds = model.predict(loader)
_, _, _, img_ids, bboxes = preds
......@@ -172,8 +175,10 @@ def main():
if __name__ == '__main__':
parser = argparse.ArgumentParser("Yolov3 Training on COCO")
parser.add_argument('data', metavar='DIR', help='path to COCO dataset')
parser = argparse.ArgumentParser("Yolov3 Training on VOC")
parser.add_argument(
"--data", type=str, default='dataset/voc',
help="path to dataset directory")
parser.add_argument(
"--device", type=str, default='gpu', help="device to use, gpu or cpu")
parser.add_argument(
......
......@@ -21,10 +21,17 @@ from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from model import Model, Loss
from .darknet import DarkNet53, ConvBNLayer
from download import get_weights_path
from darknet import darknet53, ConvBNLayer
__all__ = ['YoloLoss', 'YOLOv3']
# {num_layers: (url, md5)}
pretrain_infos = {
53: ('https://paddlemodels.bj.bcebos.com/hapi/yolov3_darknet53.pdparams',
'aed7dd45124ff2e844ae3bd5ba6c91d2')
}
class YoloDetectionBlock(fluid.dygraph.Layer):
def __init__(self, ch_in, channel):
......@@ -97,7 +104,7 @@ class YOLOv3(Model):
self.nms_posk = 100
self.draw_thresh = 0.5
self.block = DarkNet53()
self.backbone = darknet53(pretrained=(model_mode=='train'))
self.block_outputs = []
self.yolo_blocks = []
self.route_blocks = []
......@@ -137,7 +144,7 @@ class YOLOv3(Model):
scores = []
downsample = 32
feats = self.block(inputs)
feats = self.backbone(inputs)
route = None
for idx, feat in enumerate(feats):
if idx > 0:
......@@ -218,3 +225,21 @@ class YoloLoss(Loss):
losses.append(loss)
downsample //= 2
return losses
def _yolov3_darknet(num_layers=53, num_classes=80,
model_mode='train', pretrained=True):
model = YOLOv3(num_classes, model_mode)
if pretrained:
assert num_layers in pretrain_infos.keys(), \
"YOLOv3-DarkNet{} does not have pretrained weights now, " \
"pretrained should be set to False".format(num_layers)
weight_path = get_weights_path(*(pretrain_infos[num_layers]))
assert weight_path.endswith('.pdparams'), \
"suffix of weight must be .pdparams"
model.load(weight_path[:-9])
return model
def yolov3_darknet53(num_classes=80, model_mode='train', pretrained=True):
return _yolov3_darknet(53, num_classes, model_mode, pretrained)
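And a matching usage sketch for the YOLOv3 constructor (num_classes=80 matches the default; pretrained=False avoids the download in this example):
from modeling import yolov3_darknet53
# build YOLOv3 with a DarkNet-53 backbone in eval mode
model = yolov3_darknet53(num_classes=80, model_mode='eval', pretrained=False)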