Commit 59f12446 authored by dengkaipeng

merge master

- repo: https://github.com/PaddlePaddle/mirrors-yapf.git
  sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37
  hooks:
  - id: yapf
    files: \.py$
- repo: https://github.com/pre-commit/pre-commit-hooks
  sha: a11d9314b22d8f8c7556443875b731ef05965464
  hooks:
  - id: check-merge-conflict
  - id: check-symlinks
  - id: detect-private-key
    files: (?!.*paddle)^.*$
  - id: end-of-file-fixer
    files: \.(md|yml)$
  - id: trailing-whitespace
    files: \.(md|yml)$
- repo: https://github.com/Lucas-C/pre-commit-hooks
  sha: v1.0.1
  hooks:
  - id: forbid-crlf
    files: \.(md|yml)$
  - id: remove-crlf
    files: \.(md|yml)$
  - id: forbid-tabs
    files: \.(md|yml)$
  - id: remove-tabs
    files: \.(md|yml)$
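
The files: fields above are Python regular expressions that pre-commit matches against each staged file path, so for instance detect-private-key only runs on paths that do not contain "paddle". A minimal sketch of how that filter behaves (the example paths below are made up for illustration):

import re

# pre-commit applies `files:` patterns to the repository-relative path.
private_key_filter = re.compile(r'(?!.*paddle)^.*$')

print(bool(private_key_filter.search('tools/gen_config.py')))        # True -> hook runs
print(bool(private_key_filter.search('python/paddle/fluid/io.py')))  # False -> skipped
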
@@ -26,7 +26,7 @@ from paddle import fluid
from paddle.fluid.optimizer import Momentum
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from model import Model, CrossEntropy
from model import Model, CrossEntropy, Input
from metrics import Accuracy
@@ -79,7 +79,6 @@ class SimpleImgConvPool(fluid.dygraph.Layer):
class MNIST(Model):
def __init__(self):
super(MNIST, self).__init__()
self._simple_img_conv_pool_1 = SimpleImgConvPool(
1, 20, 5, 2, 2, act="relu")
@@ -89,12 +88,13 @@ class MNIST(Model):
pool_2_shape = 50 * 4 * 4
SIZE = 10
scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
self._fc = Linear(800,
10,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)),
act="softmax")
self._fc = Linear(
800,
10,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=scale)),
act="softmax")
def forward(self, inputs):
x = self._simple_img_conv_pool_1(inputs)
@@ -138,13 +138,15 @@ def main():
paddle.batch(paddle.dataset.mnist.test(),
batch_size=FLAGS.batch_size, drop_last=True), 1, 1)
device_ids = list(range(FLAGS.num_devices))
with guard:
model = MNIST()
optim = Momentum(learning_rate=FLAGS.lr, momentum=.9,
parameter_list=model.parameters())
model.prepare(optim, CrossEntropy(), metrics=Accuracy(topk=(1, 2)))
optim = Momentum(
learning_rate=FLAGS.lr,
momentum=.9,
parameter_list=model.parameters())
inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]
model.prepare(optim, CrossEntropy(), Accuracy(topk=(1, 2)), inputs, labels)
if FLAGS.resume is not None:
model.load(FLAGS.resume)
@@ -153,8 +155,7 @@ def main():
val_loss = 0.0
print("======== train epoch {} ========".format(e))
for idx, batch in enumerate(train_loader()):
losses, metrics = model.train(batch[0], batch[1], device='gpu',
device_ids=device_ids)
losses, metrics = model.train(batch[0], batch[1])
train_loss += np.sum(losses)
if idx % 10 == 0:
@@ -167,8 +168,7 @@ def main():
print("======== eval epoch {} ========".format(e))
for idx, batch in enumerate(val_loader()):
losses, metrics = model.eval(batch[0], batch[1], device='gpu',
device_ids=device_ids)
losses, metrics = model.eval(batch[0], batch[1])
val_loss += np.sum(losses)
if idx % 10 == 0:
@@ -188,14 +188,21 @@ if __name__ == '__main__':
parser.add_argument(
"-e", "--epoch", default=100, type=int, help="number of epoch")
parser.add_argument(
'--lr', '--learning-rate', default=1e-3, type=float, metavar='LR',
'--lr',
'--learning-rate',
default=1e-3,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument(
"-b", "--batch_size", default=128, type=int, help="batch size")
parser.add_argument(
"-n", "--num_devices", default=4, type=int, help="number of devices")
"-n", "--num_devices", default=1, type=int, help="number of devices")
parser.add_argument(
"-r", "--resume", default=None, type=str,
"-r",
"--resume",
default=None,
type=str,
help="checkpoint path to resume")
FLAGS = parser.parse_args()
main()
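
The net effect of the mnist example changes above is that input and label shapes are now declared up front with Input and handed to model.prepare(), while model.train() and model.eval() no longer take device or device_ids arguments. A condensed sketch of the resulting flow, using only names that appear in this diff (MNIST, train_loader and FLAGS are defined elsewhere in the example script):

from paddle.fluid.optimizer import Momentum
from model import Model, CrossEntropy, Input
from metrics import Accuracy

# Static specs for the data fed to the model and the labels fed to the loss.
inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
labels = [Input([None, 1], 'int64', name='label')]

model = MNIST()
optim = Momentum(
    learning_rate=FLAGS.lr,
    momentum=.9,
    parameter_list=model.parameters())
model.prepare(optim, CrossEntropy(), Accuracy(topk=(1, 2)), inputs, labels)

for idx, batch in enumerate(train_loader()):
    # batch[0] is the image batch, batch[1] the labels; no device arguments.
    losses, metrics = model.train(batch[0], batch[1])
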
This diff is collapsed.
@@ -33,7 +33,7 @@ from paddle.fluid.dygraph.nn import Conv2D
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
from model import Model, Loss, shape_hints
from model import Model, Loss, Input
from resnet import ResNet, ConvBNLayer
import logging
@@ -152,7 +152,6 @@ class YOLOv3(Model):
act='leaky_relu'))
self.route_blocks.append(route)
@shape_hints(inputs=[None, 3, None, None], img_info=[None, 3])
def forward(self, inputs, img_info):
outputs = []
boxes = []
@@ -208,10 +207,9 @@ class YoloLoss(Loss):
class YoloLoss(Loss):
def __init__(self, num_classes=80, num_max_boxes=50):
def __init__(self, num_classes=80):
super(YoloLoss, self).__init__()
self.num_classes = num_classes
self.num_max_boxes = num_max_boxes
self.ignore_thresh = 0.7
self.anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45,
59, 119, 116, 90, 156, 198, 373, 326]
@@ -240,16 +238,6 @@ class YoloLoss(Loss):
downsample //= 2
return losses
def infer_shape(self, _):
return [
[None, self.num_max_boxes, 4],
[None, self.num_max_boxes],
[None, self.num_max_boxes]
]
def infer_dtype(self, _):
return ['float32', 'int32', 'float32']
def make_optimizer(parameter_list=None):
base_lr = FLAGS.lr
@@ -470,8 +458,7 @@ def run(model, loader, mode='train'):
start = time.time()
for idx, batch in enumerate(loader()):
losses, _ = getattr(model, mode)(
batch[0], batch[1], device='gpu', device_ids=device_ids)
losses = getattr(model, mode)(batch[0], batch[1])
total_loss += np.sum(losses)
if idx > 1: # skip first two steps
@@ -521,7 +508,8 @@ def main():
os.mkdir('yolo_checkpoints')
with guard:
NUM_CLASSES=7
NUM_CLASSES = 7
NUM_MAX_BOXES = 50
model = YOLOv3(num_classes=NUM_CLASSES)
# XXX transfer learning
if FLAGS.pretrain_weights is not None:
@@ -530,12 +518,18 @@ def main():
model.load(FLAGS.weights)
optim = make_optimizer(parameter_list=model.parameters())
anno_path = os.path.join(FLAGS.data, 'annotations', 'instances_val2017.json')
inputs = [Input([None, 3, None, None], 'float32', name='image'),
Input([None, 3], 'int32', name='img_info')]
labels = [Input([None, NUM_MAX_BOXES, 4], 'float32', name='gt_bbox'),
Input([None, NUM_MAX_BOXES], 'int32', name='gt_label'),
Input([None, NUM_MAX_BOXES], 'float32', name='gt_score')]
model.prepare(optim,
YoloLoss(num_classes=NUM_CLASSES),
# For YOLOv3, output variable in train/eval is different,
# which is not supported by metric, add by callback later?
# metrics=COCOMetric(anno_path, with_background=False)
)
inputs=inputs,
labels = labels)
for e in range(epoch):
logger.info("======== train epoch {} ========".format(e))
......
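
The YOLOv3 example follows the same pattern: the removed @shape_hints decorator and the YoloLoss.infer_shape/infer_dtype pair are replaced by explicit Input declarations, with the three label specs matching the shapes and dtypes the deleted methods used to return. A condensed sketch of that correspondence, using only names from this diff (model and optim are built in main() as shown above):

NUM_CLASSES = 7
NUM_MAX_BOXES = 50  # formerly the YoloLoss num_max_boxes default

# image / img_info replace the removed @shape_hints(inputs=..., img_info=...) decorator.
inputs = [Input([None, 3, None, None], 'float32', name='image'),
          Input([None, 3], 'int32', name='img_info')]

# gt_bbox / gt_label / gt_score carry the shapes and dtypes that
# YoloLoss.infer_shape() and YoloLoss.infer_dtype() used to report.
labels = [Input([None, NUM_MAX_BOXES, 4], 'float32', name='gt_bbox'),
          Input([None, NUM_MAX_BOXES], 'int32', name='gt_label'),
          Input([None, NUM_MAX_BOXES], 'float32', name='gt_score')]

model.prepare(optim,
              YoloLoss(num_classes=NUM_CLASSES),
              inputs=inputs,
              labels=labels)
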