未验证 提交 7d3a89f6 编写于 作者: W wangxinxin08 提交者: GitHub

migrate yolov3 to API 2.0 (#1500)

上级 079c83c7
......@@ -248,7 +248,7 @@ def create(cls_or_name, **kwargs):
if isinstance(target, SchemaDict):
kwargs[k] = create(target_key)
elif hasattr(target, '__dict__'): # serialized object
kwargs[k] = new_dict
kwargs[k] = target
else:
raise ValueError("Unsupported injection type:", target_key)
# prevent modification of global config values of reference types
......
......@@ -3,8 +3,8 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from paddle.fluid.dygraph import Layer
from paddle.fluid.dygraph.base import to_variable
import paddle
import paddle.nn as nn
from ppdet.core.workspace import register
from ppdet.utils.data_structure import BufferDict
......@@ -12,7 +12,7 @@ __all__ = ['BaseArch']
@register
class BaseArch(Layer):
class BaseArch(nn.Layer):
def __init__(self):
super(BaseArch, self).__init__()
......@@ -39,10 +39,10 @@ class BaseArch(Layer):
input_v = np.array(input)[np.newaxis, ...]
inputs[name].append(input_v)
for name in input_def:
inputs[name] = to_variable(np.concatenate(inputs[name]))
inputs[name] = paddle.to_tensor(np.concatenate(inputs[name]))
return inputs
def model_arch(self, mode):
def model_arch(self):
raise NotImplementedError("Should implement model_arch method!")
def loss(self, ):
......
import paddle.fluid as fluid
from paddle.fluid.dygraph import Layer
from paddle.fluid.param_attr import ParamAttr
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.dygraph.nn import Conv2D, BatchNorm
from ppdet.core.workspace import register, serializable
__all__ = ['DarkNet', 'ConvBNLayer']
class ConvBNLayer(Layer):
class ConvBNLayer(nn.Layer):
def __init__(self,
ch_in,
ch_out,
......@@ -20,25 +20,22 @@ class ConvBNLayer(Layer):
name=None):
super(ConvBNLayer, self).__init__()
self.conv = Conv2D(
num_channels=ch_in,
num_filters=ch_out,
filter_size=filter_size,
self.conv = nn.Conv2d(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=groups,
param_attr=ParamAttr(name=name + '.conv.weights'),
bias_attr=False,
act=None)
weight_attr=ParamAttr(name=name + '.conv.weights'),
bias_attr=False)
bn_name = name + '.bn'
self.batch_norm = BatchNorm(
num_channels=ch_out,
param_attr=ParamAttr(
self.batch_norm = nn.BatchNorm2d(
ch_out,
weight_attr=ParamAttr(
name=bn_name + '.scale', regularizer=L2Decay(0.)),
bias_attr=ParamAttr(
name=bn_name + '.offset', regularizer=L2Decay(0.)),
moving_mean_name=bn_name + '.mean',
moving_variance_name=bn_name + '.var')
name=bn_name + '.offset', regularizer=L2Decay(0.)))
self.act = act
......@@ -46,11 +43,11 @@ class ConvBNLayer(Layer):
out = self.conv(inputs)
out = self.batch_norm(out)
if self.act == 'leaky':
out = fluid.layers.leaky_relu(x=out, alpha=0.1)
out = F.leaky_relu(out, 0.1)
return out
class DownSample(Layer):
class DownSample(nn.Layer):
def __init__(self,
ch_in,
ch_out,
......@@ -75,7 +72,7 @@ class DownSample(Layer):
return out
class BasicBlock(Layer):
class BasicBlock(nn.Layer):
def __init__(self, ch_in, ch_out, name=None):
super(BasicBlock, self).__init__()
......@@ -97,11 +94,11 @@ class BasicBlock(Layer):
def forward(self, inputs):
conv1 = self.conv1(inputs)
conv2 = self.conv2(conv1)
out = fluid.layers.elementwise_add(x=inputs, y=conv2, act=None)
out = paddle.add(x=inputs, y=conv2)
return out
class Blocks(Layer):
class Blocks(nn.Layer):
def __init__(self, ch_in, ch_out, count, name=None):
super(Blocks, self).__init__()
......@@ -127,7 +124,7 @@ DarkNet_cfg = {53: ([1, 2, 8, 8, 4])}
@register
@serializable
class DarkNet(Layer):
class DarkNet(nn.Layer):
def __init__(self,
depth=53,
freeze_at=-1,
......
import numpy as np
import paddle.fluid as fluid
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
......@@ -90,9 +93,9 @@ class BBoxPostProcessYOLO(object):
self.num_classes, i)
boxes_list.append(boxes)
scores_list.append(fluid.layers.transpose(scores, perm=[0, 2, 1]))
yolo_boxes = fluid.layers.concat(boxes_list, axis=1)
yolo_scores = fluid.layers.concat(scores_list, axis=2)
scores_list.append(paddle.transpose(scores, perm=[0, 2, 1]))
yolo_boxes = paddle.concat(boxes_list, axis=1)
yolo_scores = paddle.concat(scores_list, axis=2)
bbox = self.nms(bboxes=yolo_boxes, scores=yolo_scores)
# TODO: parse the lod of nmsed_bbox
# default batch size is 1
......
import paddle.fluid as fluid
import paddle
from paddle.fluid.dygraph import Layer
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Normal
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.dygraph.nn import Conv2D, BatchNorm
from paddle.fluid.dygraph import Sequential
from ppdet.core.workspace import register
from ..backbone.darknet import ConvBNLayer
class YoloDetBlock(Layer):
class YoloDetBlock(nn.Layer):
def __init__(self, ch_in, channel, name):
super(YoloDetBlock, self).__init__()
self.ch_in = ch_in
......@@ -26,7 +24,7 @@ class YoloDetBlock(Layer):
#['tip', channel, channel * 2, 3],
]
self.conv_module = Sequential()
self.conv_module = nn.Sequential()
for idx, (conv_name, ch_in, ch_out, filter_size,
post_name) in enumerate(conv_def):
self.conv_module.add_sublayer(
......@@ -52,7 +50,7 @@ class YoloDetBlock(Layer):
@register
class YOLOFeat(Layer):
class YOLOFeat(nn.Layer):
__shared__ = ['num_levels']
def __init__(self, feat_in_list=[1024, 768, 384], num_levels=3):
......@@ -88,19 +86,19 @@ class YOLOFeat(Layer):
yolo_feats = []
for i, block in enumerate(body_feats):
if i > 0:
block = fluid.layers.concat(input=[route, block], axis=1)
block = paddle.concat([route, block], axis=1)
route, tip = self.yolo_blocks[i](block)
yolo_feats.append(tip)
if i < self.num_levels - 1:
route = self.route_blocks[i](route)
route = fluid.layers.resize_nearest(route, scale=2.)
route = F.resize_nearest(route, scale=2.)
return yolo_feats
@register
class YOLOv3Head(Layer):
class YOLOv3Head(nn.Layer):
__shared__ = ['num_classes', 'num_levels', 'use_fine_grained_loss']
__inject__ = ['yolo_feat']
......@@ -130,14 +128,13 @@ class YOLOv3Head(Layer):
name = 'yolo_output.{}'.format(i)
yolo_out = self.add_sublayer(
name,
Conv2D(
num_channels=1024 // (2**i),
num_filters=num_filters,
filter_size=1,
nn.Conv2d(
in_channels=1024 // (2**i),
out_channels=num_filters,
kernel_size=1,
stride=1,
padding=0,
act=None,
param_attr=ParamAttr(name=name + '.conv.weights'),
weight_attr=ParamAttr(name=name + '.conv.weights'),
bias_attr=ParamAttr(
name=name + '.conv.bias', regularizer=L2Decay(0.))))
self.yolo_out_list.append(yolo_out)
......
......@@ -19,12 +19,12 @@ from __future__ import print_function
import math
import logging
from paddle import fluid
import paddle
import paddle.nn as nn
import paddle.fluid.optimizer as optimizer
import paddle.optimizer as optimizer
import paddle.fluid.regularizer as regularizer
from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter
from paddle.fluid.layers.ops import cos
from paddle import cos
from ppdet.core.workspace import register, serializable
......@@ -61,7 +61,7 @@ class PiecewiseDecay(object):
for i in self.gamma:
value.append(base_lr * i)
return fluid.dygraph.PiecewiseDecay(boundary, value, begin=0, step=1)
return optimizer.lr_scheduler.PiecewiseLR(boundary, value)
@serializable
......@@ -142,9 +142,10 @@ class OptimizerBuilder():
def __call__(self, learning_rate, params=None):
if self.clip_grad_by_norm is not None:
fluid.clip.set_gradient_clip(
clip=fluid.clip.GradientClipByGlobalNorm(
clip_norm=self.clip_grad_by_norm))
grad_clip = nn.GradientClipByGlobalNorm(
clip_norm=self.clip_grad_by_norm)
else:
grad_clip = None
if self.regularizer:
reg_type = self.regularizer['type'] + 'Decay'
......@@ -158,6 +159,7 @@ class OptimizerBuilder():
del optim_args['type']
op = getattr(optimizer, optim_type)
return op(learning_rate=learning_rate,
parameter_list=params,
regularization=regularization,
parameters=params,
weight_decay=regularization,
grad_clip=grad_clip,
**optim_args)
......@@ -18,8 +18,8 @@ from __future__ import print_function
import sys
import paddle.fluid as fluid
import paddle
from paddle import fluid
import logging
import six
import paddle.version as fluid_version
......@@ -65,8 +65,12 @@ def check_version(version='1.7.0'):
version_split = version.split('.')
length = min(len(version_installed), len(version_split))
flag = False
for i in six.moves.range(length):
if version_installed[i] < version_split[i]:
if version_installed[i] > version_split[i]:
flag = True
break
if not flag:
raise Exception(err)
......
......@@ -13,7 +13,8 @@ import warnings
warnings.filterwarnings('ignore')
import random
import numpy as np
import paddle.fluid as fluid
import paddle
from paddle.distributed import ParallelEnv
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser
......@@ -94,10 +95,9 @@ def main():
check_gpu(cfg.use_gpu)
check_version()
place = fluid.CUDAPlace(fluid.dygraph.parallel.Env()
.dev_id) if cfg.use_gpu else fluid.CPUPlace()
with fluid.dygraph.guard(place):
place = paddle.CUDAPlace(ParallelEnv()
.dev_id) if cfg.use_gpu else paddle.CPUPlace()
paddle.disable_static(place)
run(FLAGS, cfg)
......
......@@ -15,14 +15,15 @@ import random
import datetime
import numpy as np
from collections import deque
import paddle.fluid as fluid
import paddle
from paddle import fluid
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.data.reader import create_reader
from ppdet.utils.stats import TrainingStats
from ppdet.utils.check import check_gpu, check_version, check_config
from ppdet.utils.cli import ArgsParser
from ppdet.utils.checkpoint import load_dygraph_ckpt, save_dygraph_ckpt
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed import ParallelEnv
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
......@@ -117,9 +118,10 @@ def run(FLAGS, cfg):
# Parallel Model
if ParallelEnv().nranks > 1:
strategy = fluid.dygraph.parallel.prepare_context()
model = fluid.dygraph.parallel.DataParallel(model, strategy)
strategy = paddle.distributed.init_parallel_env()
model = paddle.DataParallel(model, strategy)
logger.info("success!")
# Data Reader
start_iter = 0
if cfg.use_gpu:
......@@ -157,8 +159,10 @@ def run(FLAGS, cfg):
else:
loss.backward()
optimizer.minimize(loss)
model.clear_gradients()
curr_lr = optimizer.current_step_lr()
optimizer.step()
curr_lr = optimizer.get_lr()
lr.step()
optimizer.clear_grad()
if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
# Log state
......@@ -190,10 +194,10 @@ def main():
check_gpu(cfg.use_gpu)
check_version()
place = fluid.CUDAPlace(ParallelEnv().dev_id) \
if cfg.use_gpu else fluid.CPUPlace()
place = paddle.CUDAPlace(ParallelEnv().dev_id) \
if cfg.use_gpu else paddle.CPUPlace()
paddle.disable_static(place)
with fluid.dygraph.guard(place):
run(FLAGS, cfg)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册