Unverified commit 1475bb05 authored by Kaipeng Deng, committed by GitHub

exit 1 when travis check failed (#214)

* exit 1 when travis check fails
Parent fcfdbd2e
......@@ -26,6 +26,7 @@ script:
- .travis/precommit.sh || exit_code=$(( exit_code | $? ))
- docker run -i --rm -v "$PWD:/py_unittest" paddlepaddle/paddle:latest /bin/bash -c
'cd /py_unittest; sh .travis/unittest.sh' || exit_code=$(( exit_code | $? ))
- if [ $exit_code -eq 0 ]; then true; else exit 1; fi;
notifications:
email:
......
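The .travis.yml hunk above collects the exit status of each check with a bitwise OR and only fails the build once every check has run, so a style failure no longer hides a unit-test failure (or vice versa). A minimal sketch of the pattern, with placeholder commands standing in for the actual Travis steps:

    #!/bin/bash
    exit_code=0

    # run every check; a failure only sets bits in exit_code
    ./check_style.sh   || exit_code=$(( exit_code | $? ))
    ./run_unittests.sh || exit_code=$(( exit_code | $? ))

    # fail the job exactly once, after all checks have reported
    if [ $exit_code -ne 0 ]; then
        exit 1
    fi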
# add python requirements for unittests here, note install Cython
# and pycocotools directly is not supported in travis ci.
tqdm
......@@ -3,6 +3,8 @@
abort(){
echo "Run unittest failed" 1>&2
echo "Please check your code" 1>&2
echo " 1. you can run unit tests by 'bash .travis/unittest.sh' locally" 1>&2
echo " 2. you can add python requirements in .travis/requirements.txt if you use new requirements in unit tests" 1>&2
exit 1
}
......@@ -18,10 +20,11 @@ unittest(){
trap 'abort' 0
set -e
# install python dependencies
if [ -f "requirements.txt" ]; then
pip install -r requirements.txt
# install travis python dependencies
if [ -f ".travis/requirements.txt" ]; then
pip install -r .travis/requirements.txt
fi
export PYTHONPATH=`pwd`:$PYTHONPATH
unittest .
......
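The unittest.sh hunks above switch the dependency install from the project-level requirements.txt to a Travis-specific .travis/requirements.txt, and the abort() message now tells contributors how to reproduce the check locally. A rough local equivalent (a sketch, assuming the repository root as the working directory):

    # run the same entry point CI uses
    bash .travis/unittest.sh

    # or, step by step, what the script does internally:
    pip install -r .travis/requirements.txt   # test-only dependencies such as tqdm
    export PYTHONPATH=`pwd`:$PYTHONPATH       # make the in-tree package importable
    # ...then invoke the unit tests from the repository root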
......@@ -134,8 +134,7 @@ _DETECTIONBOX = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[],
serialized_start=43,
serialized_end=175,
)
serialized_end=175)
_DETECTIONRESULT = _descriptor.Descriptor(
name='DetectionResult',
......@@ -186,8 +185,7 @@ _DETECTIONRESULT = _descriptor.Descriptor(
extension_ranges=[],
oneofs=[],
serialized_start=177,
serialized_end=267,
)
serialized_end=267)
_DETECTIONRESULT.fields_by_name['detection_boxes'].message_type = _DETECTIONBOX
DESCRIPTOR.message_types_by_name['DetectionBox'] = _DETECTIONBOX
......@@ -195,8 +193,9 @@ DESCRIPTOR.message_types_by_name['DetectionResult'] = _DETECTIONRESULT
DetectionBox = _reflection.GeneratedProtocolMessageType(
'DetectionBox',
(_message.Message,),
dict(DESCRIPTOR=_DETECTIONBOX,
(_message.Message, ),
dict(
DESCRIPTOR=_DETECTIONBOX,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionBox)
))
......@@ -204,8 +203,9 @@ _sym_db.RegisterMessage(DetectionBox)
DetectionResult = _reflection.GeneratedProtocolMessageType(
'DetectionResult',
(_message.Message,),
dict(DESCRIPTOR=_DETECTIONRESULT,
(_message.Message, ),
dict(
DESCRIPTOR=_DETECTIONRESULT,
__module__='detection_result_pb2'
# @@protoc_insertion_point(class_scope:PaddleSolution.DetectionResult)
))
......
......@@ -85,8 +85,8 @@ if __name__ == "__main__":
for box in detection_result.detection_boxes:
if box.score >= Flags.threshold:
box_class = getattr(box, 'class')
text_class_score_str = "%s %.2f" % (class2LabelMap.get(
str(box_class)), box.score)
text_class_score_str = "%s %.2f" % (
class2LabelMap.get(str(box_class)), box.score)
text_point = (int(box.left_top_x), int(box.left_top_y))
ptLeftTop = (int(box.left_top_x), int(box.left_top_y))
......@@ -106,8 +106,8 @@ if __name__ == "__main__":
text_box_left_top = (text_point[0],
text_point[1] - text_size[0][1])
text_box_right_bottom = (text_point[0] +
text_size[0][0], text_point[1])
text_box_right_bottom = (
text_point[0] + text_size[0][0], text_point[1])
cv2.rectangle(img, text_box_left_top,
text_box_right_bottom, color, -1, 8)
......
......@@ -23,14 +23,19 @@ import re
try:
from docstring_parser import parse as doc_parse
except Exception:
def doc_parse(*args):
pass
try:
from typeguard import check_type
except Exception:
def check_type(*args):
pass
__all__ = ['SchemaValue', 'SchemaDict', 'SharedConfig', 'extract_schema']
......
......@@ -25,11 +25,11 @@ import shutil
import numpy as np
import PIL.ImageDraw
label_to_num = {}
categories_list = []
labels_list = []
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
......@@ -287,16 +287,14 @@ def main():
indent=4,
cls=MyEncoder)
if args.val_proportion != 0:
val_data_coco = deal_json(args.dataset_type,
args.output_dir + '/val',
val_data_coco = deal_json(args.dataset_type, args.output_dir + '/val',
args.json_input_dir)
val_json_path = osp.join(args.output_dir + '/annotations',
'instance_val.json')
json.dump(
val_data_coco, open(val_json_path, 'w'), indent=4, cls=MyEncoder)
if args.test_proportion != 0:
test_data_coco = deal_json(args.dataset_type,
args.output_dir + '/test',
test_data_coco = deal_json(args.dataset_type, args.output_dir + '/test',
args.json_input_dir)
test_json_path = osp.join(args.output_dir + '/annotations',
'instance_test.json')
......
......@@ -64,8 +64,8 @@ class CBResNet(object):
variant='b',
feature_maps=[2, 3, 4, 5],
dcn_v2_stages=[],
nonlocal_stages = [],
repeat_num = 2):
nonlocal_stages=[],
repeat_num=2):
super(CBResNet, self).__init__()
if isinstance(feature_maps, Integral):
......@@ -102,19 +102,26 @@ class CBResNet(object):
self.nonlocal_stages = nonlocal_stages
self.nonlocal_mod_cfg = {
50 : 2,
101 : 5,
152 : 8,
200 : 12,
50: 2,
101: 5,
152: 8,
200: 12,
}
self.stage_filters = [64, 128, 256, 512]
self._c1_out_chan_num = 64
self.na = NameAdapter(self)
def _conv_offset(self, input, filter_size, stride, padding, act=None, name=None):
def _conv_offset(self,
input,
filter_size,
stride,
padding,
act=None,
name=None):
out_channel = filter_size * filter_size * 3
out = fluid.layers.conv2d(input,
out = fluid.layers.conv2d(
input,
num_filters=out_channel,
filter_size=filter_size,
stride=stride,
......@@ -145,7 +152,8 @@ class CBResNet(object):
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights_"+str(self.curr_level)),
param_attr=ParamAttr(
name=name + "_weights_" + str(self.curr_level)),
bias_attr=False)
else:
offset_mask = self._conv_offset(
......@@ -155,8 +163,8 @@ class CBResNet(object):
padding=(filter_size - 1) // 2,
act=None,
name=name + "_conv_offset_" + str(self.curr_level))
offset_channel = filter_size ** 2 * 2
mask_channel = filter_size ** 2
offset_channel = filter_size**2 * 2
mask_channel = filter_size**2
offset, mask = fluid.layers.split(
input=offset_mask,
num_or_sections=[offset_channel, mask_channel],
......@@ -173,7 +181,8 @@ class CBResNet(object):
groups=groups,
deformable_groups=1,
im2col_step=1,
param_attr=ParamAttr(name=name + "_weights_"+str(self.curr_level)),
param_attr=ParamAttr(
name=name + "_weights_" + str(self.curr_level)),
bias_attr=False)
bn_name = self.na.fix_conv_norm_name(name)
......@@ -181,11 +190,11 @@ class CBResNet(object):
norm_lr = 0. if self.freeze_norm else 1.
norm_decay = self.norm_decay
pattr = ParamAttr(
name=bn_name + '_scale_'+str(self.curr_level),
name=bn_name + '_scale_' + str(self.curr_level),
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay))
battr = ParamAttr(
name=bn_name + '_offset_'+str(self.curr_level),
name=bn_name + '_offset_' + str(self.curr_level),
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay))
......@@ -194,11 +203,12 @@ class CBResNet(object):
out = fluid.layers.batch_norm(
input=conv,
act=act,
name=bn_name + '.output.1_'+str(self.curr_level),
name=bn_name + '.output.1_' + str(self.curr_level),
param_attr=pattr,
bias_attr=battr,
moving_mean_name=bn_name + '_mean_'+str(self.curr_level),
moving_variance_name=bn_name + '_variance_'+str(self.curr_level),
moving_mean_name=bn_name + '_mean_' + str(self.curr_level),
moving_variance_name=bn_name + '_variance_' +
str(self.curr_level),
use_global_stats=global_stats)
scale = fluid.framework._get_var(pattr.name)
bias = fluid.framework._get_var(battr.name)
......@@ -262,7 +272,7 @@ class CBResNet(object):
act=act,
groups=g,
name=_name,
dcn=(i==1 and dcn))
dcn=(i == 1 and dcn))
short = self._shortcut(
input,
num_filters * expand,
......@@ -273,8 +283,7 @@ class CBResNet(object):
if callable(getattr(self, '_squeeze_excitation', None)):
residual = self._squeeze_excitation(
input=residual, num_channels=num_filters, name='fc' + name)
return fluid.layers.elementwise_add(
x=short, y=residual, act='relu')
return fluid.layers.elementwise_add(x=short, y=residual, act='relu')
def basicblock(self, input, num_filters, stride, is_first, name, dcn=False):
assert dcn is False, "Not implemented yet."
......@@ -313,10 +322,10 @@ class CBResNet(object):
is_first = False if stage_num != 2 else True
dcn = True if stage_num in self.dcn_v2_stages else False
nonlocal_mod = 1000
if stage_num in self.nonlocal_stages:
nonlocal_mod = self.nonlocal_mod_cfg[self.depth] if stage_num==4 else 2
nonlocal_mod = self.nonlocal_mod_cfg[
self.depth] if stage_num == 4 else 2
# Make the layer name and parameter name consistent
# with ImageNet pre-trained model
......@@ -335,11 +344,12 @@ class CBResNet(object):
# add non local model
dim_in = conv.shape[1]
nonlocal_name = "nonlocal_conv{}_lvl{}".format( stage_num, self.curr_level )
nonlocal_name = "nonlocal_conv{}_lvl{}".format(stage_num,
self.curr_level)
if i % nonlocal_mod == nonlocal_mod - 1:
conv = add_space_nonlocal(
conv, dim_in, dim_in,
nonlocal_name + '_{}'.format(i), int(dim_in / 2) )
conv = add_space_nonlocal(conv, dim_in, dim_in,
nonlocal_name + '_{}'.format(i),
int(dim_in / 2))
return conv
......@@ -349,9 +359,9 @@ class CBResNet(object):
conv1_name = self.na.fix_c1_stage_name()
if self.variant in ['c', 'd']:
conv1_1_name= "conv1_1"
conv1_2_name= "conv1_2"
conv1_3_name= "conv1_3"
conv1_1_name = "conv1_1"
conv1_2_name = "conv1_2"
conv1_3_name = "conv1_3"
conv_def = [
[out_chan // 2, 3, 2, conv1_1_name],
[out_chan // 2, 3, 1, conv1_2_name],
......@@ -377,14 +387,15 @@ class CBResNet(object):
pool_type='max')
return output
def connect( self, left, right, name ):
def connect(self, left, right, name):
ch_right = right.shape[1]
conv = self._conv_norm( left,
conv = self._conv_norm(
left,
num_filters=ch_right,
filter_size=1,
stride=1,
act="relu",
name=name+"_connect")
name=name + "_connect")
shape = fluid.layers.shape(right)
shape_hw = fluid.layers.slice(shape, axes=[0], starts=[2], ends=[4])
out_shape_ = shape_hw
......@@ -414,11 +425,11 @@ class CBResNet(object):
for num in range(1, self.repeat_num):
self.curr_level = num
res = self.c1_stage(input)
for i in range( len(res_endpoints) ):
res = self.connect( res_endpoints[i], res, "test_c"+str(i+1) )
res = self.layer_warp(res, i+2)
for i in range(len(res_endpoints)):
res = self.connect(res_endpoints[i], res, "test_c" + str(i + 1))
res = self.layer_warp(res, i + 2)
res_endpoints[i] = res
if self.freeze_at >= i+2:
if self.freeze_at >= i + 2:
res.stop_gradient = True
return OrderedDict([('res{}_sum'.format(self.feature_maps[idx]), feat)
......
......@@ -40,12 +40,12 @@ class HRFPN(object):
spatial_scale (list): feature map scaling factor
"""
def __init__(self,
def __init__(
self,
num_chan=256,
pooling_type="avg",
share_conv=False,
spatial_scale=[1./64, 1./32, 1./16, 1./8, 1./4],
):
spatial_scale=[1. / 64, 1. / 32, 1. / 16, 1. / 8, 1. / 4], ):
self.num_chan = num_chan
self.pooling_type = pooling_type
self.share_conv = share_conv
......@@ -63,11 +63,12 @@ class HRFPN(object):
# resize
for i in range(1, len(body_dict)):
resized = self.resize_input_tensor(body_dict[body_name_list[i]], outs[0], 2**i)
outs.append( resized )
resized = self.resize_input_tensor(body_dict[body_name_list[i]],
outs[0], 2**i)
outs.append(resized)
# concat
out = fluid.layers.concat( outs, axis=1 )
out = fluid.layers.concat(outs, axis=1)
# reduction
out = fluid.layers.conv2d(
......@@ -82,28 +83,34 @@ class HRFPN(object):
# conv
outs = [out]
for i in range(1, num_out):
outs.append(self.pooling(out, size=2**i, stride=2**i, pooling_type=self.pooling_type))
outs.append(
self.pooling(
out, size=2**i, stride=2**i,
pooling_type=self.pooling_type))
outputs = []
for i in range(num_out):
conv_name = "shared_fpn_conv" if self.share_conv else "shared_fpn_conv_"+str(i)
conv_name = "shared_fpn_conv" if self.share_conv else "shared_fpn_conv_" + str(
i)
conv = fluid.layers.conv2d(
input=outs[i],
num_filters=self.num_chan,
filter_size=3,
stride=1,
padding=1,
param_attr=ParamAttr(name=conv_name+"_weights"),
param_attr=ParamAttr(name=conv_name + "_weights"),
bias_attr=False)
outputs.append( conv )
outputs.append(conv)
for idx in range(0, num_out-len(body_name_list)):
body_name_list.append("fpn_res5_sum_subsampled_{}x".format( 2**(idx+1) ))
for idx in range(0, num_out - len(body_name_list)):
body_name_list.append("fpn_res5_sum_subsampled_{}x".format(2**(idx +
1)))
outputs = outputs[::-1]
body_name_list = body_name_list[::-1]
res_dict = OrderedDict([(body_name_list[k], outputs[k]) for k in range(len(body_name_list))])
res_dict = OrderedDict([(body_name_list[k], outputs[k])
for k in range(len(body_name_list))])
return res_dict, self.spatial_scale
def resize_input_tensor(self, body_input, ref_output, scale):
......@@ -117,10 +124,9 @@ class HRFPN(object):
return body_output
def pooling(self, input, size, stride, pooling_type):
pool = fluid.layers.pool2d(input=input,
pool = fluid.layers.pool2d(
input=input,
pool_size=size,
pool_stride=stride,
pool_type=pooling_type)
return pool
\ No newline at end of file
......@@ -92,13 +92,15 @@ class HRNet(object):
channels_2, channels_3, channels_4 = self.channels[width]
num_modules_2, num_modules_3, num_modules_4 = 1, 4, 3
x = self.conv_bn_layer(input=input,
x = self.conv_bn_layer(
input=input,
filter_size=3,
num_filters=64,
stride=2,
if_act=True,
name='layer1_1')
x = self.conv_bn_layer(input=x,
x = self.conv_bn_layer(
input=x,
filter_size=3,
num_filters=64,
stride=2,
......@@ -119,10 +121,11 @@ class HRNet(object):
def layer1(self, input, name=None):
conv = input
for i in range(4):
conv = self.bottleneck_block(conv,
conv = self.bottleneck_block(
conv,
num_filters=64,
downsample=True if i == 0 else False,
name=name+'_'+str(i+1))
name=name + '_' + str(i + 1))
return conv
def transition_layer(self, x, in_channels, out_channels, name=None):
......@@ -132,19 +135,21 @@ class HRNet(object):
for i in range(num_out):
if i < num_in:
if in_channels[i] != out_channels[i]:
residual = self.conv_bn_layer(x[i],
residual = self.conv_bn_layer(
x[i],
filter_size=3,
num_filters=out_channels[i],
name=name+'_layer_'+str(i+1))
name=name + '_layer_' + str(i + 1))
out.append(residual)
else:
out.append(x[i])
else:
residual = self.conv_bn_layer(x[-1],
residual = self.conv_bn_layer(
x[-1],
filter_size=3,
num_filters=out_channels[i],
stride=2,
name=name+'_layer_'+str(i+1))
name=name + '_layer_' + str(i + 1))
out.append(residual)
return out
......@@ -153,9 +158,11 @@ class HRNet(object):
for i in range(len(channels)):
residual = x[i]
for j in range(block_num):
residual = self.basic_block(residual,
residual = self.basic_block(
residual,
channels[i],
name=name+'_branch_layer_'+str(i+1)+'_'+str(j+1))
name=name + '_branch_layer_' + str(i + 1) + '_' +
str(j + 1))
out.append(residual)
return out
......@@ -165,29 +172,35 @@ class HRNet(object):
residual = x[i]
for j in range(len(channels)):
if j > i:
y = self.conv_bn_layer(x[j],
y = self.conv_bn_layer(
x[j],
filter_size=1,
num_filters=channels[i],
if_act=False,
name=name+'_layer_'+str(i+1)+'_'+str(j+1))
y = fluid.layers.resize_nearest(input=y, scale=2 ** (j - i))
name=name + '_layer_' + str(i + 1) + '_' + str(j + 1))
y = fluid.layers.resize_nearest(input=y, scale=2**(j - i))
residual = fluid.layers.elementwise_add(
x=residual, y=y, act=None)
elif j < i:
y = x[j]
for k in range(i - j):
if k == i - j - 1:
y = self.conv_bn_layer(y,
y = self.conv_bn_layer(
y,
filter_size=3,
num_filters=channels[i],
stride=2,if_act=False,
name=name+'_layer_'+str(i+1)+'_'+str(j+1)+'_'+str(k+1))
stride=2,
if_act=False,
name=name + '_layer_' + str(i + 1) + '_' +
str(j + 1) + '_' + str(k + 1))
else:
y = self.conv_bn_layer(y,
y = self.conv_bn_layer(
y,
filter_size=3,
num_filters=channels[j],
stride=2,
name=name+'_layer_'+str(i+1)+'_'+str(j+1)+'_'+str(k+1))
name=name + '_layer_' + str(i + 1) + '_' +
str(j + 1) + '_' + str(k + 1))
residual = fluid.layers.elementwise_add(
x=residual, y=y, act=None)
......@@ -195,23 +208,36 @@ class HRNet(object):
out.append(residual)
return out
def high_resolution_module(self, x, channels, multi_scale_output=True, name=None):
def high_resolution_module(self,
x,
channels,
multi_scale_output=True,
name=None):
residual = self.branches(x, 4, channels, name=name)
out = self.fuse_layers(residual, channels, multi_scale_output=multi_scale_output, name=name)
out = self.fuse_layers(
residual,
channels,
multi_scale_output=multi_scale_output,
name=name)
return out
def stage(self, x, num_modules, channels, multi_scale_output=True, name=None):
def stage(self,
x,
num_modules,
channels,
multi_scale_output=True,
name=None):
out = x
for i in range(num_modules):
if i == num_modules - 1 and multi_scale_output == False:
out = self.high_resolution_module(out,
out = self.high_resolution_module(
out,
channels,
multi_scale_output=False,
name=name+'_'+str(i+1))
name=name + '_' + str(i + 1))
else:
out = self.high_resolution_module(out,
channels,
name=name+'_'+str(i+1))
out = self.high_resolution_module(
out, channels, name=name + '_' + str(i + 1))
return out
......@@ -219,113 +245,142 @@ class HRNet(object):
out = []
num_filters_list = [128, 256, 512, 1024]
for i in range(len(x)):
out.append(self.conv_bn_layer(input=x[i],
out.append(
self.conv_bn_layer(
input=x[i],
filter_size=1,
num_filters=num_filters_list[i],
name=name+'conv_'+str(i+1)))
name=name + 'conv_' + str(i + 1)))
return out
def basic_block(self, input, num_filters, stride=1, downsample=False, name=None):
def basic_block(self,
input,
num_filters,
stride=1,
downsample=False,
name=None):
residual = input
conv = self.conv_bn_layer(input=input,
conv = self.conv_bn_layer(
input=input,
filter_size=3,
num_filters=num_filters,
stride=stride,
name=name+'_conv1')
conv = self.conv_bn_layer(input=conv,
name=name + '_conv1')
conv = self.conv_bn_layer(
input=conv,
filter_size=3,
num_filters=num_filters,
if_act=False,
name=name+'_conv2')
name=name + '_conv2')
if downsample:
residual = self.conv_bn_layer(input=input,
residual = self.conv_bn_layer(
input=input,
filter_size=1,
num_filters=num_filters,
if_act=False,
name=name+'_downsample')
name=name + '_downsample')
if self.has_se:
conv = self.squeeze_excitation(
input=conv,
num_channels=num_filters,
reduction_ratio=16,
name='fc'+name)
name='fc' + name)
return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')
def bottleneck_block(self, input, num_filters, stride=1, downsample=False, name=None):
def bottleneck_block(self,
input,
num_filters,
stride=1,
downsample=False,
name=None):
residual = input
conv = self.conv_bn_layer(input=input,
conv = self.conv_bn_layer(
input=input,
filter_size=1,
num_filters=num_filters,
name=name+'_conv1')
conv = self.conv_bn_layer(input=conv,
name=name + '_conv1')
conv = self.conv_bn_layer(
input=conv,
filter_size=3,
num_filters=num_filters,
stride=stride,
name=name+'_conv2')
conv = self.conv_bn_layer(input=conv,
name=name + '_conv2')
conv = self.conv_bn_layer(
input=conv,
filter_size=1,
num_filters=num_filters*4,
num_filters=num_filters * 4,
if_act=False,
name=name+'_conv3')
name=name + '_conv3')
if downsample:
residual = self.conv_bn_layer(input=input,
residual = self.conv_bn_layer(
input=input,
filter_size=1,
num_filters=num_filters*4,
num_filters=num_filters * 4,
if_act=False,
name=name+'_downsample')
name=name + '_downsample')
if self.has_se:
conv = self.squeeze_excitation(
input=conv,
num_channels=num_filters * 4,
reduction_ratio=16,
name='fc'+name)
name='fc' + name)
return fluid.layers.elementwise_add(x=residual, y=conv, act='relu')
def squeeze_excitation(self, input, num_channels, reduction_ratio, name=None):
def squeeze_excitation(self,
input,
num_channels,
reduction_ratio,
name=None):
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(input=pool,
squeeze = fluid.layers.fc(
input=pool,
size=num_channels / reduction_ratio,
act='relu',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv),name=name+'_sqz_weights'),
bias_attr=ParamAttr(name=name+'_sqz_offset'))
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + '_sqz_weights'),
bias_attr=ParamAttr(name=name + '_sqz_offset'))
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(input=squeeze,
excitation = fluid.layers.fc(
input=squeeze,
size=num_channels,
act='sigmoid',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(
-stdv, stdv),name=name+'_exc_weights'),
bias_attr=ParamAttr(name=name+'_exc_offset'))
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + '_exc_weights'),
bias_attr=ParamAttr(name=name + '_exc_offset'))
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def conv_bn_layer(self,input, filter_size, num_filters, stride=1, padding=1, num_groups=1, if_act=True, name=None):
def conv_bn_layer(self,
input,
filter_size,
num_filters,
stride=1,
padding=1,
num_groups=1,
if_act=True,
name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size-1)//2,
padding=(filter_size - 1) // 2,
groups=num_groups,
act=None,
param_attr=ParamAttr(initializer=MSRA(), name=name+'_weights'),
param_attr=ParamAttr(
initializer=MSRA(), name=name + '_weights'),
bias_attr=False)
bn_name = name + '_bn'
bn = self._bn( input=conv, bn_name=bn_name )
bn = self._bn(input=conv, bn_name=bn_name)
if if_act:
bn = fluid.layers.relu(bn)
return bn
def _bn(self,
input,
act=None,
bn_name=None):
def _bn(self, input, act=None, bn_name=None):
norm_lr = 0. if self.freeze_norm else 1.
norm_decay = self.norm_decay
pattr = ParamAttr(
......@@ -363,10 +418,10 @@ class HRNet(object):
res = input
feature_maps = self.feature_maps
self.net( input )
self.net(input)
for i in feature_maps:
res = self.end_points[i-2]
res = self.end_points[i - 2]
if i in self.feature_maps:
res_endpoints.append(res)
if self.freeze_at >= i:
......@@ -374,4 +429,3 @@ class HRNet(object):
return OrderedDict([('res{}_sum'.format(self.feature_maps[idx]), feat)
for idx, feat in enumerate(res_endpoints)])
......@@ -54,7 +54,8 @@ class Res2Net(ResNet):
"""
__shared__ = ['norm_type', 'freeze_norm', 'weight_prefix_name']
def __init__(self,
def __init__(
self,
depth=50,
width=26,
scales=4,
......@@ -66,8 +67,9 @@ class Res2Net(ResNet):
feature_maps=[2, 3, 4, 5],
dcn_v2_stages=[],
weight_prefix_name='',
nonlocal_stages=[],):
super(Res2Net, self).__init__(depth=depth,
nonlocal_stages=[], ):
super(Res2Net, self).__init__(
depth=depth,
freeze_at=freeze_at,
norm_type=norm_type,
freeze_norm=freeze_norm,
......@@ -78,7 +80,8 @@ class Res2Net(ResNet):
weight_prefix_name=weight_prefix_name,
nonlocal_stages=nonlocal_stages)
assert depth >= 50, "just support depth>=50 in res2net, but got depth=".format(depth)
assert depth >= 50, "just support depth>=50 in res2net, but got depth=".format(
depth)
# res2net config
self.scales = scales
self.width = width
......@@ -107,26 +110,32 @@ class Res2Net(ResNet):
ys = []
for s in range(self.scales - 1):
if s == 0 or stride == 2:
ys.append(self._conv_norm(input=xs[s],
num_filters=num_filters1//self.scales,
ys.append(
self._conv_norm(
input=xs[s],
num_filters=num_filters1 // self.scales,
stride=stride,
filter_size=3,
act='relu',
name=name+ '_branch2b_' + str(s+1),
name=name + '_branch2b_' + str(s + 1),
dcn_v2=dcn_v2))
else:
ys.append(self._conv_norm(input=xs[s]+ys[-1],
num_filters=num_filters1//self.scales,
ys.append(
self._conv_norm(
input=xs[s] + ys[-1],
num_filters=num_filters1 // self.scales,
stride=stride,
filter_size=3,
act='relu',
name=name+ '_branch2b_' + str(s+1),
name=name + '_branch2b_' + str(s + 1),
dcn_v2=dcn_v2))
if stride == 1:
ys.append(xs[-1])
else:
ys.append(fluid.layers.pool2d(input=xs[-1],
ys.append(
fluid.layers.pool2d(
input=xs[-1],
pool_size=3,
pool_stride=stride,
pool_padding=1,
......@@ -138,18 +147,14 @@ class Res2Net(ResNet):
num_filters=num_filters2,
filter_size=1,
act=None,
name=name+"_branch2c")
name=name + "_branch2c")
short = self._shortcut(input,
num_filters2,
stride,
is_first,
name=name + "_branch1")
short = self._shortcut(
input, num_filters2, stride, is_first, name=name + "_branch1")
return fluid.layers.elementwise_add(
x=short, y=conv2, act='relu', name=name + ".add.output.5")
def layer_warp(self, input, stage_num):
"""
Args:
......@@ -168,12 +173,13 @@ class Res2Net(ResNet):
is_first = False if stage_num != 2 else True
dcn_v2 = True if stage_num in self.dcn_v2_stages else False
num_filters1 = self.num_filters1[stage_num-2]
num_filters2 = self.num_filters2[stage_num-2]
num_filters1 = self.num_filters1[stage_num - 2]
num_filters2 = self.num_filters2[stage_num - 2]
nonlocal_mod = 1000
if stage_num in self.nonlocal_stages:
nonlocal_mod = self.nonlocal_mod_cfg[self.depth] if stage_num==4 else 2
nonlocal_mod = self.nonlocal_mod_cfg[
self.depth] if stage_num == 4 else 2
# Make the layer name and parameter name consistent
# with ImageNet pre-trained model
......@@ -193,11 +199,11 @@ class Res2Net(ResNet):
# add non local model
dim_in = conv.shape[1]
nonlocal_name = "nonlocal_conv{}".format( stage_num )
nonlocal_name = "nonlocal_conv{}".format(stage_num)
if i % nonlocal_mod == nonlocal_mod - 1:
conv = add_space_nonlocal(
conv, dim_in, dim_in,
nonlocal_name + '_{}'.format(i), int(dim_in / 2) )
conv = add_space_nonlocal(conv, dim_in, dim_in,
nonlocal_name + '_{}'.format(i),
int(dim_in / 2))
return conv
......@@ -217,7 +223,7 @@ class Res2NetC5(Res2Net):
variant='b',
feature_maps=[5],
weight_prefix_name=''):
super(Res2NetC5, self).__init__(depth, width, scales,
freeze_at, norm_type, freeze_norm,
norm_decay, variant, feature_maps)
super(Res2NetC5, self).__init__(depth, width, scales, freeze_at,
norm_type, freeze_norm, norm_decay,
variant, feature_maps)
self.severed_head = True
......@@ -36,16 +36,25 @@ class IouLoss(object):
max_height (int): max height of input to support random shape input
max_width (int): max width of input to support random shape input
"""
def __init__(self,
loss_weight=2.5,
max_height=608,
max_width=608):
def __init__(self, loss_weight=2.5, max_height=608, max_width=608):
self._loss_weight = loss_weight
self._MAX_HI = max_height
self._MAX_WI = max_width
def __call__(self, x, y, w, h, tx, ty, tw, th,
anchors, downsample_ratio, batch_size, eps=1.e-10):
def __call__(self,
x,
y,
w,
h,
tx,
ty,
tw,
th,
anchors,
downsample_ratio,
batch_size,
eps=1.e-10):
'''
Args:
x | y | w | h ([Variables]): the output of yolov3 for encoded x|y|w|h
......@@ -55,10 +64,10 @@ class IouLoss(object):
batch_size (int): training batch size
eps (float): the decimal to prevent the denominator eqaul zero
'''
x1, y1, x2, y2 = self._bbox_transform(x, y, w, h, anchors,
downsample_ratio, batch_size, False)
x1g, y1g, x2g, y2g = self._bbox_transform(tx, ty, tw, th,
anchors, downsample_ratio, batch_size, True)
x1, y1, x2, y2 = self._bbox_transform(
x, y, w, h, anchors, downsample_ratio, batch_size, False)
x1g, y1g, x2g, y2g = self._bbox_transform(
tx, ty, tw, th, anchors, downsample_ratio, batch_size, True)
x2 = fluid.layers.elementwise_max(x1, x2)
y2 = fluid.layers.elementwise_max(y1, y2)
......@@ -76,14 +85,16 @@ class IouLoss(object):
intsctk = (xkis2 - xkis1) * (ykis2 - ykis1)
intsctk = intsctk * fluid.layers.greater_than(
xkis2, xkis1) * fluid.layers.greater_than(ykis2, ykis1)
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk + eps
unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g
) - intsctk + eps
iouk = intsctk / unionk
loss_iou = 1. - iouk * iouk
loss_iou = loss_iou * self._loss_weight
return loss_iou
def _bbox_transform(self, dcx, dcy, dw, dh, anchors, downsample_ratio, batch_size, is_gt):
def _bbox_transform(self, dcx, dcy, dw, dh, anchors, downsample_ratio,
batch_size, is_gt):
grid_x = int(self._MAX_WI / downsample_ratio)
grid_y = int(self._MAX_HI / downsample_ratio)
an_num = len(anchors) // 2
......@@ -125,14 +136,16 @@ class IouLoss(object):
anchor_w_np = np.array(anchor_w_)
anchor_w_np = np.reshape(anchor_w_np, newshape=[1, an_num, 1, 1])
anchor_w_np = np.tile(anchor_w_np, reps=[batch_size, 1, grid_y, grid_x])
anchor_w_max = self._create_tensor_from_numpy(anchor_w_np.astype(np.float32))
anchor_w_max = self._create_tensor_from_numpy(
anchor_w_np.astype(np.float32))
anchor_w = fluid.layers.crop(x=anchor_w_max, shape=dcx)
anchor_w.stop_gradient = True
anchor_h_ = [anchors[i] for i in range(0, len(anchors)) if i % 2 == 1]
anchor_h_np = np.array(anchor_h_)
anchor_h_np = np.reshape(anchor_h_np, newshape=[1, an_num, 1, 1])
anchor_h_np = np.tile(anchor_h_np, reps=[batch_size, 1, grid_y, grid_x])
anchor_h_max = self._create_tensor_from_numpy(anchor_h_np.astype(np.float32))
anchor_h_max = self._create_tensor_from_numpy(
anchor_h_np.astype(np.float32))
anchor_h = fluid.layers.crop(x=anchor_h_max, shape=dcx)
anchor_h.stop_gradient = True
# e^tw e^th
......@@ -148,7 +161,6 @@ class IouLoss(object):
pw.stop_gradient = True
ph.stop_gradient = True
x1 = cx - 0.5 * pw
y1 = cy - 0.5 * ph
x2 = cx + 0.5 * pw
......@@ -169,4 +181,3 @@ class IouLoss(object):
default_initializer=NumpyArrayInitializer(numpy_array))
paddle_array.stop_gradient = True
return paddle_array
......@@ -131,8 +131,8 @@ class YOLOv3Loss(object):
loss_h = fluid.layers.abs(h - th) * tscale_tobj
loss_h = fluid.layers.reduce_sum(loss_h, dim=[1, 2, 3])
if self._iou_loss is not None:
loss_iou = self._iou_loss(x, y, w, h, tx, ty, tw, th,
anchors, downsample, self._batch_size)
loss_iou = self._iou_loss(x, y, w, h, tx, ty, tw, th, anchors,
downsample, self._batch_size)
loss_iou = loss_iou * tscale_tobj
loss_iou = fluid.layers.reduce_sum(loss_iou, dim=[1, 2, 3])
loss_ious.append(fluid.layers.reduce_mean(loss_iou))
......
......@@ -220,27 +220,31 @@ class CascadeBBoxHead(object):
pred_result = self.nms(bboxes=box_out, scores=boxes_cls_prob_mean)
return {"bbox": pred_result}
def get_prediction_cls_aware(self,
im_info,
im_shape,
cascade_cls_prob,
cascade_decoded_box,
cascade_bbox_reg_weights):
def get_prediction_cls_aware(self, im_info, im_shape, cascade_cls_prob,
cascade_decoded_box, cascade_bbox_reg_weights):
'''
get_prediction_cls_aware: predict bbox for each class
'''
cascade_num_stage = 3
cascade_eval_weight = [0.2, 0.3, 0.5]
# merge 3 stages results
sum_cascade_cls_prob = sum([ prob*cascade_eval_weight[idx] for idx, prob in enumerate(cascade_cls_prob) ])
sum_cascade_decoded_box = sum([ bbox*cascade_eval_weight[idx] for idx, bbox in enumerate(cascade_decoded_box) ])
sum_cascade_cls_prob = sum([
prob * cascade_eval_weight[idx]
for idx, prob in enumerate(cascade_cls_prob)
])
sum_cascade_decoded_box = sum([
bbox * cascade_eval_weight[idx]
for idx, bbox in enumerate(cascade_decoded_box)
])
self.im_scale = fluid.layers.slice(im_info, [1], starts=[2], ends=[3])
im_scale_lod = fluid.layers.sequence_expand(self.im_scale, sum_cascade_decoded_box)
im_scale_lod = fluid.layers.sequence_expand(self.im_scale,
sum_cascade_decoded_box)
sum_cascade_decoded_box = sum_cascade_decoded_box / im_scale_lod
decoded_bbox = sum_cascade_decoded_box
decoded_bbox = fluid.layers.reshape(decoded_bbox, shape=(-1, self.num_classes, 4) )
decoded_bbox = fluid.layers.reshape(
decoded_bbox, shape=(-1, self.num_classes, 4))
box_out = fluid.layers.box_clip(input=decoded_bbox, im_info=im_shape)
pred_result = self.nms(bboxes=box_out, scores=sum_cascade_cls_prob)
......
This diff is collapsed.
......@@ -35,7 +35,6 @@ set_paddle_flags(
FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
)
from paddle import fluid
from ppdet.experimental import mixed_precision_context
from ppdet.core.workspace import load_config, merge_config, create
......@@ -85,11 +84,16 @@ def main():
eval_prog = eval_prog.clone(True)
if FLAGS.print_params:
print("-------------------------All parameters in current graph----------------------")
print(
"-------------------------All parameters in current graph----------------------"
)
for block in eval_prog.blocks:
for param in block.all_parameters():
print("parameter name: {}\tshape: {}".format(param.name, param.shape))
print("------------------------------------------------------------------------------")
print("parameter name: {}\tshape: {}".format(param.name,
param.shape))
print(
"------------------------------------------------------------------------------"
)
return
eval_reader = create_reader(cfg.EvalReader)
......@@ -133,8 +137,8 @@ def main():
compiled_eval_prog = fluid.compiler.CompiledProgram(program)
results = eval_run(exe, compiled_eval_prog, eval_loader,
eval_keys, eval_values, eval_cls)
results = eval_run(exe, compiled_eval_prog, eval_loader, eval_keys,
eval_values, eval_cls)
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
......@@ -152,12 +156,15 @@ def main():
pruned_params = FLAGS.pruned_params
assert (FLAGS.pruned_params is not None), "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
assert (
FLAGS.pruned_params is not None
), "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
pruned_params = FLAGS.pruned_params.strip().split(",")
logger.info("pruned params: {}".format(pruned_params))
pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(" ")]
logger.info("pruned ratios: {}".format(pruned_ratios))
sensitivity(eval_prog,
sensitivity(
eval_prog,
place,
pruned_params,
test,
......@@ -195,7 +202,8 @@ if __name__ == '__main__':
"--pruned_ratios",
default="0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9",
type=str,
help="The ratios pruned iteratively for each parameter when calculating sensitivities.")
help="The ratios pruned iteratively for each parameter when calculating sensitivities."
)
parser.add_argument(
"-P",
"--print_params",
......