Commit 3fbf8b4c authored by wuzewu

Fixed the bug where multiple calls to context() caused parameter names to alternate in the retinanet module

Parent commit abc05deb
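The fix wraps graph construction in fluid.unique_name.guard() so that building the module a second time does not keep advancing Paddle's global name counter. Below is a minimal sketch of that effect, separate from this commit, assuming PaddlePaddle's 1.x static-graph (fluid) API; the build_program helper and the fc layer are illustrative only.

import paddle.fluid as fluid

def build_program(use_guard):
    # Build a tiny program and return the names of its parameters.
    prog, startup = fluid.Program(), fluid.Program()
    with fluid.program_guard(prog, startup):
        if use_guard:
            # Restart name generation for this program, so parameters get
            # the same names every time the graph is rebuilt.
            with fluid.unique_name.guard():
                x = fluid.layers.data(name='x', shape=[16], dtype='float32')
                fluid.layers.fc(input=x, size=8)
        else:
            x = fluid.layers.data(name='x', shape=[16], dtype='float32')
            fluid.layers.fc(input=x, size=8)
    return [param.name for param in prog.global_block().iter_parameters()]

# Without the guard the global counter keeps advancing across calls
# (e.g. fc_0.w_0, then fc_1.w_0), so pretrained weights saved under the
# first set of names can no longer be matched on a later call.
print(build_program(use_guard=False))
print(build_program(use_guard=False))

# With the guard, repeated calls produce identical parameter names.
print(build_program(use_guard=True))
print(build_program(use_guard=True))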
@@ -69,7 +69,7 @@ class RetinaNetResNet50FPN(hub.Module):
                 num_classes=81,
                 trainable=True,
                 pretrained=True,
-                get_prediction=False):
+                phase='train'):
         """
         Distill the Head Features, so as to perform transfer learning.
 
@@ -77,7 +77,7 @@ class RetinaNetResNet50FPN(hub.Module):
             num_classes (int): number of classes.
             trainable (bool): whether to set parameters trainable.
             pretrained (bool): whether to load default pretrained model.
-            get_prediction (bool): whether to get prediction.
+            phase (str): optional choices are 'train' and 'predict'.
 
         Returns:
             inputs(dict): the input variables.
@@ -87,97 +87,103 @@ class RetinaNetResNet50FPN(hub.Module):
         context_prog = fluid.Program()
         startup_program = fluid.Program()
         with fluid.program_guard(context_prog, startup_program):
-            var_prefix = '@HUB_{}@'.format(self.name)
-            # image
-            image = fluid.layers.data(
-                name='image',
-                shape=[-1, 3, -1, -1],
-                dtype='float32',
-                lod_level=0)
-            # im_info
-            im_info = fluid.layers.data(
-                name='im_info', shape=[3], dtype='float32', lod_level=0)
-            # backbone
-            backbone = ResNet(
-                norm_type='affine_channel',
-                freeze_at=2,
-                norm_decay=0.,
-                depth=50,
-                feature_maps=[3, 4, 5])
-            body_feats = backbone(image)
-            # retina_head
-            retina_head = RetinaHead(
-                anchor_generator=AnchorGenerator(
-                    aspect_ratios=[1.0, 2.0, 0.5],
-                    variance=[1.0, 1.0, 1.0, 1.0]),
-                target_assign=RetinaTargetAssign(
-                    positive_overlap=0.5, negative_overlap=0.4),
-                output_decoder=RetinaOutputDecoder(
-                    score_thresh=0.05,
-                    nms_thresh=0.5,
-                    pre_nms_top_n=1000,
-                    detections_per_im=100,
-                    nms_eta=1.0),
-                num_convs_per_octave=4,
-                num_chan=256,
-                max_level=7,
-                min_level=3,
-                prior_prob=0.01,
-                base_scale=4,
-                num_scales_per_octave=3)
-            # fpn
-            fpn = FPN(
-                max_level=7,
-                min_level=3,
-                num_chan=256,
-                spatial_scale=[0.03125, 0.0625, 0.125],
-                has_extra_convs=True)
-            # body_feats
-            body_feats, spatial_scale = fpn.get_output(body_feats)
-            # inputs, outputs, context_prog
-            inputs = {
-                'image': var_prefix + image.name,
-                'im_info': var_prefix + im_info.name
-            }
-            if get_prediction:
-                pred = retina_head.get_prediction(body_feats, spatial_scale,
-                                                  im_info)
-                outputs = {'bbox_out': var_prefix + pred.name}
-            else:
-                outputs = {
-                    'body_features':
-                    [var_prefix + var.name for key, var in body_feats.items()]
-                }
+            with fluid.unique_name.guard():
+                var_prefix = '@HUB_{}@'.format(self.name)
+                # image
+                image = fluid.layers.data(
+                    name='image',
+                    shape=[-1, 3, -1, -1],
+                    dtype='float32',
+                    lod_level=0)
+                # im_info
+                im_info = fluid.layers.data(
+                    name='im_info', shape=[3], dtype='float32', lod_level=0)
+                # backbone
+                backbone = ResNet(
+                    norm_type='affine_channel',
+                    freeze_at=2,
+                    norm_decay=0.,
+                    depth=50,
+                    feature_maps=[3, 4, 5])
+                body_feats = backbone(image)
+                # retina_head
+                retina_head = RetinaHead(
+                    anchor_generator=AnchorGenerator(
+                        aspect_ratios=[1.0, 2.0, 0.5],
+                        variance=[1.0, 1.0, 1.0, 1.0]),
+                    target_assign=RetinaTargetAssign(
+                        positive_overlap=0.5, negative_overlap=0.4),
+                    output_decoder=RetinaOutputDecoder(
+                        score_thresh=0.05,
+                        nms_thresh=0.5,
+                        pre_nms_top_n=1000,
+                        detections_per_im=100,
+                        nms_eta=1.0),
+                    num_convs_per_octave=4,
+                    num_chan=256,
+                    max_level=7,
+                    min_level=3,
+                    prior_prob=0.01,
+                    base_scale=4,
+                    num_scales_per_octave=3)
+                # fpn
+                fpn = FPN(
+                    max_level=7,
+                    min_level=3,
+                    num_chan=256,
+                    spatial_scale=[0.03125, 0.0625, 0.125],
+                    has_extra_convs=True)
+                # body_feats
+                body_feats, spatial_scale = fpn.get_output(body_feats)
+                # inputs, outputs, context_prog
+                inputs = {
+                    'image': var_prefix + image.name,
+                    'im_info': var_prefix + im_info.name
+                }
+                if phase == 'predict':
+                    pred = retina_head.get_prediction(body_feats, spatial_scale,
+                                                      im_info)
+                    outputs = {'bbox_out': var_prefix + pred.name}
+                else:
+                    outputs = {
+                        'body_features': [
+                            var_prefix + var.name
+                            for key, var in body_feats.items()
+                        ]
+                    }
 
             # add_vars_prefix
             add_vars_prefix(context_prog, var_prefix)
             add_vars_prefix(fluid.default_startup_program(), var_prefix)
 
             global_vars = context_prog.global_block().vars
-            inputs = {key: global_vars[value] for key, value in inputs.items()}
-            outputs = {
-                key: global_vars[value] if not isinstance(value, list) else
-                [global_vars[var] for var in value]
-                for key, value in outputs.items()
-            }
+            inputs = {
+                key: global_vars[value]
+                for key, value in inputs.items()
+            }
+            outputs = {
+                key: global_vars[value] if not isinstance(value, list) else
+                [global_vars[var] for var in value]
+                for key, value in outputs.items()
+            }
 
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
             for param in context_prog.global_block().iter_parameters():
                 param.trainable = trainable
 
             if pretrained:
 
                 def _if_exist(var):
                     return os.path.exists(
                         os.path.join(self.default_pretrained_model_path,
                                      var.name))
 
                 fluid.io.load_vars(
                     exe,
                     self.default_pretrained_model_path,
                     predicate=_if_exist)
             else:
                 exe.run(startup_program)
 
             return inputs, outputs, context_prog
 
     def save_inference_model(self,
...
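For reference, a hedged sketch of how the revised interface would be called; the module name passed to hub.Module is an assumption, since the diff only shows the RetinaNetResNet50FPN class.

import paddlehub as hub

# Hypothetical module name, used here only for illustration.
module = hub.Module(name="retinanet_resnet50_fpn_coco2017")

# After this commit the switch is `phase` rather than `get_prediction`:
# 'train' exposes body_features for transfer learning, while 'predict'
# exposes the decoded bbox_out variable.
inputs, outputs, program = module.context(
    num_classes=81, trainable=True, pretrained=True, phase='train')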