diff --git a/configs/anchor_free/fcos_dcn_r50_fpn_1x.yml b/configs/anchor_free/fcos_dcn_r50_fpn_1x.yml
index ff46e744e48aa52a0b5cd58a85eb829b0574c70d..7a4d770cd24188a57ba39430026545f0cd9a0d40 100644
--- a/configs/anchor_free/fcos_dcn_r50_fpn_1x.yml
+++ b/configs/anchor_free/fcos_dcn_r50_fpn_1x.yml
@@ -180,4 +180,3 @@ TestReader:
     use_padded_im_info: true
   batch_size: 1
   shuffle: false
-
diff --git a/ppdet/modeling/ops.py b/ppdet/modeling/ops.py
index 1f758ec0d5ea7f4a1515e459ac70f4ff6bd24ef8..f4787cf7007665ec0291dd728e56ea73f678b290 100644
--- a/ppdet/modeling/ops.py
+++ b/ppdet/modeling/ops.py
@@ -29,8 +29,8 @@ __all__ = [
     'AnchorGenerator', 'AnchorGrid', 'DropBlock', 'RPNTargetAssign',
     'GenerateProposals', 'MultiClassNMS', 'BBoxAssigner', 'MaskAssigner',
     'RoIAlign', 'RoIPool', 'MultiBoxHead', 'SSDLiteMultiBoxHead',
-    'SSDOutputDecoder', 'RetinaTargetAssign', 'RetinaOutputDecoder',
-    'ConvNorm', 'DeformConvNorm', 'MultiClassSoftNMS', 'LibraBBoxAssigner'
+    'SSDOutputDecoder', 'RetinaTargetAssign', 'RetinaOutputDecoder', 'ConvNorm',
+    'DeformConvNorm', 'MultiClassSoftNMS', 'LibraBBoxAssigner'
 ]


diff --git a/ppdet/optimizer.py b/ppdet/optimizer.py
index 21920bb66e168ed00518a6bb799f12670b8fdb89..5f7cfefabcacd0a5412d49d65d177458d1e2b713 100644
--- a/ppdet/optimizer.py
+++ b/ppdet/optimizer.py
@@ -197,6 +197,10 @@ class OptimizerBuilder():
         self.optimizer = optimizer

     def __call__(self, learning_rate):
+        if self.clip_grad_by_norm is not None:
+            fluid.clip.set_gradient_clip(
+                clip=fluid.clip.GradientClipByGlobalNorm(
+                    clip_norm=self.clip_grad_by_norm))
         if self.regularizer:
             reg_type = self.regularizer['type'] + 'Decay'
             reg_factor = self.regularizer['factor']
diff --git a/tools/train.py b/tools/train.py
index 477fe669e1301c2c5faa914d3f1ff4d8e3f84c67..e2d21cf80b11ed0fb19e29d9709f87e68607f641 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -126,11 +126,7 @@ def main():
                     loss *= ctx.get_loss_scale_var()
                 lr = lr_builder()
                 optimizer = optim_builder(lr)
-                clip = None
-                if optim_builder.clip_grad_by_norm is not None:
-                    clip = fluid.clip.GradientClipByGlobalNorm(
-                        clip_norm=optim_builder.clip_grad_by_norm)
-                optimizer.minimize(loss, grad_clip=clip)
+                optimizer.minimize(loss)

                 if FLAGS.fp16:
                     loss /= ctx.get_loss_scale_var()
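
For context, a minimal sketch of what the relocated gradient clipping does, assuming the PaddlePaddle 1.x fluid APIs referenced in this patch. The helper build_optimizer below is hypothetical, standing in for OptimizerBuilder.__call__, and the Momentum settings and the clip value 35.0 are illustrative only, not taken from this diff.

    # Minimal sketch, assuming paddle.fluid 1.x as used in this repo.
    # After this change, clipping is registered once when the optimizer is built,
    # so tools/train.py can simply call optimizer.minimize(loss).
    import paddle.fluid as fluid


    def build_optimizer(learning_rate, clip_grad_by_norm=None):
        # Register a global-norm clip; fluid applies it when minimize() runs.
        if clip_grad_by_norm is not None:
            fluid.clip.set_gradient_clip(
                clip=fluid.clip.GradientClipByGlobalNorm(
                    clip_norm=clip_grad_by_norm))
        return fluid.optimizer.Momentum(
            learning_rate=learning_rate, momentum=0.9)


    # Callers no longer pass grad_clip to minimize():
    #     optimizer = build_optimizer(lr, clip_grad_by_norm=35.0)  # 35.0 is illustrative
    #     optimizer.minimize(loss)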