Unverified commit c7a1252f, authored by Yang Zhang and committed by GitHub

Revert back to `fluid.clip` for gradient clipping (#568)

* Revert back to `fluid.clip` for gradient clipping

the current API is in flux; wait for it to stabilize

* Please CI

still buggy
Parent 0e228b11
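The revert registers gradient clipping globally through `fluid.clip.set_gradient_clip` instead of passing a clip object to `minimize`. A minimal sketch of the pattern the commit restores, assuming PaddlePaddle 1.x `fluid`; the toy network and the `clip_norm` value are illustrative, not taken from this repository:

```python
import paddle.fluid as fluid

# Toy network, just so there are parameters and a loss to minimize.
x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.reduce_mean(y)

# Register global-norm clipping on the default main program;
# clip_norm=0.1 is a placeholder, not a value from the repo's configs.
fluid.clip.set_gradient_clip(
    clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=0.1))

# Any optimizer created afterwards applies the registered clip inside minimize().
optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize(loss)
```

Because the clip is attached to the program rather than passed per call, the training script below no longer needs to build or hand a clip object to `minimize`.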
@@ -180,4 +180,3 @@ TestReader:
     use_padded_im_info: true
   batch_size: 1
   shuffle: false
@@ -29,8 +29,8 @@ __all__ = [
     'AnchorGenerator', 'AnchorGrid', 'DropBlock', 'RPNTargetAssign',
     'GenerateProposals', 'MultiClassNMS', 'BBoxAssigner', 'MaskAssigner',
     'RoIAlign', 'RoIPool', 'MultiBoxHead', 'SSDLiteMultiBoxHead',
-    'SSDOutputDecoder', 'RetinaTargetAssign', 'RetinaOutputDecoder',
-    'ConvNorm', 'DeformConvNorm', 'MultiClassSoftNMS', 'LibraBBoxAssigner'
+    'SSDOutputDecoder', 'RetinaTargetAssign', 'RetinaOutputDecoder', 'ConvNorm',
+    'DeformConvNorm', 'MultiClassSoftNMS', 'LibraBBoxAssigner'
 ]
@@ -197,6 +197,10 @@ class OptimizerBuilder():
         self.optimizer = optimizer

     def __call__(self, learning_rate):
+        if self.clip_grad_by_norm is not None:
+            fluid.clip.set_gradient_clip(
+                clip=fluid.clip.GradientClipByGlobalNorm(
+                    clip_norm=self.clip_grad_by_norm))
         if self.regularizer:
             reg_type = self.regularizer['type'] + 'Decay'
             reg_factor = self.regularizer['factor']
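For context, a self-contained sketch of the builder pattern this hunk extends: clipping is registered when the builder is invoked, before the optimizer is constructed. The class name, the omitted regularizer handling, and the numeric values are illustrative, not copied from the repository:

```python
import paddle.fluid as fluid


class OptimizerBuilderSketch(object):
    """Simplified stand-in for OptimizerBuilder; only the clipping path is shown."""

    def __init__(self, clip_grad_by_norm=None):
        self.clip_grad_by_norm = clip_grad_by_norm

    def __call__(self, learning_rate):
        if self.clip_grad_by_norm is not None:
            # Register global-norm clipping before building the optimizer,
            # mirroring the lines added in the hunk above.
            fluid.clip.set_gradient_clip(
                clip=fluid.clip.GradientClipByGlobalNorm(
                    clip_norm=self.clip_grad_by_norm))
        return fluid.optimizer.Momentum(
            learning_rate=learning_rate, momentum=0.9)


# Illustrative usage; clip_grad_by_norm=35.0 is a placeholder value.
optimizer = OptimizerBuilderSketch(clip_grad_by_norm=35.0)(learning_rate=0.01)
```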
@@ -126,11 +126,7 @@ def main():
                 loss *= ctx.get_loss_scale_var()
             lr = lr_builder()
             optimizer = optim_builder(lr)
-            clip = None
-            if optim_builder.clip_grad_by_norm is not None:
-                clip = fluid.clip.GradientClipByGlobalNorm(
-                    clip_norm=optim_builder.clip_grad_by_norm)
-            optimizer.minimize(loss, grad_clip=clip)
+            optimizer.minimize(loss)
             if FLAGS.fp16:
                 loss /= ctx.get_loss_scale_var()
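The net effect of this last hunk is that the training loop no longer constructs a `GradientClipByGlobalNorm` object or passes `grad_clip` to `minimize`; clipping is configured once inside `OptimizerBuilder` via `fluid.clip.set_gradient_clip`, so the call site shrinks to `optimizer.minimize(loss)`. The trade-off is that the clip becomes implicit program state rather than an explicit argument, which the commit message accepts while the newer clipping API settles.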