Unverified commit c7a1252f, authored by Yang Zhang, committed by GitHub

Revert back to `fluid.clip` for gradient clipping (#568)

* Revert back to `fluid.clip` for gradient clipping

The current API is in flux; wait for it to stabilize.

* Please CI

still buggy
Parent 0e228b11
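
For context, the change reverts from passing a clip object to `optimizer.minimize()` back to registering the clip globally through `fluid.clip`. Below is a minimal sketch of that older-style setup, assuming the PaddlePaddle 1.x static-graph API; the toy network and the `clip_norm` value are illustrative, not taken from this commit.

```python
import paddle.fluid as fluid

# Toy static-graph network; only here so the example is self-contained.
x = fluid.layers.data(name='x', shape=[8], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.reduce_mean(
    fluid.layers.square_error_cost(input=pred, label=y))

# Older-style clipping: register the rule globally *before* minimize();
# no grad_clip argument is passed to minimize() in this style.
fluid.clip.set_gradient_clip(
    clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=35.0))

optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
optimizer.minimize(loss)  # picks up the globally registered clip
```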
@@ -180,4 +180,3 @@ TestReader:
     use_padded_im_info: true
   batch_size: 1
   shuffle: false
@@ -29,8 +29,8 @@ __all__ = [
     'AnchorGenerator', 'AnchorGrid', 'DropBlock', 'RPNTargetAssign',
     'GenerateProposals', 'MultiClassNMS', 'BBoxAssigner', 'MaskAssigner',
     'RoIAlign', 'RoIPool', 'MultiBoxHead', 'SSDLiteMultiBoxHead',
-    'SSDOutputDecoder', 'RetinaTargetAssign', 'RetinaOutputDecoder',
-    'ConvNorm', 'DeformConvNorm', 'MultiClassSoftNMS', 'LibraBBoxAssigner'
+    'SSDOutputDecoder', 'RetinaTargetAssign', 'RetinaOutputDecoder', 'ConvNorm',
+    'DeformConvNorm', 'MultiClassSoftNMS', 'LibraBBoxAssigner'
 ]
@@ -197,6 +197,10 @@ class OptimizerBuilder():
         self.optimizer = optimizer

     def __call__(self, learning_rate):
+        if self.clip_grad_by_norm is not None:
+            fluid.clip.set_gradient_clip(
+                clip=fluid.clip.GradientClipByGlobalNorm(
+                    clip_norm=self.clip_grad_by_norm))
         if self.regularizer:
             reg_type = self.regularizer['type'] + 'Decay'
             reg_factor = self.regularizer['factor']
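The hunk above moves the global registration into `OptimizerBuilder.__call__`, so any config that sets `clip_grad_by_norm` gets clipping without the training script building a clip object itself. A hedged usage sketch follows; the import path and the constructor arguments are assumptions inferred from the attributes visible in the diff, not guaranteed by this commit.

```python
import paddle.fluid as fluid
from ppdet.optimizer import OptimizerBuilder  # assumed module path

# Assumed constructor arguments, mirroring the attributes used in __call__.
optim_builder = OptimizerBuilder(
    clip_grad_by_norm=35.0,
    regularizer={'type': 'L2', 'factor': 1e-4},
    optimizer={'type': 'Momentum', 'momentum': 0.9})

# __call__ first registers GradientClipByGlobalNorm(clip_norm=35.0) via
# fluid.clip.set_gradient_clip, then builds the underlying fluid optimizer.
optimizer = optim_builder(learning_rate=0.01)
```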
@@ -126,11 +126,7 @@ def main():
                 loss *= ctx.get_loss_scale_var()
             lr = lr_builder()
             optimizer = optim_builder(lr)
-            clip = None
-            if optim_builder.clip_grad_by_norm is not None:
-                clip = fluid.clip.GradientClipByGlobalNorm(
-                    clip_norm=optim_builder.clip_grad_by_norm)
-            optimizer.minimize(loss, grad_clip=clip)
+            optimizer.minimize(loss)

             if FLAGS.fp16:
                 loss /= ctx.get_loss_scale_var()
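With the `grad_clip` keyword gone from `minimize()`, clipping now hinges entirely on the global registration done by the builder. One way to sanity-check that the clip is actually wired in is to inspect the default main program after `minimize()`; this is a sketch under the assumption of the fluid 1.x static graph, and the exact op names vary by version.

```python
import paddle.fluid as fluid

# Register the global-norm clip before building the optimizer step.
fluid.clip.set_gradient_clip(
    clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
pred = fluid.layers.fc(input=x, size=1)
loss = fluid.layers.reduce_mean(
    fluid.layers.square_error_cost(input=pred, label=y))

fluid.optimizer.SGD(learning_rate=0.1).minimize(loss)

# After minimize(), the global-norm clip should have appended its ops
# (e.g. square / reduce_sum / sqrt style ops) to the backward pass.
op_types = [op.type for op in fluid.default_main_program().global_block().ops]
print(op_types)
```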