From 915a08af1ca69c250ba47208d54d72ba4dad3f37 Mon Sep 17 00:00:00 2001
From: NotBad
Date: Tue, 30 Jun 2020 17:26:11 +0800
Subject: [PATCH] fix GradientClipByGlobalNorm. test=develop

---
 doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
index 188cd030f..a99c14eeb 100644
--- a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
+++ b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
@@ -58,8 +58,8 @@ GradientClipByGlobalNorm
             #     return Parameter.name=="fc_0.w_0"
             # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func)
 
-            sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1)
-            sgd_optimizer.minimize(loss, grad_clip=clip)
+            sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
+            sgd_optimizer.minimize(loss)
 
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
@@ -95,5 +95,5 @@ GradientClipByGlobalNorm
             # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func)
 
             sgd_optimizer = fluid.optimizer.SGD(
-                learning_rate=0.1, parameter_list=linear.parameters())
-            sgd_optimizer.minimize(loss, grad_clip=clip)
\ No newline at end of file
+                learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip)
+            sgd_optimizer.minimize(loss)
--
GitLab
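
Note: below is a minimal runnable sketch of the corrected usage this patch documents, assuming a Paddle Fluid 1.8-era static-graph program. The network, tensor names, and shapes are illustrative only and are not taken from the documented example.

    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # Illustrative linear regression; any network/loss works the same way.
        x = fluid.data(name='x', shape=[None, 13], dtype='float32')
        y = fluid.data(name='y', shape=[None, 1], dtype='float32')
        predict = fluid.layers.fc(input=x, size=1)
        loss = fluid.layers.mean(fluid.layers.square_error_cost(predict, y))

        # Clip so the global L2 norm of all gradients stays within 1.0.
        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)

        # Corrected API (this patch): pass grad_clip to the optimizer
        # constructor instead of to minimize().
        sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1,
                                                     grad_clip=clip)
        sgd_optimizer.minimize(loss)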