diff --git a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
index 188cd030f7e5b7eeae24e84ad482c077ceb9da10..a99c14eebf26619694a4616ec0a678a6fba8bf35 100644
--- a/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
+++ b/doc/fluid/api_cn/clip_cn/GradientClipByGlobalNorm_cn.rst
@@ -58,8 +58,8 @@ GradientClipByGlobalNorm
             #     return Parameter.name=="fc_0.w_0"
 
             # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func)
-            sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1)
-            sgd_optimizer.minimize(loss, grad_clip=clip)
+            sgd_optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.1, grad_clip=clip)
+            sgd_optimizer.minimize(loss)
 
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
@@ -95,5 +95,5 @@ GradientClipByGlobalNorm
 
             # clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0, need_clip=fileter_func)
             sgd_optimizer = fluid.optimizer.SGD(
-                learning_rate=0.1, parameter_list=linear.parameters())
-            sgd_optimizer.minimize(loss, grad_clip=clip)
\ No newline at end of file
+                learning_rate=0.1, parameter_list=linear.parameters(), grad_clip=clip)
+            sgd_optimizer.minimize(loss)
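
Note (not part of the patch): both hunks move gradient clipping from Optimizer.minimize(loss, grad_clip=clip) to the grad_clip argument of the optimizer constructor. The following is a minimal dygraph sketch of the new calling pattern, assuming the Paddle 1.8-era fluid.dygraph API; the layer sizes, input shape, and learning rate are illustrative only.

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        # Illustrative model and data; shapes are arbitrary.
        linear = fluid.dygraph.Linear(10, 10)
        inputs = fluid.dygraph.to_variable(
            np.random.uniform(-1, 1, [16, 10]).astype('float32'))
        out = linear(inputs)
        loss = fluid.layers.reduce_mean(out)
        loss.backward()

        # Clipping is now configured on the optimizer itself,
        # not passed to minimize().
        clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0)
        sgd = fluid.optimizer.SGD(learning_rate=0.1,
                                  parameter_list=linear.parameters(),
                                  grad_clip=clip)
        sgd.minimize(loss)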