diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 0693567945abcc589dcd3957e0cb1c7c878c7309..79b37aa8c63c648303a5bb2a3b48f6a1c1f66b4f 100755
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -1110,7 +1110,7 @@ paddle.fluid.clip.ErrorClipByValue ('paddle.fluid.clip.ErrorClipByValue', ('docu
 paddle.fluid.clip.ErrorClipByValue.__init__ (ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.clip.GradientClipByValue ('paddle.fluid.clip.GradientClipByValue', ('document', 'b6eb70fb2a39db5c00534f20d62f5741'))
 paddle.fluid.clip.GradientClipByValue.__init__ (ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.clip.GradientClipByNorm ('paddle.fluid.clip.GradientClipByNorm', ('document', 'a5c23d96a3d8c8c1183e9469a5d0d52e'))
+paddle.fluid.clip.GradientClipByNorm ('paddle.fluid.clip.GradientClipByNorm', ('document', '93d62f284d2cdb87f2723fcc63d818f9'))
 paddle.fluid.clip.GradientClipByNorm.__init__ (ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.clip.GradientClipByGlobalNorm ('paddle.fluid.clip.GradientClipByGlobalNorm', ('document', '025b2f323f59c882e2245c2fb39c66bb'))
 paddle.fluid.clip.GradientClipByGlobalNorm.__init__ (ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py
index 4496c146bca20d1722a1386826c7b1d6ea034704..d280ec50354c6444562623366a20c9d32295e993 100644
--- a/python/paddle/fluid/clip.py
+++ b/python/paddle/fluid/clip.py
@@ -184,36 +184,78 @@ class GradientClipByValue(BaseGradientClipAttr):
 
 
 class GradientClipByNorm(BaseGradientClipAttr):
-    """
-    Clips tensor values to a maximum L2-norm.
+    """
+    Convert the input multidimensional Tensor :math:`X` into a Tensor whose L2 norm does not exceed the given maximum norm value ( :math:`clip\_norm` ).
+
+    The tensor is not passed through this class directly; the clip is registered on the program given by the ``main_program`` parameter of ``fluid.program_guard``.
 
-    This operator limits the L2 norm of the input :math:`X` within :math:`max\_norm`.
-    If the L2 norm of :math:`X` is less than or equal to :math:`max\_norm`, :math:`Out`
-    will be the same as :math:`X`. If the L2 norm of :math:`X` is greater than
-    :math:`max\_norm`, :math:`X` will be linearly scaled to make the L2 norm of
-    :math:`Out` equal to :math:`max\_norm`, as shown in the following formula:
+    This class limits the L2 norm of the input :math:`X` to be at most :math:`clip\_norm`.
 
     .. math::
+        Out =
+        \\left \{
+        \\begin{aligned}
+        & X & & if (norm(X) \\leq clip\_norm) \\\\
+        & \\frac{clip\_norm*X}{norm(X)} & & if (norm(X) > clip\_norm) \\\\
+        \\end{aligned}
+        \\right.
 
-        Out = \\frac{max\_norm * X}{norm(X)},
 
     where :math:`norm(X)` represents the L2 norm of :math:`X`.
 
-    Args:
-        clip_norm (float): The maximum norm value
+    .. math::
+        norm(X) = ( \\sum_{i=1}^{n}|x\_i|^2)^{ \\frac{1}{2}}
+    Args:
+        clip_norm(float): The maximum norm value.
+
     Examples:
        .. code-block:: python
 
            import paddle.fluid as fluid
-           w_param_attrs = fluid.ParamAttr(name=None,
-             initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0),
-             learning_rate=1.0,
-             regularizer=fluid.regularizer.L1Decay(1.0),
-             trainable=True,
-             gradient_clip=fluid.clip.GradientClipByNorm(clip_norm=2.0))
-           x = fluid.layers.data(name='x', shape=[10], dtype='float32')
-           y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs)
+           import paddle.fluid.core as core
+           import paddle
+           place = core.CPUPlace()
+           prog = fluid.framework.Program()
+           startup_program = fluid.framework.Program()
+           with fluid.program_guard(
+                   main_program=prog, startup_program=startup_program):
+               image = fluid.data(name='x', shape=[None, 784], dtype='float32', lod_level=0)
+               label = fluid.data(name='y', shape=[None, 1], dtype='int64', lod_level=0)
+               hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
+               hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
+               predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
+               cost = fluid.layers.cross_entropy(input=predict, label=label)
+               avg_cost = fluid.layers.mean(cost)
+           prog_clip = prog.clone()
+           avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
+           p_g = fluid.backward.append_backward(loss=avg_cost)
+           p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)
+           with fluid.program_guard(main_program=prog_clip, startup_program=startup_program):
+               fluid.clip.set_gradient_clip(
+                   fluid.clip.GradientClipByNorm(clip_norm=2.0))
+               p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip)
+           grad_list = [elem[1] for elem in p_g]
+           grad_clip_list = [elem[1] for elem in p_g_clip]
+           train_reader = paddle.batch(
+               paddle.reader.shuffle(
+                   paddle.dataset.mnist.train(), buf_size=8192),
+               batch_size=128)
+
+           exe = fluid.Executor(place)
+           feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
+           exe.run(startup_program)
+
+           count = 0
+           for data in train_reader():
+               count += 1
+               print("count:%s" % count)
+               if count > 5:
+                   break
+               out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
+               out_clip = exe.run(prog_clip,
+                                  feed=feeder.feed(data),
+                                  fetch_list=grad_clip_list)
     """
 
     def __init__(self, clip_norm):
         self.clip_norm = clip_norm
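As a quick sanity check on the formula documented above: a minimal NumPy sketch (illustration only, not part of this diff; the clip_by_norm helper name is hypothetical) that mirrors the documented behavior:

    import numpy as np

    def clip_by_norm(x, clip_norm):
        # Out = X                       if norm(X) <= clip_norm
        # Out = clip_norm * X / norm(X) if norm(X) >  clip_norm
        norm = np.sqrt(np.sum(np.square(x)))
        if norm <= clip_norm:
            return x
        return clip_norm * x / norm

    g = np.array([3.0, 4.0])                # norm(g) = 5.0
    print(clip_by_norm(g, clip_norm=2.0))   # [1.2 1.6] -- rescaled so the L2 norm is 2.0
    print(clip_by_norm(g, clip_norm=10.0))  # [3. 4.]   -- returned unchanged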