From 9cd86487cc46ff933eda0b12cd4bfe5d303f4221 Mon Sep 17 00:00:00 2001
From: ysh329
Date: Wed, 30 Sep 2020 07:19:22 -0500
Subject: [PATCH] Fix api for ErrorClipByValue, code demo of clip_by_norm.
 test=develop (#27654)

* Fix ErrorClipByValue api and demo code of clip_by_value. test=develop

Co-authored-by: tianshuo78520a <707759223@qq.com>
---
 python/paddle/fluid/layers/nn.py | 13 +++++++++----
 python/paddle/nn/__init__.py     |  1 -
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 1bd279c1e8..733d8b5d29 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -12415,12 +12415,17 @@ def clip_by_norm(x, max_norm, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            input = fluid.data(
-                name='data', shape=[None, 1], dtype='float32')
-            reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+            input = paddle.to_tensor(data=np.array([[0.1, 0.2], [0.3, 0.4]]), dtype="float32")
+            reward = paddle.nn.clip_by_norm(x=input, max_norm=1.0)
     """
 
+    if in_dygraph_mode():
+        return core.ops.clip_by_norm(x, 'max_norm', max_norm)
+
     helper = LayerHelper("clip_by_norm", **locals())
     check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')
     check_type(max_norm, 'max_norm', (float), 'clip_by_norm')

diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py
index b79b965f5b..2452f19698
--- a/python/paddle/nn/__init__.py
+++ b/python/paddle/nn/__init__.py
@@ -31,7 +31,6 @@ __all__ += rnn.__all__
 __all__ += weight_norm_hook.__all__
 
 # TODO: define alias in nn directory
-# from .clip import ErrorClipByValue #DEFINE_ALIAS
 from .clip import GradientClipByGlobalNorm #DEFINE_ALIAS
 from .clip import GradientClipByNorm #DEFINE_ALIAS
 from .clip import GradientClipByValue #DEFINE_ALIAS
--
GitLab
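
For reference, the semantics the updated demo exercises: clip_by_norm rescales
the input so that its L2 norm does not exceed max_norm, i.e. out = x * max_norm
/ norm(x) when norm(x) > max_norm, and out = x otherwise. Below is a minimal
NumPy sketch of that behavior, assuming the documented formula;
clip_by_norm_ref is a hypothetical helper written for illustration, not part of
Paddle's API.

.. code-block:: python

    import numpy as np

    def clip_by_norm_ref(x, max_norm):
        # Hypothetical reference helper (not in Paddle): rescale x so its
        # L2 norm does not exceed max_norm, mirroring the documented
        # behavior of clip_by_norm.
        norm = np.sqrt(np.sum(np.square(x)))
        if norm > max_norm:
            return x * (max_norm / norm)
        return x

    x = np.array([[0.1, 0.2], [0.3, 0.4]], dtype="float32")
    # norm(x) is about 0.548, so with max_norm=0.1 every element is scaled
    # down by the same factor; with max_norm=1.0 (as in the patched demo),
    # x is returned unchanged.
    print(clip_by_norm_ref(x, max_norm=0.1))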