Unverified commit 9cd86487 authored by ysh329 and committed by GitHub

Fix api for ErrorClipByValue, code demo of clip_by_norm. test=develop (#27654)

* Fix ErrorClipByValue api and demo code of clip_by_value. test=develop
Co-authored-by: tianshuo78520a <707759223@qq.com>
Parent 54c368db
@@ -12415,12 +12415,17 @@ def clip_by_norm(x, max_norm, name=None):
 
     Examples:
         .. code-block:: python
 
-            import paddle.fluid as fluid
-            input = fluid.data(
-                name='data', shape=[None, 1], dtype='float32')
-            reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
+            import paddle
+            import numpy as np
+
+            paddle.disable_static()
+            input = paddle.to_tensor(data=np.array([[0.1, 0.2], [0.3, 0.4]]), dtype="float32")
+            reward = paddle.nn.clip_by_norm(x=input, max_norm=1.0)
     """
+    if in_dygraph_mode():
+        return core.ops.clip_by_norm(x, 'max_norm', max_norm)
+
     helper = LayerHelper("clip_by_norm", **locals())
     check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')
     check_type(max_norm, 'max_norm', (float), 'clip_by_norm')
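For context, clip_by_norm rescales the whole input tensor so that its L2 norm does not exceed max_norm; inputs already within the limit pass through unchanged. A minimal NumPy sketch of that behavior (clip_by_norm_ref is a hypothetical reference helper, not part of Paddle):

import numpy as np

def clip_by_norm_ref(x, max_norm):
    # L2 norm over all elements of x
    norm = np.sqrt(np.sum(np.square(x)))
    if norm > max_norm:
        # rescale so the output's L2 norm equals max_norm
        return x * (max_norm / norm)
    return x

x = np.array([[0.1, 0.2], [0.3, 0.4]], dtype="float32")
# norm(x) ~= 0.548 <= 1.0, so the input comes back unchanged
print(clip_by_norm_ref(x, max_norm=1.0))

This matches the updated demo above, where the example input's norm is already below max_norm=1.0.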
@@ -31,7 +31,6 @@ __all__ += rnn.__all__
 __all__ += weight_norm_hook.__all__
 # TODO: define alias in nn directory
-# from .clip import ErrorClipByValue #DEFINE_ALIAS
 from .clip import GradientClipByGlobalNorm #DEFINE_ALIAS
 from .clip import GradientClipByNorm #DEFINE_ALIAS
 from .clip import GradientClipByValue #DEFINE_ALIAS
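On the alias removal above: after this commit, ErrorClipByValue is no longer slated for re-export from paddle.nn. A hedged usage sketch, assuming the class still lives at its fluid path (which this commit does not touch):

import paddle.fluid as fluid

# Elementwise clip of the error (the gradient flowing backward) into [min, max].
error_clip = fluid.clip.ErrorClipByValue(max=1.0, min=-1.0)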