Commit 6403e886 authored by GaoWei8, committed by hong19860320

[cherry-pick] add input type and dtype check for elu_op (#20106) fix GradientClipByNorm English doc (#20224) (#20457)

* fix GradientClipByNorm English doc (#20224)

* fix GradientClipByNorm English doc
test=develop
test=document_fix

* add input type and dtype check for elu_op (#20106)

* elu input check
test=develop

* test=develop
Parent de388a59
@@ -1110,7 +1110,7 @@ paddle.fluid.clip.ErrorClipByValue ('paddle.fluid.clip.ErrorClipByValue', ('docu
 paddle.fluid.clip.ErrorClipByValue.__init__ (ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.clip.GradientClipByValue ('paddle.fluid.clip.GradientClipByValue', ('document', 'b7a22f687269cae0c338ef3866322db7'))
 paddle.fluid.clip.GradientClipByValue.__init__ (ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.clip.GradientClipByNorm ('paddle.fluid.clip.GradientClipByNorm', ('document', 'a5c23d96a3d8c8c1183e9469a5d0d52e'))
+paddle.fluid.clip.GradientClipByNorm ('paddle.fluid.clip.GradientClipByNorm', ('document', '93d62f284d2cdb87f2723fcc63d818f9'))
 paddle.fluid.clip.GradientClipByNorm.__init__ (ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.clip.GradientClipByGlobalNorm ('paddle.fluid.clip.GradientClipByGlobalNorm', ('document', 'ef50acbe212101121d4b82f693ec1733'))
 paddle.fluid.clip.GradientClipByGlobalNorm.__init__ (ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
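The 'document' field in API.spec is a fingerprint of each API's docstring, so rewriting the GradientClipByNorm doc is what flips the hash from a5c23d96... to 93d62f28.... A minimal sketch of that mechanism, assuming the md5-of-docstring scheme used by Paddle's tools/print_signatures.py:

    import hashlib

    def doc_fingerprint(docstring):
        # md5 of the docstring text; any doc edit yields a new 32-char digest
        return hashlib.md5(docstring.encode('utf-8')).hexdigest()

    # Illustrative strings only -- the real digests come from the full docstrings.
    print(doc_fingerprint("Clips tensor values to a maximum L2-norm."))
    print(doc_fingerprint("Convert the input multidimensional Tensor X ..."))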
@@ -183,36 +183,78 @@ class GradientClipByValue(BaseGradientClipAttr):
 class GradientClipByNorm(BaseGradientClipAttr):
     """
-    Clips tensor values to a maximum L2-norm.
+    Convert the input multidimensional Tensor :math:`X` to a multidimensional Tensor whose L2 norm does not exceed the given two-norm maximum ( :math:`clip\_norm` ).
+
+    The tensor is not passed through this class, but passed through the ``main_program`` parameter of ``fluid.program_guard``.

-    This operator limits the L2 norm of the input :math:`X` within :math:`max\_norm`.
-    If the L2 norm of :math:`X` is less than or equal to :math:`max\_norm`, :math:`Out`
-    will be the same as :math:`X`. If the L2 norm of :math:`X` is greater than
-    :math:`max\_norm`, :math:`X` will be linearly scaled to make the L2 norm of
-    :math:`Out` equal to :math:`max\_norm`, as shown in the following formula:
+    This class limits the L2 norm of the input :math:`X` within :math:`clip\_norm`.

     .. math::
-        Out = \\frac{max\_norm * X}{norm(X)},
+
+        Out =
+        \\left \{
+        \\begin{aligned}
+        & X & & if (norm(X) \\leq clip\_norm) \\\\
+        & \\frac{clip\_norm*X}{norm(X)} & & if (norm(X) > clip\_norm) \\\\
+        \\end{aligned}
+        \\right.

     where :math:`norm(X)` represents the L2 norm of :math:`X`.

+    .. math::
+
+        norm(X) = ( \\sum_{i=1}^{n}|x\_i|^2)^{ \\frac{1}{2}}
+
     Args:
-        clip_norm (float): The maximum norm value
+        clip_norm(float): The maximum norm value

     Examples:
         .. code-block:: python

             import paddle.fluid as fluid
-            w_param_attrs = fluid.ParamAttr(name=None,
-                                            initializer=fluid.initializer.UniformInitializer(low=-1.0, high=1.0, seed=0),
-                                            learning_rate=1.0,
-                                            regularizer=fluid.regularizer.L1Decay(1.0),
-                                            trainable=True,
-                                            gradient_clip=fluid.clip.GradientClipByNorm(clip_norm=2.0))
-            x = fluid.layers.data(name='x', shape=[10], dtype='float32')
-            y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs)
+            import paddle.fluid.core as core
+            import paddle
+
+            place = core.CPUPlace()
+            prog = fluid.framework.Program()
+            startup_program = fluid.framework.Program()
+            with fluid.program_guard(
+                    main_program=prog, startup_program=startup_program):
+                image = fluid.data(name='x', shape=[None, 784], dtype='float32', lod_level=0)
+                label = fluid.data(name='y', shape=[None, 1], dtype='int64', lod_level=0)
+                hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
+                hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
+                predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
+                cost = fluid.layers.cross_entropy(input=predict, label=label)
+                avg_cost = fluid.layers.mean(cost)
+
+            prog_clip = prog.clone()
+            avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
+
+            p_g = fluid.backward.append_backward(loss=avg_cost)
+            p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)
+
+            with fluid.program_guard(main_program=prog_clip, startup_program=startup_program):
+                fluid.clip.set_gradient_clip(
+                    fluid.clip.GradientClipByNorm(clip_norm=2.0))
+                p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip)
+
+            grad_list = [elem[1] for elem in p_g]
+            grad_clip_list = [elem[1] for elem in p_g_clip]
+
+            train_reader = paddle.batch(
+                paddle.reader.shuffle(
+                    paddle.dataset.mnist.train(), buf_size=8192),
+                batch_size=128)
+
+            exe = fluid.Executor(place)
+            feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
+            exe.run(startup_program)
+
+            count = 0
+            for data in train_reader():
+                count += 1
+                print("count:%s" % count)
+                if count > 5:
+                    break
+                out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
+                out_clip = exe.run(prog_clip,
+                                   feed=feeder.feed(data),
+                                   fetch_list=grad_clip_list)
     """
@@ -11542,6 +11542,17 @@ def elu(x, alpha=1.0, name=None):
             # [ 1. 15.6 ]]
     """
     helper = LayerHelper('elu', **locals())
+    if not isinstance(x, Variable):
+        raise TypeError(
+            "The type of 'x' in elu must be Variable, but received %s" %
+            (type(x)))
+    if convert_dtype(x.dtype) in ['float16']:
+        warnings.warn(
+            "The data type of 'x' in elu only supports float16 on GPU now.")
+    if convert_dtype(x.dtype) not in ['float16', 'float32', 'float64']:
+        raise TypeError(
+            "The data type of 'x' in elu must be float16 (only supported on GPU), float32 or float64, but received %s."
+            % (convert_dtype(x.dtype)))
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='elu',
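With these checks in place, passing a non-Variable or an unsupported dtype fails fast in Python with a TypeError. A hedged usage sketch (assuming a Paddle 1.x fluid environment) of how the errors surface to callers; the unit test below exercises the same paths:

    import numpy as np
    import paddle.fluid as fluid

    # A raw numpy array is not a Variable, so the first check rejects it.
    try:
        fluid.layers.elu(np.array([[-1.0]]))
    except TypeError as e:
        print(e)

    # int32 is not among float16/float32/float64, so the dtype check rejects it.
    x = fluid.layers.data(name='x_int32', shape=[4], dtype='int32')
    try:
        fluid.layers.elu(x)
    except TypeError as e:
        print(e)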
@@ -19,6 +19,8 @@ import numpy as np
 import paddle.fluid.core as core
 from op_test import OpTest
 from scipy.special import expit, erf
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard


 class TestActivation(OpTest):
@@ -519,6 +521,18 @@ class TestELU(TestActivation):
         self.check_grad(['X'], 'Out', max_relative_error=0.02)


+class TestELUOpError(OpTest):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # The input type of elu_op must be Variable.
+            x1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            self.assertRaises(TypeError, fluid.layers.elu, x1)
+            # The input dtype of elu_op must be float16, float32 or float64.
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
+            self.assertRaises(TypeError, fluid.layers.elu, x2)
+
+
 class TestReciprocal(TestActivation):
     def setUp(self):
         self.op_type = "reciprocal"