Unverified commit ac9afa02, authored by zhupengyang, committed by GitHub

paddle.nn.functional.logsigmoid -> log_sigmoid (#27277)

Parent bf461fa5
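The change is a straight rename of the public API: paddle.nn.functional.logsigmoid becomes paddle.nn.functional.log_sigmoid (the fluid-level logsigmoid stays available and is registered as a deprecated alias), while the underlying logsigmoid operator is unchanged. A minimal usage sketch of the renamed API (not part of the patch; the values mirror the docstring example in the diff below):

    import paddle
    import paddle.nn.functional as F

    paddle.disable_static()
    x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])

    # Functional form under the new name.
    out = F.log_sigmoid(x)   # [-0.313262 -0.126928 -0.0485874 -0.0181499]

    # Layer form; its forward() now delegates to F.log_sigmoid.
    m = paddle.nn.LogSigmoid()
    out2 = m(x)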
...@@ -285,7 +285,7 @@ def monkey_patch_math_varbase():
         ('__ge__', _binary_creator_('__ge__', 'greater_equal', False, None)),
         ('__array_ufunc__', None),
         ('sigmoid', _method_creator_('sigmoid', 'name=None')),
-        ('logsigmoid', _method_creator_('logsigmoid', 'name=None')),
+        ('log_sigmoid', _method_creator_('logsigmoid', 'name=None')),
         ('exp', _method_creator_('exp', 'name=None')),
         ('tanh', _method_creator_('tanh', 'name=None')),
         ('atan', _method_creator_('atan', 'name=None')),
......
...@@ -20,7 +20,10 @@ from ..framework import convert_np_dtype_to_dtype_, Variable
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from paddle.utils import deprecated

-__deprecated_func_name__ = {'tanh_shrink': 'tanhshrink', }
+__deprecated_func_name__ = {
+    'tanh_shrink': 'tanhshrink',
+    'logsigmoid': 'log_sigmoid'
+}

 __activations_noattr__ = [
     'sigmoid',
...@@ -106,7 +109,7 @@ Examples:
         paddle.disable_static()
         x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
-        out = F.logsigmoid(x)
+        out = F.log_sigmoid(x)
         print(out.numpy())
         # [-0.91301525 -0.79813887 -0.64439666 -0.55435524]
......
...@@ -128,7 +128,7 @@ class TestLogSigmoid(TestActivation):
 class TestLogSigmoidAPI(unittest.TestCase):
-    # test paddle.nn.LogSigmoid, paddle.nn.functional.logsigmoid
+    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
     def setUp(self):
         self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
         self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
...@@ -137,36 +137,45 @@ class TestLogSigmoidAPI(unittest.TestCase):
     def test_static_api(self):
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.data('X', [11, 17])
-            out1 = F.logsigmoid(x)
+            out1 = F.log_sigmoid(x)
             m = paddle.nn.LogSigmoid()
             out2 = m(x)
             exe = paddle.static.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
         out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
         for r in res:
-            self.assertEqual(np.allclose(out_ref, r), True)
+            self.assertTrue(np.allclose(out_ref, r))

     def test_dygraph_api(self):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
-        out1 = F.logsigmoid(x)
+        out1 = F.log_sigmoid(x)
         m = paddle.nn.LogSigmoid()
         out2 = m(x)
         out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
         for r in [out1, out2]:
-            self.assertEqual(np.allclose(out_ref, r.numpy()), True)
+            self.assertTrue(np.allclose(out_ref, r.numpy()))
         paddle.enable_static()

+    def test_fluid_api(self):
+        with paddle.static.program_guard(paddle.static.Program()):
+            x = paddle.data('X', [11, 17])
+            out = paddle.fluid.layers.logsigmoid(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
+        self.assertTrue(np.allclose(out_ref, res[0]))
+
     def test_errors(self):
         with paddle.static.program_guard(paddle.static.Program()):
             # The input type must be Variable.
-            self.assertRaises(TypeError, F.logsigmoid, 1)
+            self.assertRaises(TypeError, F.log_sigmoid, 1)
             # The input dtype must be float16, float32, float64.
             x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32')
-            self.assertRaises(TypeError, F.logsigmoid, x_int32)
+            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
             # support the input dtype is float16
             x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16')
-            F.logsigmoid(x_fp16)
+            F.log_sigmoid(x_fp16)


 class TestTanh(TestActivation, TestParameter):
......
...@@ -2677,13 +2677,6 @@ class TestBook(LayerTest):
             out = layers.sigmoid(input, name='sigmoid')
             return (out)

-    def make_logsigmoid(self):
-        with program_guard(fluid.default_main_program(),
-                           fluid.default_startup_program()):
-            input = self._get_data(name="input", shape=[16], dtype="float32")
-            out = layers.logsigmoid(input, name='logsigmoid')
-            return (out)
-
     def make_exp(self):
         with program_guard(fluid.default_main_program(),
                            fluid.default_startup_program()):
......
...@@ -307,7 +307,7 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             np.array_equal(x.sigmoid().numpy(), fluid.layers.sigmoid(x).numpy(
             )))
         self.assertTrue(
-            np.array_equal(x.logsigmoid().numpy(),
+            np.array_equal(x.log_sigmoid().numpy(),
                            fluid.layers.logsigmoid(x).numpy()))
         self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
         self.assertTrue(
......
...@@ -39,7 +39,7 @@ from .activation import hard_sigmoid  #DEFINE_ALIAS
 from .activation import hard_swish  #DEFINE_ALIAS
 from .activation import hsigmoid  #DEFINE_ALIAS
 from .activation import leaky_relu  #DEFINE_ALIAS
-from .activation import logsigmoid  #DEFINE_ALIAS
+from .activation import log_sigmoid  #DEFINE_ALIAS
 from .activation import maxout  #DEFINE_ALIAS
 from .activation import prelu  #DEFINE_ALIAS
 from .activation import relu  #DEFINE_ALIAS
......
...@@ -35,7 +35,7 @@ __all__ = [
     'hard_swish',
     'hsigmoid',
     'leaky_relu',
-    'logsigmoid',
+    'log_sigmoid',
     'maxout',
     'prelu',
     'relu',
...@@ -552,13 +552,13 @@ def relu(x, name=None):
     return out


-def logsigmoid(x, name=None):
+def log_sigmoid(x, name=None):
     """
-    logsigmoid activation.
+    log_sigmoid activation.

     .. math::

-        logsigmoid(x) = log \\frac{1}{1 + e^{-x}}
+        log\\_sigmoid(x) = log \\frac{1}{1 + e^{-x}}

     Parameters:
         x (Tensor): The input Tensor with data type float32, float64.
...@@ -573,20 +573,19 @@ def logsigmoid(x, name=None):
            import paddle
            import paddle.nn.functional as F
-           import numpy as np

            paddle.disable_static()

-           x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0]))
-           out = F.logsigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
+           x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
+           out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
     """

     if in_dygraph_mode():
         return core.ops.logsigmoid(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
-                             'logsigmoid')
-    helper = LayerHelper("logsigmoid", **locals())
+                             'log_sigmoid')
+    helper = LayerHelper("log_sigmoid", **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out})
     return out
......
...@@ -860,11 +860,10 @@ class LogSigmoid(layers.Layer):
         .. code-block:: python

            import paddle
-           import numpy as np

            paddle.disable_static()

-           x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0]))
+           x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            m = paddle.nn.LogSigmoid()
            out = m(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
     """
...@@ -874,7 +873,7 @@ class LogSigmoid(layers.Layer):
         self._name = name

     def forward(self, x):
-        return F.logsigmoid(x, self._name)
+        return F.log_sigmoid(x, self._name)


 class Softmax(layers.Layer):
......