From b2034c28547731d8f55706ceef6360d24627c1fb Mon Sep 17 00:00:00 2001
From: zhupengyang
Date: Tue, 11 Aug 2020 18:54:13 +0800
Subject: [PATCH] softmax: imperative->static; fix doc examples (#26134)

---
 .../fluid/tests/unittests/test_softmax_op.py | 34 +++++++++----------
 python/paddle/nn/functional/activation.py    |  4 +--
 python/paddle/nn/layer/activation.py         |  6 ++--
 3 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index 1d04a4a2716..25e95216968 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -21,6 +21,7 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 import paddle
+import paddle.nn.functional as F
 
 np.random.seed(10)
 
@@ -231,34 +232,33 @@ class TestNnFunctionalSoftmaxApi(unittest.TestCase):
         self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np)
 
     def test_api_static(self):
-        train_program = Program()
-        startup_program = Program()
-        with program_guard(train_program, startup_program):
+        with program_guard(Program()):
             x = paddle.data('X', self.x_np.shape, 'float32')
-            out = paddle.nn.functional.softmax(x)
+            out = F.softmax(x)
+            exe = paddle.static.Executor(self.place)
+            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
+            self.assertEqual(np.allclose(self.out_ref, res[0]), True)
 
-        exe = paddle.Executor(self.place)
-        res = exe.run(train_program, feed={'X': self.x_np}, fetch_list=[out])
+    def test_api_imperative(self):
+        paddle.disable_static(self.place)
 
-        assert np.allclose(self.out_ref, res[0])
+        x = paddle.to_variable(self.x_np)
+        out = F.softmax(x)
+        self.assertEqual(np.allclose(self.out_ref, out.numpy()), True)
 
-    def test_api_imperative(self):
-        with paddle.imperative.guard(self.place):
-            x = paddle.imperative.to_variable(self.x_np)
-            out = paddle.nn.functional.softmax(x)
-            assert np.allclose(self.out_ref, out.numpy())
+        out = F.softmax(x, axis=0)
+        out_ref = np.apply_along_axis(stable_softmax, 0, self.x_np)
+        self.assertEqual(np.allclose(out_ref, out.numpy()), True)
 
-            out = paddle.nn.functional.softmax(x, axis=0)
-            out_ref = np.apply_along_axis(stable_softmax, 0, self.x_np)
-            assert np.allclose(out_ref, out.numpy())
+        paddle.enable_static()
 
     def test_error(self):
         with program_guard(Program(), Program()):
             # The x should be variable and its dtype should be float32, float64.
-            self.assertRaises(TypeError, paddle.nn.functional.softmax, [1])
+            self.assertRaises(TypeError, F.softmax, [1])
 
             x = paddle.data(name='x', shape=[2, 3], dtype='int32')
-            self.assertRaises(TypeError, paddle.nn.functional.softmax, x)
+            self.assertRaises(TypeError, F.softmax, x)
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index b75dd22429b..f524d74f408 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -401,7 +401,7 @@ def softmax(x, axis=-1, name=None):
             import paddle.nn.functional as F
             import numpy as np
 
-            paddle.enable_imperative()
+            paddle.disable_static()
 
             x = np.array([[[2.0, 3.0, 4.0, 5.0],
                            [3.0, 4.0, 5.0, 6.0],
@@ -409,7 +409,7 @@ def softmax(x, axis=-1, name=None):
                           [[1.0, 2.0, 3.0, 4.0],
                            [5.0, 6.0, 7.0, 8.0],
                            [6.0, 7.0, 8.0, 9.0]]], 'float32')
-            x = paddle.imperative.to_variable(x)
+            x = paddle.to_variable(x)
             out = F.softmax(x)
             # [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
             #   [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py
index 1b82daedea8..d13f36a3185 100644
--- a/python/paddle/nn/layer/activation.py
+++ b/python/paddle/nn/layer/activation.py
@@ -232,11 +232,11 @@ class LeakyReLU(layers.Layer):
             import paddle
             import numpy as np
 
-            paddle.enable_imperative()
+            paddle.disable_static()
 
             lrelu = paddle.nn.LeakyReLU()
-            x = paddle.imperative.to_variable(np.array([-2, 0, 1], 'float32'))
-            out = lrelu(x)  # [-0.02, 0, 1]
+            x = paddle.to_variable(np.array([-2, 0, 1], 'float32'))
+            out = lrelu(x)  # [-0.02, 0., 1.]
     """
 
     def __init__(self, alpha=1e-2, name=None):
-- 
GitLab
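
For readers following the migration, the hunks above replace the retired paddle.imperative.* entry points with paddle.disable_static()/paddle.enable_static(), paddle.to_variable(), and paddle.static.Executor. Below is a minimal standalone sketch of the two usage patterns the patched tests exercise; it assumes the same 2.0-beta-era API as the patch (paddle.data and paddle.to_variable still available), a hypothetical input shape of [2, 3], paddle.CPUPlace() as the place, and static graph mode as the starting mode, so it is an illustration of the pattern rather than a copy of the test code.

    import numpy as np
    import paddle
    import paddle.nn.functional as F
    from paddle.fluid import Program, program_guard

    x_np = np.random.uniform(-1., 1., [2, 3]).astype('float32')

    # Static graph mode: build the program under program_guard, then run it
    # with paddle.static.Executor (mirrors the patched test_api_static).
    with program_guard(Program()):
        x = paddle.data('X', x_np.shape, 'float32')
        out = F.softmax(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        res = exe.run(feed={'X': x_np}, fetch_list=[out])

    # Imperative (dygraph) mode: toggle it on, compute eagerly, toggle it off
    # (mirrors the patched test_api_imperative).
    paddle.disable_static()
    out_dy = F.softmax(paddle.to_variable(x_np)).numpy()
    paddle.enable_static()

    print(np.allclose(res[0], out_dy))  # the two modes should agree

The doc examples updated in the two activation.py files switch to the same disable_static/to_variable pattern.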