# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle

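# The numpy reference nets compute in float64; matching the default dtype
# before any layers are built lets the comparisons below use tight tolerances.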
paddle.set_default_dtype("float64")
import os
import tempfile
import unittest

import numpy as np
from convert import convert_params_for_net
from rnn_numpy import GRU, LSTM, SimpleRNN

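# Both spellings are accepted by paddle.nn RNN layers for a bidirectional run.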
bidirectional_list = ["bidirectional", "bidirect"]


class TestSimpleRNN(unittest.TestCase):
    def __init__(self, time_major=True, direction="forward", place="cpu"):
        super().__init__("runTest")
        self.time_major = time_major
        self.direction = direction
        self.num_directions = 2 if direction in bidirectional_list else 1
        self.place = place

    def setUp(self):
        # Since `set_device` is global, call it in `setUp` rather than in
        # `__init__` to avoid picking up a wrong device set by another test case.
        place = paddle.set_device(self.place)
        paddle.disable_static(place)
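        # Build a numpy reference net and a paddle net with identical
        # hyperparameters: input_size=16, hidden_size=32, num_layers=2.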
        rnn1 = SimpleRNN(
            16, 32, 2, time_major=self.time_major, direction=self.direction
        )
        rnn2 = paddle.nn.SimpleRNN(
            16, 32, 2, time_major=self.time_major, direction=self.direction
        )
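        # Copy the paddle layer's initialized weights into the numpy net so
        # both nets start from the same parameters.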
        convert_params_for_net(rnn1, rnn2)

        self.rnn1 = rnn1
        self.rnn2 = rnn2

    def test_with_initial_state(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])
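        # initial state shape: (num_layers * num_directions, batch_size, hidden_size)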
        prev_h = np.random.randn(2 * self.num_directions, 4, 32)

        y1, h1 = rnn1(x, prev_h)
        y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)

    def test_with_zero_state(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])

        y1, h1 = rnn1(x)
        y2, h2 = rnn2(paddle.to_tensor(x))
        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)

    def test_with_input_lengths(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])
        sequence_length = np.array([12, 10, 9, 8], dtype=np.int64)

        y1, h1 = rnn1(x, sequence_length=sequence_length)

        seq_len = paddle.to_tensor(sequence_length)
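        # The reference output y1 is already zeroed past each sequence's
        # length, so apply the same mask to the paddle output before comparing.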
        mask = paddle.static.nn.sequence_lod.sequence_mask(
            seq_len, dtype=paddle.get_default_dtype()
        )
        if self.time_major:
            mask = paddle.transpose(mask, [1, 0])
        y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
        mask = paddle.unsqueeze(mask, -1)
        y2 = paddle.multiply(y2, mask)

        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)

    def test_predict(self):
        predict_test_util(self.place, "SimpleRNN")

    def runTest(self):
        self.test_with_initial_state()
        self.test_with_zero_state()
        self.test_with_input_lengths()
        self.test_predict()


class TestGRU(unittest.TestCase):
    def __init__(self, time_major=True, direction="forward", place="cpu"):
        super().__init__("runTest")
        self.time_major = time_major
        self.direction = direction
        self.num_directions = 2 if direction in bidirectional_list else 1
        self.place = place

    def setUp(self):
        # Since `set_device` is global, call it in `setUp` rather than in
        # `__init__` to avoid picking up a wrong device set by another test case.
        place = paddle.set_device(self.place)
        paddle.disable_static(place)
        rnn1 = GRU(
            16, 32, 2, time_major=self.time_major, direction=self.direction
        )
        rnn2 = paddle.nn.GRU(
            16, 32, 2, time_major=self.time_major, direction=self.direction
        )
        convert_params_for_net(rnn1, rnn2)

        self.rnn1 = rnn1
        self.rnn2 = rnn2

    def test_with_initial_state(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])
        prev_h = np.random.randn(2 * self.num_directions, 4, 32)

        y1, h1 = rnn1(x, prev_h)
        y2, h2 = rnn2(paddle.to_tensor(x), paddle.to_tensor(prev_h))
        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)

    def test_with_zero_state(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])

        y1, h1 = rnn1(x)
        y2, h2 = rnn2(paddle.to_tensor(x))
        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)

    def test_with_input_lengths(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])
        sequence_length = np.array([12, 10, 9, 8], dtype=np.int64)

        y1, h1 = rnn1(x, sequence_length=sequence_length)

        seq_len = paddle.to_tensor(sequence_length)
        mask = paddle.static.nn.sequence_lod.sequence_mask(
            seq_len, dtype=paddle.get_default_dtype()
        )
        if self.time_major:
            mask = paddle.transpose(mask, [1, 0])
        y2, h2 = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
        mask = paddle.unsqueeze(mask, -1)
        y2 = paddle.multiply(y2, mask)

        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)

    def test_predict(self):
        predict_test_util(self.place, "GRU")

    def runTest(self):
        self.test_with_initial_state()
        self.test_with_zero_state()
        self.test_with_input_lengths()
        self.test_predict()


class TestLSTM(unittest.TestCase):
    def __init__(self, time_major=True, direction="forward", place="cpu"):
        super().__init__("runTest")
        self.time_major = time_major
        self.direction = direction
        self.num_directions = 2 if direction in bidirectional_list else 1
        self.place = place

    def setUp(self):
        # Since `set_device` is global, call it in `setUp` rather than in
        # `__init__` to avoid picking up a wrong device set by another test case.
        place = paddle.set_device(self.place)
        paddle.disable_static(place)
        rnn1 = LSTM(
            16, 32, 2, time_major=self.time_major, direction=self.direction
        )
        rnn2 = paddle.nn.LSTM(
            16, 32, 2, time_major=self.time_major, direction=self.direction
        )
        convert_params_for_net(rnn1, rnn2)

        self.rnn1 = rnn1
        self.rnn2 = rnn2

    def test_with_initial_state(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])
        prev_h = np.random.randn(2 * self.num_directions, 4, 32)
        prev_c = np.random.randn(2 * self.num_directions, 4, 32)

        y1, (h1, c1) = rnn1(x, (prev_h, prev_c))
        y2, (h2, c2) = rnn2(
            paddle.to_tensor(x),
            (paddle.to_tensor(prev_h), paddle.to_tensor(prev_c)),
        )
        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)

    def test_with_zero_state(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])

        y1, (h1, c1) = rnn1(x)
        y2, (h2, c2) = rnn2(paddle.to_tensor(x))
        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)

    def test_with_input_lengths(self):
        rnn1 = self.rnn1
        rnn2 = self.rnn2

        x = np.random.randn(12, 4, 16)
        if not self.time_major:
            x = np.transpose(x, [1, 0, 2])
        sequence_length = np.array([12, 10, 9, 8], dtype=np.int64)

        y1, (h1, c1) = rnn1(x, sequence_length=sequence_length)

        seq_len = paddle.to_tensor(sequence_length)
        mask = paddle.static.nn.sequence_lod.sequence_mask(
            seq_len, dtype=paddle.get_default_dtype()
        )
        if self.time_major:
            mask = paddle.transpose(mask, [1, 0])
        y2, (h2, c2) = rnn2(paddle.to_tensor(x), sequence_length=seq_len)
        mask = paddle.unsqueeze(mask, -1)
        y2 = paddle.multiply(y2, mask)

        np.testing.assert_allclose(y1, y2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(h1, h2.numpy(), atol=1e-8, rtol=1e-5)
        np.testing.assert_allclose(c1, c2.numpy(), atol=1e-8, rtol=1e-5)

    def test_predict(self):
        predict_test_util(self.place, "LSTM")
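        # Run again with stop_gradient=False so the input also receives a gradient.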
        predict_test_util(self.place, "LSTM", False)

    def runTest(self):
        self.test_with_initial_state()
        self.test_with_zero_state()
        self.test_with_input_lengths()
        self.test_predict()


def predict_test_util(place, mode, stop_gradient=True):
    place = paddle.set_device(place)
    paddle.seed(123)
    np.random.seed(123)

    class Net(paddle.nn.Layer):
        def __init__(self):
            super().__init__()
            self.rnn = getattr(paddle.nn, mode)(
                16, 32, 2, direction="bidirectional", dropout=0.1
            )

        def forward(self, input):
            return self.rnn(input)

    x = paddle.randn((4, 10, 16))
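    # stop_gradient=False makes x a trainable input, exercising the input-grad path.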
    x.stop_gradient = stop_gradient
    seq_len = paddle.to_tensor(np.array([10, 6, 8, 5]))
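    # Zero out padded time steps so they do not contribute to the mean loss.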
    mask = paddle.static.nn.sequence_lod.sequence_mask(
        seq_len, maxlen=10, dtype=x.dtype
    )
    mask = paddle.unsqueeze(mask, [2])
    rnn = Net()
    y, _ = rnn(x)
    y = y * mask
    loss = paddle.mean(y)
    loss.backward()
    optimizer = paddle.optimizer.Adam(
        learning_rate=0.1, parameters=rnn.parameters()
    )
    optimizer.step()
    rnn.eval()
    y, _ = rnn(x)
    # `jit.to_static` captures a train program; converting in eval mode can
    # currently cause errors, e.g. the dropout grad op getting `is_test == True`.
    rnn.train()

    rnn = paddle.jit.to_static(
        rnn, [paddle.static.InputSpec(shape=[None, None, 16], dtype=x.dtype)]
    )
    temp_dir = tempfile.TemporaryDirectory()
    save_dirname = os.path.join(temp_dir.name, "./inference/%s_infer" % mode)

    paddle.jit.save(rnn, save_dirname)

    paddle.enable_static()
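    # Reload the exported model and check that static-graph inference
    # reproduces the dygraph eval output.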

    new_scope = paddle.static.Scope()
    with paddle.static.scope_guard(new_scope):
        exe = paddle.static.Executor(place)
        [
            inference_program,
            feed_target_names,
            fetch_targets,
        ] = paddle.static.load_inference_model(save_dirname, exe)
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: x.numpy()},
            fetch_list=fetch_targets,
        )
        np.testing.assert_equal(
            y.numpy(), results[0]
        )  # eval results equal predict results
    paddle.disable_static()

    temp_dir.cleanup()


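# Build a parameterized suite: each test class runs once for every
# (direction, time_major, device) combination.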
def load_tests(loader, tests, pattern):
    suite = unittest.TestSuite()
    devices = (
        ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"]
    )
    for direction in ["forward", "bidirectional", "bidirect"]:
        for time_major in [True, False]:
            for device in devices:
                for test_class in [TestSimpleRNN, TestLSTM, TestGRU]:
                    suite.addTest(test_class(time_major, direction, device))
    return suite


if __name__ == '__main__':
    unittest.main()