# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np

import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
from paddle.fluid import framework
from paddle.fluid.contrib.layers import basic_gru
from paddle.fluid.executor import Executor

np.random.seed(123)

SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0
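# The constants above clamp activation inputs so np.exp in the NumPy
# reference sigmoid/tanh below never overflows.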


def sigmoid(x):
    y = np.copy(x)
    y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
    y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
    return 1.0 / (1.0 + np.exp(-y))


def tanh(x):
    y = -2.0 * x
    y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
    return (2.0 / (1.0 + np.exp(y))) - 1.0


def gru_np(
    input,
    init_h,
    hidden_size,
    gate_weight,
    gate_bias,
    candidate_weight,
    candidate_bias,
    num_layers=1,
    batch_first=False,
    is_bidirect=False,
    sequence_length=None,
):
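    """NumPy reference GRU used to cross-check the basic_gru fluid API.

    `input` is [seq_len, batch, input_size] (or batch-major when batch_first
    is set). The weight/bias lists hold one entry per layer, forward-direction
    layers first, then reverse-direction layers when is_bidirect is True.
    """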
    def step(step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b):
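        # One GRU cell update:
        #   r, u = sigmoid([x, h_prev] @ W_gate + b_gate)   (reset/update gate)
        #   c = tanh([x, r * h_prev] @ W_cand + b_cand)     (candidate state)
        #   h = u * h_prev + (1 - u) * c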
        concat_1 = np.concatenate([step_in, pre_hidden], 1)

        gate_input = np.matmul(concat_1, gate_w)
        gate_input += gate_b
        gate_input = sigmoid(gate_input)
        r, u = np.split(gate_input, indices_or_sections=2, axis=1)

        r_hidden = r * pre_hidden

        candidate = np.matmul(
            np.concatenate([step_in, r_hidden], 1), candidate_w
        )

        candidate += candidate_b
        c = tanh(candidate)

        new_hidden = u * pre_hidden + (1 - u) * c

        return new_hidden

    if batch_first:
        input = np.transpose(input, [1, 0, 2])

    batch_size = input.shape[1]
    mask = None
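    # Optional [seq_len, batch] mask: 1.0 inside each sequence, 0.0 on the
    # padding, so padded steps carry the previous hidden state forward.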
    if sequence_length is not None:
        max_seq_len = input.shape[0]

        mask = np.zeros([batch_size, max_seq_len])

        for i, valid_len in enumerate(sequence_length):
            mask[i, :valid_len] = 1.0

        mask = np.transpose(mask, [1, 0])

    direc_num = 1
    if is_bidirect:
        direc_num = 2
    if init_h is not None:
        init_h = np.reshape(init_h, [num_layers, direc_num, -1, hidden_size])
    else:
        init_h = np.zeros([num_layers, direc_num, batch_size, hidden_size])

    def get_single_direction_output(rnn_input, mask=None, direc_index=0):
        seq_len = rnn_input.shape[0]

        output = []
        # init pre hidden
        pre_hidden_array = []
        for i in range(num_layers):
            pre_hidden_array.append(init_h[i, direc_index])

        for t in range(seq_len):
            step_input = rnn_input[t]

            if mask is not None:
                step_mask = mask[t]
                step_mask = np.reshape(step_mask, [-1, 1])

            for i in range(num_layers):
                new_hidden = step(
                    step_input,
                    pre_hidden_array[i],
                    gate_weight[direc_index * num_layers + i],
                    gate_bias[direc_index * num_layers + i],
                    candidate_weight[direc_index * num_layers + i],
                    candidate_bias[direc_index * num_layers + i],
                )

                if mask is not None:
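                    # Padded time step: keep the previous hidden state rather
                    # than the freshly computed one.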
                    new_hidden = (
                        new_hidden * step_mask
                        + (1 - step_mask) * pre_hidden_array[i]
                    )

                pre_hidden_array[i] = new_hidden

                step_input = new_hidden
            output.append(step_input)
        rnn_out = np.concatenate(output, 0)
        rnn_out = np.reshape(rnn_out, [seq_len, -1, hidden_size])

        last_hidden_out = np.concatenate(pre_hidden_array, 0)
        last_hidden_out = np.reshape(
            last_hidden_out, [num_layers, -1, hidden_size]
        )

        return rnn_out, last_hidden_out

    fw_rnn_out, fw_last_hidden = get_single_direction_output(
        input, mask, direc_index=0
    )

    if is_bidirect:
        bw_input = input[::-1]
        bw_mask = None
        if mask is not None:
            bw_mask = mask[::-1]

        bw_rnn_out, bw_last_hidden = get_single_direction_output(
            bw_input, bw_mask, direc_index=1
        )

        bw_rnn_out = bw_rnn_out[::-1]

        rnn_out = np.concatenate([fw_rnn_out, bw_rnn_out], 2)
        last_hidden = np.concatenate([fw_last_hidden, bw_last_hidden], 1)
        last_hidden = np.reshape(
            last_hidden, [num_layers * direc_num, -1, hidden_size]
        )

    else:
        rnn_out = fw_rnn_out
        last_hidden = fw_last_hidden

    if batch_first:
        rnn_out = np.transpose(rnn_out, [1, 0, 2])

    return rnn_out, last_hidden


class TestBasicGRUApi(unittest.TestCase):
    def setUp(self):
        self.hidden_size = 10
        self.batch_size = 5
        self.seq_len = 6
        self.num_layers = 2
        self.is_bidirect = True
        self.batch_first = False

    def test_run(self):
        x = layers.data(
            name='x',
            shape=[-1, self.batch_size, self.hidden_size],
            dtype='float32',
        )
        sequence_length = layers.data(
            name="sequence_length", shape=[-1], dtype='int64'
        )

        rnn_out, last_hidden = basic_gru(
            x,
            None,
            self.hidden_size,
            num_layers=self.num_layers,
            batch_first=self.batch_first,
            bidirectional=self.is_bidirect,
            sequence_length=sequence_length,
        )
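        # With bidirectional=True, rnn_out is [seq_len, batch, 2 * hidden_size]
        # and last_hidden is [num_layers * 2, batch, hidden_size].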

        last_hidden.persistable = True
        rnn_out.persistable = True

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        exe = Executor(place)
        exe.run(framework.default_startup_program())

        param_list = fluid.default_main_program().block(0).all_parameters()

        # Overwrite every GRU parameter with fresh random values and keep
        # NumPy copies, so the fluid op and the NumPy reference share weights.
        gate_weight = []
        gate_bias = []
        candidate_weight = []
        candidate_bias = []

        for i in range(self.num_layers):
            gate_w_name = "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.w_0"
            gate_b_name = "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.b_0"
            candidate_w_name = (
                "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.w_1"
            )
            candidate_b_name = (
                "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.b_1"
            )

            gate_w = np.array(
                fluid.global_scope().find_var(gate_w_name).get_tensor()
            )
            gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype(
                'float32'
            )
            fluid.global_scope().find_var(gate_w_name).get_tensor().set(
                gate_w, place
            )

            gate_b = np.array(
                fluid.global_scope().find_var(gate_b_name).get_tensor()
            )
            gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype(
                'float32'
            )
            fluid.global_scope().find_var(gate_b_name).get_tensor().set(
                gate_b, place
            )

            candidate_w = np.array(
                fluid.global_scope().find_var(candidate_w_name).get_tensor()
            )
            candidate_w = np.random.uniform(
                -0.1, 0.1, size=candidate_w.shape
            ).astype('float32')
            fluid.global_scope().find_var(candidate_w_name).get_tensor().set(
                candidate_w, place
            )

            candidate_b = np.array(
                fluid.global_scope().find_var(candidate_b_name).get_tensor()
            )
            candidate_b = np.random.uniform(
                -0.1, 0.1, size=candidate_b.shape
            ).astype('float32')
            fluid.global_scope().find_var(candidate_b_name).get_tensor().set(
                candidate_b, place
            )

            gate_weight.append(gate_w)
            gate_bias.append(gate_b)
            candidate_weight.append(candidate_w)
            candidate_bias.append(candidate_b)

        if self.is_bidirect:
            for i in range(self.num_layers):
                gate_w_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.w_0"
                )
                gate_b_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.b_0"
                )
                candidate_w_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.w_1"
                )
                candidate_b_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.b_1"
                )

                gate_w = np.array(
                    fluid.global_scope().find_var(gate_w_name).get_tensor()
                )
                gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype(
                    'float32'
                )
                fluid.global_scope().find_var(gate_w_name).get_tensor().set(
                    gate_w, place
                )

                gate_b = np.array(
                    fluid.global_scope().find_var(gate_b_name).get_tensor()
                )
                gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype(
                    'float32'
                )
                fluid.global_scope().find_var(gate_b_name).get_tensor().set(
                    gate_b, place
                )

                candidate_w = np.array(
                    fluid.global_scope().find_var(candidate_w_name).get_tensor()
                )
                candidate_w = np.random.uniform(
                    -0.1, 0.1, size=candidate_w.shape
                ).astype('float32')
                fluid.global_scope().find_var(
                    candidate_w_name
                ).get_tensor().set(candidate_w, place)

                candidate_b = np.array(
                    fluid.global_scope().find_var(candidate_b_name).get_tensor()
                )
                candidate_b = np.random.uniform(
                    -0.1, 0.1, size=candidate_b.shape
                ).astype('float32')
                fluid.global_scope().find_var(
                    candidate_b_name
                ).get_tensor().set(candidate_b, place)

                gate_weight.append(gate_w)
                gate_bias.append(gate_b)
                candidate_weight.append(candidate_w)
                candidate_bias.append(candidate_b)

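        # Random input batch and per-example valid lengths in
        # [seq_len // 2, seq_len).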
        step_input_np = np.random.uniform(
            -0.1, 0.1, (self.seq_len, self.batch_size, self.hidden_size)
        ).astype('float32')
        sequence_length_np = np.random.randint(
            self.seq_len // 2, self.seq_len, size=(self.batch_size,)
        ).astype('int64')
        out = exe.run(
            feed={'x': step_input_np, 'sequence_length': sequence_length_np},
            fetch_list=[rnn_out, last_hidden],
        )

        api_rnn_out = out[0]
        api_last_hidden = out[1]

        np_out = gru_np(
            step_input_np,
            None,
            self.hidden_size,
            gate_weight,
            gate_bias,
            candidate_weight,
            candidate_bias,
            num_layers=self.num_layers,
            batch_first=self.batch_first,
            is_bidirect=self.is_bidirect,
            sequence_length=sequence_length_np,
        )

        np.testing.assert_allclose(api_rnn_out, np_out[0], rtol=0.0001, atol=0)

        np.testing.assert_allclose(
            api_last_hidden, np_out[1], rtol=0.0001, atol=0
        )


if __name__ == '__main__':
    unittest.main()