# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.contrib.layers import basic_gru
from paddle.fluid.executor import Executor
from paddle.fluid import framework

import numpy as np

np.random.seed(123)

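# Clip bounds that keep np.exp() numerically stable in the reference sigmoid/tanh below.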
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0


def sigmoid(x):
    y = np.copy(x)
    y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
    y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
    return 1.0 / (1.0 + np.exp(-y))


def tanh(x):
    y = -2.0 * x
    y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
    return (2.0 / (1.0 + np.exp(y))) - 1.0


def gru_np(
    input,
    init_h,
    hidden_size,
    gate_weight,
    gate_bias,
    candidate_weight,
    candidate_bias,
    num_layers=1,
    batch_first=False,
    is_bidirect=False,
    sequence_length=None,
):
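    # NumPy reference implementation of basic_gru. For each timestep and layer:
    #   r, u = split(sigmoid([x, h_prev] @ gate_w + gate_b), 2)   # reset / update gates
    #   c    = tanh([x, r * h_prev] @ candidate_w + candidate_b)  # candidate state
    #   h    = u * h_prev + (1 - u) * c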
    def step(step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b):
        concat_1 = np.concatenate([step_in, pre_hidden], 1)

        gate_input = np.matmul(concat_1, gate_w)
        gate_input += gate_b
        gate_input = sigmoid(gate_input)
        r, u = np.split(gate_input, indices_or_sections=2, axis=1)

        r_hidden = r * pre_hidden

        candidate = np.matmul(
            np.concatenate([step_in, r_hidden], 1), candidate_w
        )

        candidate += candidate_b
        c = tanh(candidate)

        new_hidden = u * pre_hidden + (1 - u) * c

        return new_hidden

    if batch_first:
        input = np.transpose(input, [1, 0, 2])

    batch_size = input.shape[1]
    mask = None
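    # Build a 0/1 mask from sequence_length (1.0 for valid steps), then transpose it
    # to time-major layout [max_seq_len, batch_size].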
    if sequence_length is not None:
        max_seq_len = input.shape[0]

        mask = np.zeros([batch_size, max_seq_len])

        for i, valid_len in enumerate(sequence_length):
            mask[i, :valid_len] = 1.0

        mask = np.transpose(mask, [1, 0])

    direc_num = 1
    if is_bidirect:
        direc_num = 2
    if init_h is not None:
        init_h = np.reshape(
            init_h, [num_layers, direc_num, -1, hidden_size]
        )
    else:
        init_h = np.zeros([num_layers, direc_num, batch_size, hidden_size])

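    # Run num_layers stacked GRU layers over time for one direction; where the mask
    # marks padding, the previous hidden state is carried through unchanged.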
    def get_single_direction_output(rnn_input, mask=None, direc_index=0):
        seq_len = rnn_input.shape[0]

        output = []
        # init pre hidden
        pre_hidden_array = []
        for i in range(num_layers):
            pre_hidden_array.append(init_h[i, direc_index])

        for i in range(seq_len):
            step_input = rnn_input[i]

            if mask is not None:
                step_mask = mask[i]
                step_mask = np.reshape(step_mask, [-1, 1])

            for j in range(num_layers):
                new_hidden = step(
                    step_input,
                    pre_hidden_array[j],
                    gate_weight[direc_index * num_layers + j],
                    gate_bias[direc_index * num_layers + j],
                    candidate_weight[direc_index * num_layers + j],
                    candidate_bias[direc_index * num_layers + j],
                )

                if mask is not None:
                    new_hidden = (
                        new_hidden * step_mask
                        + (1 - step_mask) * pre_hidden_array[j]
                    )

                pre_hidden_array[j] = new_hidden

                step_input = new_hidden
            output.append(step_input)
        rnn_out = np.concatenate(output, 0)
        rnn_out = np.reshape(rnn_out, [seq_len, -1, hidden_size])

        last_hidden_out = np.concatenate(pre_hidden_array, 0)
        last_hidden_out = np.reshape(
            last_hidden_out, [num_layers, -1, hidden_size]
        )

        return rnn_out, last_hidden_out

    fw_rnn_out, fw_last_hidden = get_single_direction_output(
        input, mask, direc_index=0
    )

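    # Bidirectional case: run the same recurrence on the time-reversed input/mask,
    # flip the backward output back, and concatenate along the feature axis.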
    if is_bidirect:
        bw_input = input[::-1]
        bw_mask = None
        if mask is not None:
            bw_mask = mask[::-1]

        bw_rnn_out, bw_last_hidden = get_single_direction_output(
            bw_input, bw_mask, direc_index=1
        )

        bw_rnn_out = bw_rnn_out[::-1]

        rnn_out = np.concatenate([fw_rnn_out, bw_rnn_out], 2)
        last_hidden = np.concatenate([fw_last_hidden, bw_last_hidden], 1)
        last_hidden = np.reshape(
            last_hidden, [num_layers * direc_num, -1, hidden_size]
        )

        if batch_first:
            rnn_out = np.transpose(rnn_out, [1, 0, 2])

        return rnn_out, last_hidden
    else:
        rnn_out = fw_rnn_out
        last_hidden = fw_last_hidden

        if batch_first:
            rnn_out = np.transpose(rnn_out, [1, 0, 2])

        return rnn_out, last_hidden


class TestBasicGRUApi(unittest.TestCase):
    def setUp(self):
        self.hidden_size = 10
        self.batch_size = 5
        self.seq_len = 6
        self.num_layers = 2
        self.is_bidirect = True
        self.batch_first = False

    def test_run(self):
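        # Build a program with basic_gru, overwrite its parameters with known random
        # values, run it, and compare the outputs against the NumPy reference gru_np.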
        x = layers.data(
            name='x',
            shape=[-1, self.batch_size, self.hidden_size],
            dtype='float32',
        )
        sequence_length = layers.data(
            name="sequence_length", shape=[-1], dtype='float32'
        )

        rnn_out, last_hidden = basic_gru(
            x,
            None,
            self.hidden_size,
            num_layers=self.num_layers,
            batch_first=self.batch_first,
            bidirectional=self.is_bidirect,
            sequence_length=sequence_length,
        )

        last_hidden.persistable = True
        rnn_out.persistable = True

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()

        exe = Executor(place)
        exe.run(framework.default_startup_program())

        param_list = fluid.default_main_program().block(0).all_parameters()

        # process weight and bias
        gate_weight = []
        gate_bias = []
        candidate_weight = []
        candidate_bias = []

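        # Parameter naming used by basic_gru: w_0/b_0 are the concatenated reset/update
        # gate weight and bias, w_1/b_1 are the candidate weight and bias; reverse
        # direction layers carry the "basic_gru_reverse_layers_" prefix.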
        for i in range(self.num_layers):
            gate_w_name = "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.w_0"
            gate_b_name = "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.b_0"
            candidate_w_name = (
                "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.w_1"
            )
            candidate_b_name = (
                "basic_gru_layers_" + str(i) + "/BasicGRUUnit_0.b_1"
            )

            gate_w = np.array(
                fluid.global_scope().find_var(gate_w_name).get_tensor()
            )
            gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype(
                'float32'
            )
            fluid.global_scope().find_var(gate_w_name).get_tensor().set(
                gate_w, place
            )

            gate_b = np.array(
                fluid.global_scope().find_var(gate_b_name).get_tensor()
            )
            gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype(
                'float32'
            )
            fluid.global_scope().find_var(gate_b_name).get_tensor().set(
                gate_b, place
            )

            candidate_w = np.array(
                fluid.global_scope().find_var(candidate_w_name).get_tensor()
            )
            candidate_w = np.random.uniform(
                -0.1, 0.1, size=candidate_w.shape
            ).astype('float32')
            fluid.global_scope().find_var(candidate_w_name).get_tensor().set(
                candidate_w, place
            )

            candidate_b = np.array(
                fluid.global_scope().find_var(candidate_b_name).get_tensor()
            )
            candidate_b = np.random.uniform(
                -0.1, 0.1, size=candidate_b.shape
            ).astype('float32')
            fluid.global_scope().find_var(candidate_b_name).get_tensor().set(
                candidate_b, place
            )

            gate_weight.append(gate_w)
            gate_bias.append(gate_b)
            candidate_weight.append(candidate_w)
            candidate_bias.append(candidate_b)

        if self.is_bidirect:
            for i in range(self.num_layers):
                gate_w_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.w_0"
                )
                gate_b_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.b_0"
                )
                candidate_w_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.w_1"
                )
                candidate_b_name = (
                    "basic_gru_reverse_layers_" + str(i) + "/BasicGRUUnit_0.b_1"
                )

                gate_w = np.array(
                    fluid.global_scope().find_var(gate_w_name).get_tensor()
                )
                gate_w = np.random.uniform(-0.1, 0.1, size=gate_w.shape).astype(
                    'float32'
                )
                fluid.global_scope().find_var(gate_w_name).get_tensor().set(
                    gate_w, place
                )

                gate_b = np.array(
                    fluid.global_scope().find_var(gate_b_name).get_tensor()
                )
                gate_b = np.random.uniform(-0.1, 0.1, size=gate_b.shape).astype(
                    'float32'
                )
                fluid.global_scope().find_var(gate_b_name).get_tensor().set(
                    gate_b, place
                )

                candidate_w = np.array(
                    fluid.global_scope().find_var(candidate_w_name).get_tensor()
                )
                candidate_w = np.random.uniform(
                    -0.1, 0.1, size=candidate_w.shape
                ).astype('float32')
                fluid.global_scope().find_var(
                    candidate_w_name
                ).get_tensor().set(candidate_w, place)

                candidate_b = np.array(
                    fluid.global_scope().find_var(candidate_b_name).get_tensor()
                )
                candidate_b = np.random.uniform(
                    -0.1, 0.1, size=candidate_b.shape
                ).astype('float32')
                fluid.global_scope().find_var(
                    candidate_b_name
                ).get_tensor().set(candidate_b, place)

                gate_weight.append(gate_w)
                gate_bias.append(gate_b)
                candidate_weight.append(candidate_w)
                candidate_bias.append(candidate_b)

        step_input_np = np.random.uniform(
            -0.1, 0.1, (self.seq_len, self.batch_size, self.hidden_size)
        ).astype('float32')
        sequence_length_np = np.random.randint(
            self.seq_len // 2, self.seq_len, size=(self.batch_size)
        ).astype('int64')

        out = exe.run(
            feed={'x': step_input_np, 'sequence_length': sequence_length_np},
            fetch_list=[rnn_out, last_hidden],
        )

        api_rnn_out = out[0]
        api_last_hidden = out[1]

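        # Compute the reference result with the NumPy copies of the same parameters.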
        np_out = gru_np(
            step_input_np,
            None,
            self.hidden_size,
            gate_weight,
            gate_bias,
            candidate_weight,
            candidate_bias,
            num_layers=self.num_layers,
            batch_first=self.batch_first,
            is_bidirect=self.is_bidirect,
            sequence_length=sequence_length_np,
        )

        np.testing.assert_allclose(api_rnn_out, np_out[0], rtol=0.0001, atol=0)

        np.testing.assert_allclose(
            api_last_hidden, np_out[1], rtol=0.0001, atol=0
        )


if __name__ == '__main__':
    unittest.main()