# -*- coding: utf-8 -*-
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from seq2seq_utils import Seq2SeqModelHyperParams as args

import paddle
import paddle.fluid as fluid
from paddle.fluid import ParamAttr, layers
from paddle.fluid.dygraph import Layer
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.nn import Embedding
from paddle.jit.api import declarative

INF = 1.0 * 1e5
alpha = 0.6
uniform_initializer = lambda x: fluid.initializer.UniformInitializer(
    low=-x, high=x
)
zero_constant = fluid.initializer.Constant(0.0)


class BasicLSTMUnit(Layer):
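    """Single LSTM cell used by both the encoder and the decoder stacks below.

    Usage sketch (illustrative only; assumes a dygraph guard is active and that
    `x_t`, `prev_h`, `prev_c` are float32 variables of shape
    [batch_size, hidden_size]):

        unit = BasicLSTMUnit(hidden_size=256, input_size=256)
        new_h, new_c = unit(x_t, prev_h, prev_c)
    """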
    def __init__(
        self,
        hidden_size,
        input_size,
        param_attr=None,
        bias_attr=None,
        gate_activation=None,
        activation=None,
        forget_bias=1.0,
        dtype='float32',
    ):
        super().__init__(dtype)

        self._hidden_size = hidden_size
        self._param_attr = param_attr
        self._bias_attr = bias_attr
        self._gate_activation = gate_activation or paddle.nn.functional.sigmoid
        self._activation = activation or paddle.tanh
        self._forget_bias = forget_bias
        self._dtype = dtype
        self._input_size = input_size

        self._weight = self.create_parameter(
            attr=self._param_attr,
            shape=[self._input_size + self._hidden_size, 4 * self._hidden_size],
            dtype=self._dtype,
        )

        self._bias = self.create_parameter(
            attr=self._bias_attr,
            shape=[4 * self._hidden_size],
            dtype=self._dtype,
            is_bias=True,
        )

    def forward(self, input, pre_hidden, pre_cell):
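        # One LSTM step: a single fused matmul over [input, pre_hidden] produces
        # all four gate pre-activations, which are split into i (input gate),
        # j (cell candidate), f (forget gate) and o (output gate):
        #   new_cell   = pre_cell * sigmoid(f + forget_bias) + sigmoid(i) * tanh(j)
        #   new_hidden = tanh(new_cell) * sigmoid(o)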
        concat_input_hidden = layers.concat([input, pre_hidden], 1)
        gate_input = layers.matmul(x=concat_input_hidden, y=self._weight)

        gate_input = paddle.add(gate_input, self._bias)
        i, j, f, o = layers.split(gate_input, num_or_sections=4, dim=-1)
        new_cell = paddle.add(
            paddle.multiply(
                pre_cell, paddle.nn.functional.sigmoid(f + self._forget_bias)
            ),
            paddle.multiply(paddle.nn.functional.sigmoid(i), paddle.tanh(j)),
        )

        new_hidden = paddle.tanh(new_cell) * paddle.nn.functional.sigmoid(o)

        return new_hidden, new_cell


class BaseModel(fluid.dygraph.Layer):
    def __init__(
        self,
        hidden_size,
        src_vocab_size,
        tar_vocab_size,
        batch_size,
        num_layers=1,
        init_scale=0.1,
        dropout=None,
        beam_size=1,
        beam_start_token=1,
        beam_end_token=2,
        beam_max_step_num=2,
        mode='train',
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.src_vocab_size = src_vocab_size
        self.tar_vocab_size = tar_vocab_size
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.init_scale = init_scale
        self.dropout = dropout
        self.beam_size = beam_size
        self.beam_start_token = beam_start_token
        self.beam_end_token = beam_end_token
        self.beam_max_step_num = beam_max_step_num
        self.mode = mode
        self.kinf = 1e9

        param_attr = ParamAttr(initializer=uniform_initializer(self.init_scale))
        bias_attr = ParamAttr(initializer=zero_constant)
        forget_bias = 1.0

        self.src_embeder = Embedding(
            size=[self.src_vocab_size, self.hidden_size],
            param_attr=fluid.ParamAttr(
                initializer=uniform_initializer(init_scale)
            ),
        )

        self.tar_embeder = Embedding(
            size=[self.tar_vocab_size, self.hidden_size],
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                initializer=uniform_initializer(init_scale)
            ),
        )

        self.enc_units = []
        for i in range(num_layers):
            self.enc_units.append(
                self.add_sublayer(
                    "enc_units_%d" % i,
                    BasicLSTMUnit(
                        hidden_size=self.hidden_size,
                        input_size=self.hidden_size,
                        param_attr=param_attr,
                        bias_attr=bias_attr,
                        forget_bias=forget_bias,
                    ),
                )
            )

        self.dec_units = []
        for i in range(num_layers):
            self.dec_units.append(
                self.add_sublayer(
                    "dec_units_%d" % i,
                    BasicLSTMUnit(
                        hidden_size=self.hidden_size,
                        input_size=self.hidden_size,
                        param_attr=param_attr,
                        bias_attr=bias_attr,
                        forget_bias=forget_bias,
                    ),
                )
            )

        self.fc = paddle.nn.Linear(
            self.hidden_size,
            self.tar_vocab_size,
            weight_attr=paddle.ParamAttr(
                initializer=paddle.nn.initializer.Uniform(
                    low=-self.init_scale, high=self.init_scale
                )
            ),
            bias_attr=False,
        )

    def _transpose_batch_time(self, x):
        return paddle.transpose(x, [1, 0] + list(range(2, len(x.shape))))

    def _merge_batch_beams(self, x):
        return paddle.reshape(x, shape=(-1, x.shape[2]))

    def _split_batch_beams(self, x):
        return paddle.reshape(x, shape=(-1, self.beam_size, x.shape[1]))

    def _expand_to_beam_size(self, x):
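        # Tile a decoder state of shape [batch_size, ...] to
        # [batch_size, beam_size, ...] so that every beam starts from the same state.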
        x = fluid.layers.unsqueeze(x, [1])
        expand_shape = [-1] * len(x.shape)
        expand_shape[1] = self.beam_size * x.shape[1]
        x = paddle.expand(x, expand_shape)
        return x

    def _real_state(self, state, new_state, step_mask):
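        # Where step_mask == 1 take the freshly computed state, where
        # step_mask == 0 (padding) keep the previous one, i.e.
        # new_state * mask + state * (1 - mask).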
        new_state = fluid.layers.elementwise_mul(
            new_state, step_mask, axis=0
        ) - fluid.layers.elementwise_mul(state, (step_mask - 1), axis=0)
        return new_state

    def _gather(self, x, indices, batch_pos):
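        # indices and batch_pos both have shape [batch_size, beam_size]; stacking
        # them gives [batch_size, beam_size, 2] coordinates so that gather_nd
        # picks x[batch_pos[b, k], indices[b, k]] for every beam k.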
        topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
        return paddle.gather_nd(x, topk_coordinates)

    @declarative
    def forward(self, inputs):
        src, tar, label, src_sequence_length, tar_sequence_length = inputs
        if src.shape[0] < self.batch_size:
            self.batch_size = src.shape[0]

        src_emb = self.src_embeder(self._transpose_batch_time(src))

        # NOTE: `enc_hidden` and `enc_cell` are stored in tensor arrays instead of a
        # nested list so that the dygraph code can be transformed to static graph,
        # because nested lists can't be transformed yet.
        enc_hidden_0 = to_variable(
            np.zeros((self.batch_size, self.hidden_size), dtype='float32')
        )
        enc_cell_0 = to_variable(
            np.zeros((self.batch_size, self.hidden_size), dtype='float32')
        )
        zero = fluid.layers.zeros(shape=[1], dtype="int64")
        enc_hidden = fluid.layers.create_array(dtype="float32")
        enc_cell = fluid.layers.create_array(dtype="float32")
        for i in range(self.num_layers):
            index = zero + i
            enc_hidden = fluid.layers.array_write(
                enc_hidden_0, index, array=enc_hidden
            )
            enc_cell = fluid.layers.array_write(
                enc_cell_0, index, array=enc_cell
            )

        max_seq_len = src_emb.shape[0]

        enc_len_mask = fluid.layers.sequence_mask(
            src_sequence_length, maxlen=max_seq_len, dtype="float32"
        )
        enc_len_mask = paddle.transpose(enc_len_mask, [1, 0])

        # TODO: A diff exists if while_loop is called in the static graph:
        # in the while block, when a Variable created in the parent block participates
        # in the gradient calculation, the gradient is wrong because each step's scope
        # always returns the same value generated by the last step.
        # NOTE: Replace max_seq_len (the Tensor src_emb.shape[0]) with args.max_seq_len
        # (a Python int) to avoid this bug temporarily.
        for k in range(args.max_seq_len):
            enc_step_input = src_emb[k]
            step_mask = enc_len_mask[k]
            new_enc_hidden, new_enc_cell = [], []
            for i in range(self.num_layers):
                enc_new_hidden, enc_new_cell = self.enc_units[i](
                    enc_step_input, enc_hidden[i], enc_cell[i]
                )
                if self.dropout is not None and self.dropout > 0.0:
                    enc_step_input = fluid.layers.dropout(
                        enc_new_hidden,
                        dropout_prob=self.dropout,
                        dropout_implementation='upscale_in_train',
                    )
                else:
                    enc_step_input = enc_new_hidden

                new_enc_hidden.append(
                    self._real_state(enc_hidden[i], enc_new_hidden, step_mask)
                )
                new_enc_cell.append(
                    self._real_state(enc_cell[i], enc_new_cell, step_mask)
                )

            enc_hidden, enc_cell = new_enc_hidden, new_enc_cell

        dec_hidden, dec_cell = enc_hidden, enc_cell
        tar_emb = self.tar_embeder(self._transpose_batch_time(tar))
        max_seq_len = tar_emb.shape[0]
        dec_output = []
        for step_idx in range(max_seq_len):
            j = step_idx + 0
            step_input = tar_emb[j]
            new_dec_hidden, new_dec_cell = [], []
            for i in range(self.num_layers):
                new_hidden, new_cell = self.dec_units[i](
                    step_input, dec_hidden[i], dec_cell[i]
                )
                new_dec_hidden.append(new_hidden)
                new_dec_cell.append(new_cell)
                if self.dropout is not None and self.dropout > 0.0:
                    step_input = fluid.layers.dropout(
                        new_hidden,
                        dropout_prob=self.dropout,
                        dropout_implementation='upscale_in_train',
                    )
                else:
                    step_input = new_hidden
            dec_output.append(step_input)

        dec_output = paddle.stack(dec_output)
        dec_output = self.fc(self._transpose_batch_time(dec_output))
        loss = fluid.layers.softmax_with_cross_entropy(
            logits=dec_output, label=label, soft_label=False
        )
        loss = paddle.squeeze(loss, axis=[2])
        max_tar_seq_len = fluid.layers.shape(tar)[1]
        tar_mask = fluid.layers.sequence_mask(
            tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
        )
        loss = loss * tar_mask
        loss = fluid.layers.reduce_mean(loss, dim=[0])
        loss = paddle.sum(loss)

        return loss

    @declarative
    def beam_search(self, inputs):
        src, tar, label, src_sequence_length, tar_sequence_length = inputs
        if src.shape[0] < self.batch_size:
            self.batch_size = src.shape[0]

        src_emb = self.src_embeder(self._transpose_batch_time(src))
        enc_hidden_0 = to_variable(
            np.zeros((self.batch_size, self.hidden_size), dtype='float32')
        )
        enc_cell_0 = to_variable(
            np.zeros((self.batch_size, self.hidden_size), dtype='float32')
        )
        zero = fluid.layers.zeros(shape=[1], dtype="int64")
        enc_hidden = fluid.layers.create_array(dtype="float32")
        enc_cell = fluid.layers.create_array(dtype="float32")
        for j in range(self.num_layers):
            index = zero + j
            enc_hidden = fluid.layers.array_write(
                enc_hidden_0, index, array=enc_hidden
            )
            enc_cell = fluid.layers.array_write(
                enc_cell_0, index, array=enc_cell
            )

        max_seq_len = src_emb.shape[0]

        enc_len_mask = fluid.layers.sequence_mask(
            src_sequence_length, maxlen=max_seq_len, dtype="float32"
        )
        enc_len_mask = paddle.transpose(enc_len_mask, [1, 0])

        for k in range(args.max_seq_len):
            enc_step_input = src_emb[k]
            step_mask = enc_len_mask[k]

            new_enc_hidden, new_enc_cell = [], []

            for i in range(self.num_layers):
                enc_new_hidden, enc_new_cell = self.enc_units[i](
                    enc_step_input, enc_hidden[i], enc_cell[i]
                )
                if self.dropout is not None and self.dropout > 0.0:
                    enc_step_input = fluid.layers.dropout(
                        enc_new_hidden,
                        dropout_prob=self.dropout,
                        dropout_implementation='upscale_in_train',
                    )
                else:
                    enc_step_input = enc_new_hidden

                new_enc_hidden.append(
                    self._real_state(enc_hidden[i], enc_new_hidden, step_mask)
                )
                new_enc_cell.append(
                    self._real_state(enc_cell[i], enc_new_cell, step_mask)
                )

            enc_hidden, enc_cell = new_enc_hidden, new_enc_cell

        # beam search
        batch_beam_shape = (self.batch_size, self.beam_size)
        vocab_size_tensor = to_variable(
            np.full((1), self.tar_vocab_size)
        ).astype("int64")
        start_token_tensor = to_variable(
            np.full(batch_beam_shape, self.beam_start_token, dtype='int64')
        )
        end_token_tensor = to_variable(
            np.full(batch_beam_shape, self.beam_end_token, dtype='int64')
        )
        step_input = self.tar_embeder(start_token_tensor)
        beam_finished = to_variable(
            np.full(batch_beam_shape, 0, dtype='float32')
        )
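        # Initial beam scores: log-prob 0 for the first beam of each example and
        # -kinf for the rest, so the first decoding step expands only one
        # hypothesis per example.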
        beam_state_log_probs = to_variable(
            np.array(
                [[0.0] + [-self.kinf] * (self.beam_size - 1)], dtype="float32"
            )
        )
        beam_state_log_probs = paddle.expand(
            beam_state_log_probs,
            [self.batch_size * beam_state_log_probs.shape[0], -1],
        )
        dec_hidden, dec_cell = enc_hidden, enc_cell
        dec_hidden = [self._expand_to_beam_size(ele) for ele in dec_hidden]
        dec_cell = [self._expand_to_beam_size(ele) for ele in dec_cell]

        batch_pos = paddle.expand(
            fluid.layers.unsqueeze(
                to_variable(np.arange(0, self.batch_size, 1, dtype="int64")),
                [1],
            ),
            [-1, self.beam_size],
        )
        predicted_ids = []
        parent_ids = []

        for step_idx in range(paddle.to_tensor(self.beam_max_step_num)):
            if paddle.sum(1 - beam_finished).numpy()[0] == 0:
                break
            step_input = self._merge_batch_beams(step_input)
            new_dec_hidden, new_dec_cell = [], []
            state = 0
            dec_hidden = [
                self._merge_batch_beams(state) for state in dec_hidden
            ]
            dec_cell = [self._merge_batch_beams(state) for state in dec_cell]

            for i in range(self.num_layers):
                new_hidden, new_cell = self.dec_units[i](
                    step_input, dec_hidden[i], dec_cell[i]
                )
                new_dec_hidden.append(new_hidden)
                new_dec_cell.append(new_cell)
                if self.dropout is not None and self.dropout > 0.0:
                    step_input = fluid.layers.dropout(
                        new_hidden,
                        dropout_prob=self.dropout,
                        dropout_implementation='upscale_in_train',
                    )
                else:
                    step_input = new_hidden

            cell_outputs = self._split_batch_beams(step_input)
            cell_outputs = self.fc(cell_outputs)

            step_log_probs = paddle.log(fluid.layers.softmax(cell_outputs))
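            # Beams that already finished may only emit the end token again:
            # finished rows are replaced by noend_mask_tensor (0 on the end token,
            # -kinf elsewhere), while unfinished rows keep their real log-probs.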
            noend_array = [-self.kinf] * self.tar_vocab_size
            noend_array[self.beam_end_token] = 0
            noend_mask_tensor = to_variable(
                np.array(noend_array, dtype='float32')
            )

            step_log_probs = paddle.multiply(
                paddle.expand(
                    fluid.layers.unsqueeze(beam_finished, [2]),
                    [-1, -1, self.tar_vocab_size],
                ),
                noend_mask_tensor,
            ) - fluid.layers.elementwise_mul(
                step_log_probs, (beam_finished - 1), axis=0
            )
            log_probs = fluid.layers.elementwise_add(
                x=step_log_probs, y=beam_state_log_probs, axis=0
            )
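            # Flatten the [batch, beam, vocab] scores to [batch, beam * vocab], take
            # the top-k candidates, then recover the source beam (index // vocab_size)
            # and the emitted token (index % vocab_size) for each candidate.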
            scores = paddle.reshape(
                log_probs, [-1, self.beam_size * self.tar_vocab_size]
            )
            topk_scores, topk_indices = fluid.layers.topk(
                input=scores, k=self.beam_size
            )

            beam_indices = paddle.floor_divide(topk_indices, vocab_size_tensor)
            token_indices = paddle.remainder(topk_indices, vocab_size_tensor)
            next_log_probs = self._gather(scores, topk_indices, batch_pos)

            x = 0
            new_dec_hidden = [
                self._split_batch_beams(state) for state in new_dec_hidden
            ]
            new_dec_cell = [
                self._split_batch_beams(state) for state in new_dec_cell
            ]
            new_dec_hidden = [
                self._gather(x, beam_indices, batch_pos) for x in new_dec_hidden
            ]
            new_dec_cell = [
                self._gather(x, beam_indices, batch_pos) for x in new_dec_cell
            ]

            next_finished = self._gather(beam_finished, beam_indices, batch_pos)
            next_finished = fluid.layers.cast(next_finished, "bool")
            next_finished = paddle.logical_or(
                next_finished,
                paddle.equal(token_indices, end_token_tensor),
            )
            next_finished = fluid.layers.cast(next_finished, "float32")

            dec_hidden, dec_cell = new_dec_hidden, new_dec_cell
            beam_finished = next_finished
            beam_state_log_probs = next_log_probs
            step_input = self.tar_embeder(token_indices)
            predicted_ids.append(token_indices)
            parent_ids.append(beam_indices)

        predicted_ids = paddle.stack(predicted_ids)
        parent_ids = paddle.stack(parent_ids)
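        # gather_tree back-tracks through parent_ids so that every time step holds
        # the tokens of the finally selected beams instead of the locally best ones.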
        predicted_ids = paddle.nn.functional.gather_tree(
            predicted_ids, parent_ids
        )
        predicted_ids = self._transpose_batch_time(predicted_ids)
        return predicted_ids


class AttentionModel(fluid.dygraph.Layer):
    def __init__(
        self,
        hidden_size,
        src_vocab_size,
        tar_vocab_size,
        batch_size,
        num_layers=1,
        init_scale=0.1,
        dropout=None,
        beam_size=1,
        beam_start_token=1,
        beam_end_token=2,
        beam_max_step_num=2,
        mode='train',
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.src_vocab_size = src_vocab_size
        self.tar_vocab_size = tar_vocab_size
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.init_scale = init_scale
        self.dropout = dropout
        self.beam_size = beam_size
        self.beam_start_token = beam_start_token
        self.beam_end_token = beam_end_token
        self.beam_max_step_num = beam_max_step_num
        self.mode = mode
        self.kinf = 1e9

        param_attr = ParamAttr(initializer=uniform_initializer(self.init_scale))
        bias_attr = ParamAttr(initializer=zero_constant)
        forget_bias = 1.0

        self.src_embeder = Embedding(
            size=[self.src_vocab_size, self.hidden_size],
            param_attr=fluid.ParamAttr(
                name='source_embedding',
                initializer=uniform_initializer(init_scale),
            ),
        )

        self.tar_embeder = Embedding(
            size=[self.tar_vocab_size, self.hidden_size],
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                name='target_embedding',
                initializer=uniform_initializer(init_scale),
            ),
        )

        self.enc_units = []
        for i in range(num_layers):
            self.enc_units.append(
                self.add_sublayer(
                    "enc_units_%d" % i,
                    BasicLSTMUnit(
                        hidden_size=self.hidden_size,
                        input_size=self.hidden_size,
                        param_attr=param_attr,
                        bias_attr=bias_attr,
                        forget_bias=forget_bias,
                    ),
                )
            )

        self.dec_units = []
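        # The first decoder layer consumes the concatenation of the target embedding
        # and the previous attention output (input feeding), hence its input size is
        # 2 * hidden_size; the remaining layers take inputs of size hidden_size.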
        for i in range(num_layers):
            if i == 0:
                self.dec_units.append(
                    self.add_sublayer(
                        "dec_units_%d" % i,
                        BasicLSTMUnit(
                            hidden_size=self.hidden_size,
                            input_size=self.hidden_size * 2,
                            param_attr=ParamAttr(
                                name="dec_units_%d" % i,
                                initializer=uniform_initializer(
                                    self.init_scale
                                ),
                            ),
                            bias_attr=bias_attr,
                            forget_bias=forget_bias,
                        ),
                    )
                )
            else:
                self.dec_units.append(
                    self.add_sublayer(
                        "dec_units_%d" % i,
                        BasicLSTMUnit(
                            hidden_size=self.hidden_size,
                            input_size=self.hidden_size,
                            param_attr=ParamAttr(
                                name="dec_units_%d" % i,
                                initializer=uniform_initializer(
                                    self.init_scale
                                ),
                            ),
                            bias_attr=bias_attr,
                            forget_bias=forget_bias,
                        ),
                    )
                )

        self.attn_fc = paddle.nn.Linear(
            self.hidden_size,
            self.hidden_size,
            weight_attr=paddle.ParamAttr(
                name="self_attn_fc",
                initializer=paddle.nn.initializer.Uniform(
                    low=-self.init_scale, high=self.init_scale
                ),
            ),
            bias_attr=False,
        )

        self.concat_fc = paddle.nn.Linear(
            2 * self.hidden_size,
            self.hidden_size,
            weight_attr=paddle.ParamAttr(
                name="self_concat_fc",
                initializer=paddle.nn.initializer.Uniform(
                    low=-self.init_scale, high=self.init_scale
                ),
            ),
            bias_attr=False,
        )

        self.fc = paddle.nn.Linear(
            self.hidden_size,
            self.tar_vocab_size,
            weight_attr=paddle.ParamAttr(
                name="self_fc",
                initializer=paddle.nn.initializer.Uniform(
                    low=-self.init_scale, high=self.init_scale
                ),
            ),
            bias_attr=False,
        )

    def _transpose_batch_time(self, x):
        return paddle.transpose(x, [1, 0] + list(range(2, len(x.shape))))

    def _merge_batch_beams(self, x):
        return paddle.reshape(x, shape=(-1, x.shape[2]))

    def tile_beam_merge_with_batch(self, x):
        x = fluid.layers.unsqueeze(x, [1])  # [batch_size, 1, ...]
        expand_shape = [-1] * len(x.shape)
        expand_shape[1] = self.beam_size * x.shape[1]
        x = paddle.expand(x, expand_shape)  # [batch_size, beam_size, ...]
        x = paddle.transpose(
            x, list(range(2, len(x.shape))) + [0, 1]
        )  # [..., batch_size, beam_size]
        # use 0 in the reshape to copy the original dimension sizes and avoid a wrong shape
        x = paddle.reshape(
            x, shape=[0] * (len(x.shape) - 2) + [-1]
        )  # [..., batch_size * beam_size]
        x = paddle.transpose(
            x, [len(x.shape) - 1] + list(range(0, len(x.shape) - 1))
        )  # [batch_size * beam_size, ...]
        return x

    def _split_batch_beams(self, x):
        return paddle.reshape(x, shape=(-1, self.beam_size, x.shape[1]))

    def _expand_to_beam_size(self, x):
        x = fluid.layers.unsqueeze(x, [1])
        expand_shape = [-1] * len(x.shape)
        expand_shape[1] = self.beam_size * x.shape[1]
        x = paddle.expand(x, expand_shape)
        return x

    def _real_state(self, state, new_state, step_mask):
        new_state = fluid.layers.elementwise_mul(
            new_state, step_mask, axis=0
        ) - fluid.layers.elementwise_mul(state, (step_mask - 1), axis=0)
        return new_state

    def _gather(self, x, indices, batch_pos):
        topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
        return paddle.gather_nd(x, topk_coordinates)

    def attention(self, query, enc_output, mask=None):
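        # Dot-product attention: the decoder state is scored against the encoder
        # outputs projected by attn_fc; `mask` is 0 at valid positions and -1 at
        # padding, so adding mask * 1e9 pushes padded scores to -1e9 before the
        # softmax. The result is the attention-weighted sum of the projected memory.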
        query = fluid.layers.unsqueeze(query, [1])
        memory = self.attn_fc(enc_output)
        attn = fluid.layers.matmul(query, memory, transpose_y=True)

        if mask is not None:
            attn = paddle.transpose(attn, [1, 0, 2])
            attn = paddle.add(attn, mask * 1000000000)
            attn = paddle.transpose(attn, [1, 0, 2])
        weight = fluid.layers.softmax(attn)
        weight_memory = fluid.layers.matmul(weight, memory)

        return weight_memory

    def _change_size_for_array(self, func, array):
        for i, state in enumerate(array):
            fluid.layers.array_write(func(state), i, array)

        return array

    @declarative
    def forward(self, inputs):
        src, tar, label, src_sequence_length, tar_sequence_length = inputs
        if src.shape[0] < self.batch_size:
            self.batch_size = src.shape[0]

        src_emb = self.src_embeder(self._transpose_batch_time(src))

        # NOTE: `enc_hidden` and `enc_cell` are stored in tensor arrays instead of a
        # nested list so that the dygraph code can be transformed to static graph,
        # because nested lists can't be transformed yet.
        enc_hidden_0 = to_variable(
            np.zeros((self.batch_size, self.hidden_size), dtype='float32')
        )
        enc_hidden_0.stop_gradient = True
        enc_cell_0 = to_variable(
            np.zeros((self.batch_size, self.hidden_size), dtype='float32')
        )
        enc_cell_0.stop_gradient = True
        zero = fluid.layers.zeros(shape=[1], dtype="int64")
        enc_hidden = fluid.layers.create_array(dtype="float32")
        enc_cell = fluid.layers.create_array(dtype="float32")
        for i in range(self.num_layers):
            index = zero + i
            enc_hidden = fluid.layers.array_write(
                enc_hidden_0, index, array=enc_hidden
            )
            enc_cell = fluid.layers.array_write(
                enc_cell_0, index, array=enc_cell
            )

        max_seq_len = src_emb.shape[0]

        enc_len_mask = fluid.layers.sequence_mask(
            src_sequence_length, maxlen=max_seq_len, dtype="float32"
        )
        enc_padding_mask = enc_len_mask - 1.0
        enc_len_mask = paddle.transpose(enc_len_mask, [1, 0])

        enc_outputs = []
        # TODO: A diff exists if while_loop is called in the static graph:
        # in the while block, when a Variable created in the parent block participates
        # in the gradient calculation, the gradient is wrong because each step's scope
        # always returns the same value generated by the last step.
        for p in range(max_seq_len):
            k = 0 + p
            enc_step_input = src_emb[k]
            step_mask = enc_len_mask[k]
            new_enc_hidden, new_enc_cell = [], []
            for i in range(self.num_layers):
                enc_new_hidden, enc_new_cell = self.enc_units[i](
                    enc_step_input, enc_hidden[i], enc_cell[i]
                )
                if self.dropout is not None and self.dropout > 0.0:
                    enc_step_input = fluid.layers.dropout(
                        enc_new_hidden,
                        dropout_prob=self.dropout,
                        dropout_implementation='upscale_in_train',
                    )
                else:
                    enc_step_input = enc_new_hidden

                new_enc_hidden.append(
                    self._real_state(enc_hidden[i], enc_new_hidden, step_mask)
                )
                new_enc_cell.append(
                    self._real_state(enc_cell[i], enc_new_cell, step_mask)
                )
            enc_outputs.append(enc_step_input)
            enc_hidden, enc_cell = new_enc_hidden, new_enc_cell

        enc_outputs = paddle.stack(enc_outputs)
        enc_outputs = self._transpose_batch_time(enc_outputs)

        # train
        input_feed = to_variable(
            np.zeros((self.batch_size, self.hidden_size), dtype='float32')
        )
        # NOTE: set stop_gradient here, otherwise grad var is null
        input_feed.stop_gradient = True
        dec_hidden, dec_cell = enc_hidden, enc_cell
        tar_emb = self.tar_embeder(self._transpose_batch_time(tar))
        max_seq_len = tar_emb.shape[0]
        dec_output = []

        for step_idx in range(max_seq_len):
            j = step_idx + 0
            step_input = tar_emb[j]
            step_input = fluid.layers.concat([step_input, input_feed], 1)
            new_dec_hidden, new_dec_cell = [], []
            for i in range(self.num_layers):
                new_hidden, new_cell = self.dec_units[i](
                    step_input, dec_hidden[i], dec_cell[i]
                )
                new_dec_hidden.append(new_hidden)
                new_dec_cell.append(new_cell)
                if self.dropout is not None and self.dropout > 0.0:
                    step_input = fluid.layers.dropout(
                        new_hidden,
                        dropout_prob=self.dropout,
                        dropout_implementation='upscale_in_train',
                    )
                else:
                    step_input = new_hidden
            dec_att = self.attention(step_input, enc_outputs, enc_padding_mask)
            dec_att = paddle.squeeze(dec_att, [1])
            concat_att_out = fluid.layers.concat([dec_att, step_input], 1)
            out = self.concat_fc(concat_att_out)
            input_feed = out
            dec_output.append(out)
            dec_hidden, dec_cell = new_dec_hidden, new_dec_cell

        dec_output = paddle.stack(dec_output)
        dec_output = self.fc(self._transpose_batch_time(dec_output))
        loss = fluid.layers.softmax_with_cross_entropy(
            logits=dec_output, label=label, soft_label=False
        )
        loss = paddle.squeeze(loss, axis=[2])
        max_tar_seq_len = fluid.layers.shape(tar)[1]
        tar_mask = fluid.layers.sequence_mask(
            tar_sequence_length, maxlen=max_tar_seq_len, dtype='float32'
        )
        loss = loss * tar_mask
        loss = fluid.layers.reduce_mean(loss, dim=[0])
        loss = paddle.sum(loss)

        return loss
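

# Illustrative training sketch (comments only, not executed). The hyper-parameter
# field names used below (hidden_size, src_vocab_size, ...) are assumptions for
# illustration; adapt them to whatever `Seq2SeqModelHyperParams` actually exposes.
#
#     with fluid.dygraph.guard():
#         model = BaseModel(args.hidden_size, args.src_vocab_size,
#                           args.tar_vocab_size, args.batch_size,
#                           num_layers=args.num_layers, dropout=args.dropout)
#         # src/tar/label are int64 id tensors padded to args.max_seq_len; the two
#         # *_sequence_length tensors hold the true lengths of each example.
#         loss = model([src, tar, label, src_sequence_length, tar_sequence_length])
#         loss.backward()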