class TrainTaskConfig(object):
    # whether to use GPU in training.
    use_gpu = False
    # the number of epochs to train for.
    pass_num = 2

    # the number of sequences contained in a mini-batch.
    batch_size = 64

    # the hyperparameters for the Adam optimizer.
    learning_rate = 0.001
    beta1 = 0.9
    beta2 = 0.98
    eps = 1e-9

    # the parameters for learning rate scheduling.
    warmup_steps = 4000

    # the flag indicating whether to use average loss or sum loss in training.
    use_avg_cost = False

    # the directory for saving trained models.
    model_dir = "trained_models"
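

# A minimal sketch (an assumption, not part of the original config) of the
# warmup-based learning rate schedule that warmup_steps suggests, following the
# schedule from the original Transformer paper; the actual schedule lives in
# the training script, not in this file. d_model stands for
# ModelHyperParams.d_model defined further below.
def transformer_lr(step, d_model, warmup_steps=TrainTaskConfig.warmup_steps):
    # grows linearly for the first warmup_steps steps (step counts from 1),
    # then decays proportionally to step**-0.5.
    return d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)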


class InferTaskConfig(object):
    # whether to use GPU in inference.
    use_gpu = False
    # the number of examples in one mini-batch for sequence generation.
    batch_size = 10

    # the parameters for beam search.
    beam_size = 5
    max_length = 30
    # the number of decoded sentences to output.
    n_best = 1

    # the flags indicating whether to output the special tokens.
    output_bos = False
    output_eos = False
    output_unk = False

    # the directory for loading the trained model.
    model_path = "trained_models/pass_1.infer.model"


class ModelHyperParams(object):
    # This model directly uses paddle.dataset.wmt16, in which the <bos>, <eos>
    # and <unk> tokens have already been added. As for the <pad> token, any
    # token in the dictionary can be used for padding, since the paddings' loss
    # will be masked out and have no effect on parameter gradients.

    # size of source word dictionary.
    src_vocab_size = 10000

    # size of target word dictionary.
    trg_vocab_size = 10000

    # index for <bos> token
    bos_idx = 0
    # index for <eos> token
    eos_idx = 1
    # index for <unk> token
    unk_idx = 2

    # max length of sequences.
    # The size of the position encoding table should be at least max_length + 1,
    # since the sinusoid position encoding starts from 1 and 0 can be used as
    # the padding token for position encoding.
    max_length = 50

    # the dimension for word embeddings, which is also the last dimension of
    # the input and output of multi-head attention, position-wise feed-forward
    # networks, encoder and decoder.
    d_model = 512
    # size of the hidden layer in position-wise feed-forward networks.
    d_inner_hid = 1024
    # the dimension that keys are projected to for dot-product attention.
    d_key = 64
    # the dimension that values are projected to for dot-product attention.
    d_value = 64
    # number of heads used in multi-head attention.
    n_head = 8
    # number of layers to be stacked in the encoder and decoder.
    n_layer = 6
    # dropout rate used by all dropout layers.
    dropout = 0.1


# Names of the position encoding tables, which will be initialized externally.
pos_enc_param_names = (
    "src_pos_enc_table",
    "trg_pos_enc_table", )

# Names of all data layers in the encoder, listed in order.
encoder_input_data_names = (
    "src_word",
    "src_pos",
    "src_slf_attn_bias",
    "src_data_shape",
    "src_slf_attn_pre_softmax_shape",
    "src_slf_attn_post_softmax_shape", )

# Names of all data layers in the decoder, listed in order.
decoder_input_data_names = (
    "trg_word",
    "trg_pos",
    "trg_slf_attn_bias",
    "trg_src_attn_bias",
    "trg_data_shape",
    "trg_slf_attn_pre_softmax_shape",
    "trg_slf_attn_post_softmax_shape",
    "trg_src_attn_pre_softmax_shape",
    "trg_src_attn_post_softmax_shape",
    "enc_output", )

# Names of label-related data layers, listed in order.
label_data_names = (
    "lbl_word",
    "lbl_weight", )