# text8.yaml
# The frequency (in steps) to save trained models during training.
save_step: 10000
# The frequency (in steps) to fetch and print outputs during training.
print_step: 100
# Path of the checkpoint used to resume previous training.
init_from_checkpoint: ""
# Path of the pretrained model used to initialize training for the current task.
init_from_pretrain_model: ""
# Path of the trained parameters used for prediction.
init_from_params: "./trained_models/step_final/"
# The directory for saving models.
save_model: "trained_models"
# The directory for saving the inference model.
inference_model_dir: "infer_model"
# Random seed for CE or debugging.
random_seed: None
# The path to data files 
data: "./gen_data/text8/"
# The name of dataset
dataset: "text8"

# Whether to use CUDA.
use_gpu: True

# Args for reader, see reader.py for details
token_delimiter: None
batch_size: 15
eval_batch_size: 5
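# Note (assumption about the segment-based reader): each training step consumes
# roughly batch_size * tgt_len tokens, i.e. 15 * 512 = 7680 tokens per step here.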

# Hyperparams for training:
# The number of epochs for training.
epoch: 30

# The hyper parameters for optimizer.
# Type of optimizer.
optim: adam
# Learning rate schedule. 
scheduler: cosine
# This static learning_rate will be applied to the LearningRateScheduler-derived
# learning rate to get the final learning rate.
learning_rate: 0.00025
# The hyper parameters for Adam optimizer.
beta1: 0.9
beta2: 0.997
eps: 1e-9
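# For reference, beta1/beta2/eps parameterize the standard Adam update
# (a sketch, not necessarily the exact framework implementation):
#   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
#   v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
#   param_t = param_{t-1} - lr_t * m_hat_t / (sqrt(v_hat_t) + eps)
# where lr_t is the scheduled learning rate and m_hat_t, v_hat_t are the
# bias-corrected moment estimates.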
# The hyper parameters for Momentum optimizer.
mom: 0.0
# Global gradient clip. 
clip: 0.25
# The parameters for learning rate scheduling.
warmup_steps: 0
# The parameters for CosineAnnealingDecay. Minimum learning rate.
eta_min: 0.0
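# For reference, assuming the usual CosineAnnealingDecay semantics, the schedule
# is roughly:
#   lr_t = eta_min + (learning_rate - eta_min) * (1 + cos(pi * t / T_max)) / 2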
# The parameters for ReduceLROnPlateau.
# The ratio by which the learning rate will be reduced.
decay_rate: 0.5
# When the loss does not improve for this number of epochs, the learning rate will be reduced.
patience: 0
# The lower bound of the learning rate after reduction.
min_lr: 0.0
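# For reference, ReduceLROnPlateau behaves roughly as follows (a sketch):
# if the monitored loss has not improved for `patience` epochs, then
#   lr = max(lr * decay_rate, min_lr)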

# Hyperparams for model:
# Whether to use adaptive softmax.
adaptive: False
# Size of dictionary. This can be obtained automatically. 
ntokens: 10000
# The dimension for word embeddings, which is also the last dimension of
# the input and output of multi-head attention, position-wise feed-forward
# networks, encoder and decoder.
d_model: 512
# Dimension of each attention head.
d_head: 64
# Size of the hidden layer in position-wise feed-forward networks.
d_inner_hid: 2048
# Number of heads used in multi-head attention.
n_head: 8
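# Note: with these settings the head projections exactly cover the model width,
# i.e. n_head * d_head = 8 * 64 = 512 = d_model.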
# Number of sub-layers to be stacked in the encoder and decoder.
n_layer: 12
# Dropout rate.
dropout: 0.1
# Attention dropout rate.
attn_dropout: 0.0
# Attention type for decoder.
# 0 for relative partial MHA (as in Transformer-XL).
# 1 for relative MHA (as in Shaw et al.).
attn_type: 0
# Apply layer normalization before or after sublayers. 
normalize_before: False
# Whether to tie the input embedding and output projection weights.
tie_weight: True
# The length of the extended context.
ext_len: 0
# The divisor value for adaptive softmax and adaptive input.
div_val: 1
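# For reference (assuming the usual adaptive softmax/input convention), the
# embedding width of the i-th cluster is d_model / (div_val ** i); with
# div_val = 1 every cluster keeps the full d_model width.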
# Target length. The number of tokens to predict. 
tgt_len: 512
# Memory length. The number of previous hidden states retained as memory.
mem_len: 512
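# For reference: in Transformer-XL each token can attend to roughly
# mem_len + tgt_len previous positions (512 + 512 = 1024 here).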
# Use the same attention length for all tokens. 
same_length: False
# Use the same positional encoding after clamp_len (-1 means no clamping).
clamp_len: -1
# The number of samples in sample softmax. -1 means do not use sampled softmax. 
sample_softmax: -1
# Max step for training.
max_step: 400000
# Target length for evaluation. That is, the number of tokens to predict for evaluation. 
eval_tgt_len: 128
# Evaluation mode: "valid", "test", or both ("all").
mode: "all"
# Maximum evaluation step. 
max_eval_steps: -1