# Argument Outline

It looks like there are a lot of arguments. However, most of them are intended for developers or are already set automatically by the cluster submission environment, so users do not need to care about them. Here, we divide these arguments into several classes according to the scenarios in which they are used. For example, the arguments in `common` can be used in all scenarios, some arguments can only be used in certain layers, and some are only needed for multi-machine training in a cluster.
| | args | local train | cluster train | local test | cluster test |
|---|---|---|---|---|---|
| common | job | √ | √ | √ | √ |
| | use_gpu | √ | √ | √ | √ |
| | local | √ | √ | √ | √ |
| | config | √ | √ | √ | √ |
| | config_args | √ | √ | √ | √ |
| | num_passes | √ | √ | √ | √ |
| | trainer_count | √ | √ | √ | √ |
| | version | √ | √ | √ | √ |
| | show_layer_stat | √ | √ | √ | √ |
| train | dot_period | √ | √ | | |
| | test_period | √ | √ | | |
| | saving_period | √ | √ | | |
| | show_parameter_stats_period | √ | √ | | |
| | init_model_path | √ | √ | √ | |
| | load_missing_parameter_strategy | √ | √ | | |
| | saving_period_by_batches | √ | √ | | |
| | use_old_updater | √ | √ | | |
| | enable_grad_share | √ | √ | | |
| | grad_share_block_num | √ | √ | | |
| | log_error_clipping | √ | √ | | |
| | log_clipping | √ | √ | | |
| | save_only_one | √ | √ | | |
| | allow_inefficient_sparse_update | √ | √ | | |
| | start_pass | √ | √ | | |
| train/test | save_dir | √ | √ | √ | √ |
| testing during training | test_all_data_in_one_period | √ | √ | | |
| | average_test_period | √ | √ | | |
| test | model_list | | | √ | √ |
| | test_wait | | | √ | √ |
| | test_pass | | | √ | √ |
| | predict_output_dir | | | √ | √ |
| | distribute_test | | | √ | √ |
| Auc/PnpairValidation | predict_file | | | √ | √ |
| GPU | gpu_id | √ | √ | √ | √ |
| | parallel_nn | √ | √ | √ | √ |
| | allow_only_one_model_on_one_gpu | √ | √ | √ | √ |
| | cudnn_dir | √ | √ | √ | √ |
| | cuda_dir | √ | √ | √ | √ |
| | cudnn_conv_workspace_limit_in_mb | √ | √ | √ | √ |
| RNN | beam_size | | | √ | √ |
| | rnn_use_batch | √ | √ | √ | √ |
| | prev_batch_state | √ | √ | | |
| | diy_beam_search_prob_so | | | √ | √ |
| metric learning | external | √ | √ | √ | √ |
| | data_server_port | √ | √ | | |
| PServer | start_pserver | | √ | | √ |
| | pservers | | √ | | √ |
| | port | | √ | | √ |
| | ports_num | | √ | | √ |
| | ports_num_for_sparse | | √ | | √ |
| | nics | | √ | | √ |
| | rdma_tcp | | √ | | √ |
| | small_messages | | √ | | |
| | loadsave_parameters_in_pserver | | √ | | √ |
| | log_period_server | | √ | | |
| | pserver_num_threads | | √ | | |
| | sock_send_buf_size | | √ | | |
| | sock_recv_buf_size | | √ | | |
| | num_gradient_servers | | √ | | |
| | parameter_block_size | | √ | | |
| | parameter_block_size_for_sparse | | √ | | |
| Async SGD | async_count | | √ | | |
| | async_lagged_ratio_min | | √ | | |
| | async_lagged_ratio_default | | √ | | |
| Performance Tuning | log_barrier_abstract | | √ | | |
| | log_barrier_lowest_nodes | | √ | | |
| | log_barrier_show_log | | √ | | |
| | check_sparse_distribution_batches | | √ | | |
| | check_sparse_distribution_ratio | | √ | | |
| | check_sparse_distribution_unbalance_degree | | √ | | |
| | check_sparse_distribution_in_pserver | | √ | | |
| | show_check_sparse_distribution_log | | √ | | |
| Data Provider | memory_threshold_on_load_data | √ | √ | | |
| RandomNumber | seed | √ | √ | | |
| | thread_local_rand_use_global_seed | √ | √ | | |
| UnitTest | checkgrad_eps | | | | |
| Matrix/Vector | enable_parallel_vector | √ | √ | √ | √ |
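
To make the table concrete, below is a minimal sketch of how a few of these arguments are passed to `paddle train` in the local train and local test scenarios. The config file name, pass count, and output paths are placeholders chosen for illustration, not values taken from this document; adapt them to your own job.

```bash
# Local training: flags from the "common", "train", and "train/test" rows.
paddle train \
  --config=trainer_config.py \
  --use_gpu=false \
  --trainer_count=4 \
  --num_passes=10 \
  --save_dir=./output

# Local testing: switch the job type and point at a saved model;
# arguments from the "test" rows (e.g. model_list, test_pass) apply here.
paddle train \
  --job=test \
  --config=trainer_config.py \
  --use_gpu=false \
  --init_model_path=./output/pass-00009
```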