/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";

import "DataConfig.proto";
import "ModelConfig.proto";

package paddle;

message OptimizationConfig {
  required int32 batch_size = 3;
  required string algorithm = 4 [ default = "async_sgd" ];
  optional int32 num_batches_per_send_parameter = 5 [ default = 1 ];
  optional int32 num_batches_per_get_parameter = 6 [ default = 1 ];

  required double learning_rate = 7;
  optional double learning_rate_decay_a = 8 [ default = 0 ];
  optional double learning_rate_decay_b = 9 [ default = 0 ];
  optional string learning_rate_schedule = 27 [ default = "constant" ];
  // learning rate will be scaled according to learning_rate_schedule
  // 1), constant:
  // lr = learning_rate
  // 2), poly:
  // lr = learning_rate *
  //      pow(1 + learning_rate_decay_a * num_samples_processed,
  //          -learning_rate_decay_b)
  // 3), exp:
  // lr = learning_rate *
  //      pow(learning_rate_decay_a,
  //          num_samples_processed / learning_rate_decay_b)
  // 4), discexp:
  // lr = learning_rate *
  //      pow(learning_rate_decay_a,
  //          floor(num_samples_processed / learning_rate_decay_b))
  // 5), linear:
  // lr = max(learning_rate - learning_rate_decay_a * num_samples_processed,
  //          learning_rate_decay_b)
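  //
  // Worked example (hypothetical values, not defaults): with
  // learning_rate = 0.1, learning_rate_schedule = "discexp",
  // learning_rate_decay_a = 0.5 and learning_rate_decay_b = 100000,
  // after 250000 processed samples:
  // lr = 0.1 * pow(0.5, floor(250000 / 100000)) = 0.1 * 0.25 = 0.025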

  // owlqn related
  // L1-regularization
  optional double l1weight = 10 [ default = 0.1 ];
  // L2-regularization
  optional double l2weight = 11 [ default = 0 ];
  // "c1" in wolfe condition: if (newobj <= oldobj + c1 * origDirDeriv * step)
  // then accept the step
  optional double c1 = 12 [ default = 0.0001 ];
  // multiply the step by "backoff" when the Wolfe condition is not satisfied
  optional double backoff = 13 [ default = 0.5 ];
  // how many "s"s and "y"s are kept in owlqn
  optional int32 owlqn_steps = 14 [ default = 10 ];
  // accept the step anyway after the step has been reduced ("backoff")
  // "max_backoff" times
  optional int32 max_backoff = 15 [ default = 5 ];
  // The L2-regularization coefficient is reduced linearly from iteration 0 to
  // "l2weight_zero_iter", and set to 0 after "l2weight_zero_iter"
  // iterations. Set "l2weight_zero_iter" to 0 to disable this strategy.
  optional int32 l2weight_zero_iter = 17 [ default = 0 ];
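  //
  // Illustrative OWL-QN snippet (protobuf text format; the values are
  // hypothetical, and "owlqn" as the algorithm string is an assumption,
  // not stated in this file):
  //   batch_size: 128
  //   algorithm: "owlqn"
  //   learning_rate: 1.0
  //   l1weight: 0.1
  //   owlqn_steps: 10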

  // averaged SGD
  // Approximately average_window * numBatchProcessed parameters are used
  // for averaging. To be precise, between average_window * numBatchProcessed
  // and 2 * average_window * numBatchProcessed parameters are used for
  // averaging.
  optional double average_window = 18 [ default = 0 ];
  optional int64 max_average_window = 19 [ default = 0x7fffffffffffffff ];
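  //
  // Worked example (hypothetical values): with average_window = 0.1, after
  // 10000 processed batches roughly 0.1 * 10000 = 1000 (and at most 2000)
  // batches' worth of parameters contribute to the average.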

  //////////////////////////////
  // Options for Adaptive SGD //
  //////////////////////////////

  // learning method for sgd/asgd, such as "momentum", "adagrad", "adadelta",
  // "rmsprop".
  // The default learning method ("momentum") uses a globally decayed learning
  // rate with momentum.
  // "adagrad", "adadelta" and "rmsprop" can set momentum too.
  optional string learning_method = 23 [ default = "momentum" ];
  optional double ada_epsilon = 24 [ default = 1e-6 ];
  optional double ada_rou = 26 [ default = 0.95 ];
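  //
  // Illustrative snippet (protobuf text format, hypothetical values) selecting
  // AdaGrad instead of the default "momentum" method:
  //   learning_method: "adagrad"
  //   ada_epsilon: 1e-6
  //   learning_rate: 0.01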

  // Force the averaging to be done on the CPU in order to save GPU memory
  optional bool do_average_in_cpu = 25 [ default = false ];

  // delta add rate in the pserver, used when num_batches_per_send_parameter > 1;
  // it will be divided by the number of machines automatically.
  optional double delta_add_rate = 28 [ default = 1.0 ];

  // A large batch is split into smaller mini-batches whose sizes are
  // determined by mini_batch_size. This only takes effect when there is
  // an ExternalMachine.
  optional int32 mini_batch_size = 29 [ default = 128 ];

  // automatically set if any parameter sets the sparse remote update flag
  optional bool use_sparse_remote_updater = 30 [ default = false ];

  // how to update the center parameter and feed it back to the local parameters
  // when using local SGD updates in cluster training.
  // One option is elastic_average, proposed by the paper "Deep learning with
  // elastic averaging SGD".
  // If the elastic_average method is used, every trainer node should sample
  // from the whole data set.
  optional string center_parameter_update_method = 31 [ default = "average" ];

  // shrink sparse parameter values;
  // only works if the parameter uses remote sparse updates and has an L1
  // decay rate
  optional double shrink_parameter_value = 32 [ default = 0 ];

  ////////////////////////////////////
  // Options for the Adam Optimizer //
  ////////////////////////////////////
  optional double adam_beta1 = 33 [ default = 0.9 ];
  optional double adam_beta2 = 34 [ default = 0.999 ];
  optional double adam_epsilon = 35 [ default = 1e-8 ];
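  //
  // Illustrative snippet (protobuf text format; the values repeat the defaults
  // above, and "adam" as a learning_method string is an assumption, not stated
  // in this file):
  //   learning_method: "adam"
  //   adam_beta1: 0.9
  //   adam_beta2: 0.999
  //   adam_epsilon: 1e-8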

  // arguments for learning rate scheduler
  // Format: num1:rate1,num2:rate2,...,numK:rateK
  // For learning_rate_schedule="manual", num is the number of samples,
  // For learning_rate_schedule="pass_manual",
  //  num is the number of passes (starting from 0)
  optional string learning_rate_args = 36 [ default = "" ];
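  //
  // Illustrative example (hypothetical values), assuming
  // learning_rate_schedule = "pass_manual":
  //   learning_rate_args: "0:1.0,10:0.5,20:0.1"
  // i.e. a comma-separated list of pass_number:rate pairs in the format above.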

  // for async SGD gradient commit control:
  // when async_lagged_grad_discard_ratio * num_gradient_servers commits have
  // passed, the current async gradient will be discarded silently.
  optional double async_lagged_grad_discard_ratio = 37 [ default = 1.5 ];
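  //
  // Worked example (hypothetical cluster size): with the default ratio of 1.5
  // and num_gradient_servers = 4, a lagged async gradient is discarded once
  // 1.5 * 4 = 6 later commits have passed.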

  // global threshold for gradient clipping
  optional double gradient_clipping_threshold = 38 [ default = 0.0 ];
};

message TrainerConfig {
  optional ModelConfig model_config = 1;
  optional DataConfig data_config = 2;
  required OptimizationConfig opt_config = 3;
  optional DataConfig test_data_config = 4;
  repeated string config_files = 5;

  // the directory to save/load model files for each training pass
  optional string save_dir = 6 [ default = "./output/model" ];

  // Path of the initial model parameters.
  // If it is set, start_pass will be ignored.
  optional string init_model_path = 7;

  // Start training from this pass.
  // Will load parameters from the previous pass.
  optional int32 start_pass = 8 [ default = 0 ];

  // file path to the trainer config file
  optional string config_file = 9;
}
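
// Illustrative TrainerConfig instance (protobuf text format; the file name,
// paths and values below are hypothetical):
//   opt_config {
//     batch_size: 128
//     algorithm: "async_sgd"
//     learning_rate: 0.01
//     learning_method: "momentum"
//   }
//   config_files: "trainer_config.py"
//   save_dir: "./output/model"
//   start_pass: 0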