/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";

import "DataConfig.proto";
import "ModelConfig.proto";

package paddle;

message OptimizationConfig {
  optional int32 batch_size = 3 [ default = 1 ];
  required string algorithm = 4 [ default = "async_sgd" ];
  optional int32 num_batches_per_send_parameter = 5 [ default = 1 ];
  optional int32 num_batches_per_get_parameter = 6 [ default = 1 ];

  required double learning_rate = 7;
  optional double learning_rate_decay_a = 8 [ default = 0 ];
  optional double learning_rate_decay_b = 9 [ default = 0 ];
  optional string learning_rate_schedule = 27 [ default = "constant" ];
  // learning rate will be scaled according to learning_rate_schedule
  // 1), constant:
  // lr = learning_rate
  // 2), poly:
  // lr = learning_rate *
  //      pow(1 + learning_rate_decay_a * num_samples_processed,
  //          -learning_rate_decay_b)
  // 3), exp:
  // lr = learning_rate *
  //      pow(learning_rate_decay_a,
  //          num_samples_processed / learning_rate_decay_b)
  // 4), discexp:
  // lr = learning_rate *
  //      pow(learning_rate_decay_a,
  //          floor(num_samples_processed / learning_rate_decay_b))
  // 5), linear:
  // lr = max(learning_rate - learning_rate_decay_a * num_samples_processed,
  //          learning_rate_decay_b)
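  // Worked example (illustrative values, applying the "discexp" formula
  // above): with learning_rate = 0.1, learning_rate_decay_a = 0.5 and
  // learning_rate_decay_b = 100000,
  //   lr = 0.1 * pow(0.5, floor(num_samples_processed / 100000)),
  // i.e. the learning rate halves after every 100000 processed samples.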

  // owlqn related
  // L1-regularization
  optional double l1weight = 10 [ default = 0.1 ];
  // L2-regularization
  optional double l2weight = 11 [ default = 0 ];
  // "c1" in wolfe condition: if (newobj <= oldobj + c1 * origDirDeriv * step)
  // then accept the step
  optional double c1 = 12 [ default = 0.0001 ];
  // multiply the step by "backoff" when the Wolfe condition is not satisfied
  optional double backoff = 13 [ default = 0.5 ];
  // how many "s" and "y" vector pairs are kept in owlqn
  optional int32 owlqn_steps = 14 [ default = 10 ];
  // accept the step once "reduce the step" has happened "max_backoff" times
  optional int32 max_backoff = 15 [ default = 5 ];
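
  // A sketch (illustrative, inferred from the comments above) of the
  // backtracking loop that c1, backoff and max_backoff control:
  //   step = 1.0
  //   repeat up to max_backoff times:
  //     if newobj <= oldobj + c1 * origDirDeriv * step: accept the step
  //     else: step = step * backoff
  //   after max_backoff reductions, accept the step anyway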

  // The L2-regularization coefficient is reduced linearly from iteration 0 to
  // "l2weight_zero_iter", and set to 0 after "l2weight_zero_iter"
  // iterations. Set "l2weight_zero_iter" to 0 to disable this strategy.
  optional int32 l2weight_zero_iter = 17 [ default = 0 ];
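
  // Illustrative example: with l2weight = 0.01 and l2weight_zero_iter = 1000,
  // the effective L2 coefficient at iteration i is roughly
  //   0.01 * max(0, 1 - i / 1000).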

  // averaged sgd
  // About average_window * numBatchProcessed parameters are used
  // for averaging. To be accurate, between average_window * numBatchProcessed
  // and 2 * average_window * numBatchProcessed parameters are used for
  // averaging.
  optional double average_window = 18 [ default = 0 ];
  optional int64 max_average_window = 19 [ default = 0x7fffffffffffffff ];
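
  // Illustrative example: average_window = 0.5 averages the parameter values
  // from roughly the most recent half of all processed batches, capped at
  // max_average_window batches.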

  //////////////////////////
  // Options Adaptive SGD //
  //////////////////////////

  // learning method for sgd/asgd, such as "momentum", "adagrad", "adadelta",
  // "rmsprop"
  // The default learning method ("momentum") uses a globally decayed learning
  // rate with momentum.
  // "adagrad", "adadelta" and "rmsprop" can set momentum too.
  optional string learning_method = 23 [ default = "momentum" ];
  optional double ada_epsilon = 24 [ default = 1e-6 ];
  optional double ada_rou = 26 [ default = 0.95 ];
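
  // Illustrative sketch (assumption: ada_rou is the decay rate "rho" used by
  // adadelta/rmsprop): with ada_rou = 0.95, the squared-gradient running
  // average would be updated as
  //   E[g^2] = 0.95 * E[g^2] + 0.05 * g * g,
  // with ada_epsilon added inside the square roots for numerical stability.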

  // Force averaging to be done on the CPU in order to save GPU memory
  optional bool do_average_in_cpu = 25 [ default = false ];

  // delta add rate in pserver, used when num_batches_per_send_parameter > 1;
  // it will be divided by the number of machines automatically.
  optional double delta_add_rate = 28 [ default = 1.0 ];

  // We split a large batch into smaller mini-batches, whose sizes are
  // determined by mini_batch_size. It only takes effect when there is
  // an ExternalMachine.
  optional int32 mini_batch_size = 29 [ default = 128 ];
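
  // Illustrative example: with batch_size = 1024 and mini_batch_size = 128,
  // each batch would be processed as 8 mini-batches of 128 samples.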

  // automatically set if any one of the parameters sets the sparse remote
  // update flag
  optional bool use_sparse_remote_updater = 30 [ default = false ];

  // How to update the center parameter and feed it back to the local
  // parameter when using local SGD updates in cluster training.
  // One option is elastic_average, proposed in the paper "Deep learning with
  // elastic averaging SGD".
  // If the elastic_average method is used, every trainer node should sample
  // from the whole data set.
  optional string center_parameter_update_method = 31 [ default = "average" ];
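
  // A rough sketch (not necessarily the exact implementation) of the elastic
  // averaging step, following the EASGD paper, with elastic coefficient
  // alpha:
  //   local  = local  - alpha * (local - center)  // pull local toward center
  //   center = center + alpha * (local - center)  // pull center toward local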

  // shrink sparse parameter value
  // only works if the parameter uses remote sparse update and has an L1 decay
  // rate
  optional double shrink_parameter_value = 32 [ default = 0 ];

  ////////////////////////////
  // Options Adam Optimizer //
  ////////////////////////////
  optional double adam_beta1 = 33 [ default = 0.9 ];
  optional double adam_beta2 = 34 [ default = 0.999 ];
  optional double adam_epsilon = 35 [ default = 1e-8 ];
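
  // These fields map onto the standard Adam update (a sketch; bias-correction
  // terms omitted; g is the gradient, lr the learning rate):
  //   m = adam_beta1 * m + (1 - adam_beta1) * g
  //   v = adam_beta2 * v + (1 - adam_beta2) * g * g
  //   param = param - lr * m / (sqrt(v) + adam_epsilon)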

  // arguments for learning rate scheduler
  // Format: num1:rate1,num2:rate2,...,numK:rateK
  // For learning_rate_schedule="manual", num is the number of samples,
  // For learning_rate_schedule="pass_manual",
  //  num is the number of passes (starting from 0)
  optional string learning_rate_args = 36 [ default = "" ];
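
  // Illustrative example (hypothetical values): with
  // learning_rate_schedule = "pass_manual" and
  // learning_rate_args = "0:1.0,5:0.5,10:0.1", the base learning_rate is
  // presumably scaled by 1.0 through pass 0, by 0.5 through pass 5, and by
  // 0.1 through pass 10.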

  // for async sgd gradient commit control.
  // When more than async_lagged_grad_discard_ratio * num_gradient_servers
  // commits have passed, the current async gradient will be discarded
  // silently.
  optional double async_lagged_grad_discard_ratio = 37 [ default = 1.5 ];
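
  // Illustrative example: with num_gradient_servers = 4 and the default
  // ratio of 1.5, a gradient lagging more than 6 commits behind would be
  // discarded.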

  // global threshold for gradient clipping
  optional double gradient_clipping_threshold = 38 [ default = 0.0 ];
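
  // Illustrative sketch (assumption: clipping rescales by the global norm,
  // and 0.0 presumably disables clipping):
  //   if norm(grad) > gradient_clipping_threshold:
  //     grad = grad * gradient_clipping_threshold / norm(grad)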
};

message TrainerConfig {
  optional ModelConfig model_config = 1;
  optional DataConfig data_config = 2;
  required OptimizationConfig opt_config = 3;
  optional DataConfig test_data_config = 4;
  repeated string config_files = 5;

  // the directory to save/load model files for each training pass
  optional string save_dir = 6 [ default = "./output/model" ];

  // Path of the initial model parameters.
  // If it is set, start_pass will be ignored.
  optional string init_model_path = 7;

  // Start training from this pass.
  // Will load parameters from the previous pass.
  optional int32 start_pass = 8 [ default = 0 ];

  // file path to the trainer config file
  optional string config_file = 9;
}