// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto2";
package paddle.fleet;

enum Mode {
  COLLECTIVE = 1;
  PS = 2;
  PIPELINE = 3;
  HETER = 4; // supports heterogeneous XPU and GPU computing servers
}

message RecomputeConfig {
  repeated string checkpoints = 1;
  optional bool enable_offload = 2 [ default = false ];
  repeated int32 checkpoint_shape = 3;
  optional bool enable_tuning = 4 [ default = false ]; // incubating feature for auto parallel
}
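
// A minimal usage sketch from the Python side via fleet.DistributedStrategy
// (the checkpoint names are hypothetical; exact dict keys may vary by
// Paddle release):
//   import paddle.distributed.fleet as fleet
//   strategy = fleet.DistributedStrategy()
//   strategy.recompute = True
//   strategy.recompute_configs = {"checkpoints": ["fc_0.tmp_0", "fc_1.tmp_0"]}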

message ShardingConfig {
  optional string sharding_segment_strategy = 1
      [ default = 'segment_broadcast_MB' ];
  optional float segment_broadcast_MB = 2 [ default = 32.0 ];
  repeated string segment_anchors = 3;
  optional int32 sharding_degree = 4 [ default = 8 ];
  optional int32 mp_degree = 5 [ default = 1 ];
  optional int32 dp_degree = 6 [ default = 1 ];
  optional bool hybrid_dp = 7 [ default = false ];
  optional int32 gradient_merge_acc_step = 8 [ default = 1 ];
  optional bool optimize_offload = 9 [ default = false ];
  optional bool pp_allreduce_in_optimize = 10 [ default = false ];
  optional int32 pp_degree = 11 [ default = 1 ];
  optional bool optimize_cast = 12 [ default = false ];
  // Optimizer sharding. Temporary plan; may be deprecated.
  optional bool _dp_as_optimizer_sharding = 13 [ default = false ];
  optional int32 stage = 14 [ default = 1 ];
  optional bool enable_tuning = 15 [ default = false ]; // incubating feature for auto parallel
}
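
// Sketch (strategy is a fleet.DistributedStrategy; the degrees are
// illustrative and dict keys mirror the fields above):
//   strategy.sharding = True
//   strategy.sharding_configs = {"sharding_degree": 8,
//                                "gradient_merge_acc_step": 4}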

// for dygraph
message MpConfig {
  optional bool sync_param = 1 [ default = false ];
  optional bool sync_grad = 2 [ default = false ];
  optional bool sync_moment = 3 [ default = false ];
}

message HybridConfig {
  optional int32 dp_degree = 1 [ default = -1 ];
  optional int32 mp_degree = 2 [ default = 1 ];
  optional int32 pp_degree = 3 [ default = 1 ];
  optional int32 sharding_degree = 4 [ default = 1 ];
  optional MpConfig mp_configs = 5;
}
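
// Sketch of a dygraph hybrid-parallel setup (collective mode assumed; the
// degrees are illustrative):
//   strategy = fleet.DistributedStrategy()
//   strategy.hybrid_configs = {"dp_degree": 2, "mp_degree": 2, "pp_degree": 2}
//   fleet.init(is_collective=True, strategy=strategy)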

message AMPConfig {
  optional float init_loss_scaling = 1 [ default = 32768.0 ];
  optional int32 incr_every_n_steps = 2 [ default = 1000 ];
  optional int32 decr_every_n_nan_or_inf = 3 [ default = 2 ];
  optional float incr_ratio = 4 [ default = 2.0 ];
  optional float decr_ratio = 5 [ default = 0.8 ];
  optional bool use_dynamic_loss_scaling = 6 [ default = true ];
  repeated string custom_white_list = 7;
  repeated string custom_black_list = 8;
  repeated string custom_black_varnames = 9;
  optional bool use_pure_fp16 = 10 [ default = false ];
  optional bool use_fp16_guard = 11 [ default = true ];
  optional bool use_optimizer_fp16 = 12
      [ default = false ]; // effective only for auto parallel
}
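
// Sketch: enabling AMP and overriding a few defaults (the white-list op name
// is illustrative):
//   strategy.amp = True
//   strategy.amp_configs = {"init_loss_scaling": 1024.0,
//                           "custom_white_list": ["softmax"]}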

message LocalSGDConfig {
  optional int32 k_steps = 1 [ default = 1 ];
  optional int32 begin_step = 2 [ default = 1 ];
}
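
// Sketch: synchronize parameters every k_steps once begin_step is reached
// (values illustrative):
//   strategy.localsgd = True
//   strategy.localsgd_configs = {"k_steps": 4, "begin_step": 30}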

message AdaptiveLocalSGDConfig {
  optional int32 init_k_steps = 1 [ default = 1 ];
  optional int32 begin_step = 2 [ default = 1 ];
}
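
// Sketch: the adaptive variant starts at init_k_steps and tunes the
// synchronization interval during training (values illustrative):
//   strategy.adaptive_localsgd = True
//   strategy.adaptive_localsgd_configs = {"init_k_steps": 1, "begin_step": 30}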

message GradientMergeConfig {
  optional int32 k_steps = 1 [ default = 1 ];
  optional bool avg = 2 [ default = true ];
}
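
// Sketch: accumulate gradients over k_steps micro-steps before the update,
// averaging rather than summing when avg is true:
//   strategy.gradient_merge = True
//   strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}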

message DGCConfig {
  optional int32 rampup_begin_step = 1 [ default = 0 ];
  optional int32 rampup_step = 2 [ default = 1 ];
  repeated float sparsity = 3;
}
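
// Sketch: deep gradient compression with a dense warm-up phase before
// sparsification starts (step count illustrative):
//   strategy.dgc = True
//   strategy.dgc_configs = {"rampup_begin_step": 1252}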

message LarsConfig {
  optional float lars_coeff = 1 [ default = 0.001 ];
  optional float lars_weight_decay = 2 [ default = 0.0005 ];
  optional float epsilon = 3 [ default = 0.0 ];
  repeated string exclude_from_weight_decay = 4;
}
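
// Sketch (the exclusion patterns are illustrative):
//   strategy.lars = True
//   strategy.lars_configs = {"lars_coeff": 0.001,
//                            "lars_weight_decay": 0.0005,
//                            "exclude_from_weight_decay": ["batch_norm", ".b"]}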

message LambConfig {
  optional float lamb_weight_decay = 1 [ default = 0.01 ];
  repeated string exclude_from_weight_decay = 2;
}
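
// Sketch:
//   strategy.lamb = True
//   strategy.lamb_configs = {"lamb_weight_decay": 0.01,
//                            "exclude_from_weight_decay": []}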

message BuildStrategy {
  optional bool enable_sequential_execution = 1 [ default = false ];
  optional bool fuse_elewise_add_act_ops = 2 [ default = false ];
  optional bool fuse_bn_act_ops = 3 [ default = false ];
  optional bool fuse_relu_depthwise_conv = 4 [ default = false ];
  optional bool fuse_broadcast_ops = 5 [ default = false ];
  optional bool fuse_all_optimizer_ops = 6 [ default = false ];
  optional bool enable_inplace = 7 [ default = false ];
  optional bool enable_backward_optimizer_op_deps = 8 [ default = true ];
  optional bool cache_runtime_context = 9 [ default = false ];
  optional bool fuse_bn_add_act_ops = 10 [ default = true ];
  optional bool enable_auto_fusion = 11 [ default = false ];
  optional bool enable_addto = 12 [ default = false ];
  optional bool fix_op_run_order = 13 [ default = false ];
  optional bool allow_cuda_graph_capture = 14 [ default = false ];
  optional int32 reduce_strategy = 15 [ default = 0 ];
  optional bool fuse_gemm_epilogue = 16 [ default = false ];
  optional string debug_graphviz_path = 17;
  optional bool fused_attention = 18 [ default = false ];
  optional bool fused_feedforward = 19 [ default = false ];
}
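
// Sketch: a paddle.static.BuildStrategy can be assigned wholesale; attribute
// names match the fields above:
//   build_strategy = paddle.static.BuildStrategy()
//   build_strategy.fuse_elewise_add_act_ops = True
//   strategy.build_strategy = build_strategy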

message ExecutionStrategy {
  optional int32 num_threads = 1 [ default = 1 ];
  optional int32 num_iteration_per_drop_scope = 2 [ default = 10 ];
  optional int32 num_iteration_per_run = 3 [ default = 1 ];
  optional bool use_thread_barrier = 4 [ default = false ];
}
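
// Sketch, mirroring BuildStrategy:
//   exe_strategy = paddle.static.ExecutionStrategy()
//   exe_strategy.num_threads = 4
//   strategy.execution_strategy = exe_strategy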

message GradientScaleConfig {
  // Optional values: 'avg', 'sum', 'customized'.
  // With 'avg', loss@grad is divided by the number of devices, i.e. the
  // gradient is accumulated and then averaged across devices.
  // With 'sum', the gradient is only accumulated across devices.
  optional string scale_strategy = 1 [ default = 'avg' ];
  // This flag determines where the averaging happens.
  // If scale_gradient is false, loss@Grad is averaged before gradient merge.
  // Otherwise, gradient merge runs first and the merged gradient is averaged.
  optional bool scale_gradient = 2 [ default = false ];
}
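
// Sketch (assuming gradient_scale_configs is exposed on the Python-side
// strategy object like the other *_configs fields):
//   strategy.gradient_scale_configs = {"scale_strategy": "avg"}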

message AsyncConfig {
  optional int32 k_steps = 1 [ default = -1 ];
  optional int32 max_merge_var_num = 2 [ default = 1 ];
  optional int32 send_queue_size = 3 [ default = 16 ];
  optional bool independent_recv_thread = 4 [ default = false ];
  optional int32 min_send_grad_num_before_recv = 5 [ default = 1 ];
  optional int32 thread_pool_size = 6 [ default = 1 ];
  optional int32 send_wait_times = 7 [ default = 1 ];
  optional bool runtime_split_send_recv = 8 [ default = false ];
  optional bool launch_barrier = 9 [ default = true ];
  optional string heter_worker_device_guard = 10 [ default = 'cpu' ];
  optional int32 lr_decay_steps = 11 [ default = 10 ];
  optional int32 use_ps_gpu = 12 [ default = 0 ];
}
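
// Sketch: in parameter-server mode, a_sync with k_steps > 0 selects GEO-SGD
// style communication (the step count is illustrative):
//   strategy.a_sync = True
//   strategy.a_sync_configs = {"k_steps": 100}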

message TrainerDescConfig {
  optional string dump_fields_path = 1;
  repeated string dump_fields = 2;
  repeated string dump_param = 3;
  repeated string stat_var_names = 4;
  optional string trainer = 5;
  optional string device_worker = 6;
  repeated string local_sparse = 7;
  repeated string remote_sparse = 8;
}

message PipelineConfig {
  optional int32 micro_batch_size = 1 [ default = 1 ];
  optional int32 accumulate_steps = 2 [ default = 1 ];
  optional string schedule_mode = 3 [ default = '1F1B' ];
  optional bool p2p_cache_shape = 4 [ default = true ];
  optional bool enable_partial_send_recv = 5 [ default = true ];
}
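
// Sketch (micro-batch and accumulation values illustrative):
//   strategy.pipeline = True
//   strategy.pipeline_configs = {"micro_batch_size": 2,
//                                "accumulate_steps": 4,
//                                "schedule_mode": "1F1B"}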

message TensorParallelConfig {
  optional int32 tensor_parallel_degree = 1 [ default = 1 ];
  optional int32 tensor_init_seed = 2 [ default = -1 ];
}
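
// Sketch (degree and seed illustrative):
//   strategy.tensor_parallel = True
//   strategy.tensor_parallel_configs = {"tensor_parallel_degree": 4,
//                                       "tensor_init_seed": 123}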

message QatConfig {
  optional bool channel_wise_abs_max = 1 [ default = true ];
  optional int32 weight_bits = 2 [ default = 8 ];
  optional int32 activation_bits = 3 [ default = 8 ];
  repeated string not_quant_pattern = 4;
  optional string algo = 5;
}

enum TableType {
  PS_SPARSE_TABLE = 0;
  PS_DENSE_TABLE = 1;
}

message TableParameter {
  optional uint64 table_id = 1;
  optional string table_name = 2;
  optional string table_class = 3;
  optional uint64 shard_num = 4 [ default = 1000 ];
  optional TableType type = 5;
  optional TableAccessorParameter accessor = 6;
  optional bool compress_in_save = 7 [ default = false ];
  // for cache model
  optional bool enable_sparse_table_cache = 10 [ default = true ];
  optional double sparse_table_cache_rate = 11 [ default = 0.00055 ];
  optional uint32 sparse_table_cache_file_num = 12 [ default = 16 ];
  // for patch model
  optional bool enable_revert = 13 [ default = false ];
  optional float shard_merge_rate = 14 [ default = 1.0 ];
}

message TableAccessorParameter {
  optional string accessor_class = 1;
  optional uint32 fea_dim = 4 [ default = 11 ];   // field size of one value
  optional uint32 embedx_dim = 5 [ default = 8 ]; // embedx feature size
  optional uint32 embedx_threshold = 6
      [ default = 10 ]; // threshold for creating the embedx feature
  optional CtrAccessorParameter ctr_accessor_param = 7;
  repeated TableAccessorSaveParameter table_accessor_save_param = 8;
  optional SGDParameter embed_sgd_param = 10;
  optional SGDParameter embedx_sgd_param = 11;
  optional GraphSGDParameter graph_sgd_param = 12;
}

message GraphSGDParameter {
  optional uint32 nodeid_slot = 1 [ default = 9008 ];
  optional float feature_learning_rate = 2 [ default = 0.05 ];
}

message SGDParameter {
  optional string name = 1;
  optional SparseNaiveSGDRuleParameter naive = 2;
  optional SparseAdagradSGDRuleParameter adagrad = 3;
  optional SparseAdamSGDParameter adam = 4;
}

message SparseNaiveSGDRuleParameter { // SparseNaiveSGDRule
  optional double learning_rate = 1 [ default = 0.05 ];
  optional double initial_range = 2 [ default = 0.0001 ];
  repeated float weight_bounds = 3;
}

// SparseAdaGradSGDRule | StdAdaGradSGDRule
message SparseAdagradSGDRuleParameter {
  optional double learning_rate = 1 [ default = 0.05 ];
  optional double initial_g2sum = 2 [ default = 3.0 ];
  optional double initial_range = 3 [ default = 0.0001 ];
  repeated float weight_bounds = 4;
}

message SparseAdamSGDParameter { // SparseAdamSGDRule | SparseSharedAdamSGDRule
  optional double learning_rate = 1 [ default = 0.001 ];
  optional double initial_range = 2 [ default = 0.0001 ];
  optional double beta1_decay_rate = 3 [ default = 0.9 ];
  optional double beta2_decay_rate = 4 [ default = 0.999 ];
  optional double ada_epsilon = 5 [ default = 1e-08 ];
  repeated float weight_bounds = 6;
}

message CtrAccessorParameter {
  // coefficients for computing show_click_score
  optional float nonclk_coeff = 1 [ default = 0.1 ];
  optional float click_coeff = 2 [ default = 1 ];
  // a feature whose show_click_score > base_threshold can be saved
  optional float base_threshold = 3 [ default = 1.5 ];
  // a feature whose delta_score > delta_threshold can be saved
  optional float delta_threshold = 4 [ default = 0.25 ];
  // a feature whose unseen_day < delta_keep_days can be saved
  optional float delta_keep_days = 5 [ default = 16 ];
  // show/click is updated to show/click * show_click_decay_rate after a day
  optional float show_click_decay_rate = 6 [ default = 0.98 ];
  // threshold for shrinking a feasign
  optional float delete_threshold = 7 [ default = 0.8 ];
  optional float delete_after_unseen_days = 8 [ default = 30 ];
  optional int32 ssd_unseenday_threshold = 9 [ default = 1 ];
  optional bool show_scale = 10 [ default = true ];
  repeated float load_filter_slots = 11;
}

message TableAccessorSaveParameter {
  optional uint32 param = 1;
  optional string converter = 2;
  optional string deconverter = 3;
}

message FsClientParameter {
  optional string uri = 1;
  optional string user = 2;
  optional string passwd = 3;
  optional string hadoop_bin = 4;
}

message DistributedStrategy {
  // bool options
  optional Mode mode = 1 [ default = COLLECTIVE ];
  optional bool amp = 2 [ default = false ];
  optional bool recompute = 3 [ default = false ];
  optional bool localsgd = 4 [ default = false ];
  optional bool dgc = 5 [ default = false ];
  optional bool gradient_merge = 6 [ default = false ];
  optional bool lars = 7 [ default = false ];
  optional bool lamb = 8 [ default = false ];
  optional bool pipeline = 9 [ default = false ];
  optional bool elastic = 10 [ default = false ];
  optional bool auto = 11 [ default = false ];
  optional bool a_sync = 12 [ default = true ];
  optional bool sync_nccl_allreduce = 13 [ default = true ];
  optional int32 nccl_comm_num = 14 [ default = 1 ];
  optional bool use_hierarchical_allreduce = 15 [ default = false ];
  optional int32 hierarchical_allreduce_inter_nranks = 16 [ default = 1 ];
  optional bool sync_batch_norm = 17 [ default = false ];
  optional bool fuse_all_reduce_ops = 18 [ default = true ];
  optional int32 fuse_grad_size_in_MB = 19 [ default = 32 ];
  optional float fuse_grad_size_in_TFLOPS = 20 [ default = 50 ];
  optional bool cudnn_exhaustive_search = 21 [ default = false ];
  optional int32 conv_workspace_size_limit = 22 [ default = 512 ];
  optional bool cudnn_batchnorm_spatial_persistent = 23 [ default = false ];
  optional bool adaptive_localsgd = 24 [ default = false ];
  optional bool fp16_allreduce = 25 [ default = false ];
  optional bool sharding = 26 [ default = false ];
  optional float last_comm_group_size_MB = 27 [ default = 1 ];
  optional bool find_unused_parameters = 28 [ default = false ];
  optional bool tensor_parallel = 29 [ default = false ];
  optional bool without_graph_optimization = 30 [ default = true ];
  optional int32 fuse_grad_size_in_num = 31 [ default = 8 ];
  optional bool calc_comm_same_stream = 32 [ default = false ];
  optional bool asp = 33 [ default = false ];
  optional bool fuse_grad_merge = 34 [ default = false ];
  optional bool semi_auto = 35 [ default = false ];
  optional bool adam_d2sum = 36 [ default = false ];
  optional bool auto_search = 37 [ default = false ];
  optional bool heter_ccl_mode = 38 [ default = false ];
  optional bool is_fl_ps_mode = 39 [ default = false ];
  optional bool with_coordinator = 40 [ default = false ];
  optional bool qat = 41 [ default = false ];
  optional bool split_data = 42 [ default = true ];

  optional RecomputeConfig recompute_configs = 101;
  optional AMPConfig amp_configs = 102;
  optional LocalSGDConfig localsgd_configs = 103;
  optional GradientMergeConfig gradient_merge_configs = 104;
  optional DGCConfig dgc_configs = 105;
  optional PipelineConfig pipeline_configs = 106;
  optional AsyncConfig a_sync_configs = 107;
  optional LarsConfig lars_configs = 108;
  optional LambConfig lamb_configs = 109;
  optional AdaptiveLocalSGDConfig adaptive_localsgd_configs = 110;
  optional ShardingConfig sharding_configs = 111;
  optional HybridConfig hybrid_configs = 112;
  optional TensorParallelConfig tensor_parallel_configs = 113;
  optional TrainerDescConfig trainer_desc_configs = 114;
  repeated TableParameter downpour_table_param = 115;
  optional FsClientParameter fs_client_param = 116;
  optional QatConfig qat_configs = 117;

  optional BuildStrategy build_strategy = 201;
  optional ExecutionStrategy execution_strategy = 202;
  optional GradientScaleConfig gradient_scale_configs = 203;
}
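
// End-to-end sketch of how this message is typically populated from user
// code (static graph; `optimizer` and `loss` are assumed to already exist):
//   import paddle.distributed.fleet as fleet
//   fleet.init(is_collective=True)
//   strategy = fleet.DistributedStrategy()
//   strategy.amp = True
//   optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
//   optimizer.minimize(loss)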

message DistributedJobInfo {
  optional int32 worker_num = 1;
  optional int32 server_num = 2;
  repeated string worker_ips = 3;
  repeated string server_endpoints = 4;
  optional string origin_startup = 5;
  optional string origin_main = 6; // without backpropagation and optimization
  optional string distributed_main = 7; // with backpropagation and optimization
  optional string optimizer_name = 8;   // optimizer name
  optional DistributedStrategy strategy = 101;
}