/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";

package paddle;

/**
 * Configuration structure for a parameter.
 */

enum ParameterInitStrategy {
  PARAMETER_INIT_NORMAL = 0;
  PARAMETER_INIT_UNIFORM = 1;
}
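
// Note: ParameterConfig.initial_strategy below stores one of these values as a
// plain int32 (0 -> normal, 1 -> uniform) rather than as this enum type.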

message ParameterUpdaterHookConfig {
  required string type = 1;
  optional string pruning_mask_filename = 2;
}
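
// A hook entry as it would appear inside a ParameterConfig, in protobuf text
// format (the "pruning" type name and the mask filename are illustrative, not
// prescribed by this file):
//
//   update_hooks {
//     type: "pruning"
//     pruning_mask_filename: "layer0.mask"
//   }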

message ParameterConfig {
  required string name = 1;
  required uint64 size = 2;
  optional double learning_rate = 3 [default = 1.0];
  optional double momentum = 4 [default = 0.0];
  optional double initial_mean = 5 [default = 0.0];
  optional double initial_std = 6 [default = 0.01];
  // use L2 regularization if decay_rate is set and decay_rate_l1 is not set
  optional double decay_rate = 7 [default = 0.0];
  // use L1 regularization if decay_rate_l1 is set
  optional double decay_rate_l1 = 8 [default = 0.0];
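  // As a sketch of the intended semantics (the actual update is applied by the
  // optimizer, not defined in this file):
  //   L2: w -= learning_rate * (gradient + decay_rate * w)
  //   L1: w -= learning_rate * (gradient + decay_rate_l1 * sign(w))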
  // dims of the Parameter, e.g. dims[0] is the height, dims[1] the width, and so on
  repeated uint64 dims = 9;
  // the GPU device on which the parameter resides.
  // Only used by ParallelNeuralNetwork; ignored otherwise.
  optional int32 device = 10 [default = -1];
  // how to initialize the parameter: 0 -> normal, 1 -> uniform
  // 0: treat initial_mean as the mean and initial_std as the standard deviation
  // 1: range is (initial_mean - initial_std) to (initial_mean + initial_std)
  optional int32 initial_strategy = 11 [default = 0];
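  // For example, with the defaults initial_mean = 0.0 and initial_std = 0.01:
  //   strategy 0 samples each weight from N(0.0, 0.01^2)
  //   strategy 1 samples each weight uniformly from (-0.01, 0.01)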
  // derive the variance used to initialize the parameter from the height of the Matrix
  optional bool initial_smart = 12 [default = false];
  // apply regularization every # batches
  optional int32 num_batches_regularization = 13 [default = 1];
  // if is_sparse is true, the parameter is sparse; otherwise it is dense
  optional bool is_sparse = 14 [default = false];
  // if the parameter is sparse, format should be "csc" or "csr";
  // empty means it is not sparse
  optional string format = 15 [default = ""];
  // sparse remote update or not
  optional bool sparse_remote_update = 16 [default = false];
  // gradient clipping threshold, no clipping by default
  optional double gradient_clipping_threshold = 17 [default = 0.0];
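  // E.g. a threshold t > 0 typically bounds gradient magnitudes to at most t
  // (a sketch; the exact rule lives in the optimizer); with the default of 0.0
  // no clipping is performed.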
  // static parameters are kept fixed during training
  optional bool is_static = 18 [default = false];
  // para_id should NOT be set by config_parser. It is for
  // internal use.
  optional uint64 para_id = 19;

  repeated ParameterUpdaterHookConfig update_hooks = 20;
  // whether to compact the loaded matrix into CSR format (mat -> csr)
  optional bool need_compact = 21 [default = false];
  // whether to do sparse update for this parameter
  optional bool sparse_update = 22 [default = false];

  // whether this parameter is shared or not.
  optional bool is_shared = 23 [default = false];
  // parameter block size
  optional uint64 parameter_block_size = 24 [default = 0];
}
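
// A complete example in protobuf text format (the parameter name, sizes, and
// values are illustrative; only fields defined above are used):
//
//   name: "___fc_layer_0__.w0"
//   size: 8192
//   dims: 64
//   dims: 128
//   learning_rate: 1.0
//   initial_mean: 0.0
//   initial_std: 0.01
//   initial_strategy: 0
//   decay_rate: 0.0005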