syntax = "proto3";

package oneflow;

import "oneflow/core/common/shape.proto";
// Fills every element of a blob with a single constant value.
message ConstantFillConf {
  // The constant used for every element.
  float value = 1;
}

// Fills a blob with values drawn from a uniform distribution over [min, max].
message UniformFillConf {
  float min = 1;  // Lower bound of the uniform distribution.
  float max = 2;  // Upper bound of the uniform distribution.
}

// Fills a blob with values drawn from a Gaussian (normal) distribution.
message GaussianFillConf {
  float mean = 1;  // Mean of the distribution.
  float std = 2;   // Standard deviation of the distribution.
}

// Selects exactly one fill (initialization) strategy for a blob.
message FillConf {
  oneof type {
    ConstantFillConf constant_conf = 1;
    UniformFillConf uniform_conf = 2;
    GaussianFillConf gaussian_conf = 3;
  }
}

// Configuration of a convolution operator.
message ConvolutionOpConf {
  string in = 1;   // Logical blob name of the input.
  string out = 2;  // Logical blob name of the output.

  int32 out_num = 3;       // The number of outputs for the layer
  bool has_bias_term = 4;  // whether to have bias terms

  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in all spatial dimensions, or once per spatial dimension.
  repeated int32 pad = 5;          // The padding size; defaults to 0
  repeated int32 kernel_size = 6;  // The kernel size
  repeated int32 stride = 7;       // The stride; defaults to 1
  // Factor used to dilate the kernel, (implicitly) zero-filling the resulting
  // holes. (Kernel dilation is sometimes referred to by its use in the
  // "algorithme à trous" from Holschneider et al. 1987.)
  repeated int32 dilation = 8;  // The dilation; defaults to 1

  int32 group = 15;  // The group size for group conv

  FillConf weight_fill = 16;  // The fill for the weight
  FillConf bias_fill = 17;    // The fill for the bias

  // The axis to interpret as "channels" when performing convolution.
  // Preceding dimensions are treated as independent inputs;
  // succeeding dimensions are treated as "spatial".
  // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
  // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for
  // groups g>1) filters across the spatial axes (H, W) of the input.
  // With (N, C, D, H, W) inputs, and axis == 1, we perform
  // N independent 3D convolutions, sliding (C/g)-channels
  // filters across the spatial axes (D, H, W) of the input.
  int32 axis = 18;

  // Whether to force use of the general ND convolution, even if a specific
  // implementation for blobs of the appropriate number of spatial dimensions
  // is available. (Currently, there is only a 2D-specific convolution
  // implementation; for input blobs with num_axes != 2, this option is
  // ignored and the ND implementation will be used.)
  bool force_nd_im2col = 19;
}

// Configuration of a fully-connected (inner product) operator.
message InnerProductOpConf {
  string in = 1;   // Logical blob name of the input.
  string out = 2;  // Logical blob name of the output.

  int32 out_num = 3;       // The number of outputs for the layer
  bool has_bias_term = 4;  // whether to have bias terms
  FillConf weight_fill = 5;  // The fill for the weight
  FillConf bias_fill = 6;    // The fill for the bias

  // The first axis to be lumped into a single inner product computation;
  // all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  int32 axis = 7;
  // Specify whether to transpose the weight matrix or not.
  // If transpose == true, any operations will be performed on the transpose
  // of the weight matrix. The weight matrix itself is not going to be
  // transposed but rather the transfer flag of operations will be toggled
  // accordingly.
  bool need_transpose = 8;
}

// Configuration of a data-loading operator.
message DataLoaderOpConf {
  // Output blob name for the feature data.
  string feature = 1;
  // Shape of one feature instance (see oneflow/core/common/shape.proto).
  ShapeProto shape_of_one_feature_ins = 2;
  // Output blob name for the label data.
  string label = 3;
  // Directory the data is read from.
  string data_dir = 4;
}

// Configuration of a pooling operator.
message PoolingOpConf {
  string in = 1;   // Logical blob name of the input.
  string out = 2;  // Logical blob name of the output.

  // The available pooling methods.
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  PoolMethod pool = 3;  // The pooling method
  // Pad, kernel size, and stride are all given as a repeated value
  // dimensions in height and width or 3D
  // NOTE(review): field numbers 4/7/10 are non-contiguous in the original
  // schema; they are part of the wire contract and must not be renumbered.
  repeated int32 pad = 4;          // The padding size {H, W} or {D, H, W}
  repeated int32 kernel_size = 7;  // The kernel size
  repeated int32 stride = 10;      // The stride
}

// Configuration of a ReLU activation operator.
message ReluOpConf {
  string in = 1;   // Logical blob name of the input.
  string out = 2;  // Logical blob name of the output.
}

// Configuration of a softmax operator.
message SoftmaxOpConf {
  string in = 1;   // Logical blob name of the input.
  string out = 2;  // Logical blob name of the output.
}

// Configuration of a multinomial logistic loss operator.
message MultinomialLogisticLossOpConf {
  string prediction = 1;  // Blob name of the predicted probabilities.
  string label = 2;       // Blob name of the ground-truth labels.
  string loss = 3;        // Blob name of the computed loss output.
}

// Configuration of a concatenation operator.
message ConcatOpConf {
  repeated string in = 1;  // Blob names of the inputs to concatenate.
  string out = 2;          // Blob name of the concatenated output.
  int32 axis = 3;          // Axis along which the inputs are concatenated.
}

// Configuration of a copy-over-communication-network operator.
// Intentionally empty: the op currently needs no parameters, but keeping a
// dedicated message allows adding fields later without breaking callers.
message CopyCommNetOpConf {
}

// Configuration of a host<->device copy operator.
message CopyHdOpConf {
  // Direction of the copy.
  enum Type {
    H2D = 0;  // host to device
    D2H = 1;  // device to host
  }
  Type type = 1;
}
// Configuration of a clone operator that duplicates one blob.
message CloneOpConf {
  // Number of output copies to produce.
  int32 out_num = 1;
  // Logical blob name to clone (same field name is used in BoxingOpConf).
  string lbn = 2;
}

// Boxing input rule: concatenate the inputs along `axis`.
message BoxConcatConf {
  int32 axis = 1;  // Axis along which the boxed inputs are concatenated.
}

// Boxing input rule: element-wise add the inputs. No parameters needed.
message BoxAddConf {
}

// Boxing output rule: split the result along the data dimension.
// No parameters needed.
message BoxDataSplitConf {
}

// Boxing output rule: clone the result to every output. No parameters needed.
message BoxCloneConf {
}

// Configuration of a boxing operator, which regroups blobs between
// differently-partitioned producers and consumers.
message BoxingOpConf {
  // Logical blob name being boxed.
  string lbn = 1;
  int32 in_num = 2;   // Number of input blobs.
  int32 out_num = 3;  // Number of output blobs.
  // How the inputs are combined.
  oneof in_box {
    BoxConcatConf concat_box = 4;
    BoxAddConf add_box = 5;
  }
  // How the combined result is distributed to the outputs.
  oneof out_box {
    BoxDataSplitConf data_split_box = 6;
    BoxCloneConf clone_box = 7;
  }
}

// Configuration of a model-update (optimizer step) operator.
message ModelUpdateOpConf {
  // Step size applied when updating the model.
  float learning_rate = 1;
}

// Configuration of a model-diff accumulation operator. No parameters needed.
message ModelDiffAccOpConf {
}

// Configuration of a model-save operator.
message ModelSaveOpConf {
  // Logical blob names of the model blobs to save.
  repeated string lbns = 1;
}

// Top-level configuration of one operator: a unique name plus exactly one
// op-type-specific configuration selected through the oneof.
message OperatorConf {
  // Unique name of this operator instance.
  string name = 1;
  oneof op_type {
    ConvolutionOpConf convolution_conf = 100;
    InnerProductOpConf innerproduct_conf = 101;
    DataLoaderOpConf data_loader_conf = 102;
    PoolingOpConf pooling_conf = 103;
    ReluOpConf relu_conf = 104;
    SoftmaxOpConf softmax_conf = 105;
    MultinomialLogisticLossOpConf multinomial_logistic_loss_conf = 106;
    CopyHdOpConf copy_hd_conf = 107;
    CloneOpConf clone_conf = 108;
    BoxingOpConf boxing_conf = 109;
    ModelUpdateOpConf model_update_conf = 110;
    ModelSaveOpConf model_save_conf = 111;
    ModelDiffAccOpConf model_diff_acc_conf = 112;
    ConcatOpConf concat_conf = 113;
    CopyCommNetOpConf copy_comm_net_conf = 114;
  }
}