# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Related configuration of etcd, used to store Milvus metadata & service discovery.
etcd:
  endpoints:
    - localhost:2379
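  # Illustrative example (hostnames are placeholders, not defaults): for a
  # multi-member etcd cluster, list every member endpoint, e.g.
  # endpoints:
  #   - etcd-0.example.com:2379
  #   - etcd-1.example.com:2379
  #   - etcd-2.example.com:2379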
  rootPath: by-dev # The root path where data is stored in etcd
  metaSubPath: meta # metaRootPath = rootPath + '/' + metaSubPath
  kvSubPath: kv # kvRootPath = rootPath + '/' + kvSubPath
  log:
    # path is one of:
    #  - "default" as os.Stderr,
    #  - "stderr" as os.Stderr,
    #  - "stdout" as os.Stdout,
    #  - file path to append server logs to.
    # please adjust in embedded Milvus: /tmp/milvus/logs/etcd.log
    path: stdout
    level: info # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
  use:
    # please adjust in embedded Milvus: true
    embed: false # Whether to enable embedded Etcd (an in-process EtcdServer).
  data:
    # Embedded Etcd only.
    # please adjust in embedded Milvus: /tmp/milvus/etcdData/
    dir: default.etcd
  ssl:
    enabled: false # Whether to support ETCD secure connection mode
    tlsCert: /path/to/etcd-client.pem # path to your cert file
    tlsKey: /path/to/etcd-client-key.pem # path to your key file
    tlsCACert:  /path/to/ca.pem # path to your CACert file
    # TLS min version
    # Optional values: 1.0, 1.1, 1.2, 1.3.
    # We recommend using version 1.2 and above
    tlsMinVersion: 1.3

# Default value: etcd
# Valid values: [etcd, mysql]
metastore:
  type: etcd

# Related configuration of mysql, used to store Milvus metadata.
mysql:
  username: root
  password: 123456
  address: localhost
  port: 3306
  dbName: milvus_meta
  driverName: mysql
  maxOpenConns: 20
  maxIdleConns: 5
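  # Note (illustrative): this section only takes effect when `metastore.type`
  # above is set to `mysql`; point `address`/`port` at your own server, e.g.
  # mysql.internal.example.com:3306 (placeholder hostname).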

# please adjust in embedded Milvus: /tmp/milvus/data/
localStorage:
  path: /var/lib/milvus/data/

# Related configuration of minio, which is responsible for data persistence for Milvus.
minio:
  address: localhost # Address of MinIO/S3
  port: 9000   # Port of MinIO/S3
  accessKeyID: minioadmin # accessKeyID of MinIO/S3
  secretAccessKey: minioadmin # MinIO/S3 encryption string
  useSSL: false # Access to MinIO/S3 with SSL
  bucketName: "a-bucket" # Bucket name in MinIO/S3
  rootPath: files # The root path where the message is stored in MinIO/S3
  # Whether to use AWS IAM role to access S3 instead of access/secret keys
  # For more information, refer to https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
  useIAM: false
  # Custom endpoint for fetching IAM role credentials.
  # Leave it empty if you want to use the AWS default endpoint
  iamEndpoint: ""
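  # Illustrative example (values are placeholders, not defaults): for AWS S3 with
  # an IAM role instead of static keys, a setup like the following is expected to work:
  # address: s3.amazonaws.com
  # port: 443
  # useSSL: true
  # useIAM: true
  # bucketName: "my-milvus-bucket"
  # (accessKeyID / secretAccessKey are typically left empty in this mode)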

# Milvus supports three message queues: rocksmq (based on RocksDB), Pulsar, and Kafka. Keep the one you use configured in this file.
# If multiple MQs are configured in this file, they are enabled with the following priority:
# 1. standalone (local) mode: rocksmq (default) > Pulsar > Kafka
# 2. cluster mode: Pulsar (default) > Kafka (rocksmq is unsupported)

# Related configuration of Pulsar, used to manage Milvus logs of recent mutation operations, output streaming logs, and provide log publish-subscribe services.
pulsar:
  address: localhost # Address of pulsar
  port: 6650 # Port of pulsar
  webport: 80 # Web port of pulsar; if you connect directly without a proxy, use 8080
  maxMessageSize: 5242880 # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.

# If you want to enable Kafka, you need to comment out the Pulsar configs above.
kafka:
  producer:
    client.id: dc
  consumer:
    client.id: dc1
#  brokerList: localhost1:9092,localhost2:9092,localhost3:9092
#  saslUsername: username
#  saslPassword: password
#  saslMechanisms: PLAIN
#  securityProtocol: SASL_SSL
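# Illustrative example (broker addresses and credentials are placeholders): to switch
# to Kafka, comment out the `pulsar:` section above and uncomment the settings here, e.g.
#  brokerList: kafka-0.example.com:9092,kafka-1.example.com:9092
#  securityProtocol: SASL_SSL
#  saslMechanisms: PLAIN
#  saslUsername: milvus
#  saslPassword: my-secret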

rocksmq:
  # please adjust in embedded Milvus: /tmp/milvus/rdb_data
  path: /var/lib/milvus/rdb_data # The path where the message is stored in rocksmq
  rocksmqPageSize: 2147483648 # 2 GB, 2 * 1024 * 1024 * 1024 bytes, The size of each page of messages in rocksmq
  retentionTimeInMinutes: 7200 # 5 days, 5 * 24 * 60 minutes, The retention time of the message in rocksmq.
  retentionSizeInMB: 8192 # 8 GB, 8 * 1024 MB, The retention size of the message in rocksmq.
  compactionInterval: 86400 # 1 day, trigger rocksdb compaction every day to remove deleted data
  lrucacheratio:  0.06 # rocksdb cache memory ratio

# Related configuration of rootCoord, used to handle data definition language (DDL) and data control language (DCL) requests
rootCoord:
  address: localhost
  port: 53100

  dmlChannelNum: 256 # The number of dml channels created at system startup
  maxPartitionNum: 4096 # Maximum number of partitions in a collection
  minSegmentSizeToEnableIndex: 1024 # It's a threshold. When the segment size is less than this value, the segment will not be indexed

  # (in seconds) Duration after which an import task will expire (be killed). Default 900 seconds (15 minutes).
  # Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
  importTaskExpiration: 900
  # (in seconds) Milvus will keep the record of import tasks for at least `importTaskRetention` seconds. Default 86400
  # seconds (24 hours).
  # Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
  importTaskRetention: 86400
  # (in seconds) Check an import task's segment loading state in queryNodes every `importSegmentStateCheckInterval`
  # seconds. Default 10 seconds.
  # Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
  importSegmentStateCheckInterval: 10
  # (in seconds) Maximum time to wait for segments in a single import task to be loaded in queryNodes.
  # Default 60 seconds (1 minute).
  # Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
  importSegmentStateWaitLimit: 60
  # (in seconds) Check the building status of a task's segments' indices every `importIndexCheckInterval` seconds.
  # Default 10 seconds.
  # Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
  importIndexCheckInterval: 10
  # (in seconds) Maximum time to wait for indices to be built on a single import task's segments.
  # Default 600 seconds (10 minutes).
  # Note: If default value is to be changed, change also the default in: internal/util/paramtable/component_param.go
  importIndexWaitLimit: 600

# Related configuration of proxy, used to validate client requests and reduce the returned results.
proxy:
  port: 19530
  internalPort: 19529
  http:
    enabled: true # Whether to enable the http server
    debug_mode: false # Whether to enable http server debug mode

  timeTickInterval: 200 # ms, the interval at which the proxy synchronizes the time tick
  msgStream:
    timeTick:
      bufSize: 512
  maxNameLength: 255  # Maximum length of name for a collection or alias
  maxFieldNum: 256     # Maximum number of fields in a collection
  maxDimension: 32768 # Maximum dimension of a vector
  maxShardNum: 256 # Maximum number of shards in a collection
  maxTaskNum: 1024 # max task number of proxy task queue
  # please adjust in embedded Milvus: false
  ginLogging: true # Whether to produce gin logs.


# Related configuration of queryCoord, used to manage topology and load balancing for the query nodes, and handoff from growing segments to sealed segments.
queryCoord:
  address: localhost
  port: 19531
  autoHandoff: true # Enable auto handoff
  autoBalance: true # Enable auto balance
  overloadedMemoryThresholdPercentage: 90 # The memory usage threshold (in percent) above which a query node is considered overloaded
  balanceIntervalSeconds: 60
  memoryUsageMaxDifferencePercentage: 30
  checkInterval: 1000
  channelTaskTimeout: 60000 # 1 minute
  segmentTaskTimeout: 15000 # 15 seconds
  distPullInterval: 500
  loadTimeoutSeconds: 600
  checkHandoffInterval: 5000
  


# Related configuration of queryNode, used to run hybrid search between vector and scalar data.
queryNode:
  cacheSize: 32 # GB, default 32 GB, `cacheSize` is the memory used for caching data for faster query. The `cacheSize` must be less than system memory size.
  port: 21123
  loadMemoryUsageFactor: 3 # The multiply factor of calculating the memory usage while loading segments

  stats:
    publishInterval: 1000 # Interval for querynode to report node information (milliseconds)
  dataSync:
    flowGraph:
      maxQueueLength: 1024 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  # Segcore will divide a segment into multiple chunks to enable small indexes
  segcore:
    chunkRows: 1024 # The number of vectors in a chunk.
    # Note: the segment small index has been disabled since 2022.05.12, so the related configurations below won't take effect.
    # Small indexes are not created for growing segments; searches on these segments directly use brute-force scan.
    smallIndex:
      nlist: 128 # small index nlist; recommended to be set to sqrt(chunkRows); must be smaller than chunkRows/8
      nprobe: 16 # nprobe for searching the small index, based on your accuracy requirement; must be smaller than nlist
  cache:
    enabled: true
    memoryLimit: 2147483648 # 2 GB, 2 * 1024 *1024 *1024

  scheduler:
    receiveChanSize: 10240
    unsolvedQueueSize: 10240
    # maxReadConcurrentRatio is the concurrency ratio of read tasks (search tasks and query tasks).
    # Max read concurrency would be the value of `runtime.NumCPU * maxReadConcurrentRatio`.
    # It defaults to 2.0, which means max read concurrency would be the value of runtime.NumCPU * 2.
    # Max read concurrency must be greater than or equal to 1, and less than or equal to runtime.NumCPU * 100.
    maxReadConcurrentRatio: 2.0 # (0, 100]
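    # For example (illustrative): on a 16-core machine the default of 2.0 allows
    # up to 16 * 2 = 32 concurrent read tasks.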
    cpuRatio: 10.0 # ratio used to estimate read task cpu usage.

  grouping:
    enabled: true
    maxNQ: 1000
    topKMergeRatio: 10.0

indexCoord:
  address: localhost
  port: 31000

  minSegmentNumRowsToEnableIndex: 1024 # It's a threshold. When the number of rows in a segment is less than this value, the segment will not be indexed

  gc:
    interval: 600 # gc interval in seconds

indexNode:
  port: 21121

  scheduler:
    buildParallel: 1

dataCoord:
  address: localhost
  port: 13333
  enableCompaction: true # Enable data segment compaction
  enableGarbageCollection: true

  segment:
    maxSize: 512 # Maximum size of a segment in MB
    sealProportion: 0.25 # The minimum proportion (relative to maxSize) at which a segment can be sealed
    assignmentExpiration: 2000 # The time of the assignment expiration in ms
    maxLife: 86400 # The max lifetime of segment in seconds, 24*60*60
    # If a segment has not accepted dml records within `maxIdleTime` and its size is greater than
    # `minSizeFromIdleToSealed`, Milvus will automatically seal it.
    maxIdleTime: 600 # The max idle time of a segment in seconds, 10*60.
    minSizeFromIdleToSealed: 16 # The min size in MB for an idle segment to be sealed.

  compaction:
    enableAutoCompaction: true

  gc:
    interval: 3600 # gc interval in seconds
    missingTolerance: 86400 # file meta missing tolerance duration in seconds, 24*60*60
    dropTolerance: 86400 # file belongs to dropped entity tolerance duration in seconds, 24*60*60


dataNode:
  port: 21124

  dataSync:
    flowGraph:
      maxQueueLength: 1024 # Maximum length of task queue in flowgraph
      maxParallelism: 1024 # Maximum number of tasks executed in parallel in the flowgraph
  flush:
    # Max buffer size to flush for a single segment.
    insertBufSize: 16777216 # Bytes, 16 MB

# Configures the system log output.
log:
  level: debug # Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
  file:
    # please adjust in embedded Milvus: /tmp/milvus/logs
    rootPath: "" # default to stdout, stderr
    maxSize: 300 # MB
    maxAge: 10 # Maximum time for log retention in days.
    maxBackups: 20
  format: text # text/json

grpc:
  log:
    level: WARNING

  serverMaxRecvSize: 2147483647 # math.MaxInt32
  serverMaxSendSize: 2147483647 # math.MaxInt32
  clientMaxRecvSize: 104857600 # 100 MB, 100 * 1024 * 1024
  clientMaxSendSize: 104857600 # 100 MB, 100 * 1024 * 1024

  client:
    dialTimeout:      5000
    keepAliveTime:    10000
    keepAliveTimeout: 20000
    maxMaxAttempts: 5
    initialBackOff: 1.0
    maxBackoff: 60.0
    backoffMultiplier: 2.0
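    # For example (illustrative, assuming the backoff values are in seconds): with
    # maxMaxAttempts 5, retries back off roughly 1s, 2s, 4s, 8s, capped at maxBackoff.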

# TLS configuration for the proxy.
tls:
  serverPemPath: configs/cert/server.pem
  serverKeyPath: configs/cert/server.key
  caPemPath: configs/cert/ca.pem


common:
  # Channel name generation rule: ${namePrefix}-${ChannelIdx}
  chanNamePrefix:
    cluster: "by-dev"
    rootCoordTimeTick: "rootcoord-timetick"
    rootCoordStatistics: "rootcoord-statistics"
    rootCoordDml: "rootcoord-dml"
    rootCoordDelta: "rootcoord-delta"
    search: "search"
    searchResult: "searchResult"
    queryTimeTick: "queryTimeTick"
    queryNodeStats: "query-node-stats"
    # Cmd for loadIndex, flush, etc...
    cmd: "cmd"
    dataCoordStatistic: "datacoord-statistics-channel"
    dataCoordTimeTick: "datacoord-timetick-channel"
    dataCoordSegmentInfo: "segment-info-channel"

  # Sub name generation rule: ${subNamePrefix}-${NodeID}
  subNamePrefix:
    rootCoordSubNamePrefix: "rootCoord"
    proxySubNamePrefix: "proxy"
    queryNodeSubNamePrefix: "queryNode"
    dataNodeSubNamePrefix: "dataNode"
    dataCoordSubNamePrefix: "dataCoord"

  defaultPartitionName: "_default"  # default partition name for a collection
  defaultIndexName: "_default_idx"  # default index name
  retentionDuration: 86400 # time travel reserved time, insert/delete will not be cleaned in this period. 1 day in seconds
  entityExpiration: -1 # Entity expiration in seconds. CAUTION: make sure entityExpiration >= retentionDuration; -1 means never expire

  gracefulTime: 5000 # milliseconds. It represents the interval (in ms) by which the request arrival time needs to be subtracted in the case of Bounded Consistency.

  # Default value: auto
  # Valid values: [auto, avx512, avx2, avx, sse4_2]
  # This configuration is only used by querynode and indexnode; it selects the CPU instruction set for searching and index-building.
  simdType: auto
  indexSliceSize: 16 # MB

  # please adjust in embedded Milvus: local
  storageType: minio

  security:
    authorizationEnabled: false
    # tls mode values [0, 1, 2]
    # 0 is close, 1 is one-way authentication, 2 is two-way authentication.
    tlsMode: 0
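    # Illustrative example: for one-way authentication set `tlsMode: 1` and make sure
    # the top-level `tls` section above points at your server.pem / server.key; for
    # two-way authentication set `tlsMode: 2` and also provide `caPemPath`.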

# QuotaConfig, configurations of Milvus quota and limits.
# By default, we enable:
#   1. TT protection;
#   2. Memory protection.
# You can enable:
#   1. DML throughput limitation;
#   2. DDL, DQL qps/rps limitation;
#   3. DQL Queue length/latency protection;
# If necessary, you can also manually force Milvus to deny RW requests.
quotaAndLimits:
  enable: false # `true` to enable quota and limits, `false` to disable.

  # quotaCenterCollectInterval is the time interval at which quotaCenter
  # collects metrics from Query cluster and Data cluster.
  quotaCenterCollectInterval: 3 # seconds, (0 ~ 65536)

  ddl: # ddl limit rates, default no limit.
    #collectionRate: # requests per minute, default no limit, rate for CreateCollection, DropCollection, HasCollection, DescribeCollection, LoadCollection, ReleaseCollection
    #partitionRate: # requests per minute, default no limit, rate for CreatePartition, DropPartition, HasPartition, LoadPartition, ReleasePartition
    #indexRate: # requests per minute, default no limit, rate for CreateIndex, DropIndex, DescribeIndex
    #flushRate: # requests per minute, default no limit, rate for flush
    #compactionRate: # requests per minute, default no limit, rate for manualCompaction
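    # Illustrative example (the value is arbitrary): to cap collection-level DDL at
    # 30 requests per minute, uncomment:
    # collectionRate: 30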

  # dml limit rates, default no limit.
  # The maximum rate will not be greater than `max`,
  # and the rate after handling back pressure will not be less than `min`.
  dml:
    insertRate:
      #max: # MB/s, default no limit
      #min: # MB/s, default 0
    deleteRate:
      #max: # MB/s, default no limit
      #min: # MB/s, default 0
    bulkLoadRate: # not supported yet. TODO: limit bulkLoad rate
      #max: # MB/s, default no limit
      #min: # MB/s, default 0
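    # Illustrative example (numbers are arbitrary): to cap insert throughput at
    # 64 MB/s with a 1 MB/s floor under back pressure, set under insertRate:
    #   max: 64 # MB/s
    #   min: 1 # MB/s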

  # dql limit rates, default no limit.
  # The maximum rate will not be greater than `max`,
  # and the rate after handling back pressure will not be less than `min`.
  dql:
    searchRate:
      #max: # vps, default no limit
      #min: # vps, default 0
    queryRate:
      #max: # qps, default no limit
      #min: # qps, default 0

  # limitWriting decides whether dml requests are allowed.
  limitWriting:
    # forceDeny `false` means dml requests are allowed (except under some
    # specific conditions, such as node memory reaching the water level); `true` means always reject all dml requests.
    forceDeny: false
    # maxTimeTickDelay indicates the backpressure for DML operations.
    # DML rates are reduced according to the ratio of the time tick delay to maxTimeTickDelay;
    # if the time tick delay is greater than maxTimeTickDelay, all DML requests are rejected.
    maxTimeTickDelay: 30 # in seconds
    # When memory usage > memoryHighWaterLevel, all dml requests would be rejected;
    # When memoryLowWaterLevel < memory usage < memoryHighWaterLevel, reduce the dml rate;
    # When memory usage < memoryLowWaterLevel, no action.
    # memoryLowWaterLevel should be less than memoryHighWaterLevel.
    dataNodeMemoryLowWaterLevel: 0.8 # (0, 1], memoryLowWaterLevel in DataNodes
    dataNodeMemoryHighWaterLevel: 0.9 # (0, 1], memoryHighWaterLevel in DataNodes
    queryNodeMemoryLowWaterLevel: 0.8 # (0, 1], memoryLowWaterLevel in QueryNodes
    queryNodeMemoryHighWaterLevel: 0.9 # (0, 1], memoryHighWaterLevel in QueryNodes

  # limitReading decides whether dql requests are allowed.
  limitReading:
    # forceDeny `false` means dql requests are allowed (except under some
    # specific conditions, such as the collection having been dropped); `true` means always reject all dql requests.
    forceDeny: false

    # NQInQueueThreshold indicates that the system is under backpressure on the Search/Query path.
    # If the NQ in any QueryNode's queue is greater than NQInQueueThreshold, search & query rates gradually cool off
    # until the NQ in the queue no longer exceeds NQInQueueThreshold. The NQ of a query request is counted as 1.
    #NQInQueueThreshold: # int, default no limit

    # queueLatencyThreshold indicates that the system is under backpressure on the Search/Query path.
    # If the dql queuing latency is greater than queueLatencyThreshold, search & query rates gradually cool off
    # until the queuing latency no longer exceeds queueLatencyThreshold.
    # The latency here refers to the averaged latency over a period of time.
    #queueLatencyThreshold: # milliseconds, default no limit

    # coolOffSpeed is the speed at which search & query rates cool off.
    #coolOffSpeed: 0.9 # (0, 1]