Commit 4e281854 authored by: H Heisenberg

merge the newest code


Former-commit-id: 69be61b7715976b4852881717743e2dc121fbf49
......@@ -5,6 +5,7 @@ Please mark all changes in the change log and use the ticket number from JIRA.
# Milvus 0.4.0 (2019-07-28)
## Bug
- MS-119 - The problem of combining the log files
- MS-411 - Fix metric unittest linking error
- MS-412 - Fix gpu cache logical error
- MS-416 - ExecutionEngineImpl::GpuCache has no return value, causing a crash
......@@ -32,6 +33,7 @@ Please mark all changes in the change log and use the ticket number from JIRA.
- MS-510 - unittest out of memory and crashed
- MS-119 - The problem of combining the log files
- MS-121 - The problem that user can't change the time zone
- MS-507 - Dataset 10m-512, index type sq8, abnormal performance when CPU_CACHE is set to 16 or 64
## Improvement
- MS-327 - Clean code for milvus
......
......@@ -303,13 +303,15 @@ Status ExecutionEngineImpl::Search(long n,
}
Status ExecutionEngineImpl::Cache() {
zilliz::milvus::cache::CpuCacheMgr::GetInstance()->InsertItem(location_, index_);
cache::DataObjPtr obj = std::make_shared<cache::DataObj>(index_, PhysicalSize());
zilliz::milvus::cache::CpuCacheMgr::GetInstance()->InsertItem(location_, obj);
return Status::OK();
}
Status ExecutionEngineImpl::GpuCache(uint64_t gpu_id) {
zilliz::milvus::cache::GpuCacheMgr::GetInstance(gpu_id)->InsertItem(location_, index_);
cache::DataObjPtr obj = std::make_shared<cache::DataObj>(index_, PhysicalSize());
zilliz::milvus::cache::GpuCacheMgr::GetInstance(gpu_id)->InsertItem(location_, obj);
return Status::OK();
}
......
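The hunk above replaces the direct InsertItem(location_, index_) calls with ones that first wrap index_ in a cache::DataObj carrying PhysicalSize(), so the CPU and GPU cache managers can account for how much memory each cached index occupies. Below is a minimal, self-contained sketch of that pattern; Index, DataObj and CacheMgr here are simplified stand-ins for illustration, not the actual Milvus classes.

#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>

// Simplified stand-in for an index object; the real type is engine-specific.
struct Index {};
using IndexPtr = std::shared_ptr<Index>;

// Cache entry that pairs the payload with its physical size so the cache
// can account for memory usage when deciding what to evict.
class DataObj {
 public:
    DataObj(IndexPtr index, int64_t size) : index_(std::move(index)), size_(size) {}
    int64_t Size() const { return size_; }
 private:
    IndexPtr index_;
    int64_t size_ = 0;
};
using DataObjPtr = std::shared_ptr<DataObj>;

// Minimal size-aware cache keyed by file location.
class CacheMgr {
 public:
    void InsertItem(const std::string& location, const DataObjPtr& obj) {
        auto it = items_.find(location);
        if (it != items_.end()) {
            usage_ -= it->second->Size();  // replacing an existing entry
        }
        usage_ += obj->Size();
        items_[location] = obj;
        // A real cache manager would evict older entries here once usage_
        // exceeds the configured capacity.
    }
    int64_t Usage() const { return usage_; }
 private:
    std::map<std::string, DataObjPtr> items_;
    int64_t usage_ = 0;
};

// Usage roughly mirroring the patched Cache() above (hypothetical names):
//   auto obj = std::make_shared<DataObj>(index, physical_size);
//   cache_mgr.InsertItem(location, obj);

Recording the size at insertion time is presumably what lets capacity limits such as cpu_cache_capacity and gpu_cache_capacity in the configuration below be enforced through eviction.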
server_config:
address: 0.0.0.0 # milvus server ip address (IPv4)
port: 19530 # the port milvus listens on, default: 19530, range: 1025 ~ 65534
gpu_index: 0 # the gpu milvus uses, default: 0, range: 0 ~ gpu number - 1
mode: single # milvus deployment type: single, cluster, read_only
db_config:
db_path: /tmp/milvus # milvus data storage path
# URI format: dialect://username:password@host:port/database
# All parts except dialect are optional, but you MUST include the delimiters
# Currently supports mysql or sqlite
db_backend_url: mysql://root:1234@:/test # meta database uri
index_building_threshold: 1024 # index building trigger threshold, default: 1024, unit: MB
archive_disk_threshold: 512 # trigger archive action if storage size exceeds this value, unit: GB
archive_days_threshold: 30 # files older than x days will be archived, unit: day
db_slave_path: # secondary data storage path, separated by semicolons
metric_config:
is_startup: off # whether to start monitoring: on, off
collector: prometheus # metrics collector: prometheus
prometheus_config: # the following is the prometheus configuration
port: 8080 # the port prometheus uses to fetch metrics
push_gateway_ip_address: 127.0.0.1 # push method configuration: push gateway ip address
push_gateway_port: 9091 # push method configuration: push gateway port
# URI format: dialect://username:password@host:port/database
# All parts except dialect are optional, but you MUST include the delimiters
# Currently dialect supports mysql or sqlite
db_backend_url: sqlite://:@:/
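# e.g. a fully specified mysql URI would look like mysql://user:password@127.0.0.1:3306/milvus_meta (illustrative values, not defaults)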
archive_disk_threshold: 0 # trigger archive action if storage size exceeds this value, 0 means no limit, unit: GB
archive_days_threshold: 0 # files older than x days will be archived, 0 means no limit, unit: day
insert_buffer_size: 4 # maximum insert buffer size allowed, default: 4, unit: GB, should be at least 1 GB.
# the sum of insert_buffer_size and cpu_cache_capacity should be less than total memory, unit: GB
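# e.g. on a host with 32 GB of RAM, insert_buffer_size: 4 plus cpu_cache_capacity: 16 totals 20 GB, leaving the remainder for the OS and other processes (illustrative figures)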
license_config: # license configuration
license_path: "/tmp/milvus/system.license" # license file path
metric_config:
is_startup: off # whether to start monitoring: on, off
collector: prometheus # metrics collector: prometheus
prometheus_config: # the following is the prometheus configuration
port: 8080 # the port prometheus uses to fetch metrics
push_gateway_ip_address: 127.0.0.1 # push method configuration: push gateway ip address
push_gateway_port: 9091 # push method configuration: push gateway port
cache_config: # cache configuration
cpu_cache_capacity: 16 # how much memory is used as cache, unit: GB, range: 0 ~ less than total memory
cpu_cache_free_percent: 0.85 # old data will be erased from the cache when it is full; this value specifies how much memory should be kept, range: greater than zero ~ 1.0
insert_cache_immediately: false # inserted data will be loaded into the cache immediately for hot queries
gpu_cache_capacity: 5 # how much memory is used as cache on the gpu, unit: GB, range: 0 ~ less than total memory
gpu_cache_free_percent: 0.85 # old data will be erased from the cache when it is full; this value specifies how much memory should be kept, range: greater than zero ~ 1.0
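# e.g. with a cache capacity of 16 GB and a free_percent of 0.85, roughly 16 * 0.85 = 13.6 GB of cached data is kept after eviction (illustrative arithmetic)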
gpu_ids: # gpu id
- 0
- 1
engine_config:
nprobe: 3000
use_blas_threshold: 20
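# note: nprobe is the number of index buckets probed per query for IVF-style indexes (higher values trade speed for accuracy);
# use_blas_threshold is assumed to mirror Faiss's distance_compute_blas_threshold, i.e. BLAS kernels kick in once the query batch size reaches this value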
resource_config:
# resource list, length: 0~N
# please set at least one DISK resource and one CPU resource, or the system will not return query results.
#
# example:
# resource_name: # resource name, used only in the connections below
# type: DISK # resource type, optional: DISK/CPU/GPU
# device_id: 0
# enable_executor: false # whether to enable the executor, optional: true, false
resources:
ssda:
type: DISK
device_id: 0
enable_executor: false
cpu:
type: CPU
device_id: 0
enable_executor: false
gpu0:
type: GPU
device_id: 0
enable_executor: true
gpu_resource_num: 2
pinned_memory: 300
temp_memory: 300
# connection list, length: 0~N
# example:
# connection_name:
# speed: 100 # unit: MS/s
# endpoint: ===
connections:
io:
speed: 500
endpoint: ssda===cpu
pcie0:
speed: 11000
endpoint: cpu===gpu0
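# each endpoint of the form a===b names two resources declared above that the connection joins,
# e.g. "io" links the DISK resource ssda to cpu at speed 500, and "pcie0" links cpu to gpu0 at 11000 (values from the example above)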