Commit b5127c44 authored by peng.xu

Merge branch 'branch-0.5.0' into 'branch-0.5.0'

MS-574 update config unittest

See merge request megasearch/milvus!624

Former-commit-id: 68f323600415e1d05f00bee94c00709a409364b3
* GLOBAL:
FORMAT = "%datetime | %level | %logger | %msg"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-global.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-global.log"
ENABLED = true
TO_FILE = true
TO_STANDARD_OUTPUT = false
SUBSECOND_PRECISION = 3
PERFORMANCE_TRACKING = false
MAX_LOG_FILE_SIZE = 2097152 ## Throw log files away after 2MB
MAX_LOG_FILE_SIZE = 209715200 ## Throw log files away after 200MB
* DEBUG:
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-debug.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-debug.log"
ENABLED = true
* WARNING:
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-warning.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-warning.log"
* TRACE:
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-trace.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-trace.log"
* VERBOSE:
FORMAT = "%datetime{%d/%M/%y} | %level-%vlevel | %msg"
TO_FILE = false
@@ -21,7 +21,7 @@
## Error logs
* ERROR:
ENABLED = true
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-error.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-error.log"
* FATAL:
ENABLED = true
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%H:%m}-fatal.log"
FILENAME = "/tmp/milvus/logs/milvus-%datetime{%y-%M-%d-%H:%m}-fatal.log"
# All the following configurations are default values.
server_config:
address: 0.0.0.0 # milvus server ip address (IPv4)
port: 19530 # the port milvus listens on, default: 19530, range: 1025 ~ 65534
mode: single # milvus deployment type: single, cluster, read_only
time_zone: UTC+8 # Use UTC-x or UTC+x to specify a time zone, e.g. UTC+8 for China Standard Time
address: 0.0.0.0 # milvus server ip address (IPv4)
port: 19530 # port range: 1025 ~ 65534
deploy_mode: single # deployment type: single, cluster_readonly, cluster_writable
time_zone: UTC+8
db_config:
db_path: /tmp/milvus # milvus data storage path
db_slave_path: # secondary data storage path, separated by semicolons
primary_path: /tmp/milvus # path used to store data and meta
secondary_path: # path used to store data only, separated by semicolons
# URI format: dialect://username:password@host:port/database
# All parts except dialect are optional, but you MUST include the delimiters
# Currently dialect supports mysql or sqlite
db_backend_url: sqlite://:@:/
backend_url: sqlite://:@:/ # URI format: dialect://username:password@host:port/database
# Keep 'dialect://:@:/', and replace the other fields with real values.
# Replace 'dialect' with 'mysql' or 'sqlite'
archive_disk_threshold: 0 # trigger archive action if storage size exceeds this value, 0 means no limit, unit: GB
archive_days_threshold: 0 # files older than x days will be archived, 0 means no limit, unit: day
insert_buffer_size: 4 # maximum insert buffer size allowed, default: 4, unit: GB, should be at least 1 GB.
# the sum of insert_buffer_size and cpu_cache_capacity should be less than total memory, unit: GB
build_index_gpu: 0 # which gpu is used to build index, default: 0, range: 0 ~ gpu number - 1
insert_buffer_size: 4 # GB, maximum insert buffer size allowed
build_index_gpu: 0 # gpu id used for building index
metric_config:
is_startup: off # whether to start monitoring: on, off
collector: prometheus # metrics collector: prometheus
prometheus_config: # prometheus-related configuration
port: 8080 # the port prometheus uses to fetch metrics
push_gateway_ip_address: 127.0.0.1 # push method configure: push gateway ip address
push_gateway_port: 9091 # push method configure: push gateway port
enable_monitor: false # enable monitoring or not
collector: prometheus # prometheus
prometheus_config:
port: 8080 # port used by prometheus to fetch metrics
cache_config:
cpu_cache_capacity: 16 # how much memory is used as cache, unit: GB, range: 0 ~ less than total memory
cpu_cache_free_percent: 0.85 # old data will be erased from cache when cache is full; this value specifies how much memory should be kept, range: greater than zero ~ 1.0
insert_cache_immediately: false # inserted data will be loaded into cache immediately for hot queries
gpu_cache_capacity: 5 # how much GPU memory is used as cache, unit: GB, range: 0 ~ less than total memory
gpu_cache_free_percent: 0.85 # old data will be erased from cache when cache is full; this value specifies how much memory should be kept, range: greater than zero ~ 1.0
cpu_mem_capacity: 16 # GB, CPU memory used for cache
cpu_mem_threshold: 0.85 # percentage of data kept when cache cleanup triggered
cache_insert_data: false # whether to load inserted data into cache
engine_config:
use_blas_threshold: 20
blas_threshold: 20
resource_config:
mode: simple
resources:
# - cpu
resource_pool:
- cpu
- gpu0
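For reference, the renamed keys above are what the updated unit test below exercises. Purely as an illustrative sketch built from the accessor names visible in that test (GetChild, GetFloatValue, GetBoolValue; how root_config is obtained is outside this excerpt and assumed), reading a couple of the new cache_config keys might look roughly like:

// Illustrative only: accessor names come from the test below, key names from the new
// config layout above; this is not necessarily the exact Milvus 0.5.0 API surface.
server::ConfigNode cache_node = root_config.GetChild("cache_config");
float cpu_mem = cache_node.GetFloatValue("cpu_mem_capacity");      // renamed from cpu_cache_capacity
bool cache_insert = cache_node.GetBoolValue("cache_insert_data");  // renamed from insert_cache_immediately
ASSERT_GT(cpu_mem, 0.0);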
@@ -99,60 +99,15 @@ TEST(ConfigTest, CONFIG_TEST) {
server_config.ClearSequences();
auto seqs = server_config.GetSequences();
ASSERT_TRUE(seqs.empty());
const server::ConfigNode const_node = root_config.GetChild("cache_config");
float flt = const_node.GetFloatValue("cpu_cache_capacity");
ASSERT_GT(flt, 0.0);
}
TEST(ConfigTest, SERVER_CONFIG_TEST) {
auto status = server::Config::GetInstance().LoadConfigFile(CONFIG_FILE_PATH);
ASSERT_TRUE(status.ok());
server::Config& config = server::Config::GetInstance();
Status s = config.LoadConfigFile(CONFIG_FILE_PATH);
ASSERT_TRUE(s.ok());
// status = server::Config::GetInstance().ValidateConfig();
// ASSERT_TRUE(status.ok());
//
// const server::ServerConfig& config_const = config;
// server::ConfigNode node1 = config_const.GetConfig("server_config");
// server::ConfigNode& node2 = config.GetConfig("cache_config");
// node1.Combine(node2);
//
// int32_t cap = node1.GetInt32Value("cpu_cache_capacity");
// ASSERT_GT(cap, 0);
//
// node1.SetValue("bool", "true");
// bool bt = node1.GetBoolValue("bool");
// ASSERT_TRUE(bt);
s = config.ValidateConfig();
ASSERT_TRUE(s.ok());
// server::Config::GetInstance().PrintAll();
//
// unsigned long total_mem = 0, free_mem = 0;
// server::CommonUtil::GetSystemMemInfo(total_mem, free_mem);
//
// size_t gpu_mem = 0;
// server::ValidationUtil::GetGpuMemory(0, gpu_mem);
//
// server::ConfigNode& server_config = config.GetConfig("server_config");
// server::ConfigNode& db_config = config.GetConfig("db_config");
// server::ConfigNode& cache_config = config.GetConfig(server::CONFIG_CACHE);
// cache_config.SetValue(server::CACHE_FREE_PERCENT, "2.0");
// status = config.ValidateConfig();
// ASSERT_FALSE(status.ok());
//
// size_t cache_cap = 16;
// size_t insert_buffer_size = (total_mem - cache_cap*GB + 1*GB)/GB;
// db_config.SetValue(server::CONFIG_DB_INSERT_BUFFER_SIZE, std::to_string(insert_buffer_size));
// cache_config.SetValue(server::CONFIG_CPU_CACHE_CAPACITY, std::to_string(cache_cap));
// status = config.ValidateConfig();
// ASSERT_FALSE(status.ok());
//
// cache_cap = total_mem/GB + 2;
// cache_config.SetValue(server::CONFIG_CPU_CACHE_CAPACITY, std::to_string(cache_cap));
// status = config.ValidateConfig();
// ASSERT_FALSE(status.ok());
//
// insert_buffer_size = total_mem/GB + 2;
// db_config.SetValue(server::CONFIG_DB_INSERT_BUFFER_SIZE, std::to_string(insert_buffer_size));
// status = config.ValidateConfig();
// ASSERT_FALSE(status.ok());
config.PrintAll();
}
\ No newline at end of file
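The cases above use GoogleTest's TEST and ASSERT_* macros; the runner is normally supplied elsewhere in the Milvus test suite (or by linking gtest_main), so the following is only a reminder sketch of what drives them:

#include <gtest/gtest.h>

int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);  // handles flags such as --gtest_filter=ConfigTest.*
    return RUN_ALL_TESTS();                  // runs CONFIG_TEST and SERVER_CONFIG_TEST, among others
}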