Unverified commit 4ccf5bac, authored by Jin Hai, committed by GitHub

Merge pull request #538 from XuPeng-SH/track_0.6.0

(shards): fix mysql backend broken issue
......@@ -29,3 +29,6 @@ cmake_build
.coverage
*.pyc
cov_html/
# temp
shards/all_in_one_with_mysql/metadata/
......@@ -11,3 +11,4 @@ __pycache__
*.md
*.yml
*.yaml
*/metadata/
......@@ -13,6 +13,12 @@ clean_deploy:
	cd all_in_one && docker-compose -f all_in_one.yml down && cd -
probe_deploy:
	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py"
deploy_m: clean_deploy_m
	cd all_in_one_with_mysql && docker-compose -f all_in_one.yml up -d && cd -
clean_deploy_m:
	cd all_in_one_with_mysql && docker-compose -f all_in_one.yml down && cd -
probe_deploy_m:
	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one_with_mysql/probe_test.py"
cluster:
	cd kubernetes_demo;./start.sh baseup;sleep 10;./start.sh appup;cd -
clean_cluster:
......@@ -26,7 +32,7 @@ probe:
	docker run --rm --name probe --net=host milvusdb/mishards /bin/bash -c "python all_in_one/probe_test.py --port=${PORT} --host=${HOST}"
clean_coverage:
	rm -rf cov_html
clean: clean_coverage clean_deploy clean_cluster
clean: clean_coverage clean_deploy clean_cluster clean_deploy_m
style:
	pycodestyle --config=.
coverage:
......
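A quick way to exercise the new MySQL-backed targets (a usage sketch; it assumes docker-compose and the NVIDIA container runtime are available): `make deploy_m` brings up the stack defined below, `make probe_deploy_m` checks that mishards answers, and `make clean_deploy_m` tears the deployment down again. The extended `clean` target now also runs `clean_deploy_m`.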
version: "2.3"
services:
milvus-mysql:
restart: always
image: mysql:5.7
volumes:
- ./mysqld.cnf:/etc/mysql/mysql.conf.d/mysqld.cnf
- ./metadata:/var/lib/mysql
environment:
MYSQL_ROOT_PASSWORD: 'milvusroot'
MYSQL_DATABASE: 'milvus'
healthcheck:
test: ["CMD", "sleep", "5"]
interval: 1s
timeout: 10s
retries: 2
milvus_wr:
runtime: nvidia
restart: always
image: milvusdb/milvus
volumes:
- /tmp/milvus/db:/opt/milvus/db
- ./wr_server.yml:/opt/milvus/conf/server_config.yaml
depends_on:
milvus-mysql:
condition: service_healthy
milvus_ro:
runtime: nvidia
restart: always
image: milvusdb/milvus
volumes:
- /tmp/milvus/db:/opt/milvus/db
- ./ro_server.yml:/opt/milvus/conf/server_config.yaml
depends_on:
- milvus-mysql
- milvus_wr
jaeger:
restart: always
image: jaegertracing/all-in-one:1.14
ports:
- "0.0.0.0:5775:5775/udp"
- "0.0.0.0:16686:16686"
- "0.0.0.0:9441:9441"
environment:
COLLECTOR_ZIPKIN_HTTP_PORT: 9411
mishards:
restart: always
image: milvusdb/mishards
ports:
- "0.0.0.0:19531:19531"
- "0.0.0.0:19532:19532"
volumes:
- /tmp/milvus/db:/tmp/milvus/db
# - /tmp/mishards_env:/source/mishards/.env
command: ["python", "mishards/main.py"]
environment:
FROM_EXAMPLE: 'true'
SQLALCHEMY_DATABASE_URI: mysql+pymysql://root:milvusroot@milvus-mysql:3306/milvus?charset=utf8mb4
DEBUG: 'true'
SERVER_PORT: 19531
WOSERVER: tcp://milvus_wr:19530
DISCOVERY_PLUGIN_PATH: static
DISCOVERY_STATIC_HOSTS: milvus_wr,milvus_ro
TRACER_CLASS_NAME: jaeger
TRACING_SERVICE_NAME: mishards-demo
TRACING_REPORTING_HOST: jaeger
TRACING_REPORTING_PORT: 5775
depends_on:
- milvus_wr
- milvus_ro
- milvus-mysql
- jaeger
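For debugging the metadata store, a minimal connectivity check can reuse the URI that mishards receives through SQLALCHEMY_DATABASE_URI. This is a sketch, not part of the PR; it assumes SQLAlchemy and PyMySQL are installed and that MySQL is reachable from wherever it runs (inside the compose network the host is milvus-mysql; from the host machine the port would first have to be published, since this compose file does not expose it).

# Sketch only: verify the MySQL metadata backend used by mishards is reachable.
from sqlalchemy import create_engine, text

uri = 'mysql+pymysql://root:milvusroot@milvus-mysql:3306/milvus?charset=utf8mb4'
engine = create_engine(uri, pool_pre_ping=True)
with engine.connect() as conn:
    print(conn.execute(text('SELECT VERSION()')).scalar())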
[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
log-error = /var/log/mysql/error.log
bind-address = 0.0.0.0
symbolic-links=0
character-set-server = utf8mb4
collation-server = utf8mb4_unicode_ci
init_connect='SET NAMES utf8mb4'
skip-character-set-client-handshake = true
max_connections = 1000
wait_timeout = 31536000
table_open_cache = 128
external-locking = FALSE
binlog_cache_size = 1M
max_heap_table_size = 8M
tmp_table_size = 16M
read_rnd_buffer_size = 8M
sort_buffer_size = 8M
join_buffer_size = 8M
thread_cache_size = 32
query_cache_size = 64M
innodb_buffer_pool_size = 64M
innodb_flush_log_at_trx_commit = 0
innodb_log_buffer_size = 2M
max_allowed_packet=64M
explicit_defaults_for_timestamp=true
\ No newline at end of file
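The utf8mb4 character-set and collation settings above match the ?charset=utf8mb4 suffix on the mishards SQLALCHEMY_DATABASE_URI, and skip-character-set-client-handshake makes the server keep that encoding even for clients that negotiate a different one.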
from milvus import Milvus

# ANSI color codes for readable pass/fail output
RED = '\033[0;31m'
GREEN = '\033[0;32m'
ENDC = '\033[0m'  # reset color so it does not bleed into later output


def test(host='127.0.0.1', port=19531):
    # Probe the mishards entry point: a successful connect means the proxy is up.
    client = Milvus()
    try:
        status = client.connect(host=host, port=port)
        if status.OK():
            print('{}Pass: Connected{}'.format(GREEN, ENDC))
            return 0
        else:
            print('{}Error: {}{}'.format(RED, status, ENDC))
            return 1
    except Exception as exc:
        print('{}Error: {}{}'.format(RED, exc, ENDC))
        return 1


if __name__ == '__main__':
    import fire
    fire.Fire(test)
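Invocation sketch: python-fire exposes the parameters of test() as CLI flags, so `python probe_test.py --host=127.0.0.1 --port=19531` probes a locally exposed mishards instance; the probe_deploy_m target above runs the same script inside the milvusdb/mishards image.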
server_config:
  address: 0.0.0.0               # milvus server ip address (IPv4)
  port: 19530                    # port range: 1025 ~ 65534
  deploy_mode: cluster_readonly  # deployment type: single, cluster_readonly, cluster_writable
  time_zone: UTC+8
db_config:
  primary_path: /opt/milvus      # path used to store data and meta
  secondary_path:                # path used to store data only, split by semicolon
  backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus
                                 # URI format: dialect://username:password@host:port/database
                                 # Keep 'dialect://:@:/', and replace other texts with real values
                                 # Replace 'dialect' with 'mysql' or 'sqlite'
  insert_buffer_size: 1          # GB, maximum insert buffer size allowed
                                 # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory
  preload_table:                 # preload data at startup, '*' means load all tables, empty value means no preload
                                 # you can specify preload tables like this: table1,table2,table3
metric_config:
  enable_monitor: false          # enable monitoring or not
  collector: prometheus          # prometheus
  prometheus_config:
    port: 8080                   # port prometheus uses to fetch metrics
cache_config:
  cpu_cache_capacity: 4          # GB, CPU memory used for cache
  cpu_cache_threshold: 0.85      # percentage of data that will be kept when cache cleanup is triggered
  gpu_cache_capacity: 1          # GB, GPU memory used for cache
  gpu_cache_threshold: 0.85      # percentage of data that will be kept when cache cleanup is triggered
  cache_insert_data: false       # whether to load inserted data into cache
engine_config:
  use_blas_threshold: 800        # if nq < use_blas_threshold, use SSE, faster with fluctuated response times
                                 # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
resource_config:
  search_resources:              # define the GPUs used for search computation, valid value: gpux
    - gpu0
  index_build_device: gpu0       # GPU used for building index
server_config:
  address: 0.0.0.0               # milvus server ip address (IPv4)
  port: 19530                    # port range: 1025 ~ 65534
  deploy_mode: cluster_writable  # deployment type: single, cluster_readonly, cluster_writable
  time_zone: UTC+8
db_config:
  primary_path: /opt/milvus      # path used to store data and meta
  secondary_path:                # path used to store data only, split by semicolon
  backend_url: mysql://root:milvusroot@milvus-mysql:3306/milvus  # URI format: dialect://username:password@host:port/database
                                 # Keep 'dialect://:@:/', and replace other texts with real values
                                 # Replace 'dialect' with 'mysql' or 'sqlite'
  insert_buffer_size: 2          # GB, maximum insert buffer size allowed
                                 # sum of insert_buffer_size and cpu_cache_capacity cannot exceed total memory
  preload_table:                 # preload data at startup, '*' means load all tables, empty value means no preload
                                 # you can specify preload tables like this: table1,table2,table3
metric_config:
  enable_monitor: false          # enable monitoring or not
  collector: prometheus          # prometheus
  prometheus_config:
    port: 8080                   # port prometheus uses to fetch metrics
cache_config:
  cpu_cache_capacity: 2          # GB, CPU memory used for cache
  cpu_cache_threshold: 0.85      # percentage of data that will be kept when cache cleanup is triggered
  gpu_cache_capacity: 2          # GB, GPU memory used for cache
  gpu_cache_threshold: 0.85      # percentage of data that will be kept when cache cleanup is triggered
  cache_insert_data: false       # whether to load inserted data into cache
engine_config:
  use_blas_threshold: 800        # if nq < use_blas_threshold, use SSE, faster with fluctuated response times
                                 # if nq >= use_blas_threshold, use OpenBlas, slower with stable response times
resource_config:
  search_resources:              # define the GPUs used for search computation, valid value: gpux
    - gpu0
  index_build_device: gpu0       # GPU used for building index
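The read-only (ro_server.yml) and writable (wr_server.yml) configs differ only in deploy_mode (cluster_readonly vs cluster_writable) and in their buffer and cache sizes; both point backend_url at the same milvus-mysql service, so the two Milvus servers share one MySQL metadata store, which is the setup this PR's MySQL backend fix targets.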
......@@ -28,7 +28,12 @@ class DB:
        if url.get_backend_name() == 'sqlite':
            self.engine = create_engine(url)
        else:
            self.engine = create_engine(uri, pool_size, pool_recycle, pool_timeout, pool_pre_ping, echo, max_overflow)
            self.engine = create_engine(uri, pool_size=pool_size,
                                        pool_recycle=pool_recycle,
                                        pool_timeout=pool_timeout,
                                        pool_pre_ping=pool_pre_ping,
                                        echo=echo,
                                        max_overflow=max_overflow)

        self.uri = uri
        self.url = url
......
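The old call handed the pool options to create_engine positionally, which create_engine only takes as keyword arguments, so any non-SQLite backend_url (such as the MySQL URI used in this deployment) broke; the fix names every keyword. A small sketch of the branching above, assuming only that SQLAlchemy is installed (not part of the PR):

# make_url exposes the backend name the code above branches on; only non-SQLite
# backends receive the connection-pool keyword arguments.
from sqlalchemy.engine.url import make_url

print(make_url('mysql+pymysql://root:milvusroot@milvus-mysql:3306/milvus').get_backend_name())  # mysql
print(make_url('sqlite:///meta.sqlite').get_backend_name())                                     # sqlite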
......@@ -49,7 +49,7 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
        status = status_pb2.Status(error_code=status_pb2.SUCCESS,
                                   reason="Success")
        if not files_n_topk_results:
            return status, []
            return status, [], []

        merge_id_results = []
        merge_dis_results = []
......@@ -58,8 +58,8 @@ class ServiceHandler(milvus_pb2_grpc.MilvusServiceServicer):
        for files_collection in files_n_topk_results:
            if isinstance(files_collection, tuple):
                status, _ = files_collection
                return status, []
                return status, [], []

            row_num = files_collection.row_num
            ids = files_collection.ids
            diss = files_collection.distances  # distance collections
......
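Both empty-result paths now return the three-tuple shape (status, ids, distances) that a successful merge builds from merge_id_results and merge_dis_results, so callers that unpack three values no longer hit a ValueError when a shard responds with an error status or with no results.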