Unverified commit 3ef67c04 authored by ThreadDao, committed by GitHub

[skip ci] Update scale query node test (#12729)

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>
Parent a2f6e31d
-import pdb
-import random
+import threading
+import time
 import pytest
 from base.collection_wrapper import ApiCollectionWrapper
 from common.common_type import CaseLabel
-from scale.helm_env import HelmEnv
+from customize.milvus_operator import MilvusOperator
 from common import common_func as cf
 from common import common_type as ct
 from scale import constants
 from pymilvus import Index, connections
+from utils.util_log import test_log as log
+from utils.util_k8s import wait_pods_ready

 prefix = "search_scale"
 nb = 5000
@@ -22,98 +24,86 @@ default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params":
 class TestQueryNodeScale:
     @pytest.mark.tags(CaseLabel.L3)
-    def test_expand_query_node(self):
+    def test_scale_query_node(self):
         release_name = "scale-query"
-        env = HelmEnv(release_name=release_name)
-        host = env.helm_install_cluster_milvus()
+        query_config = {
+            'metadata.namespace': constants.NAMESPACE,
+            'metadata.name': release_name,
+            'spec.components.image': 'harbor.zilliz.cc/milvus/milvus:master-20211202-ed546d0',
+            'spec.components.proxy.serviceType': 'LoadBalancer',
+            'spec.components.queryNode.replicas': 1,
+            'spec.config.dataCoord.enableCompaction': True,
+            'spec.config.dataCoord.enableGarbageCollection': True
+        }
+        mic = MilvusOperator()
+        mic.install(query_config)
+        healthy = mic.wait_for_healthy(release_name, constants.NAMESPACE, timeout=1200)
+        log.info(f"milvus healthy: {healthy}")
+        host = mic.endpoint(release_name, constants.NAMESPACE).split(':')[0]
+        # host = "10.98.0.8"

         # connect
         connections.add_connection(default={"host": host, "port": 19530})
         connections.connect(alias='default')

         # create
-        c_name = "query_scale_one"
+        c_name = cf.gen_unique_str("scale_query")
+        # c_name = 'scale_query_DymS7kI4'
         collection_w = ApiCollectionWrapper()
-        collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema())
-        # insert
-        data = cf.gen_default_list_data(ct.default_nb)
-        mutation_res, _ = collection_w.insert(data)
-        assert mutation_res.insert_count == ct.default_nb
-        # # create index
-        # collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
-        # assert collection_w.has_index()[0]
-        # assert collection_w.index()[0] == Index(collection_w.collection, ct.default_float_vec_field_name,
-        #                                         default_index_params)
+        collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema(), shards_num=2)
+
+        # insert two segments
+        for i in range(3):
+            df = cf.gen_default_dataframe_data(nb)
+            collection_w.insert(df)
+            log.debug(collection_w.num_entities)
+
+        # create index
+        collection_w.create_index(ct.default_float_vec_field_name, default_index_params)
+        assert collection_w.has_index()[0]
+        assert collection_w.index()[0] == Index(collection_w.collection, ct.default_float_vec_field_name,
+                                                default_index_params)
+
+        # load
         collection_w.load()
-        # vectors = [[random.random() for _ in range(ct.default_dim)] for _ in range(5)]
-        res1, _ = collection_w.search(data[-1][:5], ct.default_float_vec_field_name,
-                                      ct.default_search_params, ct.default_limit)

-        # scale queryNode pod
-        env.helm_upgrade_cluster_milvus(queryNode=2)
+        # scale queryNode to 5
+        mic.upgrade(release_name, {'spec.components.queryNode.replicas': 5}, constants.NAMESPACE)

-        c_name_2 = "query_scale_two"
-        collection_w2 = ApiCollectionWrapper()
-        collection_w2.init_collection(name=c_name_2, schema=cf.gen_default_collection_schema())
-        collection_w2.insert(data)
-        assert collection_w2.num_entities == ct.default_nb
-        collection_w2.load()
-        res2, _ = collection_w2.search(data[-1][:5], ct.default_float_vec_field_name,
-                                      ct.default_search_params, ct.default_limit)
+        # continuously search
+        def do_search():
+            while True:
+                search_res, _ = collection_w.search(cf.gen_vectors(1, ct.default_dim),
+                                                    ct.default_float_vec_field_name,
+                                                    ct.default_search_params, ct.default_limit)
+                log.debug(search_res[0].ids)
+                assert len(search_res[0].ids) == ct.default_limit

-        assert res1[0].ids == res2[0].ids
+        t_search = threading.Thread(target=do_search, args=(), daemon=True)
+        t_search.start()

-    @pytest.mark.tags(CaseLabel.L3)
-    def test_shrink_query_node(self):
-        """
-        target: test shrink queryNode from 2 to 1
-        method: 1.deploy two queryNode
-                2.search two collections in two queryNode
-                3.upgrade queryNode from 2 to 1
-                4.search second collection
-        expected: search result is correct
-        """
-        # deploy
-        release_name = "scale-query"
-        env = HelmEnv(release_name=release_name, queryNode=2)
-        host = env.helm_install_cluster_milvus(image_pull_policy=constants.IF_NOT_PRESENT)
+        # wait new QN running, continuously insert
+        # time.sleep(10)
+        healthy = mic.wait_for_healthy(release_name, constants.NAMESPACE, timeout=1200)
+        log.info(f"milvus healthy after scale up: {healthy}")
+        # wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")

-        # connect
-        connections.add_connection(default={"host": host, "port": 19530})
-        connections.connect(alias='default')
-
-        # collection one
-        data = cf.gen_default_list_data(nb)
-        c_name = "query_scale_one"
-        collection_w = ApiCollectionWrapper()
-        collection_w.init_collection(name=c_name, schema=cf.gen_default_collection_schema())
-        collection_w.insert(data)
-        assert collection_w.num_entities == nb
-        collection_w.load()
-        res1, _ = collection_w.search(data[-1][:nq], ct.default_float_vec_field_name,
-                                      ct.default_search_params, ct.default_limit)
-        assert res1[0].ids[0] == data[0][0]
+        def do_insert():
+            while True:
+                tmp_df = cf.gen_default_dataframe_data(1000)
+                collection_w.insert(tmp_df)

-        # collection two
-        c_name_2 = "query_scale_two"
-        collection_w2 = ApiCollectionWrapper()
-        collection_w2.init_collection(name=c_name_2, schema=cf.gen_default_collection_schema())
-        collection_w2.insert(data)
-        assert collection_w2.num_entities == nb
-        collection_w2.load()
-        res2, _ = collection_w2.search(data[-1][:nq], ct.default_float_vec_field_name,
-                                      ct.default_search_params, ct.default_limit)
-        assert res2[0].ids[0] == data[0][0]
+        t_insert = threading.Thread(target=do_insert, args=(), daemon=True)
+        t_insert.start()

-        # scale queryNode pod
-        env.helm_upgrade_cluster_milvus(queryNode=1)
+        log.debug(collection_w.num_entities)
+        time.sleep(20)
+        log.debug("Expand querynode test finished")

-        # search
-        res1, _ = collection_w.search(data[-1][:nq], ct.default_float_vec_field_name,
-                                      ct.default_search_params, ct.default_limit)
-        assert res1[0].ids[0] == data[0][0]
-        res2, _ = collection_w2.search(data[-1][:nq], ct.default_float_vec_field_name,
-                                      ct.default_search_params, ct.default_limit)
-        assert res2[0].ids[0] == data[0][0]
-        # env.helm_uninstall_cluster_milvus()
\ No newline at end of file
+        mic.upgrade(release_name, {'spec.components.queryNode.replicas': 3}, constants.NAMESPACE)
+        time.sleep(60)
+        wait_pods_ready(constants.NAMESPACE, f"app.kubernetes.io/instance={release_name}")
+
+        log.debug(collection_w.num_entities)
+        time.sleep(60)
+        log.debug("Shrink querynode test finished")
\ No newline at end of file
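The new test addresses operator config fields with dotted keys such as 'spec.components.queryNode.replicas'. As a rough illustration of what a call like mic.upgrade(...) has to produce, here is a minimal sketch, assuming (this is an assumption, not the actual MilvusOperator code) that the helper expands dotted keys into the nested body of a Kubernetes merge patch against the Milvus custom resource; dotted_to_nested is a hypothetical name introduced here for illustration.

# Minimal sketch (assumption, not the actual MilvusOperator implementation):
# expand dotted config keys such as 'spec.components.queryNode.replicas'
# into the nested dict a merge patch on the Milvus custom resource expects.
def dotted_to_nested(config):
    patch = {}
    for dotted_key, value in config.items():
        node = patch
        *parents, leaf = dotted_key.split('.')
        for key in parents:
            # descend, creating intermediate objects as needed
            node = node.setdefault(key, {})
        node[leaf] = value
    return patch

# The scale-up delta used in the test becomes a one-field patch body:
print(dotted_to_nested({'spec.components.queryNode.replicas': 5}))
# -> {'spec': {'components': {'queryNode': {'replicas': 5}}}}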
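One design note on the new test: both do_search and do_insert loop forever and are reclaimed only because daemon=True lets them die with the pytest process; an assertion failure inside either loop is also raised in the thread rather than failing the test. A minimal sketch of a stoppable alternative, assuming nothing beyond the Python standard library (run_until_stopped is a hypothetical helper, not part of the test suite):

import threading
import time

def run_until_stopped(stop_event, body, interval=0.5):
    # Invoke body() repeatedly until the event is set.
    while not stop_event.is_set():
        body()
        time.sleep(interval)

stop = threading.Event()
t = threading.Thread(target=run_until_stopped,
                     args=(stop, lambda: print("searching...")),
                     daemon=True)
t.start()
time.sleep(2)   # stand-in for: scale the cluster, wait for pods ready
stop.set()      # shut the loop down deterministically
t.join(timeout=30)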