未验证 提交 a8f73051 编写于 作者: N nico 提交者: GitHub

Update pymilvus version and update some test cases (#25479)

Signed-off-by: nico <cheng.yuan@zilliz.com>
上级 f30b44d4
......@@ -196,6 +196,7 @@ pipeline {
def clusterEnabled = "false"
def mqMode='pulsar'
int e2e_timeout_seconds = 5 * 60 * 60
int parallel_num = 6
def tag="L0 L1 L2"
if ("${MILVUS_SERVER_TYPE}" == "distributed-pulsar") {
clusterEnabled = "true"
......@@ -209,6 +210,7 @@ pipeline {
e2e_timeout_seconds = 6 * 60 * 60
} else if("${MILVUS_SERVER_TYPE}" == "standalone-authentication") {
tag="RBAC"
parallel_num = 1
e2e_timeout_seconds = 1 * 60 * 60
}
if ("${MILVUS_CLIENT}" == "pymilvus") {
......@@ -218,7 +220,7 @@ pipeline {
MILVUS_CLUSTER_ENABLED="${clusterEnabled}" \
TEST_TIMEOUT="${e2e_timeout_seconds}" \
MQ_MODE="${mqMode}" \
./ci_e2e.sh "-n 6 --tags ${tag}"
./ci_e2e.sh "-n ${parallel_num} --tags ${tag}"
"""
} else {
error "Error: Unsupported Milvus client: ${MILVUS_CLIENT}"
......
......@@ -1100,7 +1100,7 @@ def install_milvus_operator_specific_config(namespace, milvus_mode, release_name
def get_wildcard_output_field_names(collection_w, output_fields):
    """Expand the "*" wildcard in ``output_fields`` to all schema field names.

    Mutates ``output_fields`` in place: if "*" is present, it is removed and
    the names of every field in ``collection_w``'s schema are appended.

    :param collection_w: collection wrapper exposing ``schema.fields``
    :param output_fields: list of requested output field names (modified in place)
    """
    # Removed a dead duplicate assignment (diff-extraction residue): the list
    # of field names is built once, directly.
    all_fields = [field.name for field in collection_w.schema.fields]
    if "*" in output_fields:
        output_fields.remove("*")
        output_fields.extend(all_fields)
......
......@@ -12,7 +12,7 @@ allure-pytest==2.7.0
pytest-print==0.2.1
pytest-level==0.1.1
pytest-xdist==2.5.0
pymilvus==2.4.0.dev100
pytest-rerunfailures==9.1.1
git+https://github.com/Projectplace/pytest-tags
ndg-httpsclient
......
......@@ -3802,9 +3802,9 @@ class TestCollectionString(TestcaseBase):
vec_field = cf.gen_float_vec_field()
string_field = cf.gen_string_field(is_primary=True, auto_id=True)
fields = [int_field, string_field, vec_field]
error = {ct.err_code: 0, ct.err_msg: "The auto_id can only be specified on field with DataType.INT64"}
self.collection_schema_wrap.init_collection_schema(fields=fields,
check_task=CheckTasks.err_res, check_items=error)
schema = self.collection_schema_wrap.init_collection_schema(fields=fields)[0]
self.init_collection_wrap(schema=schema, check_task=CheckTasks.check_collection_property,
check_items={"schema": schema, "primary": ct.default_string_field_name})
class TestCollectionJSON(TestcaseBase):
"""
......
......@@ -10,7 +10,7 @@ from utils.util_log import test_log as log
prefix = "db"
@pytest.mark.tags(CaseLabel.L3)
@pytest.mark.tags(CaseLabel.RBAC)
class TestDatabaseParams(TestcaseBase):
""" Test case of database """
......@@ -220,7 +220,7 @@ class TestDatabaseParams(TestcaseBase):
check_items={ct.err_code: 1, ct.err_msg: "database not exist"})
@pytest.mark.tags(CaseLabel.L3)
@pytest.mark.tags(CaseLabel.RBAC)
class TestDatabaseOperation(TestcaseBase):
def teardown_method(self, method):
......@@ -531,7 +531,7 @@ class TestDatabaseOperation(TestcaseBase):
assert collection_w.name in colls
@pytest.mark.tags(CaseLabel.L3)
@pytest.mark.tags(CaseLabel.RBAC)
class TestDatabaseOtherApi(TestcaseBase):
""" test other interface that has db_name params"""
......
......@@ -1939,7 +1939,6 @@ class TestAutoIndex(TestcaseBase):
expected: raise exception
"""
collection_w = self.init_collection_general(prefix, is_binary=True, is_index=False)[0]
collection_w.create_index(binary_field_name, {},
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "float vector is only supported"})
collection_w.create_index(binary_field_name, {})
actual_index_params = collection_w.index()[0].params
assert default_autoindex_params == actual_index_params
......@@ -417,6 +417,10 @@ class TestInsertOperation(TestcaseBase):
def auto_id(self, request):
yield request.param
@pytest.fixture(scope="function", params=[ct.default_int64_field_name, ct.default_string_field_name])
def pk_field(self, request):
    # Parametrized primary-key field name: each dependent test runs once with
    # the int64 PK field and once with the varchar PK field.
    yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connection(self):
"""
......@@ -655,34 +659,34 @@ class TestInsertOperation(TestcaseBase):
assert collection_w.indexes[0] == index
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true(self, pk_field):
    """
    target: test insert ids fields values when auto_id=True
    method: 1.create collection with auto_id=True 2.insert without ids
    expected: verify primary_keys and num_entities
    """
    # Reconstructed from diff residue: stripped diff markers had left both the
    # old (int64-only) and new (pk_field-parametrized) versions of three lines.
    c_name = cf.gen_unique_str(prefix)
    # pk_field is parametrized (int64 / varchar), so auto-id generation is
    # exercised for both supported primary-key types.
    schema = cf.gen_default_collection_schema(primary_field=pk_field, auto_id=True)
    collection_w = self.init_collection_wrap(name=c_name, schema=schema)
    df = cf.gen_default_dataframe_data()
    # With auto_id=True the server generates PKs, so the PK column must not be
    # supplied by the client.
    df.drop(pk_field, axis=1, inplace=True)
    mutation_res, _ = collection_w.insert(data=df)
    assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
    assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_twice_auto_id_true(self):
def test_insert_twice_auto_id_true(self, pk_field):
"""
target: test insert ids fields twice when auto_id=True
method: 1.create collection with auto_id=True 2.insert twice
expected: verify primary_keys unique
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
schema = cf.gen_default_collection_schema(primary_field=pk_field, auto_id=True)
nb = 10
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb)
df.drop(ct.default_int64_field_name, axis=1, inplace=True)
df.drop(pk_field, axis=1, inplace=True)
mutation_res, _ = collection_w.insert(data=df)
primary_keys = mutation_res.primary_keys
assert cf._check_primary_keys(primary_keys, nb)
......@@ -692,30 +696,34 @@ class TestInsertOperation(TestcaseBase):
assert collection_w.num_entities == nb * 2
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_list_data(self, pk_field):
    """
    target: test insert ids fields values when auto_id=True
    method: 1.create collection with auto_id=True 2.insert list data with ids field values
    expected: assert num entities
    """
    # Reconstructed from diff residue: stripped diff markers had left both the
    # old (int64-only) and new (pk_field-parametrized) versions of three lines.
    c_name = cf.gen_unique_str(prefix)
    schema = cf.gen_default_collection_schema(primary_field=pk_field, auto_id=True)
    collection_w = self.init_collection_wrap(name=c_name, schema=schema)
    data = cf.gen_default_list_data()
    # The generated PK column must be stripped before insert since the server
    # generates PKs when auto_id=True; its position in the list data depends on
    # which field is the PK.
    if pk_field == ct.default_int64_field_name:
        mutation_res, _ = collection_w.insert(data=data[1:])
    else:
        del data[2]
        mutation_res, _ = collection_w.insert(data=data)
    assert mutation_res.insert_count == ct.default_nb
    assert cf._check_primary_keys(mutation_res.primary_keys, ct.default_nb)
    assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_auto_id_true_with_dataframe_values(self):
def test_insert_auto_id_true_with_dataframe_values(self, pk_field):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
schema = cf.gen_default_collection_schema(primary_field=pk_field, auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
df = cf.gen_default_dataframe_data(nb=100)
error = {ct.err_code: 1, ct.err_msg: "Please don't provide data for auto_id primary field: int64"}
......@@ -723,14 +731,14 @@ class TestInsertOperation(TestcaseBase):
assert collection_w.is_empty
@pytest.mark.tags(CaseLabel.L2)
def test_insert_auto_id_true_with_list_values(self):
def test_insert_auto_id_true_with_list_values(self, pk_field):
"""
target: test insert with auto_id=True
method: create collection with auto_id=True
expected: 1.verify num entities 2.verify ids
"""
c_name = cf.gen_unique_str(prefix)
schema = cf.gen_default_collection_schema(auto_id=True)
schema = cf.gen_default_collection_schema(primary_field=pk_field, auto_id=True)
collection_w = self.init_collection_wrap(name=c_name, schema=schema)
data = cf.gen_default_list_data(nb=100)
error = {ct.err_code: 1, ct.err_msg: "The fields don't match with schema fields, "
......@@ -1167,7 +1175,22 @@ class TestInsertInvalid(TestcaseBase):
error = {ct.err_code: 1, ct.err_msg: "Data type is not support."}
mutation_res, _ = collection_w.insert(data=df, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_invalid_with_pk_varchar_auto_id_true(self):
    """
    target: test insert invalid with pk varchar and auto id true
    method: set pk varchar max length < 18, insert data
    expected: raise exception
    """
    # Auto-generated varchar primary keys are 18 characters long, so a
    # max_length of 6 on the PK field must make the insert fail.
    pk_field = cf.gen_string_field(is_primary=True, max_length=6)
    vec_field = cf.gen_float_vec_field()
    schema = cf.gen_collection_schema([pk_field, vec_field], auto_id=True)
    collection_w = self.init_collection_wrap(schema=schema)
    vectors = []
    for _ in range(2):
        vectors.append([random.random() for _ in range(ct.default_dim)])
    expected_err = {ct.err_code: 1,
                    ct.err_msg: "the length (18) of 0th string exceeds max length (6)"}
    collection_w.insert(data=[vectors], check_task=CheckTasks.err_res,
                        check_items=expected_err)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_over_resource_limit(self):
"""
target: test insert over RPC limitation 64MB (67108864)
......
......@@ -5,8 +5,6 @@ import numpy
import threading
import pytest
import pandas as pd
import decimal
from decimal import Decimal, getcontext
from time import sleep
import heapq
......@@ -3311,11 +3309,7 @@ class TestCollectionSearch(TestcaseBase):
output_fields=[binary_field_name])[0]
# 4. check the result vectors should be equal to the inserted
log.info(res[0][0].id)
log.info(res[0][0].entity.float_vector)
log.info(data['binary_vector'][0])
assert res[0][0].entity.binary_vector == data[binary_field_name][res[0][0].id]
# log.info(data['float_vector'][1])
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("dim", [32, 128, 768])
......@@ -3429,9 +3423,8 @@ class TestCollectionSearch(TestcaseBase):
"output_fields": [field_name]})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("wildcard_output_fields", [["*"], ["*", default_float_field_name],
["*", default_search_field],
["%"], ["%", default_float_field_name], ["*", "%"]])
@pytest.mark.parametrize("wildcard_output_fields", [["*"], ["*", default_int64_field_name],
["*", default_search_field]])
def test_search_with_output_field_wildcard(self, wildcard_output_fields, auto_id, _async, enable_dynamic_field):
"""
target: test search with output fields using wildcard
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册