Commit 0b3f7e15 authored by G groot

Merge remote-tracking branch 'source/0.6.0' into ongoing

......@@ -34,6 +34,7 @@ Please mark all change in change log and use the ticket from JIRA.
- \#533 - NSG build failed with MetricType Inner Product
- \#543 - Client raises an exception in shards when the search result is empty
- \#545 - Avoid infinite loop in the build index thread when an error occurs
- \#547 - NSG build failed on GPU edition if gpu_enable is set to false
- \#552 - Server down while building index_type IVF_PQ on GPU edition
- \#561 - Milvus server should report exception/error message or terminate on mysql metadata backend error
- \#596 - Frequent insert operations cost too much disk space
......@@ -73,6 +74,7 @@ Please mark all change in change log and use the ticket from JIRA.
- \#449 - Add ShowPartitions example for C++ SDK
- \#470 - Small raw files should not be built into an index
- \#584 - Integrate internal FAISS
- \#611 - Remove MILVUS_CPU_VERSION
## Task
......
......@@ -146,7 +146,6 @@ if (CUSTOMIZATION)
add_compile_definitions(CUSTOMIZATION)
endif ()
set(MILVUS_CPU_VERSION false)
if (MILVUS_GPU_VERSION)
message(STATUS "Building Milvus GPU version")
add_compile_definitions("MILVUS_GPU_VERSION")
......@@ -155,8 +154,6 @@ if (MILVUS_GPU_VERSION)
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -Xcompiler -fPIC -std=c++11 -D_FORCE_INLINES --expt-extended-lambda")
else ()
message(STATUS "Building Milvus CPU version")
set(MILVUS_CPU_VERSION true)
add_compile_definitions("MILVUS_CPU_VERSION")
endif ()
if (MILVUS_WITH_PROMETHEUS)
......
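After this CMake change, only MILVUS_GPU_VERSION is defined by the build system; the separate MILVUS_CPU_VERSION macro is removed and CPU behaviour becomes the implicit fallback. A minimal sketch of the resulting guard convention used throughout the hunks below (illustrative only, not part of the diff):

#ifdef MILVUS_GPU_VERSION
    // GPU-specific path, compiled only for the GPU edition
#else
    // CPU fallback path, compiled when the GPU edition is not enabled
#endif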
......@@ -1044,10 +1044,10 @@ DBImpl::BuildTableIndexRecursively(const std::string& table_id, const TableIndex
if (!failed_files.empty()) {
std::string msg = "Failed to build index for " + std::to_string(failed_files.size()) +
((failed_files.size() == 1) ? " file" : " files");
#ifdef MILVUS_CPU_VERSION
msg += ", please double check index parameters.";
#else
#ifdef MILVUS_GPU_VERSION
msg += ", file size is too large or gpu memory is not enough.";
#else
msg += ", please double check index parameters.";
#endif
return Status(DB_ERROR, msg);
}
......
......@@ -93,18 +93,18 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
break;
}
case EngineType::FAISS_IVFFLAT: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_CPU);
#else
#ifdef MILVUS_GPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_MIX);
#else
index = GetVecIndexFactory(IndexType::FAISS_IVFFLAT_CPU);
#endif
break;
}
case EngineType::FAISS_IVFSQ8: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_CPU);
#else
#ifdef MILVUS_GPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_MIX);
#else
index = GetVecIndexFactory(IndexType::FAISS_IVFSQ8_CPU);
#endif
break;
}
......@@ -119,10 +119,10 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
}
#endif
case EngineType::FAISS_PQ: {
#ifdef MILVUS_CPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_CPU);
#else
#ifdef MILVUS_GPU_VERSION
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_MIX);
#else
index = GetVecIndexFactory(IndexType::FAISS_IVFPQ_CPU);
#endif
break;
}
......@@ -618,6 +618,9 @@ ExecutionEngineImpl::Init() {
server::Config& config = server::Config::GetInstance();
std::vector<int64_t> gpu_ids;
Status s = config.GetGpuResourceConfigBuildIndexResources(gpu_ids);
if (!s.ok()) {
gpu_num_ = knowhere::INVALID_VALUE;
}
for (auto id : gpu_ids) {
if (gpu_num_ == id) {
return Status::OK();
......
We manually changed the following APIs in "milvus.pb.h":
add_vector_data()
add_row_id_array()
add_ids()
add_distances()
If the proto files need to be generated again, remember to re-apply the changes to the above APIs.
\ No newline at end of file
......@@ -116,17 +116,28 @@ NSG::Train(const DatasetPtr& dataset, const Config& config) {
}
// TODO(linxj): dev IndexFactory, support more IndexType
Graph knng;
#ifdef MILVUS_GPU_VERSION
auto preprocess_index = std::make_shared<GPUIVF>(build_cfg->gpu_id);
if (build_cfg->gpu_id == knowhere::INVALID_VALUE) {
auto preprocess_index = std::make_shared<IVF>();
auto model = preprocess_index->Train(dataset, config);
preprocess_index->set_index_model(model);
preprocess_index->AddWithoutIds(dataset, config);
preprocess_index->GenGraph(build_cfg->knng, knng, dataset, config);
} else {
auto preprocess_index = std::make_shared<GPUIVF>(build_cfg->gpu_id);
auto model = preprocess_index->Train(dataset, config);
preprocess_index->set_index_model(model);
preprocess_index->AddWithoutIds(dataset, config);
preprocess_index->GenGraph(build_cfg->knng, knng, dataset, config);
}
#else
auto preprocess_index = std::make_shared<IVF>();
#endif
auto model = preprocess_index->Train(dataset, config);
preprocess_index->set_index_model(model);
preprocess_index->AddWithoutIds(dataset, config);
Graph knng;
preprocess_index->GenGraph(build_cfg->knng, knng, dataset, config);
#endif
algo::BuildParams b_params;
b_params.candidate_pool_size = build_cfg->candidate_pool_size;
......
......@@ -58,10 +58,10 @@ print_banner() {
<< "OpenBLAS"
#endif
<< " library." << std::endl;
#ifdef MILVUS_CPU_VERSION
std::cout << "You are using Milvus CPU edition" << std::endl;
#else
#ifdef MILVUS_GPU_VERSION
std::cout << "You are using Milvus GPU edition" << std::endl;
#else
std::cout << "You are using Milvus CPU edition" << std::endl;
#endif
std::cout << std::endl;
}
......
......@@ -25,6 +25,7 @@
#include "optimizer/BuildIndexPass.h"
#include "optimizer/FaissFlatPass.h"
#include "optimizer/FaissIVFFlatPass.h"
#include "optimizer/FaissIVFPQPass.h"
#include "optimizer/FaissIVFSQ8HPass.h"
#include "optimizer/FaissIVFSQ8Pass.h"
#include "optimizer/FallbackPass.h"
......@@ -129,7 +130,10 @@ class OptimizerInst {
pass_list.push_back(std::make_shared<FaissFlatPass>());
pass_list.push_back(std::make_shared<FaissIVFFlatPass>());
pass_list.push_back(std::make_shared<FaissIVFSQ8Pass>());
#ifdef CUSTOMIZATION
pass_list.push_back(std::make_shared<FaissIVFSQ8HPass>());
#endif
pass_list.push_back(std::make_shared<FaissIVFPQPass>());
}
#endif
pass_list.push_back(std::make_shared<FallbackPass>());
......
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#ifdef MILVUS_GPU_VERSION
#include "scheduler/optimizer/FaissIVFPQPass.h"
#include "cache/GpuCacheMgr.h"
#include "scheduler/SchedInst.h"
#include "scheduler/Utils.h"
#include "scheduler/task/SearchTask.h"
#include "scheduler/tasklabel/SpecResLabel.h"
#include "server/Config.h"
#include "utils/Log.h"
namespace milvus {
namespace scheduler {
void
FaissIVFPQPass::Init() {
#ifdef MILVUS_GPU_VERSION
server::Config& config = server::Config::GetInstance();
Status s = config.GetEngineConfigGpuSearchThreshold(threshold_);
if (!s.ok()) {
threshold_ = std::numeric_limits<int32_t>::max();
}
s = config.GetGpuResourceConfigSearchResources(gpus);
if (!s.ok()) {
throw;
}
#endif
}
bool
FaissIVFPQPass::Run(const TaskPtr& task) {
if (task->Type() != TaskType::SearchTask) {
return false;
}
auto search_task = std::static_pointer_cast<XSearchTask>(task);
if (search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_PQ) {
return false;
}
auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
ResourcePtr res_ptr;
if (search_job->nq() < threshold_) {
SERVER_LOG_DEBUG << "FaissIVFPQPass: nq < gpu_search_threshold, specify cpu to search!";
res_ptr = ResMgrInst::GetInstance()->GetResource("cpu");
} else {
auto best_device_id = count_ % gpus.size();
SERVER_LOG_DEBUG << "FaissIVFPQPass: nq > gpu_search_threshold, specify gpu" << best_device_id << " to search!";
count_++;
res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, gpus[best_device_id]);
}
auto label = std::make_shared<SpecResLabel>(res_ptr);
task->label() = label;
return true;
}
} // namespace scheduler
} // namespace milvus
#endif
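The pass above routes IVF_PQ search tasks to the CPU when nq is below gpu_search_threshold, and otherwise round-robins across the configured GPUs. A minimal, self-contained sketch of that round-robin selection (hypothetical helper name, for illustration only; assumes the GPU list is non-empty):

#include <cstdint>
#include <vector>

// Pick the next GPU in the configured list and advance a shared counter,
// mirroring the count_ % gpus.size() logic in FaissIVFPQPass::Run above.
int64_t NextGpuDevice(int64_t& counter, const std::vector<int64_t>& gpus) {
    const auto device = gpus[counter % static_cast<int64_t>(gpus.size())];
    ++counter;
    return device;
}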
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#ifdef MILVUS_GPU_VERSION
#pragma once
#include <condition_variable>
#include <deque>
#include <limits>
#include <list>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <unordered_map>
#include <vector>
#include "Pass.h"
namespace milvus {
namespace scheduler {
class FaissIVFPQPass : public Pass {
public:
FaissIVFPQPass() = default;
public:
void
Init() override;
bool
Run(const TaskPtr& task) override;
private:
int64_t threshold_ = std::numeric_limits<int64_t>::max();
int64_t count_ = 0;
std::vector<int64_t> gpus;
};
using FaissIVFPQPassPtr = std::shared_ptr<FaissIVFPQPass>;
} // namespace scheduler
} // namespace milvus
#endif
......@@ -183,10 +183,10 @@ Server::Start() {
// print version information
SERVER_LOG_INFO << "Milvus " << BUILD_TYPE << " version: v" << MILVUS_VERSION << ", built at " << BUILD_TIME;
#ifdef MILVUS_CPU_VERSION
SERVER_LOG_INFO << "CPU edition";
#else
#ifdef MILVUS_GPU_VERSION
SERVER_LOG_INFO << "GPU edition";
#else
SERVER_LOG_INFO << "CPU edition";
#endif
server::Metrics::GetInstance().Init();
server::SystemInfo::GetInstance().Init();
......
......@@ -39,8 +39,6 @@ void
ConfAdapter::MatchBase(knowhere::Config conf) {
if (conf->metric_type == knowhere::DEFAULT_TYPE)
conf->metric_type = knowhere::METRICTYPE::L2;
if (conf->gpu_id == knowhere::INVALID_VALUE)
conf->gpu_id = 0;
}
knowhere::Config
......
......@@ -68,17 +68,16 @@ static const char* CONFIG_STR =
"engine_config:\n"
" use_blas_threshold: 20\n"
"\n"
"resource_config:\n"
#ifdef MILVUS_CPU_VERSION
" search_resources:\n"
" - cpu\n"
" index_build_device: cpu # CPU used for building index";
#else
" search_resources:\n"
" - cpu\n"
#ifdef MILVUS_GPU_VERSION
"gpu_resource_config:\n"
" enable: true # whether to enable GPU resources\n"
" cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer\n"
" search_resources: # define the GPU devices used for search computation, must be in format gpux\n"
" - gpu0\n"
" build_index_resources: # define the GPU devices used for index building, must be in format gpux\n"
" - gpu0\n"
" index_build_device: gpu0 # GPU used for building index";
#endif
"\n";
void
WriteToFile(const std::string& file_path, const char* content) {
......
......@@ -54,24 +54,21 @@ static const char* VALID_CONFIG_STR =
"cache_config:\n"
" cpu_cache_capacity: 16 # GB, CPU memory used for cache\n"
" cpu_cache_threshold: 0.85 \n"
" gpu_cache_capacity: 4 # GB, GPU memory used for cache\n"
" gpu_cache_threshold: 0.85 \n"
" cache_insert_data: false # whether to load inserted data into cache\n"
"\n"
"engine_config:\n"
" use_blas_threshold: 20 \n"
"\n"
"resource_config:\n"
#ifdef MILVUS_CPU_VERSION
" search_resources:\n"
" - cpu\n"
" index_build_device: cpu # CPU used for building index";
#else
" search_resources:\n"
" - cpu\n"
#ifdef MILVUS_GPU_VERSION
"gpu_resource_config:\n"
" enable: true # whether to enable GPU resources\n"
" cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer\n"
" search_resources: # define the GPU devices used for search computation, must be in format gpux\n"
" - gpu0\n"
" build_index_resources: # define the GPU devices used for index building, must be in format gpux\n"
" - gpu0\n"
" index_build_device: gpu0 # GPU used for building index";
#endif
"\n";
static const char* INVALID_CONFIG_STR = "*INVALID*";
......
......@@ -56,17 +56,16 @@ static const char* CONFIG_STR =
"engine_config:\n"
" blas_threshold: 20\n"
"\n"
"resource_config:\n"
#ifdef MILVUS_CPU_VERSION
" search_resources:\n"
" - cpu\n"
" index_build_device: cpu # CPU used for building index";
#else
" search_resources:\n"
" - cpu\n"
#ifdef MILVUS_GPU_VERSION
"gpu_resource_config:\n"
" enable: true # whether to enable GPU resources\n"
" cache_capacity: 4 # GB, size of GPU memory per card used for cache, must be a positive integer\n"
" search_resources: # define the GPU devices used for search computation, must be in format gpux\n"
" - gpu0\n"
" build_index_resources: # define the GPU devices used for index building, must be in format gpux\n"
" - gpu0\n"
" index_build_device: gpu0 # GPU used for building index";
#endif
"\n";
void
WriteToFile(const std::string& file_path, const char* content) {
......
......@@ -497,6 +497,7 @@ class TestIndexBase:
status, ids = connect.add_vectors(table, vectors)
for i in range(2):
status = connect.create_index(table, index_params)
assert status.OK()
status, result = connect.describe_index(table)
logging.getLogger().info(result)
......@@ -569,7 +570,10 @@ class TestIndexIP:
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, ip_table, get_index_params):
......@@ -584,7 +588,10 @@ class TestIndexIP:
status = connect.create_partition(ip_table, partition_name, tag)
status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
status = connect.create_index(partition_name, index_params)
assert status.OK()
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
@pytest.mark.level(2)
def test_create_index_without_connect(self, dis_connect, ip_table):
......@@ -609,14 +616,17 @@ class TestIndexIP:
logging.getLogger().info(index_params)
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
logging.getLogger().info(connect.describe_index(ip_table))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
logging.getLogger().info(connect.describe_index(ip_table))
query_vecs = [vectors[0], vectors[1], vectors[2]]
top_k = 5
status, result = connect.search_vectors(ip_table, top_k, nprobe, query_vecs)
logging.getLogger().info(result)
assert status.OK()
assert len(result) == len(query_vecs)
# TODO: enable
@pytest.mark.timeout(BUILD_TIMEOUT)
......@@ -943,16 +953,19 @@ class TestIndexIP:
index_params = get_index_params
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
def test_drop_index_partition(self, connect, ip_table, get_simple_index_params):
'''
......@@ -965,16 +978,19 @@ class TestIndexIP:
status = connect.create_partition(ip_table, partition_name, tag)
status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
status = connect.create_index(ip_table, index_params)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
def test_drop_index_partition_A(self, connect, ip_table, get_simple_index_params):
'''
......@@ -987,19 +1003,22 @@ class TestIndexIP:
status = connect.create_partition(ip_table, partition_name, tag)
status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
status = connect.create_index(partition_name, index_params)
assert status.OK()
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
status, result = connect.describe_index(partition_name)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == partition_name
assert result._index_type == IndexType.FLAT
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
status, result = connect.describe_index(partition_name)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == partition_name
assert result._index_type == IndexType.FLAT
def test_drop_index_partition_B(self, connect, ip_table, get_simple_index_params):
'''
......@@ -1012,19 +1031,22 @@ class TestIndexIP:
status = connect.create_partition(ip_table, partition_name, tag)
status, ids = connect.add_vectors(ip_table, vectors, partition_tag=tag)
status = connect.create_index(partition_name, index_params)
assert status.OK()
status = connect.drop_index(partition_name)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
status, result = connect.describe_index(partition_name)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == partition_name
assert result._index_type == IndexType.FLAT
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
status = connect.drop_index(partition_name)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
status, result = connect.describe_index(partition_name)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == partition_name
assert result._index_type == IndexType.FLAT
def test_drop_index_partition_C(self, connect, ip_table, get_simple_index_params):
'''
......@@ -1040,24 +1062,27 @@ class TestIndexIP:
status = connect.create_partition(ip_table, new_partition_name, new_tag)
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
status = connect.drop_index(new_partition_name)
assert status.OK()
status, result = connect.describe_index(new_partition_name)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == new_partition_name
assert result._index_type == IndexType.FLAT
status, result = connect.describe_index(partition_name)
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == partition_name
assert result._index_type == index_params["index_type"]
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == ip_table
assert result._index_type == index_params["index_type"]
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
status = connect.drop_index(new_partition_name)
assert status.OK()
status, result = connect.describe_index(new_partition_name)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == new_partition_name
assert result._index_type == IndexType.FLAT
status, result = connect.describe_index(partition_name)
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == partition_name
assert result._index_type == index_params["index_type"]
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == index_params["nlist"]
assert result._table_name == ip_table
assert result._index_type == index_params["index_type"]
def test_drop_index_repeatly(self, connect, ip_table, get_simple_index_params):
'''
......@@ -1068,18 +1093,21 @@ class TestIndexIP:
index_params = get_simple_index_params
status, ids = connect.add_vectors(ip_table, vectors)
status = connect.create_index(ip_table, index_params)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
@pytest.mark.level(2)
def test_drop_index_without_connect(self, dis_connect, ip_table):
......@@ -1120,16 +1148,19 @@ class TestIndexIP:
status, ids = connect.add_vectors(ip_table, vectors)
for i in range(2):
status = connect.create_index(ip_table, index_params)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
if index_params["index_type"] == IndexType.IVF_PQ:
assert not status.OK()
else:
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
status = connect.drop_index(ip_table)
assert status.OK()
status, result = connect.describe_index(ip_table)
logging.getLogger().info(result)
assert result._nlist == 16384
assert result._table_name == ip_table
assert result._index_type == IndexType.FLAT
def test_create_drop_index_repeatly_different_index_params(self, connect, ip_table):
'''
......
......@@ -437,7 +437,7 @@ def gen_invalid_index_params():
def gen_index_params():
index_params = []
index_types = [IndexType.FLAT, IndexType.IVFLAT, IndexType.IVF_SQ8, IndexType.IVF_SQ8H]
index_types = [IndexType.FLAT, IndexType.IVFLAT, IndexType.IVF_SQ8, IndexType.IVF_SQ8H, IndexType.IVF_PQ]
nlists = [1, 16384, 50000]
def gen_params(index_types, nlists):
......@@ -450,7 +450,7 @@ def gen_index_params():
def gen_simple_index_params():
index_params = []
index_types = [IndexType.FLAT, IndexType.IVFLAT, IndexType.IVF_SQ8, IndexType.IVF_SQ8H]
index_types = [IndexType.FLAT, IndexType.IVFLAT, IndexType.IVF_SQ8, IndexType.IVF_SQ8H, IndexType.IVF_PQ]
nlists = [1024]
def gen_params(index_types, nlists):
......