Commit fce032b3 authored by J jinhai

Merge branch 'update_unittest' into 'branch-0.5.0'

MS-648 Update unittest

See merge request megasearch/milvus!702

Former-commit-id: 8778310c238e45ea61f0266d6e983430ce75b572
@@ -36,6 +36,7 @@ Please mark all change in change log and use the ticket from JIRA.
- MS-619 - Add optimizer class in scheduler
- MS-614 - Preload table at startup
- MS-626 - Refactor DataObj to support cache any type data
- MS-648 - Improve unittest
## New Feature
- MS-627 - Integrate new index: IVFSQHybrid
......
@@ -71,4 +71,13 @@ GPUIVFSQ::CopyGpuToCpu(const Config& config) {
return std::make_shared<IVFSQ>(new_index);
}
void
GPUIVFSQ::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) {
#ifdef CUSTOMIZATION
GPUIVF::search_impl(n, data, k, distances, labels, cfg);
#else
IVF::search_impl(n, data, k, distances, labels, cfg);
#endif
}
} // namespace knowhere
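
For readers skimming the hunk above: when the build defines `CUSTOMIZATION`, `GPUIVFSQ::search_impl` stays on the GPU path (`GPUIVF::search_impl`); otherwise it falls back to the CPU `IVF::search_impl`. A minimal, self-contained sketch of that compile-time dispatch pattern, using simplified stand-in classes rather than the actual knowhere types:

```cpp
#include <cstdio>

// Simplified stand-ins for the knowhere hierarchy; the names mirror the diff
// but the bodies are illustrative only.
struct IVF {
    virtual ~IVF() = default;
    virtual void search_impl() { std::printf("CPU IVF search path\n"); }
};

struct GPUIVF : IVF {
    void search_impl() override { std::printf("GPU IVF search path\n"); }
};

struct GPUIVFSQ : GPUIVF {
    void search_impl() override {
#ifdef CUSTOMIZATION
        GPUIVF::search_impl();  // customized build keeps the GPU path
#else
        IVF::search_impl();     // stock build falls back to the CPU path
#endif
    }
};

int main() {
    GPUIVFSQ index;
    index.search_impl();  // branch chosen at compile time via -DCUSTOMIZATION
    return 0;
}
```

Compiling once with `-DCUSTOMIZATION` and once without shows the two paths; the test parameterization below is gated the same way.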
@@ -38,6 +38,10 @@ class GPUIVFSQ : public GPUIVF {
VectorIndexPtr
CopyGpuToCpu(const Config& config) override;
protected:
void
search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) override;
};
} // namespace knowhere
@@ -154,8 +154,8 @@ class IVFTest : public DataGen, public TestWithParam<::std::tuple<std::string, P
INSTANTIATE_TEST_CASE_P(IVFParameters, IVFTest,
Values(std::make_tuple("IVF", ParameterType::ivf),
std::make_tuple("GPUIVF", ParameterType::ivf),
// std::make_tuple("IVFPQ", ParameterType::ivfpq),
// std::make_tuple("GPUIVFPQ", ParameterType::ivfpq),
std::make_tuple("IVFPQ", ParameterType::ivfpq),
std::make_tuple("GPUIVFPQ", ParameterType::ivfpq),
std::make_tuple("IVFSQ", ParameterType::ivfsq),
#ifdef CUSTOMIZATION
std::make_tuple("IVFSQHybrid", ParameterType::ivfsq),
@@ -240,25 +240,26 @@ TEST_P(IVFTest, hybrid) {
auto result = hybrid_1_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
PrintResult(result, nq, k);
hybrid_1_idx->UnsetQuantizer();
}
{
auto hybrid_2_idx = std::make_shared<knowhere::IVFSQHybrid>(device_id);
auto binaryset = index_->Serialize();
hybrid_2_idx->Load(binaryset);
auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
quantizer_conf->mode = 1;
quantizer_conf->gpu_id = device_id;
auto q = hybrid_2_idx->LoadQuantizer(quantizer_conf);
quantizer_conf->mode = 2;
hybrid_2_idx->LoadData(q, quantizer_conf);
auto result = hybrid_2_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
PrintResult(result, nq, k);
}
// {
// auto hybrid_2_idx = std::make_shared<knowhere::IVFSQHybrid>(device_id);
//
// auto binaryset = index_->Serialize();
// hybrid_2_idx->Load(binaryset);
//
// auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
// quantizer_conf->mode = 1;
// quantizer_conf->gpu_id = device_id;
// auto q = hybrid_2_idx->LoadQuantizer(quantizer_conf);
// quantizer_conf->mode = 2;
// hybrid_2_idx->LoadData(q, quantizer_conf);
//
// auto result = hybrid_2_idx->Search(query_dataset, conf);
// AssertAnns(result, nq, conf->k);
// PrintResult(result, nq, k);
// }
}
// TEST_P(IVFTest, gpu_to_cpu) {
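
As a side note, the IVFTest cases in this file are driven by the `INSTANTIATE_TEST_CASE_P(...)` list earlier in the diff, where each tuple pairs an index type name with a parameter kind. A generic, self-contained GoogleTest sketch of that pattern (fixture and names are hypothetical, not the real IVFTest):

```cpp
#include <gtest/gtest.h>
#include <string>
#include <tuple>

// Hypothetical stand-in for the IVFTest fixture: each test instance receives
// an (index type, parameter tag) tuple, mirroring the Values(...) list above.
class IndexParamTest : public ::testing::TestWithParam<std::tuple<std::string, int>> {
 protected:
    void SetUp() override {
        index_type_ = std::get<0>(GetParam());
        param_tag_ = std::get<1>(GetParam());
    }
    std::string index_type_;
    int param_tag_ = 0;
};

TEST_P(IndexParamTest, build_and_search) {
    // A real test would build the index named by index_type_ and run a search;
    // here we only check that the parameters were delivered to the fixture.
    EXPECT_FALSE(index_type_.empty());
}

INSTANTIATE_TEST_CASE_P(IndexParameters, IndexParamTest,
                        ::testing::Values(std::make_tuple("IVF", 0),
                                          std::make_tuple("GPUIVF", 0),
                                          std::make_tuple("IVFSQ", 1)));

int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
```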
@@ -438,6 +439,7 @@ TEST_P(IVFTest, clone_test) {
}
}
#ifdef CUSTOMIZATION
TEST_P(IVFTest, seal_test) {
// FaissGpuResourceMgr::GetInstance().InitDevice(device_id);
@@ -472,6 +474,7 @@ TEST_P(IVFTest, seal_test) {
auto with_seal = tc.RecordSection("With seal");
ASSERT_GE(without_seal, with_seal);
}
#endif
class GPURESTEST : public DataGen, public ::testing::Test {
protected:
@@ -637,7 +640,7 @@ TEST_F(GPURESTEST, copyandsearch) {
// search and copy at the same time
printf("==================\n");
index_type = "GPUIVFSQ";
index_type = "GPUIVF";
index_ = IndexFactory(index_type);
auto conf = std::make_shared<knowhere::IVFSQCfg>();
@@ -699,7 +702,7 @@ TEST_F(GPURESTEST, copyandsearch) {
}
TEST_F(GPURESTEST, TrainAndSearch) {
index_type = "GPUIVFSQ";
index_type = "GPUIVF";
index_ = IndexFactory(index_type);
auto conf = std::make_shared<knowhere::IVFSQCfg>();
......
@@ -36,6 +36,7 @@ class KDTTest : public DataGen, public ::testing::Test {
protected:
void
SetUp() override {
Generate(96, 1000, 10);
index_ = std::make_shared<knowhere::CPUKDTRNG>();
auto tempconf = std::make_shared<knowhere::KDTCfg>();
......
@@ -38,17 +38,17 @@ class NSGInterfaceTest : public DataGen, public ::testing::Test {
SetUp() override {
// Init_with_default();
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICE_ID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
Generate(256, 1000000, 1);
Generate(256, 1000000 / 100, 1);
index_ = std::make_shared<knowhere::NSG>();
auto tmp_conf = std::make_shared<knowhere::NSGCfg>();
tmp_conf->gpu_id = DEVICE_ID;
tmp_conf->knng = 100;
tmp_conf->nprobe = 32;
tmp_conf->nlist = 16384;
tmp_conf->search_length = 60;
tmp_conf->out_degree = 70;
tmp_conf->candidate_pool_size = 500;
tmp_conf->knng = 20;
tmp_conf->nprobe = 8;
tmp_conf->nlist = 163;
tmp_conf->search_length = 40;
tmp_conf->out_degree = 30;
tmp_conf->candidate_pool_size = 100;
tmp_conf->metric_type = knowhere::METRICTYPE::L2;
train_conf = tmp_conf;
......
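
The NSG hunk above scales the generated dataset down by 100x and shrinks the graph-build parameters, presumably to keep the unit test fast while still exercising the code path. A generic sketch of a fixture that builds a small deterministic dataset in `SetUp()` (illustrative names, not the actual DataGen helper):

```cpp
#include <gtest/gtest.h>
#include <random>
#include <vector>

// Illustrative fixture: generate a small random dataset in SetUp() so the
// test covers the code path without paying full-scale build cost.
class SmallDatasetTest : public ::testing::Test {
 protected:
    void SetUp() override {
        dim_ = 256;
        nb_ = 1000000 / 100;  // scaled down, as in the NSG test above
        base_.resize(static_cast<size_t>(dim_) * nb_);
        std::mt19937 gen(42);  // fixed seed keeps the test deterministic
        std::uniform_real_distribution<float> dist(0.0f, 1.0f);
        for (auto& v : base_) {
            v = dist(gen);
        }
    }
    int dim_ = 0;
    int nb_ = 0;
    std::vector<float> base_;
};

TEST_F(SmallDatasetTest, generates_expected_size) {
    EXPECT_EQ(base_.size(), static_cast<size_t>(dim_) * nb_);
}

int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
```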
@@ -297,6 +297,7 @@ TEST_F(DBTest, SEARCH_TEST) {
ASSERT_TRUE(stat.ok());
}
#ifdef CUSTOMIZATION
//test FAISS_IVFSQ8H optimizer
index.engine_type_ = (int)milvus::engine::EngineType::FAISS_IVFSQ8H;
db_->CreateIndex(TABLE_NAME, index); // wait until build index finish
@@ -314,9 +315,7 @@ TEST_F(DBTest, SEARCH_TEST) {
stat = db_->Query(TABLE_NAME, file_ids, k, nq, 10, xq.data(), dates, results);
ASSERT_TRUE(stat.ok());
}
// TODO(lxj): add groundTruth assert
#endif
}
TEST_F(DBTest, PRELOADTABLE_TEST) {
......
@@ -184,7 +184,7 @@ class ResourceAdvanceTest : public testing::Test {
};
TEST_F(ResourceAdvanceTest, DISK_RESOURCE_TEST) {
const uint64_t NUM = 10;
const uint64_t NUM = max_once_load;
std::vector<std::shared_ptr<TestTask>> tasks;
TableFileSchemaPtr dummy = nullptr;
for (uint64_t i = 0; i < NUM; ++i) {
......
@@ -188,7 +188,7 @@ INSTANTIATE_TEST_CASE_P(WrapperParam, KnowhereWrapperTest,
10,
10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_CPU, "Default", DIM, NB, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB, 10, 10),
// std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_MIX, "Default", DIM, NB, 10, 10),
// std::make_tuple(IndexType::NSG_MIX, "Default", 128, 250000, 10, 10),
// std::make_tuple(IndexType::SPTAG_KDT_RNT_CPU, "Default", 128, 250000, 10, 10),
......