Commit 35c0819f authored by Yu Kun

Merge remote-tracking branch 'upstream/0.5.0' into branch-0.5.0-yk


Former-commit-id: eb09c4746c0602dc45e91def17ad8fbe36d33c5b
@@ -189,6 +189,8 @@ IVFSQHybrid::LoadData(const knowhere::QuantizerPtr& q, const Config& conf) {
if (quantizer_conf->mode != 2) {
KNOWHERE_THROW_MSG("mode only support 2 in this func");
}
+} else {
+KNOWHERE_THROW_MSG("conf error");
}
// if (quantizer_conf->gpu_id != gpu_id_) {
// KNOWHERE_THROW_MSG("quantizer and data must on the same gpu card");
......
@@ -63,7 +63,7 @@ FaissGpuResourceMgr::InitResource() {
mutex_cache_.emplace(device_id, std::make_unique<std::mutex>());
-// std::cout << "Device Id: " << device_id << std::endl;
+// std::cout << "Device Id: " << DEVICEID << std::endl;
auto& device_param = device.second;
auto& bq = idle_map_[device_id];
@@ -119,7 +119,7 @@ void
FaissGpuResourceMgr::Dump() {
for (auto& item : idle_map_) {
auto& bq = item.second;
-std::cout << "device_id: " << item.first << ", resource count:" << bq.Size();
+std::cout << "DEVICEID: " << item.first << ", resource count:" << bq.Size();
}
}
......
@@ -73,9 +73,17 @@ target_link_libraries(test_kdt
SPTAGLibStatic
${depend_libs} ${unittest_libs} ${basic_libs})
+add_executable(test_gpuresource test_gpuresource.cpp ${util_srcs} ${ivf_srcs})
+target_link_libraries(test_gpuresource ${depend_libs} ${unittest_libs} ${basic_libs})
+add_executable(test_customized_index test_customized_index.cpp ${util_srcs} ${ivf_srcs})
+target_link_libraries(test_customized_index ${depend_libs} ${unittest_libs} ${basic_libs})
install(TARGETS test_ivf DESTINATION unittest)
install(TARGETS test_idmap DESTINATION unittest)
install(TARGETS test_kdt DESTINATION unittest)
+install(TARGETS test_gpuresource DESTINATION unittest)
+install(TARGETS test_customized_index DESTINATION unittest)
#add_subdirectory(faiss_ori)
add_subdirectory(test_nsg)
......
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#pragma once
#include <memory>
#include <string>
#include "knowhere/index/vector_index/IndexGPUIVF.h"
#include "knowhere/index/vector_index/IndexGPUIVFPQ.h"
#include "knowhere/index/vector_index/IndexGPUIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVF.h"
#include "knowhere/index/vector_index/IndexIVFPQ.h"
#include "knowhere/index/vector_index/IndexIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"
constexpr int DEVICEID = 0;
constexpr int64_t DIM = 128;
constexpr int64_t NB = 10000;
constexpr int64_t NQ = 10;
constexpr int64_t K = 10;
constexpr int64_t PINMEM = 1024 * 1024 * 200;
constexpr int64_t TEMPMEM = 1024 * 1024 * 300;
constexpr int64_t RESNUM = 2;
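// Factory helper mapping an index type name to a concrete Knowhere IVF index;
// the GPU-backed variants are created on DEVICEID.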
knowhere::IVFIndexPtr
IndexFactory(const std::string& type) {
if (type == "IVF") {
return std::make_shared<knowhere::IVF>();
} else if (type == "IVFPQ") {
return std::make_shared<knowhere::IVFPQ>();
} else if (type == "GPUIVF") {
return std::make_shared<knowhere::GPUIVF>(DEVICEID);
} else if (type == "GPUIVFPQ") {
return std::make_shared<knowhere::GPUIVFPQ>(DEVICEID);
} else if (type == "IVFSQ") {
return std::make_shared<knowhere::IVFSQ>();
} else if (type == "GPUIVFSQ") {
return std::make_shared<knowhere::GPUIVFSQ>(DEVICEID);
} else if (type == "IVFSQHybrid") {
return std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
} else {
// unknown index type: return an empty pointer instead of falling off the end of a non-void function
return nullptr;
}
}
enum class ParameterType {
ivf,
ivfpq,
ivfsq,
};
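// Singleton that hands out a default build/search configuration for each
// parameter family (IVF, IVFPQ, IVFSQ) used by these tests.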
class ParamGenerator {
public:
static ParamGenerator&
GetInstance() {
static ParamGenerator instance;
return instance;
}
knowhere::Config
Gen(const ParameterType& type) {
if (type == ParameterType::ivf) {
auto tempconf = std::make_shared<knowhere::IVFCfg>();
tempconf->d = DIM;
tempconf->gpu_id = DEVICEID;
tempconf->nlist = 100;
tempconf->nprobe = 4;
tempconf->k = K;
tempconf->metric_type = knowhere::METRICTYPE::L2;
return tempconf;
} else if (type == ParameterType::ivfpq) {
auto tempconf = std::make_shared<knowhere::IVFPQCfg>();
tempconf->d = DIM;
tempconf->gpu_id = DEVICEID;
tempconf->nlist = 100;
tempconf->nprobe = 4;
tempconf->k = K;
tempconf->m = 4;
tempconf->nbits = 8;
tempconf->metric_type = knowhere::METRICTYPE::L2;
return tempconf;
} else if (type == ParameterType::ivfsq) {
auto tempconf = std::make_shared<knowhere::IVFSQCfg>();
tempconf->d = DIM;
tempconf->gpu_id = DEVICEID;
tempconf->nlist = 100;
tempconf->nprobe = 4;
tempconf->k = K;
tempconf->nbits = 8;
tempconf->metric_type = knowhere::METRICTYPE::L2;
return tempconf;
}
// parameter types not listed above are not used by these tests
return nullptr;
}
};
#include <gtest/gtest.h>
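// Common fixture: acquires GPU resources for DEVICEID before each test and
// releases them afterwards, so individual tests only deal with index logic.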
class TestGpuIndexBase : public ::testing::Test {
protected:
void
SetUp() override {
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);
}
void
TearDown() override {
knowhere::FaissGpuResourceMgr::GetInstance().Free();
}
};
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <gtest/gtest.h>
#include "unittest/Helper.h"
#include "unittest/utils.h"
class SingleIndexTest : public DataGen, public TestGpuIndexBase {
protected:
void
SetUp() override {
TestGpuIndexBase::SetUp();
Generate(DIM, NB, NQ);
k = K;
}
void
TearDown() override {
TestGpuIndexBase::TearDown();
}
protected:
std::string index_type;
knowhere::IVFIndexPtr index_ = nullptr;
};
#ifdef CUSTOMIZATION
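// Exercises the three IVFSQHybrid load paths: a plain CPU-to-GPU copy, a
// data-only copy that reuses a quantizer already resident on the GPU
// (mode = 2), and attaching/detaching an existing quantizer via
// SetQuantizer/UnsetQuantizer.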
TEST_F(SingleIndexTest, IVFSQHybrid) {
assert(!xb.empty());
index_type = "IVFSQHybrid";
index_ = IndexFactory(index_type);
auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivfsq);
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
index_->set_preprocessor(preprocessor);
auto model = index_->Train(base_dataset, conf);
index_->set_index_model(model);
index_->Add(base_dataset, conf);
EXPECT_EQ(index_->Count(), nb);
EXPECT_EQ(index_->Dimension(), dim);
auto binaryset = index_->Serialize();
{
// copy cpu to gpu
auto cpu_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
cpu_idx->Load(binaryset);
{
for (int i = 0; i < 3; ++i) {
auto gpu_idx = cpu_idx->CopyCpuToGpu(DEVICEID, conf);
auto result = gpu_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
// PrintResult(result, nq, k);
}
}
}
{
// quantization already in gpu, only copy data
auto cpu_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
cpu_idx->Load(binaryset);
auto pair = cpu_idx->CopyCpuToGpuWithQuantizer(DEVICEID, conf);
auto gpu_idx = pair.first;
auto quantization = pair.second;
auto result = gpu_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
// PrintResult(result, nq, k);
auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
quantizer_conf->mode = 2; // only copy data
quantizer_conf->gpu_id = DEVICEID;
for (int i = 0; i < 2; ++i) {
auto hybrid_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
hybrid_idx->Load(binaryset);
auto new_idx = hybrid_idx->LoadData(quantization, quantizer_conf);
auto result = new_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
// PrintResult(result, nq, k);
}
}
{
// quantization already in gpu, only set quantization
auto cpu_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
cpu_idx->Load(binaryset);
auto pair = cpu_idx->CopyCpuToGpuWithQuantizer(DEVICEID, conf);
auto quantization = pair.second;
for (int i = 0; i < 2; ++i) {
auto hybrid_idx = std::make_shared<knowhere::IVFSQHybrid>(DEVICEID);
hybrid_idx->Load(binaryset);
hybrid_idx->SetQuantizer(quantization);
auto result = hybrid_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
// PrintResult(result, nq, k);
hybrid_idx->UnsetQuantizer();
}
}
}
#endif
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include <gtest/gtest.h>
#include <iostream>
#include <thread>
#include <faiss/AutoTune.h>
#include <faiss/gpu/GpuAutoTune.h>
#include <faiss/gpu/GpuIndexIVFFlat.h>
#include "knowhere/common/Exception.h"
#include "knowhere/common/Timer.h"
#include "knowhere/index/vector_index/IndexGPUIVF.h"
#include "knowhere/index/vector_index/IndexGPUIVFPQ.h"
#include "knowhere/index/vector_index/IndexGPUIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVF.h"
#include "knowhere/index/vector_index/IndexIVFPQ.h"
#include "knowhere/index/vector_index/IndexIVFSQ.h"
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"
#include "knowhere/index/vector_index/helpers/Cloner.h"
#include "unittest/Helper.h"
#include "unittest/utils.h"
class GPURESTEST : public DataGen, public TestGpuIndexBase {
protected:
void
SetUp() override {
TestGpuIndexBase::SetUp();
Generate(DIM, NB, NQ);
k = K;
elems = nq * k;
ids = (int64_t*)malloc(sizeof(int64_t) * elems);
dis = (float*)malloc(sizeof(float) * elems);
}
void
TearDown() override {
free(ids); // allocated with malloc in SetUp(), so release with free
free(dis);
TestGpuIndexBase::TearDown();
}
protected:
std::string index_type;
knowhere::IVFIndexPtr index_ = nullptr;
int64_t* ids = nullptr;
float* dis = nullptr;
int64_t elems = 0;
};
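// Runs Search and CopyCpuToGpu concurrently on the same device and records
// timings, to check that the GPU resource manager tolerates interleaved use.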
TEST_F(GPURESTEST, copyandsearch) {
// search and copy at the same time
printf("==================\n");
index_type = "GPUIVF";
index_ = IndexFactory(index_type);
auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivf);
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
index_->set_preprocessor(preprocessor);
auto model = index_->Train(base_dataset, conf);
index_->set_index_model(model);
index_->Add(base_dataset, conf);
auto result = index_->Search(query_dataset, conf);
AssertAnns(result, nq, k);
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
cpu_idx->Seal();
auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
constexpr int64_t search_count = 50;
constexpr int64_t load_count = 15;
auto search_func = [&] {
// TimeRecorder tc("search&load");
for (int i = 0; i < search_count; ++i) {
search_idx->Search(query_dataset, conf);
// if (i > search_count - 6 || i == 0)
// tc.RecordSection("search once");
}
// tc.ElapseFromBegin("search finish");
};
auto load_func = [&] {
// TimeRecorder tc("search&load");
for (int i = 0; i < load_count; ++i) {
knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
// if (i > load_count -5 || i < 5)
// tc.RecordSection("Copy to gpu");
}
// tc.ElapseFromBegin("load finish");
};
knowhere::TimeRecorder tc("Basic");
knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
tc.RecordSection("Copy to gpu once");
search_idx->Search(query_dataset, conf);
tc.RecordSection("Search once");
search_func();
tc.RecordSection("Search total cost");
load_func();
tc.RecordSection("Copy total cost");
std::thread search_thread(search_func);
std::thread load_thread(load_func);
search_thread.join();
load_thread.join();
tc.RecordSection("Copy&Search total");
}
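// Mixes index building and searching on the same device from multiple
// threads: build and search in parallel, two concurrent builds, and two
// concurrent searches over cloned GPU indices.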
TEST_F(GPURESTEST, trainandsearch) {
index_type = "GPUIVF";
index_ = IndexFactory(index_type);
auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivf);
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
index_->set_preprocessor(preprocessor);
auto model = index_->Train(base_dataset, conf);
auto new_index = IndexFactory(index_type);
new_index->set_index_model(model);
new_index->Add(base_dataset, conf);
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(new_index, knowhere::Config());
cpu_idx->Seal();
auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
constexpr int train_count = 5;
constexpr int search_count = 200;
auto train_stage = [&] {
for (int i = 0; i < train_count; ++i) {
auto model = index_->Train(base_dataset, conf);
auto test_idx = IndexFactory(index_type);
test_idx->set_index_model(model);
test_idx->Add(base_dataset, conf);
}
};
auto search_stage = [&](knowhere::VectorIndexPtr& search_idx) {
for (int i = 0; i < search_count; ++i) {
auto result = search_idx->Search(query_dataset, conf);
AssertAnns(result, nq, k);
}
};
// TimeRecorder tc("record");
// train_stage();
// tc.RecordSection("train cost");
// search_stage(search_idx);
// tc.RecordSection("search cost");
{
// search and build parallel
std::thread search_thread(search_stage, std::ref(search_idx));
std::thread train_thread(train_stage);
train_thread.join();
search_thread.join();
}
{
// build parallel
std::thread train_1(train_stage);
std::thread train_2(train_stage);
train_1.join();
train_2.join();
}
{
// search parallel
auto search_idx_2 = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
std::thread search_1(search_stage, std::ref(search_idx));
std::thread search_2(search_stage, std::ref(search_idx_2));
search_1.join();
search_2.join();
}
}
#ifdef CompareToOriFaiss
TEST_F(GPURESTEST, gpu_ivf_resource_test) {
assert(!xb.empty());
constexpr int64_t search_count = 10; // assumed loop count; this test does not define search_count elsewhere
{
index_ = std::make_shared<knowhere::GPUIVF>(-1);
ASSERT_EQ(std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->GetGpuDevice(), -1);
std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->SetGpuDevice(DEVICEID);
ASSERT_EQ(std::dynamic_pointer_cast<knowhere::GPUIVF>(index_)->GetGpuDevice(), DEVICEID);
auto conf = ParamGenerator::GetInstance().Gen(ParameterType::ivfsq);
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
index_->set_preprocessor(preprocessor);
auto model = index_->Train(base_dataset, conf);
index_->set_index_model(model);
index_->Add(base_dataset, conf);
EXPECT_EQ(index_->Count(), nb);
EXPECT_EQ(index_->Dimension(), dim);
// knowhere::TimeRecorder tc("knowere GPUIVF");
for (int i = 0; i < search_count; ++i) {
index_->Search(query_dataset, conf);
// if (i > search_count - 6 || i < 5)
// tc.RecordSection("search once");
}
// tc.ElapseFromBegin("search all");
}
knowhere::FaissGpuResourceMgr::GetInstance().Dump();
// {
// // ori faiss IVF-Search
// faiss::gpu::StandardGpuResources res;
// faiss::gpu::GpuIndexIVFFlatConfig idx_config;
// idx_config.device = DEVICEID;
// faiss::gpu::GpuIndexIVFFlat device_index(&res, dim, 1638, faiss::METRIC_L2, idx_config);
// device_index.train(nb, xb.data());
// device_index.add(nb, xb.data());
//
// knowhere::TimeRecorder tc("ori IVF");
// for (int i = 0; i < search_count; ++i) {
// device_index.search(nq, xq.data(), k, dis, ids);
// if (i > search_count - 6 || i < 5)
// tc.RecordSection("search once");
// }
// tc.ElapseFromBegin("search all");
// }
}
TEST_F(GPURESTEST, gpuivfsq) {
constexpr int64_t search_count = 10; // assumed loop count; this test does not define search_count elsewhere
{
// knowhere gpu ivfsq
index_type = "GPUIVFSQ";
index_ = IndexFactory(index_type);
auto conf = std::make_shared<knowhere::IVFSQCfg>();
conf->nlist = 1638;
conf->d = dim;
conf->gpu_id = DEVICEID;
conf->metric_type = knowhere::METRICTYPE::L2;
conf->k = k;
conf->nbits = 8;
conf->nprobe = 1;
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
index_->set_preprocessor(preprocessor);
auto model = index_->Train(base_dataset, conf);
index_->set_index_model(model);
index_->Add(base_dataset, conf);
// auto result = index_->Search(query_dataset, conf);
// AssertAnns(result, nq, k);
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(index_, knowhere::Config());
cpu_idx->Seal();
knowhere::TimeRecorder tc("knowhere GPUSQ8");
auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, DEVICEID, knowhere::Config());
tc.RecordSection("Copy to gpu");
for (int i = 0; i < search_count; ++i) {
search_idx->Search(query_dataset, conf);
if (i > search_count - 6 || i < 5)
tc.RecordSection("search once");
}
tc.ElapseFromBegin("search all");
}
{
// Ori gpuivfsq Test
const char* index_description = "IVF1638,SQ8";
faiss::Index* ori_index = faiss::index_factory(dim, index_description, faiss::METRIC_L2);
faiss::gpu::StandardGpuResources res;
auto device_index = faiss::gpu::index_cpu_to_gpu(&res, DEVICEID, ori_index);
device_index->train(nb, xb.data());
device_index->add(nb, xb.data());
auto cpu_index = faiss::gpu::index_gpu_to_cpu(device_index);
auto idx = dynamic_cast<faiss::IndexIVF*>(cpu_index);
if (idx != nullptr) {
idx->to_readonly();
}
delete device_index;
delete ori_index;
faiss::gpu::GpuClonerOptions option;
option.allInGpu = true;
knowhere::TimeRecorder tc("ori GPUSQ8");
faiss::Index* search_idx = faiss::gpu::index_cpu_to_gpu(&res, DEVICEID, cpu_index, &option);
tc.RecordSection("Copy to gpu");
for (int i = 0; i < search_count; ++i) {
search_idx->search(nq, xq.data(), k, dis, ids);
if (i > search_count - 6 || i < 5)
tc.RecordSection("search once");
}
tc.ElapseFromBegin("search all");
delete cpu_index;
delete search_idx;
}
}
#endif
@@ -23,54 +23,28 @@
#include "knowhere/index/vector_index/IndexIDMAP.h"
#include "knowhere/index/vector_index/helpers/Cloner.h"
+#include "Helper.h"
#include "unittest/utils.h"
-static int device_id = 0;
-class IDMAPTest : public DataGen, public ::testing::Test {
+class IDMAPTest : public DataGen, public TestGpuIndexBase {
protected:
void
SetUp() override {
-knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(device_id, 1024 * 1024 * 200, 1024 * 1024 * 300, 2);
+TestGpuIndexBase::SetUp();
Init_with_default();
index_ = std::make_shared<knowhere::IDMAP>();
}
void
TearDown() override {
-knowhere::FaissGpuResourceMgr::GetInstance().Free();
+TestGpuIndexBase::TearDown();
}
protected:
knowhere::IDMAPPtr index_ = nullptr;
};
-void
-AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
-auto ids = result->array()[0];
-for (auto i = 0; i < nq; i++) {
-EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
-}
-}
-void
-PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
-auto ids = result->array()[0];
-auto dists = result->array()[1];
-std::stringstream ss_id;
-std::stringstream ss_dist;
-for (auto i = 0; i < 10; i++) {
-for (auto j = 0; j < k; ++j) {
-ss_id << *(ids->data()->GetValues<int64_t>(1, i * k + j)) << " ";
-ss_dist << *(dists->data()->GetValues<float>(1, i * k + j)) << " ";
-}
-ss_id << std::endl;
-ss_dist << std::endl;
-}
-std::cout << "id\n" << ss_id.str() << std::endl;
-std::cout << "dist\n" << ss_dist.str() << std::endl;
-}
TEST_F(IDMAPTest, idmap_basic) {
ASSERT_TRUE(!xb.empty());
@@ -87,7 +61,7 @@ TEST_F(IDMAPTest, idmap_basic) {
ASSERT_TRUE(index_->GetRawIds() != nullptr);
auto result = index_->Search(query_dataset, conf);
AssertAnns(result, nq, k);
-PrintResult(result, nq, k);
+// PrintResult(result, nq, k);
index_->Seal();
auto binaryset = index_->Serialize();
@@ -95,7 +69,7 @@ TEST_F(IDMAPTest, idmap_basic) {
new_index->Load(binaryset);
auto re_result = index_->Search(query_dataset, conf);
AssertAnns(re_result, nq, k);
-PrintResult(re_result, nq, k);
+// PrintResult(re_result, nq, k);
}
TEST_F(IDMAPTest, idmap_serialize) {
@@ -118,7 +92,7 @@ TEST_F(IDMAPTest, idmap_serialize) {
index_->Add(base_dataset, knowhere::Config());
auto re_result = index_->Search(query_dataset, conf);
AssertAnns(re_result, nq, k);
-PrintResult(re_result, nq, k);
+// PrintResult(re_result, nq, k);
EXPECT_EQ(index_->Count(), nb);
EXPECT_EQ(index_->Dimension(), dim);
auto binaryset = index_->Serialize();
@@ -138,7 +112,7 @@ TEST_F(IDMAPTest, idmap_serialize) {
EXPECT_EQ(index_->Dimension(), dim);
auto result = index_->Search(query_dataset, conf);
AssertAnns(result, nq, k);
-PrintResult(result, nq, k);
+// PrintResult(result, nq, k);
}
}
@@ -169,7 +143,7 @@ TEST_F(IDMAPTest, copy_test) {
{
// cpu to gpu
-auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, conf);
+auto clone_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, conf);
auto clone_result = clone_index->Search(query_dataset, conf);
AssertAnns(clone_result, nq, k);
ASSERT_THROW({ std::static_pointer_cast<knowhere::GPUIDMAP>(clone_index)->GetRawVectors(); },
@@ -194,9 +168,9 @@ TEST_F(IDMAPTest, copy_test) {
ASSERT_TRUE(std::static_pointer_cast<knowhere::IDMAP>(host_index)->GetRawIds() != nullptr);
// gpu to gpu
-auto device_index = knowhere::cloner::CopyCpuToGpu(index_, device_id, conf);
+auto device_index = knowhere::cloner::CopyCpuToGpu(index_, DEVICEID, conf);
auto new_device_index =
-std::static_pointer_cast<knowhere::GPUIDMAP>(device_index)->CopyGpuToGpu(device_id, conf);
+std::static_pointer_cast<knowhere::GPUIDMAP>(device_index)->CopyGpuToGpu(DEVICEID, conf);
auto device_result = new_device_index->Search(query_dataset, conf);
AssertAnns(device_result, nq, k);
}
......
@@ -52,33 +52,6 @@ class KDTTest : public DataGen, public ::testing::Test {
std::shared_ptr<knowhere::CPUKDTRNG> index_ = nullptr;
};
-void
-AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
-auto ids = result->array()[0];
-for (auto i = 0; i < nq; i++) {
-EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
-}
-}
-void
-PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
-auto ids = result->array()[0];
-auto dists = result->array()[1];
-std::stringstream ss_id;
-std::stringstream ss_dist;
-for (auto i = 0; i < 10; i++) {
-for (auto j = 0; j < k; ++j) {
-ss_id << *(ids->data()->GetValues<int64_t>(1, i * k + j)) << " ";
-ss_dist << *(dists->data()->GetValues<float>(1, i * k + j)) << " ";
-}
-ss_id << std::endl;
-ss_dist << std::endl;
-}
-std::cout << "id\n" << ss_id.str() << std::endl;
-std::cout << "dist\n" << ss_dist.str() << std::endl;
-}
// TODO(lxj): add test about count() and dimension()
TEST_F(KDTTest, kdt_basic) {
assert(!xb.empty());
......
@@ -30,19 +30,19 @@ using ::testing::Combine;
using ::testing::TestWithParam;
using ::testing::Values;
-constexpr int64_t DEVICE_ID = 0;
+constexpr int64_t DEVICEID = 0;
class NSGInterfaceTest : public DataGen, public ::testing::Test {
protected:
void
SetUp() override {
// Init_with_default();
-knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICE_ID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
+knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
Generate(256, 1000000 / 100, 1);
index_ = std::make_shared<knowhere::NSG>();
auto tmp_conf = std::make_shared<knowhere::NSGCfg>();
-tmp_conf->gpu_id = DEVICE_ID;
+tmp_conf->gpu_id = DEVICEID;
tmp_conf->knng = 20;
tmp_conf->nprobe = 8;
tmp_conf->nlist = 163;
@@ -69,14 +69,6 @@ class NSGInterfaceTest : public DataGen, public ::testing::Test {
knowhere::Config search_conf;
};
-void
-AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
-auto ids = result->array()[0];
-for (auto i = 0; i < nq; i++) {
-EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
-}
-}
TEST_F(NSGInterfaceTest, basic_test) {
assert(!xb.empty());
......
@@ -17,6 +17,7 @@
#include "unittest/utils.h"
+#include <gtest/gtest.h>
#include <memory>
#include <string>
#include <utility>
@@ -147,3 +148,30 @@ generate_query_dataset(int64_t nb, int64_t dim, float* xb) {
auto dataset = std::make_shared<knowhere::Dataset>(std::move(tensors), tensor_schema);
return dataset;
}
+void
+AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
+auto ids = result->array()[0];
+for (auto i = 0; i < nq; i++) {
+EXPECT_EQ(i, *(ids->data()->GetValues<int64_t>(1, i * k)));
+}
+}
+void
+PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k) {
+auto ids = result->array()[0];
+auto dists = result->array()[1];
+std::stringstream ss_id;
+std::stringstream ss_dist;
+for (auto i = 0; i < 10; i++) {
+for (auto j = 0; j < k; ++j) {
+ss_id << *(ids->data()->GetValues<int64_t>(1, i * k + j)) << " ";
+ss_dist << *(dists->data()->GetValues<float>(1, i * k + j)) << " ";
+}
+ss_id << std::endl;
+ss_dist << std::endl;
+}
+std::cout << "id\n" << ss_id.str() << std::endl;
+std::cout << "dist\n" << ss_dist.str() << std::endl;
+}
@@ -68,6 +68,12 @@ generate_dataset(int64_t nb, int64_t dim, float* xb, int64_t* ids);
knowhere::DatasetPtr
generate_query_dataset(int64_t nb, int64_t dim, float* xb);
+void
+AssertAnns(const knowhere::DatasetPtr& result, const int& nq, const int& k);
+void
+PrintResult(const knowhere::DatasetPtr& result, const int& nq, const int& k);
struct FileIOWriter {
std::fstream fs;
std::string name;
......
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "wrapper/VecIndex.h"
#include "wrapper/utils.h"
#include "knowhere/index/vector_index/helpers/FaissGpuResourceMgr.h"
#include "knowhere/index/vector_index/helpers/IndexParameter.h"
#include <gtest/gtest.h>
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"
using ::testing::TestWithParam;
using ::testing::Values;
using ::testing::Combine;
class KnowhereHybrid
: public DataGenBase, public ::testing::Test {
protected:
void SetUp() override {
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);
dim = 128;
nb = 10000;
nq = 100;
k = 100;
GenData(dim, nb, nq, xb, xq, ids, k, gt_ids, gt_dis);
}
void TearDown() override {
knowhere::FaissGpuResourceMgr::GetInstance().Free();
}
protected:
milvus::engine::IndexType index_type;
milvus::engine::VecIndexPtr index_ = nullptr;
knowhere::Config conf;
};
#ifdef CUSTOMIZATION
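// End-to-end check of FAISS_IVFSQ8_HYBRID through the VecIndex wrapper:
// build and serialize once, then reload via a full GPU copy, a data-only
// copy against a resident quantizer, and SetQuantizer/UnsetQuantizer.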
TEST_F(KnowhereHybrid, test_interface) {
assert(!xb.empty());
index_type = milvus::engine::IndexType::FAISS_IVFSQ8_HYBRID;
index_ = GetVecIndexFactory(index_type);
conf = ParamGenerator::GetInstance().Gen(index_type);
auto elems = nq * k;
std::vector<int64_t> res_ids(elems);
std::vector<float> res_dis(elems);
conf->gpu_id = DEVICEID;
conf->d = dim;
conf->k = k;
index_->BuildAll(nb, xb.data(), ids.data(), conf);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
AssertResult(res_ids, res_dis);
EXPECT_EQ(index_->Count(), nb);
EXPECT_EQ(index_->Dimension(), dim);
auto binaryset = index_->Serialize();
{
// cpu -> gpu
auto cpu_idx = GetVecIndexFactory(index_type);
cpu_idx->Load(binaryset);
{
for (int i = 0; i < 2; ++i) {
auto gpu_idx = cpu_idx->CopyToGpu(DEVICEID, conf);
gpu_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
AssertResult(res_ids, res_dis);
}
}
}
{
// quantization already in gpu, only copy data
auto cpu_idx = GetVecIndexFactory(index_type);
cpu_idx->Load(binaryset);
auto pair = cpu_idx->CopyToGpuWithQuantizer(DEVICEID, conf);
auto gpu_idx = pair.first;
auto quantization = pair.second;
gpu_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
AssertResult(res_ids, res_dis);
auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
quantizer_conf->mode = 2;
quantizer_conf->gpu_id = DEVICEID;
for (int i = 0; i < 2; ++i) {
auto hybrid_idx = GetVecIndexFactory(index_type);
hybrid_idx->Load(binaryset);
hybrid_idx->LoadData(quantization, quantizer_conf);
hybrid_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
AssertResult(res_ids, res_dis);
}
}
{
// quantization already in gpu, only set quantization
auto cpu_idx = GetVecIndexFactory(index_type);
cpu_idx->Load(binaryset);
auto pair = cpu_idx->CopyToGpuWithQuantizer(DEVICEID, conf);
auto quantization = pair.second;
for (int i = 0; i < 2; ++i) {
auto hybrid_idx = GetVecIndexFactory(index_type);
hybrid_idx->Load(binaryset);
hybrid_idx->SetQuantizer(quantization);
hybrid_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
AssertResult(res_ids, res_dis);
hybrid_idx->UnsetQuantizer();
}
}
}
#endif
@@ -25,150 +25,36 @@
INITIALIZE_EASYLOGGINGPP
-namespace {
-namespace ms = milvus::engine;
-namespace kw = knowhere;
-} // namespace
using ::testing::TestWithParam;
using ::testing::Values;
using ::testing::Combine;
-constexpr int64_t DIM = 128;
-constexpr int64_t NB = 100000;
-constexpr int64_t DEVICE_ID = 0;
-class ParamGenerator {
-public:
-static ParamGenerator& GetInstance() {
-static ParamGenerator instance;
-return instance;
-}
-knowhere::Config Gen(const milvus::engine::IndexType& type) {
-switch (type) {
-case milvus::engine::IndexType::FAISS_IDMAP: {
-auto tempconf = std::make_shared<knowhere::Cfg>();
-tempconf->metric_type = knowhere::METRICTYPE::L2;
-return tempconf;
-}
-case milvus::engine::IndexType::FAISS_IVFFLAT_CPU:
-case milvus::engine::IndexType::FAISS_IVFFLAT_GPU:
-case milvus::engine::IndexType::FAISS_IVFFLAT_MIX: {
-auto tempconf = std::make_shared<knowhere::IVFCfg>();
-tempconf->nlist = 100;
-tempconf->nprobe = 16;
-tempconf->metric_type = knowhere::METRICTYPE::L2;
-return tempconf;
-}
-case milvus::engine::IndexType::FAISS_IVFSQ8_CPU:
-case milvus::engine::IndexType::FAISS_IVFSQ8_GPU:
-case milvus::engine::IndexType::FAISS_IVFSQ8_MIX: {
-auto tempconf = std::make_shared<knowhere::IVFSQCfg>();
-tempconf->nlist = 100;
-tempconf->nprobe = 16;
-tempconf->nbits = 8;
-tempconf->metric_type = knowhere::METRICTYPE::L2;
-return tempconf;
-}
-case milvus::engine::IndexType::FAISS_IVFPQ_CPU:
-case milvus::engine::IndexType::FAISS_IVFPQ_GPU: {
-auto tempconf = std::make_shared<knowhere::IVFPQCfg>();
-tempconf->nlist = 100;
-tempconf->nprobe = 16;
-tempconf->nbits = 8;
-tempconf->m = 8;
-tempconf->metric_type = knowhere::METRICTYPE::L2;
-return tempconf;
-}
-case milvus::engine::IndexType::NSG_MIX: {
-auto tempconf = std::make_shared<knowhere::NSGCfg>();
-tempconf->nlist = 100;
-tempconf->nprobe = 16;
-tempconf->search_length = 8;
-tempconf->knng = 200;
-tempconf->search_length = 40; // TODO(linxj): be 20 when search
-tempconf->out_degree = 60;
-tempconf->candidate_pool_size = 200;
-tempconf->metric_type = knowhere::METRICTYPE::L2;
-return tempconf;
-}
-}
-}
-};
class KnowhereWrapperTest
-: public TestWithParam<::std::tuple<milvus::engine::IndexType, std::string, int, int, int, int>> {
+: public DataGenBase,
+public TestWithParam<::std::tuple<milvus::engine::IndexType, std::string, int, int, int, int>> {
protected:
void SetUp() override {
-knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICE_ID,
-1024 * 1024 * 200,
-1024 * 1024 * 300,
-2);
+knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICEID, PINMEM, TEMPMEM, RESNUM);
std::string generator_type;
std::tie(index_type, generator_type, dim, nb, nq, k) = GetParam();
-auto generator = std::make_shared<DataGenBase>();
-generator->GenData(dim, nb, nq, xb, xq, ids, k, gt_ids, gt_dis);
+GenData(dim, nb, nq, xb, xq, ids, k, gt_ids, gt_dis);
index_ = GetVecIndexFactory(index_type);
conf = ParamGenerator::GetInstance().Gen(index_type);
conf->k = k;
conf->d = dim;
-conf->gpu_id = DEVICE_ID;
+conf->gpu_id = DEVICEID;
}
void TearDown() override {
knowhere::FaissGpuResourceMgr::GetInstance().Free();
}
-void AssertResult(const std::vector<int64_t>& ids, const std::vector<float>& dis) {
-EXPECT_EQ(ids.size(), nq * k);
-EXPECT_EQ(dis.size(), nq * k);
-for (auto i = 0; i < nq; i++) {
-EXPECT_EQ(ids[i * k], gt_ids[i * k]);
-//EXPECT_EQ(dis[i * k], gt_dis[i * k]);
-}
-int match = 0;
-for (int i = 0; i < nq; ++i) {
-for (int j = 0; j < k; ++j) {
-for (int l = 0; l < k; ++l) {
-if (ids[i * nq + j] == gt_ids[i * nq + l]) match++;
-}
-}
-}
-auto precision = float(match) / (nq * k);
-EXPECT_GT(precision, 0.5);
-std::cout << std::endl << "Precision: " << precision
-<< ", match: " << match
-<< ", total: " << nq * k
-<< std::endl;
-}
protected:
milvus::engine::IndexType index_type;
-knowhere::Config conf;
-int dim = DIM;
-int nb = NB;
-int nq = 10;
-int k = 10;
-std::vector<float> xb;
-std::vector<float> xq;
-std::vector<int64_t> ids;
milvus::engine::VecIndexPtr index_ = nullptr;
+knowhere::Config conf;
-// Ground Truth
-std::vector<int64_t> gt_ids;
-std::vector<float> gt_dis;
};
INSTANTIATE_TEST_CASE_P(WrapperParam, KnowhereWrapperTest,
@@ -220,7 +106,7 @@ TEST_P(KnowhereWrapperTest, TO_GPU_TEST) {
AssertResult(res_ids, res_dis);
{
-auto dev_idx = index_->CopyToGpu(DEVICE_ID);
+auto dev_idx = index_->CopyToGpu(DEVICEID);
for (int i = 0; i < 10; ++i) {
dev_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
}
@@ -232,7 +118,7 @@ TEST_P(KnowhereWrapperTest, TO_GPU_TEST) {
write_index(index_, file_location);
auto new_index = milvus::engine::read_index(file_location);
-auto dev_idx = new_index->CopyToGpu(DEVICE_ID);
+auto dev_idx = new_index->CopyToGpu(DEVICEID);
for (int i = 0; i < 10; ++i) {
dev_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), conf);
}
@@ -240,10 +126,6 @@ TEST_P(KnowhereWrapperTest, TO_GPU_TEST) {
}
}
-//TEST_P(KnowhereWrapperTest, TO_CPU_TEST) {
-// // dev
-//}
TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) {
EXPECT_EQ(index_->GetType(), index_type);
@@ -282,8 +164,3 @@ TEST_P(KnowhereWrapperTest, SERIALIZE_TEST) {
AssertResult(res_ids, res_dis);
}
}
-// TODO(linxj): add exception test
-//TEST_P(KnowhereWrapperTest, exception_test) {
-//}
@@ -16,6 +16,7 @@
// under the License.
+#include <gtest/gtest.h>
#include <faiss/IndexFlat.h>
#include "wrapper/utils.h"
@@ -59,3 +60,30 @@ DataGenBase::GenData(const int &dim,
gt_dis.resize(nq * k);
GenData(dim, nb, nq, xb.data(), xq.data(), ids.data(), k, gt_ids.data(), gt_dis.data());
}
+void
+DataGenBase::AssertResult(const std::vector<int64_t>& ids, const std::vector<float>& dis) {
+EXPECT_EQ(ids.size(), nq * k);
+EXPECT_EQ(dis.size(), nq * k);
+for (auto i = 0; i < nq; i++) {
+EXPECT_EQ(ids[i * k], gt_ids[i * k]);
+//EXPECT_EQ(dis[i * k], gt_dis[i * k]);
+}
+int match = 0;
+// each query's k results start at row offset i * k
+for (int i = 0; i < nq; ++i) {
+for (int j = 0; j < k; ++j) {
+for (int l = 0; l < k; ++l) {
+if (ids[i * k + j] == gt_ids[i * k + l]) match++;
+}
+}
+}
+auto precision = float(match) / (nq * k);
+EXPECT_GT(precision, 0.5);
+std::cout << std::endl << "Precision: " << precision
+<< ", match: " << match
+<< ", total: " << nq * k
+<< std::endl;
+}
@@ -24,24 +24,110 @@
#include <cstdio>
#include <fstream>
+#include "wrapper/VecIndex.h"
+#include "wrapper/utils.h"
+#include "knowhere/index/vector_index/helpers/IndexParameter.h"
class DataGenBase;
using DataGenPtr = std::shared_ptr<DataGenBase>;
+constexpr int64_t DIM = 128;
+constexpr int64_t NB = 100000;
+constexpr int64_t NQ = 10;
+constexpr int64_t DEVICEID = 0;
+constexpr int64_t PINMEM = 1024 * 1024 * 200;
+constexpr int64_t TEMPMEM = 1024 * 1024 * 300;
+constexpr int64_t RESNUM = 2;
class DataGenBase {
public:
-virtual void GenData(const int &dim, const int &nb, const int &nq, float *xb, float *xq, int64_t *ids,
-const int &k, int64_t *gt_ids, float *gt_dis);
-virtual void GenData(const int &dim,
-const int &nb,
-const int &nq,
-std::vector<float> &xb,
-std::vector<float> &xq,
-std::vector<int64_t> &ids,
-const int &k,
-std::vector<int64_t> &gt_ids,
-std::vector<float> &gt_dis);
+virtual void GenData(const int& dim, const int& nb, const int& nq, float* xb, float* xq, int64_t* ids,
+const int& k, int64_t* gt_ids, float* gt_dis);
+virtual void GenData(const int& dim,
+const int& nb,
+const int& nq,
+std::vector<float>& xb,
+std::vector<float>& xq,
+std::vector<int64_t>& ids,
+const int& k,
+std::vector<int64_t>& gt_ids,
+std::vector<float>& gt_dis);
+void AssertResult(const std::vector<int64_t>& ids, const std::vector<float>& dis);
+int dim = DIM;
+int nb = NB;
+int nq = NQ;
+int k = 10;
+std::vector<float> xb;
+std::vector<float> xq;
+std::vector<int64_t> ids;
+// Ground Truth
+std::vector<int64_t> gt_ids;
+std::vector<float> gt_dis;
+};
+class ParamGenerator {
+public:
+static ParamGenerator& GetInstance() {
+static ParamGenerator instance;
+return instance;
+}
+knowhere::Config Gen(const milvus::engine::IndexType& type) {
+switch (type) {
+case milvus::engine::IndexType::FAISS_IDMAP: {
+auto tempconf = std::make_shared<knowhere::Cfg>();
+tempconf->metric_type = knowhere::METRICTYPE::L2;
+return tempconf;
+}
+case milvus::engine::IndexType::FAISS_IVFFLAT_CPU:
+case milvus::engine::IndexType::FAISS_IVFFLAT_GPU:
+case milvus::engine::IndexType::FAISS_IVFFLAT_MIX: {
+auto tempconf = std::make_shared<knowhere::IVFCfg>();
+tempconf->nlist = 100;
+tempconf->nprobe = 16;
+tempconf->metric_type = knowhere::METRICTYPE::L2;
+return tempconf;
+}
+case milvus::engine::IndexType::FAISS_IVFSQ8_HYBRID:
+case milvus::engine::IndexType::FAISS_IVFSQ8_CPU:
+case milvus::engine::IndexType::FAISS_IVFSQ8_GPU:
+case milvus::engine::IndexType::FAISS_IVFSQ8_MIX: {
+auto tempconf = std::make_shared<knowhere::IVFSQCfg>();
+tempconf->nlist = 100;
+tempconf->nprobe = 16;
+tempconf->nbits = 8;
+tempconf->metric_type = knowhere::METRICTYPE::L2;
+return tempconf;
+}
+case milvus::engine::IndexType::FAISS_IVFPQ_CPU:
+case milvus::engine::IndexType::FAISS_IVFPQ_GPU: {
+auto tempconf = std::make_shared<knowhere::IVFPQCfg>();
+tempconf->nlist = 100;
+tempconf->nprobe = 16;
+tempconf->nbits = 8;
+tempconf->m = 8;
+tempconf->metric_type = knowhere::METRICTYPE::L2;
+return tempconf;
+}
+case milvus::engine::IndexType::NSG_MIX: {
+auto tempconf = std::make_shared<knowhere::NSGCfg>();
+tempconf->nlist = 100;
+tempconf->nprobe = 16;
+tempconf->search_length = 8;
+tempconf->knng = 200;
+tempconf->search_length = 40; // TODO(linxj): be 20 when search
+tempconf->out_degree = 60;
+tempconf->candidate_pool_size = 200;
+tempconf->metric_type = knowhere::METRICTYPE::L2;
+return tempconf;
+}
+}
+}
};
......