Commit fc787705 authored by xiaojun.lin

update v1


Former-commit-id: c012da78a4fd1074660e0c83bc0a594c77a923cc
Parent 7c244ec8
@@ -71,4 +71,13 @@ GPUIVFSQ::CopyGpuToCpu(const Config& config) {
return std::make_shared<IVFSQ>(new_index);
}
void
GPUIVFSQ::search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) {
#ifdef CUSTOMIZATION
GPUIVF::search_impl(n, data, k, distances, labels, cfg);
#else
IVF::search_impl(n, data, k, distances, labels, cfg);
#endif
}
} // namespace knowhere
@@ -38,6 +38,10 @@ class GPUIVFSQ : public GPUIVF {
VectorIndexPtr
CopyGpuToCpu(const Config& config) override;
protected:
void
search_impl(int64_t n, const float* data, int64_t k, float* distances, int64_t* labels, const Config& cfg) override;
};
} // namespace knowhere
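Note on the two hunks above: this change adds a search_impl override to GPUIVFSQ so that builds with the CUSTOMIZATION flag keep the query on the GPU parent class, while default builds fall back to the CPU IVF path. A minimal, self-contained sketch of that compile-time dispatch, using placeholder types rather than the real knowhere classes:

// Sketch only: stand-in types illustrating the #ifdef CUSTOMIZATION dispatch.
#include <cstdint>
#include <cstdio>
#include <vector>

struct IVF {
    virtual ~IVF() = default;
    virtual void search_impl(int64_t n, const float*, int64_t k, float* dist, int64_t* ids) {
        for (int64_t i = 0; i < n * k; ++i) { dist[i] = 0.0f; ids[i] = -1; }  // CPU stub result
        std::printf("CPU IVF search path\n");
    }
};

struct GPUIVF : IVF {
    void search_impl(int64_t n, const float*, int64_t k, float* dist, int64_t* ids) override {
        for (int64_t i = 0; i < n * k; ++i) { dist[i] = 0.0f; ids[i] = -1; }  // GPU stub result
        std::printf("GPU IVF search path\n");
    }
};

struct GPUIVFSQ : GPUIVF {
    void search_impl(int64_t n, const float* data, int64_t k, float* dist, int64_t* ids) override {
#ifdef CUSTOMIZATION
        GPUIVF::search_impl(n, data, k, dist, ids);  // customized build: stay on the GPU parent
#else
        IVF::search_impl(n, data, k, dist, ids);     // default build: fall back to CPU IVF
#endif
    }
};

int main() {
    GPUIVFSQ index;
    std::vector<float> data(16), dist(4);
    std::vector<int64_t> ids(4);
    index.search_impl(2, data.data(), 2, dist.data(), ids.data());
    return 0;
}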
@@ -154,8 +154,8 @@ class IVFTest : public DataGen, public TestWithParam<::std::tuple<std::string, P
INSTANTIATE_TEST_CASE_P(IVFParameters, IVFTest,
Values(std::make_tuple("IVF", ParameterType::ivf),
std::make_tuple("GPUIVF", ParameterType::ivf),
// std::make_tuple("IVFPQ", ParameterType::ivfpq),
// std::make_tuple("GPUIVFPQ", ParameterType::ivfpq),
std::make_tuple("IVFPQ", ParameterType::ivfpq),
std::make_tuple("GPUIVFPQ", ParameterType::ivfpq),
std::make_tuple("IVFSQ", ParameterType::ivfsq),
#ifdef CUSTOMIZATION
std::make_tuple("IVFSQHybrid", ParameterType::ivfsq),
@@ -240,25 +240,26 @@ TEST_P(IVFTest, hybrid) {
auto result = hybrid_1_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
PrintResult(result, nq, k);
hybrid_1_idx->UnsetQuantizer();
}
{
auto hybrid_2_idx = std::make_shared<knowhere::IVFSQHybrid>(device_id);
auto binaryset = index_->Serialize();
hybrid_2_idx->Load(binaryset);
auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
quantizer_conf->mode = 1;
quantizer_conf->gpu_id = device_id;
auto q = hybrid_2_idx->LoadQuantizer(quantizer_conf);
quantizer_conf->mode = 2;
hybrid_2_idx->LoadData(q, quantizer_conf);
auto result = hybrid_2_idx->Search(query_dataset, conf);
AssertAnns(result, nq, conf->k);
PrintResult(result, nq, k);
}
// {
// auto hybrid_2_idx = std::make_shared<knowhere::IVFSQHybrid>(device_id);
//
// auto binaryset = index_->Serialize();
// hybrid_2_idx->Load(binaryset);
//
// auto quantizer_conf = std::make_shared<knowhere::QuantizerCfg>();
// quantizer_conf->mode = 1;
// quantizer_conf->gpu_id = device_id;
// auto q = hybrid_2_idx->LoadQuantizer(quantizer_conf);
// quantizer_conf->mode = 2;
// hybrid_2_idx->LoadData(q, quantizer_conf);
//
// auto result = hybrid_2_idx->Search(query_dataset, conf);
// AssertAnns(result, nq, conf->k);
// PrintResult(result, nq, k);
// }
}
// TEST_P(IVFTest, gpu_to_cpu) {
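The hybrid test above exercises a two-phase load of an IVFSQHybrid index: LoadQuantizer with mode = 1 places only the quantizer on the chosen GPU, and LoadData with mode = 2 then attaches the serialized index data to that quantizer before searching. A hedged sketch of just that call sequence, with stand-in types instead of the real knowhere API:

// Sketch only: models the two-phase hybrid load shown in the test above.
#include <cstdio>
#include <memory>

struct QuantizerCfg { int mode = 0; int gpu_id = 0; };
struct Quantizer { int gpu_id = -1; };

struct HybridIndex {
    std::shared_ptr<Quantizer> LoadQuantizer(const QuantizerCfg& cfg) {
        auto q = std::make_shared<Quantizer>();
        q->gpu_id = cfg.gpu_id;   // phase 1: only the quantizer is resident on the GPU
        std::printf("quantizer loaded on gpu %d (mode %d)\n", cfg.gpu_id, cfg.mode);
        return q;
    }
    void LoadData(const std::shared_ptr<Quantizer>& q, const QuantizerCfg& cfg) {
        // phase 2: attach the index data to the already-loaded quantizer
        std::printf("data attached to quantizer on gpu %d (mode %d)\n", q->gpu_id, cfg.mode);
    }
};

int main() {
    HybridIndex idx;
    QuantizerCfg cfg;
    cfg.gpu_id = 0;
    cfg.mode = 1;                      // phase 1: quantizer only
    auto q = idx.LoadQuantizer(cfg);
    cfg.mode = 2;                      // phase 2: data
    idx.LoadData(q, cfg);
    return 0;
}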
@@ -438,6 +439,7 @@ TEST_P(IVFTest, clone_test) {
}
}
#ifdef CUSTOMIZATION
TEST_P(IVFTest, seal_test) {
// FaissGpuResourceMgr::GetInstance().InitDevice(device_id);
@@ -472,6 +474,7 @@ TEST_P(IVFTest, seal_test) {
auto with_seal = tc.RecordSection("With seal");
ASSERT_GE(without_seal, with_seal);
}
#endif
class GPURESTEST : public DataGen, public ::testing::Test {
protected:
@@ -637,7 +640,7 @@ TEST_F(GPURESTEST, copyandsearch) {
// search and copy at the same time
printf("==================\n");
index_type = "GPUIVFSQ";
index_type = "GPUIVF";
index_ = IndexFactory(index_type);
auto conf = std::make_shared<knowhere::IVFSQCfg>();
@@ -693,54 +696,6 @@ TEST_F(GPURESTEST, copyandsearch) {
std::thread search_thread(search_func);
std::thread load_thread(load_func);
search_thread.join();
load_thread.join();
tc.RecordSection("Copy&search total");
}
TEST_F(GPURESTEST, TrainAndSearch) {
index_type = "GPUIVFSQ";
index_ = IndexFactory(index_type);
auto conf = std::make_shared<knowhere::IVFSQCfg>();
conf->nlist = 1638;
conf->d = dim;
conf->gpu_id = device_id;
conf->metric_type = knowhere::METRICTYPE::L2;
conf->k = k;
conf->nbits = 8;
conf->nprobe = 1;
auto preprocessor = index_->BuildPreprocessor(base_dataset, conf);
index_->set_preprocessor(preprocessor);
auto model = index_->Train(base_dataset, conf);
auto new_index = IndexFactory(index_type);
new_index->set_index_model(model);
new_index->Add(base_dataset, conf);
auto cpu_idx = knowhere::cloner::CopyGpuToCpu(new_index, knowhere::Config());
cpu_idx->Seal();
auto search_idx = knowhere::cloner::CopyCpuToGpu(cpu_idx, device_id, knowhere::Config());
constexpr int train_count = 1;
constexpr int search_count = 5000;
auto train_stage = [&] {
for (int i = 0; i < train_count; ++i) {
auto model = index_->Train(base_dataset, conf);
auto test_idx = IndexFactory(index_type);
test_idx->set_index_model(model);
test_idx->Add(base_dataset, conf);
}
};
auto search_stage = [&](knowhere::VectorIndexPtr& search_idx) {
for (int i = 0; i < search_count; ++i) {
auto result = search_idx->Search(query_dataset, conf);
AssertAnns(result, nq, k);
}
};
// TimeRecorder tc("record");
// train_stage();
// tc.RecordSection("train cost");
// search_stage(search_idx);
// tc.RecordSection("search cost");
......
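The copyandsearch test above overlaps a search loop and a GPU/CPU copy loop on two std::thread objects and joins both while a TimeRecorder section measures the total. A minimal runnable sketch of that two-thread pattern, with placeholder work functions standing in for the real Search and clone calls:

// Sketch only: placeholder workloads illustrating the concurrent search-and-copy pattern.
#include <chrono>
#include <cstdio>
#include <thread>

int main() {
    auto search_func = [] {
        for (int i = 0; i < 5; ++i) {   // stand-in for repeated Search() calls
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            std::printf("search iteration %d\n", i);
        }
    };
    auto load_func = [] {
        for (int i = 0; i < 5; ++i) {   // stand-in for repeated CopyGpuToCpu()/CopyCpuToGpu() calls
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            std::printf("copy iteration %d\n", i);
        }
    };
    std::thread search_thread(search_func);
    std::thread load_thread(load_func);
    search_thread.join();
    load_thread.join();
    return 0;
}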
@@ -36,6 +36,7 @@ class KDTTest : public DataGen, public ::testing::Test {
protected:
void
SetUp() override {
Generate(96, 1000, 10);
index_ = std::make_shared<knowhere::CPUKDTRNG>();
auto tempconf = std::make_shared<knowhere::KDTCfg>();
......
@@ -38,17 +38,17 @@ class NSGInterfaceTest : public DataGen, public ::testing::Test {
SetUp() override {
// Init_with_default();
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(DEVICE_ID, 1024 * 1024 * 200, 1024 * 1024 * 600, 2);
Generate(256, 1000000, 1);
Generate(256, 1000000 / 100, 1);
index_ = std::make_shared<knowhere::NSG>();
auto tmp_conf = std::make_shared<knowhere::NSGCfg>();
tmp_conf->gpu_id = DEVICE_ID;
tmp_conf->knng = 100;
tmp_conf->nprobe = 32;
tmp_conf->nlist = 16384;
tmp_conf->search_length = 60;
tmp_conf->out_degree = 70;
tmp_conf->candidate_pool_size = 500;
tmp_conf->knng = 20;
tmp_conf->nprobe = 8;
tmp_conf->nlist = 163;
tmp_conf->search_length = 40;
tmp_conf->out_degree = 30;
tmp_conf->candidate_pool_size = 100;
tmp_conf->metric_type = knowhere::METRICTYPE::L2;
train_conf = tmp_conf;
......
@@ -184,7 +184,7 @@ class ResourceAdvanceTest : public testing::Test {
};
TEST_F(ResourceAdvanceTest, DISK_RESOURCE_TEST) {
const uint64_t NUM = 10;
const uint64_t NUM = max_once_load;
std::vector<std::shared_ptr<TestTask>> tasks;
TableFileSchemaPtr dummy = nullptr;
for (uint64_t i = 0; i < NUM; ++i) {
......
@@ -188,7 +188,7 @@ INSTANTIATE_TEST_CASE_P(WrapperParam, KnowhereWrapperTest,
10,
10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_CPU, "Default", DIM, NB, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB, 10, 10),
// std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_GPU, "Default", DIM, NB, 10, 10),
std::make_tuple(milvus::engine::IndexType::FAISS_IVFSQ8_MIX, "Default", DIM, NB, 10, 10),
// std::make_tuple(IndexType::NSG_MIX, "Default", 128, 250000, 10, 10),
// std::make_tuple(IndexType::SPTAG_KDT_RNT_CPU, "Default", 128, 250000, 10, 10),
......