Commit aea7aae9 authored by J jinhai

Merge branch 'branch-0.4.0' into 'branch-0.4.0'

MS-442 make it work

See merge request megasearch/milvus!458

Former-commit-id: c79aaefba6e0c78622b531a9d2ebe58af9126fed
@@ -34,7 +34,7 @@ else()
endif()
message(STATUS "Build type = ${BUILD_TYPE}")
#add_definitions(-DNEW_SCHEDULER)
add_definitions(-DNEW_SCHEDULER)
project(milvus VERSION "${MILVUS_VERSION}")
project(milvus_engine LANGUAGES CUDA CXX)
......
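The hunk above uncomments add_definitions(-DNEW_SCHEDULER), so the NEW_SCHEDULER preprocessor symbol is now defined for every translation unit in the build and any code guarded by it gets compiled in. A minimal sketch of how such a compile definition is typically consumed on the C++ side (the guarded function body is illustrative only, not taken from this repository):

#include <iostream>

// NEW_SCHEDULER is supplied by CMake via add_definitions(-DNEW_SCHEDULER);
// no header needs to define it.
void StartScheduler() {
#ifdef NEW_SCHEDULER
    std::cout << "new scheduler path compiled in" << std::endl;
#else
    std::cout << "legacy scheduler path compiled in" << std::endl;
#endif
}

int main() {
    StartScheduler();
    return 0;
}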
@@ -66,24 +66,24 @@ resource_config:
enable_loader: true
enable_executor: true
gtx1060:
type: GPU
memory: 6
device_id: 0
enable_loader: true
enable_executor: true
# gtx1060:
# type: GPU
# memory: 6
# device_id: 0
# enable_loader: true
# enable_executor: true
gtx1660:
type: GPU
memory: 6
device_id: 1
enable_loader: true
enable_executor: true
# gtx1660:
# type: GPU
# memory: 6
# device_id: 1
# enable_loader: false
# enable_executor: false
# connection list, length: 0~N
# format: -${resource_name}===${resource_name}
connections:
- ssda===cpu
- cpu===gtx1060
- cpu===gtx1660
# - cpu===gtx1060
# - cpu===gtx1660
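The connections entries above follow the ${resource_name}===${resource_name} format noted in the comment, e.g. ssda===cpu joins the resource named ssda to the cpu resource; the cpu===gtx1060 and cpu===gtx1660 links are commented out together with their resource definitions. A minimal sketch of splitting one such entry into its two endpoints (the ParseConnection helper is hypothetical and only illustrates the "===" delimiter; the scheduler's actual parsing code is not part of this diff):

#include <cassert>
#include <string>
#include <utility>

// Hypothetical helper: split "ssda===cpu" into {"ssda", "cpu"}.
std::pair<std::string, std::string> ParseConnection(const std::string &entry) {
    const std::string delimiter = "===";
    auto pos = entry.find(delimiter);
    return {entry.substr(0, pos), entry.substr(pos + delimiter.size())};
}

int main() {
    auto endpoints = ParseConnection("ssda===cpu");
    assert(endpoints.first == "ssda" && endpoints.second == "cpu");
    return 0;
}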
@@ -7,6 +7,7 @@
#include "SchedInst.h"
#include "server/ServerConfig.h"
#include "ResourceFactory.h"
#include "knowhere/index/vector_index/gpu_ivf.h"
namespace zilliz {
namespace milvus {
@@ -36,8 +37,12 @@ SchedServInit() {
device_id,
enable_loader,
enable_executor));
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(device_id);
}
knowhere::FaissGpuResourceMgr::GetInstance().InitResource();
auto default_connection = Connection("default_connection", 500.0);
auto connections = config.GetSequence(server::CONFIG_RESOURCE_CONNECTIONS);
for (auto &conn : connections) {
......
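The SchedServInit() change above calls FaissGpuResourceMgr::InitDevice(device_id) inside the resource loop and InitResource() once after the loop finishes. A minimal sketch of that ordering in isolation, using only the FaissGpuResourceMgr calls visible in this diff (the device-id vector stands in for the values read from resource_config, and the parameter type is an assumption):

#include <vector>
#include "knowhere/index/vector_index/gpu_ivf.h"

void InitGpuResources(const std::vector<int> &device_ids) {
    // One InitDevice call per device declared in resource_config ...
    for (auto device_id : device_ids) {
        zilliz::knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(device_id);
    }
    // ... then a single InitResource once every device is registered.
    zilliz::knowhere::FaissGpuResourceMgr::GetInstance().InitResource();
}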
@@ -241,8 +241,9 @@ server::KnowhereError IVFMixIndex::BuildAll(const long &nb,
index_->Add(dataset, cfg);
if (auto device_index = std::dynamic_pointer_cast<GPUIVF>(index_)) {
auto host_index = device_index->Copy_index_gpu_to_cpu();
auto host_index = device_index->CopyGpuToCpu(Config());
index_ = host_index;
type = TransferToCpuIndexType(type);
} else {
WRAPPER_LOG_ERROR << "Build IVFMIXIndex Failed";
}
......
@@ -106,6 +106,10 @@ VecIndexPtr GetVecIndexFactory(const IndexType &type) {
index = std::make_shared<zilliz::knowhere::GPUIVFSQ>(0);
return std::make_shared<IVFMixIndex>(index, IndexType::FAISS_IVFSQ8_MIX);
}
case IndexType::FAISS_IVFSQ8: {
index = std::make_shared<zilliz::knowhere::IVFSQ>();
break;
}
case IndexType::NSG_MIX: { // TODO(linxj): bug.
index = std::make_shared<zilliz::knowhere::NSG>(0);
break;
@@ -194,10 +198,10 @@ server::KnowhereError write_index(VecIndexPtr index, const std::string &location
// TODO(linxj): redo here.
void AutoGenParams(const IndexType &type, const long &size, zilliz::knowhere::Config &cfg) {
auto nlist = cfg.get_with_default("nlist", 0);
if (size <= TYPICAL_COUNT/16384 + 1) {
if (size <= TYPICAL_COUNT / 16384 + 1) {
//handle less row count, avoid nlist set to 0
cfg["nlist"] = 1;
} else if (int(size/TYPICAL_COUNT) * nlist == 0) {
} else if (int(size / TYPICAL_COUNT) * nlist == 0) {
//calculate a proper nlist if nlist not specified or size less than TYPICAL_COUNT
cfg["nlist"] = int(size / TYPICAL_COUNT * 16384);
}
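The AutoGenParams hunk above only adjusts spacing, but the nlist heuristic it touches is worth spelling out: very small collections get nlist forced to 1 so it cannot collapse to 0, and otherwise, when no usable nlist is configured or size is below TYPICAL_COUNT, one is derived from size / TYPICAL_COUNT * 16384. A self-contained sketch of that arithmetic (the TYPICAL_COUNT value of 1,000,000 is an assumption for illustration, and the ratio is computed in floating point here so the derived nlist stays non-zero for sizes below the assumed constant; the wrapper's own types may differ):

#include <cstdio>

// Assumed value, for illustration only; the real constant lives in the wrapper sources.
constexpr long TYPICAL_COUNT = 1000000;

long AutoNlist(long size, long configured_nlist) {
    if (size <= TYPICAL_COUNT / 16384 + 1) {
        return 1;  // very small collection: avoid nlist ending up as 0
    }
    if ((size / TYPICAL_COUNT) * configured_nlist == 0) {
        // nlist not configured, or size below TYPICAL_COUNT: derive a proportional value
        return static_cast<long>(size / static_cast<double>(TYPICAL_COUNT) * 16384);
    }
    return configured_nlist;  // keep the caller-provided nlist
}

int main() {
    std::printf("%ld\n", AutoNlist(50, 0));         // 1    (small-collection guard)
    std::printf("%ld\n", AutoNlist(500000, 0));     // 8192 (derived: 0.5 * 16384)
    std::printf("%ld\n", AutoNlist(2000000, 100));  // 100  (configured value kept)
    return 0;
}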
@@ -225,6 +229,20 @@ void AutoGenParams(const IndexType &type, const long &size, zilliz::knowhere::Co
}
}
IndexType TransferToCpuIndexType(const IndexType &type) {
switch (type) {
case IndexType::FAISS_IVFFLAT_MIX: {
return IndexType::FAISS_IVFFLAT_CPU;
}
case IndexType::FAISS_IVFSQ8_MIX: {
return IndexType::FAISS_IVFSQ8;
}
default: {
return IndexType::INVALID;
}
}
}
}
}
}
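TransferToCpuIndexType() above is what BuildAll now calls right after CopyGpuToCpu(), so a *_MIX index that has been copied back to host memory also reports a CPU-side type. A self-contained sketch of that mapping in isolation (the enum here is a pared-down stand-in listing only the values this function touches; part of the real IndexType enum appears in the next hunk):

#include <cassert>

// Pared-down stand-in for the wrapper's IndexType, for illustration only.
enum class IndexType {
    INVALID,
    FAISS_IVFFLAT_CPU,
    FAISS_IVFFLAT_MIX,
    FAISS_IVFSQ8_MIX,
    FAISS_IVFSQ8,
};

IndexType TransferToCpuIndexType(const IndexType &type) {
    switch (type) {
        case IndexType::FAISS_IVFFLAT_MIX: return IndexType::FAISS_IVFFLAT_CPU;
        case IndexType::FAISS_IVFSQ8_MIX:  return IndexType::FAISS_IVFSQ8;
        default:                           return IndexType::INVALID;
    }
}

int main() {
    // The mapping BuildAll relies on after copying a MIX index back to the CPU.
    assert(TransferToCpuIndexType(IndexType::FAISS_IVFSQ8_MIX) == IndexType::FAISS_IVFSQ8);
    assert(TransferToCpuIndexType(IndexType::FAISS_IVFFLAT_MIX) == IndexType::FAISS_IVFFLAT_CPU);
    return 0;
}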
@@ -32,6 +32,7 @@ enum class IndexType {
FAISS_IVFPQ_GPU,
SPTAG_KDT_RNT_CPU,
FAISS_IVFSQ8_MIX,
FAISS_IVFSQ8,
NSG_MIX,
};
@@ -88,6 +89,8 @@ extern VecIndexPtr LoadVecIndex(const IndexType &index_type, const zilliz::knowh
extern void AutoGenParams(const IndexType& type, const long& size, Config& cfg);
extern IndexType TransferToCpuIndexType(const IndexType& type);
}
}
}
@@ -8,13 +8,14 @@
#include <easylogging++.h>
#include <wrapper/knowhere/vec_index.h>
#include "knowhere/index/vector_index/gpu_ivf.h"
#include "utils.h"
INITIALIZE_EASYLOGGINGPP
using namespace zilliz::milvus::engine;
using namespace zilliz::knowhere;
//using namespace zilliz::knowhere;
using ::testing::TestWithParam;
using ::testing::Values;
@@ -66,8 +67,8 @@ class KnowhereWrapperTest
Config train_cfg;
Config search_cfg;
int dim = 64;
int nb = 10000;
int dim = 512;
int nb = 1000000;
int nq = 10;
int k = 10;
std::vector<float> xb;
@@ -94,27 +95,27 @@ INSTANTIATE_TEST_CASE_P(WrapperParam, KnowhereWrapperTest,
// Config::object{{"nlist", 100}, {"dim", 64}},
// Config::object{{"dim", 64}, {"k", 10}, {"nprobe", 40}}
//),
std::make_tuple(IndexType::FAISS_IVFFLAT_MIX, "Default",
64, 100000, 10, 10,
Config::object{{"nlist", 1000}, {"dim", 64}, {"metric_type", "L2"}},
Config::object{{"dim", 64}, {"k", 10}, {"nprobe", 5}}
),
std::make_tuple(IndexType::FAISS_IDMAP, "Default",
64, 100000, 10, 10,
Config::object{{"dim", 64}, {"metric_type", "L2"}},
Config::object{{"dim", 64}, {"k", 10}}
),
// std::make_tuple(IndexType::FAISS_IVFFLAT_MIX, "Default",
// 64, 100000, 10, 10,
// Config::object{{"nlist", 1000}, {"dim", 64}, {"metric_type", "L2"}},
// Config::object{{"dim", 64}, {"k", 10}, {"nprobe", 5}}
// ),
// std::make_tuple(IndexType::FAISS_IDMAP, "Default",
// 64, 100000, 10, 10,
// Config::object{{"dim", 64}, {"metric_type", "L2"}},
// Config::object{{"dim", 64}, {"k", 10}}
// ),
std::make_tuple(IndexType::FAISS_IVFSQ8_MIX, "Default",
64, 100000, 10, 10,
Config::object{{"dim", 64}, {"nlist", 1000}, {"nbits", 8}, {"metric_type", "L2"}},
Config::object{{"dim", 64}, {"k", 10}, {"nprobe", 5}}
),
std::make_tuple(IndexType::NSG_MIX, "Default",
128, 250000, 10, 10,
Config::object{{"dim", 128}, {"nlist", 8192}, {"nprobe", 16}, {"metric_type", "L2"},
{"knng", 200}, {"search_length", 40}, {"out_degree", 60}, {"candidate_pool_size", 200}},
Config::object{{"k", 10}, {"search_length", 20}}
512, 1000000, 10, 10,
Config::object{{"dim", 512}, {"nlist", 1000}, {"nbits", 8}, {"metric_type", "L2"}},
Config::object{{"dim", 512}, {"k", 10}, {"nprobe", 5}}
)
// std::make_tuple(IndexType::NSG_MIX, "Default",
// 128, 250000, 10, 10,
// Config::object{{"dim", 128}, {"nlist", 8192}, {"nprobe", 16}, {"metric_type", "L2"},
// {"knng", 200}, {"search_length", 40}, {"out_degree", 60}, {"candidate_pool_size", 200}},
// Config::object{{"k", 10}, {"search_length", 20}}
// )
//std::make_tuple(IndexType::SPTAG_KDT_RNT_CPU, "Default",
// 64, 10000, 10, 10,
// Config::object{{"TPTNumber", 1}, {"dim", 64}},
@@ -135,6 +136,34 @@ TEST_P(KnowhereWrapperTest, base_test) {
AssertResult(res_ids, res_dis);
}
TEST_P(KnowhereWrapperTest, to_gpu_test) {
EXPECT_EQ(index_->GetType(), index_type);
zilliz::knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(0);
zilliz::knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(1);
auto elems = nq * k;
std::vector<int64_t> res_ids(elems);
std::vector<float> res_dis(elems);
index_->BuildAll(nb, xb.data(), ids.data(), train_cfg);
index_->Search(nq, xq.data(), res_dis.data(), res_ids.data(), search_cfg);
AssertResult(res_ids, res_dis);
{
index_->CopyToGpu(1);
}
std::string file_location = "/tmp/whatever";
write_index(index_, file_location);
auto new_index = read_index(file_location);
auto dev_idx = new_index->CopyToGpu(1);
for (int i = 0; i < 10000; ++i) {
dev_idx->Search(nq, xq.data(), res_dis.data(), res_ids.data(), search_cfg);
}
AssertResult(res_ids, res_dis);
}
TEST_P(KnowhereWrapperTest, serialize) {
EXPECT_EQ(index_->GetType(), index_type);
......