Commit c6ccf78e authored by: P peng.xu

Merge branch 'branch-0.3.1' into 'branch-0.3.1'

fix server test

See merge request megasearch/milvus!184

Former-commit-id: 5be0048119bbba154eb25c306b210a6ab20d984b
@@ -37,7 +37,7 @@ set(unittest_libs
     ${CUDA_TOOLKIT_ROOT_DIR}/lib64/stubs/libnvidia-ml.so
     )

-#add_subdirectory(server)
+add_subdirectory(server)
 add_subdirectory(db)
 add_subdirectory(index_wrapper)
 #add_subdirectory(faiss_wrapper)
@@ -8,6 +8,7 @@
 #include "cache/GpuCacheMgr.h"
 #include "wrapper/Index.h"
+#include "wrapper/knowhere/vec_index.h"

 using namespace zilliz::milvus;
@@ -26,6 +27,54 @@ public:
     }
 };

+class MockVecIndex : public engine::VecIndex {
+public:
+    virtual void BuildAll(const long &nb,
+                          const float *xb,
+                          const long *ids,
+                          const engine::Config &cfg,
+                          const long &nt = 0,
+                          const float *xt = nullptr) {
+    }
+
+    virtual void Add(const long &nb,
+                     const float *xb,
+                     const long *ids,
+                     const engine::Config &cfg = engine::Config()) {
+    }
+
+    virtual void Search(const long &nq,
+                        const float *xq,
+                        float *dist,
+                        long *ids,
+                        const engine::Config &cfg = engine::Config()) {
+    }
+
+    virtual int64_t Dimension() {
+        return dimension_;
+    }
+
+    virtual int64_t Count() {
+        return ntotal_;
+    }
+
+    virtual zilliz::knowhere::BinarySet Serialize() {
+        zilliz::knowhere::BinarySet binset;
+        return binset;
+    }
+
+    virtual void Load(const zilliz::knowhere::BinarySet &index_binary) {
+    }
+
+public:
+    int64_t dimension_ = 512;
+    int64_t ntotal_ = 0;
+};
+
 }

 TEST(CacheTest, CPU_CACHE_TEST) {
@@ -40,9 +89,9 @@ TEST(CacheTest, CPU_CACHE_TEST) {
     const int dim = 256;

     for (int i = 0; i < 20; i++) {
-        std::shared_ptr<faiss::Index> raw_index(faiss::index_factory(dim, "IDMap,Flat"));
-        engine::Index_ptr index = std::make_shared<engine::Index>(raw_index);
-        index->ntotal = 1000000;//less 1G per index
+        MockVecIndex* mock_index = new MockVecIndex();
+        mock_index->ntotal_ = 1000000;//less 1G per index
+        engine::Index_ptr index(mock_index);
         cpu_mgr->InsertItem("index_" + std::to_string(i), index);
     }
@@ -65,9 +114,9 @@ TEST(CacheTest, CPU_CACHE_TEST) {
     g_num = 5;
     cpu_mgr->SetCapacity(g_num * gbyte);

-    std::shared_ptr<faiss::Index> raw_index(faiss::index_factory(dim, "IDMap,Flat"));
-    engine::Index_ptr index = std::make_shared<engine::Index>(raw_index);
-    index->ntotal = 6000000;//6G less
+    MockVecIndex* mock_index = new MockVecIndex();
+    mock_index->ntotal_ = 6000000;//6G less
+    engine::Index_ptr index(mock_index);
     cpu_mgr->InsertItem("index_6g", index);
     ASSERT_EQ(cpu_mgr->ItemCount(), 0);//data greater than capacity can not be inserted sucessfully
@@ -82,9 +131,9 @@ TEST(CacheTest, GPU_CACHE_TEST) {
     const int dim = 256;

     for(int i = 0; i < 20; i++) {
-        std::shared_ptr<faiss::Index> raw_index(faiss::index_factory(dim, "IDMap,Flat"));
-        engine::Index_ptr index = std::make_shared<engine::Index>(raw_index);
-        index->ntotal = 1000;
+        MockVecIndex* mock_index = new MockVecIndex();
+        mock_index->ntotal_ = 1000;
+        engine::Index_ptr index(mock_index);
         cache::DataObjPtr obj = std::make_shared<cache::DataObj>(index);
@@ -117,9 +166,9 @@ TEST(CacheTest, INVALID_TEST) {
     {
         LessItemCacheMgr mgr;
         for(int i = 0; i < 20; i++) {
-            std::shared_ptr<faiss::Index> raw_index(faiss::index_factory(2, "IDMap,Flat"));
-            engine::Index_ptr index = std::make_shared<engine::Index>(raw_index);
-            index->ntotal = 2;
+            MockVecIndex* mock_index = new MockVecIndex();
+            mock_index->ntotal_ = 2;
+            engine::Index_ptr index(mock_index);
             cache::DataObjPtr obj = std::make_shared<cache::DataObj>(index);
             mgr.InsertItem("index_" + std::to_string(i), obj);
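The net effect of the change is to decouple the cache tests from faiss: instead of building a real faiss::Index, each test inserts a MockVecIndex whose reported element count is set directly through ntotal_, so the cache's capacity accounting can be exercised with arbitrary "sizes" and no real vector allocations. Below is a minimal, self-contained sketch of the same pattern; the VecIndex, MockIndex, and ToyCacheMgr names are hypothetical stand-ins for illustration, not the project's actual engine::VecIndex or CacheMgr API, and the bytes-per-item formula is an assumption.

// Sketch only: a mock index with a configurable element count, plus a toy
// cache that rejects items larger than its capacity -- the behaviour the
// CPU_CACHE_TEST above asserts. All type names here are illustrative.
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for engine::VecIndex.
struct VecIndex {
    virtual ~VecIndex() = default;
    virtual int64_t Count() const = 0;
    virtual int64_t Dimension() const = 0;
};

// Mirrors MockVecIndex from the diff: the size is whatever the test says it is.
struct MockIndex : VecIndex {
    int64_t ntotal_ = 0;
    int64_t dimension_ = 512;
    int64_t Count() const override { return ntotal_; }
    int64_t Dimension() const override { return dimension_; }
};

// Toy cache manager; assumes item size = count * dimension * sizeof(float).
class ToyCacheMgr {
 public:
    explicit ToyCacheMgr(int64_t capacity_bytes) : capacity_(capacity_bytes) {}

    bool InsertItem(const std::string& key, std::shared_ptr<VecIndex> index) {
        int64_t bytes = index->Count() * index->Dimension() *
                        static_cast<int64_t>(sizeof(float));
        if (bytes > capacity_) {
            return false;  // larger than capacity: not inserted
        }
        items_[key] = std::move(index);
        return true;
    }

    size_t ItemCount() const { return items_.size(); }

 private:
    int64_t capacity_;
    std::unordered_map<std::string, std::shared_ptr<VecIndex>> items_;
};

int main() {
    ToyCacheMgr mgr(int64_t(1) << 30);  // 1 GB capacity

    auto small = std::make_shared<MockIndex>();
    small->ntotal_ = 100000;            // ~200 MB at 512 dims: fits
    auto big = std::make_shared<MockIndex>();
    big->ntotal_ = 6000000;             // ~12 GB at 512 dims: rejected

    std::cout << mgr.InsertItem("small", small) << "\n";  // 1
    std::cout << mgr.InsertItem("big", big) << "\n";      // 0
    std::cout << mgr.ItemCount() << "\n";                 // 1
}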