提交 3c072f31 编写于 作者: Y Yu Kun

clang tidy and clang format


Former-commit-id: a451b79ef4d43595d2a81b85e444eaa71da6c830
上级 360655d9
...@@ -17,7 +17,6 @@ ...@@ -17,7 +17,6 @@
#pragma once #pragma once
#include <memory> #include <memory>
namespace milvus { namespace milvus {
...@@ -27,7 +26,6 @@ class DataObj { ...@@ -27,7 +26,6 @@ class DataObj {
public: public:
virtual int64_t virtual int64_t
Size() = 0; Size() = 0;
}; };
using DataObjPtr = std::shared_ptr<DataObj>; using DataObjPtr = std::shared_ptr<DataObj>;
......
...@@ -93,56 +93,56 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) { ...@@ -93,56 +93,56 @@ ExecutionEngineImpl::CreatetVecIndex(EngineType type) {
// Placeholder for hybrid-index loading: intended to place the IVFSQ8Hybrid
// quantizer onto the most suitable GPU cache. The implementation is not yet
// enabled; the sketch below is kept as commented-out planning notes, so this
// function is currently a deliberate no-op.
void
ExecutionEngineImpl::HybridLoad() {
    //    if (index_type_ != EngineType::FAISS_IVFSQ8Hybrid) {
    //        return;
    //    }
    //
    //    const std::string key = location_ + ".quantizer";
    //    std::vector<uint64_t> gpus;
    //
    //    // Cache hit: reuse a quantizer already resident on some GPU.
    //    {
    //        int64_t selected = -1;
    //        void* quantizer = nullptr;
    //        for (auto& gpu : gpus) {
    //            auto cache = cache::GpuCacheMgr::GetInstance(gpu);
    //            if (auto quan = cache->GetIndex(key)) {
    //                selected = gpu;
    //                quantizer = quan;
    //            }
    //        }
    //
    //        if (selected != -1) {
    //            // set quantizer into index;
    //            return;
    //        }
    //    }
    //
    //    // Cache miss: pick the GPU with the most free cache memory,
    //    // load the quantizer there, and cache it.
    //    {
    //        std::vector<int64_t> all_free_mem;
    //        for (auto& gpu : gpus) {
    //            auto cache = cache::GpuCacheMgr::GetInstance(gpu);
    //            auto free_mem = cache->CacheCapacity() - cache->CacheUsage();
    //            all_free_mem.push_back(free_mem);
    //        }
    //
    //        auto max_e = std::max_element(all_free_mem.begin(), all_free_mem.end());
    //        auto best = std::distance(all_free_mem.begin(), max_e);
    //
    //        // load to best device;
    //        // cache quantizer
    //    }
    //
    //    // Summary of the intended flow when index_type == Hybrid:
    //    //   1. find which GPU (if any) already holds the quantizer
    //    //   2.1 otherwise choose the GPU whose cache fits best
    //    //   2.2 load the quantizer into that GPU's cache
    //    //   3. set the quantizer into the index
}
Status Status
......
...@@ -113,10 +113,13 @@ class BFIndex : public VecIndexImpl { ...@@ -113,10 +113,13 @@ class BFIndex : public VecIndexImpl {
class ToIndexData : public cache::DataObj { class ToIndexData : public cache::DataObj {
public: public:
ToIndexData(int64_t size) : size_(size) {} explicit ToIndexData(int64_t size) : size_(size) {
}
int64_t int64_t
Size() override {return size_;} Size() override {
return size_;
}
private: private:
int64_t size_; int64_t size_;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册