提交 afbf5512 编写于 作者: J jinhai

Merge branch 'branch-0.5.0-yk' into '0.5.0'

annotate some unused code to improve code coverage

See merge request megasearch/milvus!758

Former-commit-id: 8c0fb2bcc017583c7c241611a978005ca882c070
......@@ -397,6 +397,7 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
Status
ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, int64_t nprobe, float* distances, int64_t* labels,
bool hybrid) {
#if 0
if (index_type_ == EngineType::FAISS_IVFSQ8H) {
if (!hybrid) {
const std::string key = location_ + ".quantizer";
......@@ -449,6 +450,7 @@ ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, int64_t npr
}
}
}
#endif
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";
......
......@@ -26,48 +26,48 @@
namespace milvus {
namespace scheduler {
bool
LargeSQ8HPass::Run(const TaskPtr& task) {
// if (task->Type() != TaskType::SearchTask) {
// return false;
// }
//
// auto search_task = std::static_pointer_cast<XSearchTask>(task);
// if (search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IVFSQ8H) {
// return false;
// }
//
// auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
//
// // TODO: future, Index::IVFSQ8H, if nq < threshold set cpu, else set gpu
// if (search_job->nq() < 100) {
// return false;
// }
//
// std::vector<uint64_t> gpus = scheduler::get_gpu_pool();
// std::vector<int64_t> all_free_mem;
// for (auto& gpu : gpus) {
// auto cache = cache::GpuCacheMgr::GetInstance(gpu);
// auto free_mem = cache->CacheCapacity() - cache->CacheUsage();
// all_free_mem.push_back(free_mem);
// }
//
// auto max_e = std::max_element(all_free_mem.begin(), all_free_mem.end());
// auto best_index = std::distance(all_free_mem.begin(), max_e);
// auto best_device_id = gpus[best_index];
//
// ResourcePtr res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
// if (not res_ptr) {
// SERVER_LOG_ERROR << "GpuResource " << best_device_id << " invalid.";
// // TODO: throw critical error and exit
// return false;
// }
//
// auto label = std::make_shared<SpecResLabel>(std::weak_ptr<Resource>(res_ptr));
// task->label() = label;
//
// return true;
}
// bool
// LargeSQ8HPass::Run(const TaskPtr& task) {
// if (task->Type() != TaskType::SearchTask) {
// return false;
// }
//
// auto search_task = std::static_pointer_cast<XSearchTask>(task);
// if (search_task->file_->engine_type_ != (int)engine::EngineType::FAISS_IVFSQ8H) {
// return false;
// }
//
// auto search_job = std::static_pointer_cast<SearchJob>(search_task->job_.lock());
//
// // TODO: future, Index::IVFSQ8H, if nq < threshold set cpu, else set gpu
// if (search_job->nq() < 100) {
// return false;
// }
//
// std::vector<uint64_t> gpus = scheduler::get_gpu_pool();
// std::vector<int64_t> all_free_mem;
// for (auto& gpu : gpus) {
// auto cache = cache::GpuCacheMgr::GetInstance(gpu);
// auto free_mem = cache->CacheCapacity() - cache->CacheUsage();
// all_free_mem.push_back(free_mem);
// }
//
// auto max_e = std::max_element(all_free_mem.begin(), all_free_mem.end());
// auto best_index = std::distance(all_free_mem.begin(), max_e);
// auto best_device_id = gpus[best_index];
//
// ResourcePtr res_ptr = ResMgrInst::GetInstance()->GetResource(ResourceType::GPU, best_device_id);
// if (not res_ptr) {
// SERVER_LOG_ERROR << "GpuResource " << best_device_id << " invalid.";
// // TODO: throw critical error and exit
// return false;
// }
//
// auto label = std::make_shared<SpecResLabel>(std::weak_ptr<Resource>(res_ptr));
// task->label() = label;
//
// return true;
// }
} // namespace scheduler
} // namespace milvus
......@@ -37,8 +37,8 @@ class LargeSQ8HPass : public Pass {
LargeSQ8HPass() = default;
public:
bool
Run(const TaskPtr& task) override;
// bool
// Run(const TaskPtr& task) override;
};
using LargeSQ8HPassPtr = std::shared_ptr<LargeSQ8HPass>;
......
......@@ -20,12 +20,12 @@
namespace milvus {
namespace scheduler {
void
Optimizer::Init() {
// for (auto& pass : pass_list_) {
// pass->Init();
// }
}
// void
// Optimizer::Init() {
// for (auto& pass : pass_list_) {
// pass->Init();
// }
// }
bool
Optimizer::Run(const TaskPtr& task) {
......
......@@ -38,8 +38,8 @@ class Optimizer {
explicit Optimizer(std::vector<PassPtr> pass_list) : pass_list_(std::move(pass_list)) {
}
void
Init();
// void
// Init();
bool
Run(const TaskPtr& task);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册