Commit 8387a365 authored by wxyu

format code


Former-commit-id: 2655376e3f0d6a2ca6ae01c818847cf4396268cc
Parent: fd7444e4
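The changes below look like the output of an automatic formatter (for example clang-format; the commit message does not name the tool, so this is an assumption): long declarations are rewrapped against a wider column limit, return types stay on their own line, short lambda bodies collapse onto one line, range-based for-loop colons gain surrounding spaces, and include blocks are re-sorted. As a rough illustration only, here is a minimal .clang-format sketch whose option values are inferred from the diff rather than taken from the repository's actual configuration:

# Hypothetical .clang-format sketch; values are inferred from the diff below,
# not copied from the project's real configuration.
BasedOnStyle: Google                      # assumed baseline style
ColumnLimit: 120                          # rewrapped Search(...) declarations fit within ~120 columns
AlwaysBreakAfterReturnType: All           # keeps "Status" / "std::pair<...>" on their own line
SortIncludes: true                        # include blocks are reordered alphabetically
SpaceBeforeRangeBasedForLoopColon: true   # "for (auto& task : tasks)" instead of "task: tasks"
AllowShortLambdasOnASingleLine: All       # "cv_.wait(lock, [this] { return !queue_.empty(); });"

Running clang-format -i -style=file over the touched files would apply rules like these in place.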
@@ -115,19 +115,18 @@ IVF::Search(const DatasetPtr& dataset, const Config& config) {
search_impl(rows, (float*)p_data, search_cfg->k, res_dis, res_ids, config);
// std::stringstream ss_res_id, ss_res_dist;
// for (int i = 0; i < 10; ++i) {
// printf("%llu", res_ids[i]);
// printf("\n");
// printf("%.6f", res_dis[i]);
// printf("\n");
// ss_res_id << res_ids[i] << " ";
// ss_res_dist << res_dis[i] << " ";
// }
// std::cout << std::endl << "after search: " << std::endl;
// std::cout << ss_res_id.str() << std::endl;
// std::cout << ss_res_dist.str() << std::endl << std::endl;
// std::stringstream ss_res_id, ss_res_dist;
// for (int i = 0; i < 10; ++i) {
// printf("%llu", res_ids[i]);
// printf("\n");
// printf("%.6f", res_dis[i]);
// printf("\n");
// ss_res_id << res_ids[i] << " ";
// ss_res_dist << res_dis[i] << " ";
// }
// std::cout << std::endl << "after search: " << std::endl;
// std::cout << ss_res_id.str() << std::endl;
// std::cout << ss_res_dist.str() << std::endl << std::endl;
auto id_buf = MakeMutableBufferSmart((uint8_t*)res_ids, sizeof(int64_t) * elems);
auto dist_buf = MakeMutableBufferSmart((uint8_t*)res_dis, sizeof(float) * elems);
@@ -17,6 +17,7 @@
// under the License.
#include "knowhere/index/vector_index/IndexIVFSQHybrid.h"
#include <utility>
#include "faiss/AutoTune.h"
#include "faiss/gpu/GpuAutoTune.h"
#include "faiss/gpu/GpuIndexIVF.h"
@@ -176,9 +177,9 @@ IVFSQHybrid::LoadData(const knowhere::QuantizerPtr& q, const Config& conf) {
KNOWHERE_THROW_MSG("mode only support 2 in this func");
}
}
// if (quantizer_conf->gpu_id != gpu_id_) {
// KNOWHERE_THROW_MSG("quantizer and data must on the same gpu card");
// }
// if (quantizer_conf->gpu_id != gpu_id_) {
// KNOWHERE_THROW_MSG("quantizer and data must on the same gpu card");
// }
gpu_id_ = quantizer_conf->gpu_id;
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(gpu_id_)) {
@@ -208,7 +209,6 @@ IVFSQHybrid::LoadData(const knowhere::QuantizerPtr& q, const Config& conf) {
std::pair<VectorIndexPtr, QuantizerPtr>
IVFSQHybrid::CopyCpuToGpuWithQuantizer(const int64_t& device_id, const Config& config) {
if (auto res = FaissGpuResourceMgr::GetInstance().GetRes(device_id)) {
ResScope rs(res, device_id, false);
faiss::gpu::GpuClonerOptions option;
option.allInGpu = true;
@@ -222,7 +222,7 @@ IVFSQHybrid::CopyCpuToGpuWithQuantizer(const int64_t& device_id, const Config& c
std::shared_ptr<faiss::Index> device_index;
device_index.reset(gpu_index);
auto new_idx = std::make_shared<IVFSQHybrid>(device_index, device_id, res);
auto new_idx = std::make_shared<IVFSQHybrid>(device_index, device_id, res);
auto q = std::make_shared<FaissIVFQuantizer>();
q->quantizer = index_composition.quantizer;
@@ -19,6 +19,7 @@
#include <faiss/index_io.h>
#include <memory>
#include <utility>
#include "IndexGPUIVFSQ.h"
#include "Quantizer.h"
@@ -80,7 +80,8 @@ class ExecutionEngine {
Merge(const std::string& location) = 0;
virtual Status
Search(int64_t n, const float* data, int64_t k, int64_t nprobe, float* distances, int64_t* labels, bool hybrid) const = 0;
Search(int64_t n, const float* data, int64_t k, int64_t nprobe, float* distances, int64_t* labels,
bool hybrid) const = 0;
virtual std::shared_ptr<ExecutionEngine>
BuildIndex(const std::string& location, EngineType engine_type) = 0;
@@ -31,11 +31,11 @@
#include "wrapper/ConfAdapter.h"
#include "wrapper/ConfAdapterMgr.h"
#include <src/core/knowhere/knowhere/index/vector_index/IndexIVFSQHybrid.h>
#include <src/scheduler/Utils.h>
#include <stdexcept>
#include <utility>
#include <vector>
#include <src/core/knowhere/knowhere/index/vector_index/IndexIVFSQHybrid.h>
namespace milvus {
namespace engine {
@@ -414,8 +414,8 @@ ExecutionEngineImpl::BuildIndex(const std::string& location, EngineType engine_t
}
Status
ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, int64_t nprobe, float* distances,
int64_t* labels, bool hybrid) const {
ExecutionEngineImpl::Search(int64_t n, const float* data, int64_t k, int64_t nprobe, float* distances, int64_t* labels,
bool hybrid) const {
if (index_ == nullptr) {
ENGINE_LOG_ERROR << "ExecutionEngineImpl: index is null, failed to search";
return Status(DB_ERROR, "index is null");
@@ -71,12 +71,7 @@ class ExecutionEngineImpl : public ExecutionEngine {
Merge(const std::string& location) override;
Status
Search(int64_t n,
const float* data,
int64_t k,
int64_t nprobe,
float* distances,
int64_t* labels,
Search(int64_t n, const float* data, int64_t k, int64_t nprobe, float* distances, int64_t* labels,
bool hybrid = false) const override;
ExecutionEnginePtr
@@ -19,10 +19,10 @@
#include "SchedInst.h"
#include "TaskCreator.h"
#include "optimizer/Optimizer.h"
#include "task/Task.h"
#include "scheduler/tasklabel/SpecResLabel.h"
#include "scheduler/optimizer/Optimizer.h"
#include "scheduler/Algorithm.h"
#include "scheduler/optimizer/Optimizer.h"
#include "scheduler/tasklabel/SpecResLabel.h"
#include "task/Task.h"
#include <utility>
@@ -62,9 +62,7 @@ void
JobMgr::worker_function() {
while (running_) {
std::unique_lock<std::mutex> lock(mutex_);
cv_.wait(lock, [this] {
return !queue_.empty();
});
cv_.wait(lock, [this] { return !queue_.empty(); });
auto job = queue_.front();
queue_.pop();
lock.unlock();
@@ -77,7 +75,7 @@ JobMgr::worker_function() {
OptimizerInst::GetInstance()->Run(task);
}
for (auto& task: tasks) {
for (auto& task : tasks) {
calculate_path(task);
}
@@ -145,37 +145,38 @@ Action::SpecifiedResourceLabelTaskScheduler(ResourceMgrWPtr res_mgr, ResourcePtr
transport_costs.push_back(transport_cost);
paths.emplace_back(path);
}
// if (task->job_.lock()->type() == JobType::SEARCH) {
// auto label = task->label();
// auto spec_label = std::static_pointer_cast<SpecResLabel>(label);
// if (spec_label->resource().lock()->type() == ResourceType::CPU) {
// std::vector<std::string> spec_path;
// spec_path.push_back(spec_label->resource().lock()->name());
// spec_path.push_back(resource->name());
// task->path() = Path(spec_path, spec_path.size() - 1);
// } else {
// // step 2: select min cost, cost(resource) = avg_cost * task_to_do + transport_cost
// uint64_t min_cost = std::numeric_limits<uint64_t>::max();
// uint64_t min_cost_idx = 0;
// for (uint64_t i = 0; i < compute_resources.size(); ++i) {
// if (compute_resources[i]->TotalTasks() == 0) {
// min_cost_idx = i;
// break;
// }
// uint64_t cost = compute_resources[i]->TaskAvgCost() * compute_resources[i]->NumOfTaskToExec() +
// transport_costs[i];
// if (min_cost > cost) {
// min_cost = cost;
// min_cost_idx = i;
// }
// }
//
// // step 3: set path in task
// Path task_path(paths[min_cost_idx], paths[min_cost_idx].size() - 1);
// task->path() = task_path;
// }
//
// } else
// if (task->job_.lock()->type() == JobType::SEARCH) {
// auto label = task->label();
// auto spec_label = std::static_pointer_cast<SpecResLabel>(label);
// if (spec_label->resource().lock()->type() == ResourceType::CPU) {
// std::vector<std::string> spec_path;
// spec_path.push_back(spec_label->resource().lock()->name());
// spec_path.push_back(resource->name());
// task->path() = Path(spec_path, spec_path.size() - 1);
// } else {
// // step 2: select min cost, cost(resource) = avg_cost * task_to_do + transport_cost
// uint64_t min_cost = std::numeric_limits<uint64_t>::max();
// uint64_t min_cost_idx = 0;
// for (uint64_t i = 0; i < compute_resources.size(); ++i) {
// if (compute_resources[i]->TotalTasks() == 0) {
// min_cost_idx = i;
// break;
// }
// uint64_t cost = compute_resources[i]->TaskAvgCost() *
// compute_resources[i]->NumOfTaskToExec() +
// transport_costs[i];
// if (min_cost > cost) {
// min_cost = cost;
// min_cost_idx = i;
// }
// }
//
// // step 3: set path in task
// Path task_path(paths[min_cost_idx], paths[min_cost_idx].size() - 1);
// task->path() = task_path;
// }
//
// } else
if (task->job_.lock()->type() == JobType::BUILD) {
// step2: Read device id in config
// get build index gpu resource
@@ -15,10 +15,10 @@
// specific language governing permissions and limitations
// under the License.
#include "cache/GpuCacheMgr.h"
#include "scheduler/Utils.h"
#include "scheduler/optimizer/LargeSQ8HPass.h"
#include "cache/GpuCacheMgr.h"
#include "scheduler/SchedInst.h"
#include "scheduler/Utils.h"
#include "scheduler/task/SearchTask.h"
#include "scheduler/tasklabel/SpecResLabel.h"
#include "utils/Log.h"
@@ -19,6 +19,7 @@
#include "scheduler/Utils.h"
#include <iostream>
#include <limits>
#include <utility>
namespace milvus {
@@ -126,9 +127,7 @@ void
Resource::loader_function() {
while (running_) {
std::unique_lock<std::mutex> lock(load_mutex_);
load_cv_.wait(lock, [&] {
return load_flag_;
});
load_cv_.wait(lock, [&] { return load_flag_; });
load_flag_ = false;
lock.unlock();
while (true) {
@@ -154,9 +153,7 @@ Resource::executor_function() {
}
while (running_) {
std::unique_lock<std::mutex> lock(exec_mutex_);
exec_cv_.wait(lock, [&] {
return exec_flag_;
});
exec_cv_.wait(lock, [&] { return exec_flag_; });
exec_flag_ = false;
lock.unlock();
while (true) {
@@ -22,11 +22,11 @@
#include "utils/Log.h"
#include "utils/TimeRecorder.h"
#include <src/scheduler/SchedInst.h>
#include <algorithm>
#include <string>
#include <thread>
#include <utility>
#include <src/scheduler/SchedInst.h>
namespace milvus {
namespace scheduler {
@@ -105,8 +105,8 @@ class IVFHybridIndex : public IVFMixIndex {
Status
UnsetQuantizer() override;
std::pair<VecIndexPtr, knowhere::QuantizerPtr> CopyToGpuWithQuantizer(const int64_t& device_id,
const Config& cfg) override;
std::pair<VecIndexPtr, knowhere::QuantizerPtr>
CopyToGpuWithQuantizer(const int64_t& device_id, const Config& cfg) override;
VecIndexPtr
LoadData(const knowhere::QuantizerPtr& q, const Config& conf) override;
@@ -19,6 +19,7 @@
#include <memory>
#include <string>
#include <utility>
#include "cache/DataObj.h"
#include "knowhere/common/BinarySet.h"