未验证 提交 74c68a1c 编写于 作者: C cqy123456 提交者: GitHub

Master codacy check (#3366)

* codacy fix
Signed-off-by: Ncqy <yaya645@126.com>

* codacy
Signed-off-by: Ncqy <yaya645@126.com>

* codacy
Signed-off-by: Ncqy <yaya645@126.com>

* codacy check
Signed-off-by: Ncqy <yaya645@126.com>

* codacy
Signed-off-by: Ncqy <yaya645@126.com>

* codacy
Signed-off-by: Ncqy <yaya645@126.com>

* codacy
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy check
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy check
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy check
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy check
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy check
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy check
Signed-off-by: Ncqy <yaya645@126.com>

* clang-tidy check
Signed-off-by: Ncqy <yaya645@126.com>
Co-authored-by: NCai Yudong <yudong.cai@zilliz.com>
上级 1a5fad87
......@@ -247,7 +247,7 @@ Snapshot::ToString() const {
for (auto& fe_id : fc_m) {
auto fe = GetResource<FieldElement>(fe_id);
ss << "\n\tFieldElement: id=" << fe_id << ",name=" << fe->GetName() << " CID=" << fe->GetCollectionId();
ss << ",fetype=" << (int)fe->GetFEtype() << ",typename=" << fe->GetTypeName();
ss << ",fetype=" << static_cast<int32_t>(fe->GetFEtype()) << ",typename=" << fe->GetTypeName();
}
}
......@@ -273,7 +273,7 @@ Snapshot::ToString() const {
auto sf = GetResource<SegmentFile>(sf_id);
ss << "\n\tSegmentFile: id=" << sf_id << ",field_element_id=" << sf->GetFieldElementId();
ss << ",size=" << sf->GetSize();
ss << ",fetype=" << (int)sf->GetFEtype();
ss << ",fetype=" << static_cast<int32_t>(sf->GetFEtype());
}
}
}
......
......@@ -90,11 +90,11 @@ KnowhereResource::Initialize() {
}
// init gpu resources
for (auto iter = gpu_resources.begin(); iter != gpu_resources.end(); ++iter) {
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(iter->first, iter->second.pinned_memory,
iter->second.temp_memory, iter->second.resource_num);
for (auto& gpu_resource : gpu_resources) {
knowhere::FaissGpuResourceMgr::GetInstance().InitDevice(gpu_resource.first, gpu_resource.second.pinned_memory,
gpu_resource.second.temp_memory,
gpu_resource.second.resource_num);
}
#endif
return Status::OK();
......
......@@ -96,8 +96,8 @@ IVFConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
// CheckIntByRange(knowhere::meta::ROWS, nlist, DEFAULT_MAX_ROWS);
// auto tune params
int64_t nq = oricfg[knowhere::meta::ROWS].get<int64_t>();
int64_t nlist = oricfg[knowhere::IndexParams::nlist].get<int64_t>();
auto nq = oricfg[knowhere::meta::ROWS].get<int64_t>();
auto nlist = oricfg[knowhere::IndexParams::nlist].get<int64_t>();
oricfg[knowhere::IndexParams::nlist] = MatchNlist(nq, nlist);
// Best Practice
......@@ -153,7 +153,7 @@ IVFPQConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
// CheckIntByRange(knowhere::meta::ROWS, MIN_POINTS_PER_CENTROID * nlist, MAX_POINTS_PER_CENTROID * nlist);
std::vector<int64_t> resset;
int64_t dimension = oricfg[knowhere::meta::DIM].get<int64_t>();
auto dimension = oricfg[knowhere::meta::DIM].get<int64_t>();
IVFPQConfAdapter::GetValidMList(dimension, resset);
CheckIntByValues(knowhere::IndexParams::m, resset);
......@@ -278,7 +278,7 @@ RHNSWPQConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
CheckIntByRange(knowhere::IndexParams::M, MIN_M, MAX_M);
std::vector<int64_t> resset;
int64_t dimension = oricfg[knowhere::meta::DIM].get<int64_t>();
auto dimension = oricfg[knowhere::meta::DIM].get<int64_t>();
IVFPQConfAdapter::GetValidMList(dimension, resset);
CheckIntByValues(knowhere::IndexParams::PQM, resset);
......
......@@ -40,9 +40,9 @@ IndexAnnoy::Serialize(const Config& config) {
std::shared_ptr<uint8_t[]> dim_data(new uint8_t[sizeof(uint64_t)]);
memcpy(dim_data.get(), &dim, sizeof(uint64_t));
auto index_length = index_->get_index_length();
size_t index_length = index_->get_index_length();
std::shared_ptr<uint8_t[]> index_data(new uint8_t[index_length]);
memcpy(index_data.get(), index_->get_index(), (size_t)index_length);
memcpy(index_data.get(), index_->get_index(), index_length);
BinarySet res_set;
res_set.Append("annoy_metric_type", metric_type, metric_type_length);
......@@ -54,12 +54,12 @@ IndexAnnoy::Serialize(const Config& config) {
void
IndexAnnoy::Load(const BinarySet& index_binary) {
auto metric_type = index_binary.GetByName("annoy_metric_type");
metric_type_.resize((size_t)metric_type->size);
memcpy(metric_type_.data(), metric_type->data.get(), (size_t)metric_type->size);
metric_type_.resize(static_cast<size_t>(metric_type->size));
memcpy(metric_type_.data(), metric_type->data.get(), static_cast<size_t>(metric_type->size));
auto dim_data = index_binary.GetByName("annoy_dim");
uint64_t dim;
memcpy(&dim, dim_data->data.get(), (size_t)dim_data->size);
memcpy(&dim, dim_data->data.get(), static_cast<size_t>(dim_data->size));
if (metric_type_ == Metric::L2) {
index_ = std::make_shared<AnnoyIndex<int64_t, float, ::Euclidean, ::Kiss64Random>>(dim);
......@@ -98,7 +98,7 @@ IndexAnnoy::BuildAll(const DatasetPtr& dataset_ptr, const Config& config) {
}
for (int i = 0; i < rows; ++i) {
index_->add_item(p_ids[i], (const float*)p_data + dim * i);
index_->add_item(p_ids[i], static_cast<const float*>(p_data) + dim * i);
}
index_->build(config[IndexParams::n_trees].get<int64_t>());
......@@ -114,8 +114,8 @@ IndexAnnoy::Query(const DatasetPtr& dataset_ptr, const Config& config) {
auto k = config[meta::TOPK].get<int64_t>();
auto search_k = config[IndexParams::search_k].get<int64_t>();
auto all_num = rows * k;
auto p_id = (int64_t*)malloc(all_num * sizeof(int64_t));
auto p_dist = (float*)malloc(all_num * sizeof(float));
auto p_id = static_cast<int64_t*>(malloc(all_num * sizeof(int64_t)));
auto p_dist = static_cast<float*>(malloc(all_num * sizeof(float)));
faiss::ConcurrentBitsetPtr blacklist = GetBlacklist();
#pragma omp parallel for
......@@ -124,7 +124,8 @@ IndexAnnoy::Query(const DatasetPtr& dataset_ptr, const Config& config) {
result.reserve(k);
std::vector<float> distances;
distances.reserve(k);
index_->get_nns_by_vector((const float*)p_data + i * dim, k, search_k, &result, &distances, blacklist);
index_->get_nns_by_vector(static_cast<const float*>(p_data) + i * dim, k, search_k, &result, &distances,
blacklist);
int64_t result_num = result.size();
auto local_p_id = p_id + k * i;
......
......@@ -46,14 +46,14 @@ BinaryIDMAP::Query(const DatasetPtr& dataset_ptr, const Config& config) {
}
GET_TENSOR_DATA(dataset_ptr)
int64_t k = config[meta::TOPK].get<int64_t>();
auto k = config[meta::TOPK].get<int64_t>();
auto elems = rows * k;
size_t p_id_size = sizeof(int64_t) * elems;
size_t p_dist_size = sizeof(float) * elems;
auto p_id = (int64_t*)malloc(p_id_size);
auto p_dist = (float*)malloc(p_dist_size);
auto p_id = static_cast<int64_t*>(malloc(p_id_size));
auto p_dist = static_cast<float*>(malloc(p_dist_size));
QueryImpl(rows, (uint8_t*)p_data, k, p_dist, p_id, config);
QueryImpl(rows, reinterpret_cast<const uint8_t*>(p_data), k, p_dist, p_id, config);
auto ret_ds = std::make_shared<Dataset>();
ret_ds->Set(meta::IDS, p_id);
......@@ -87,7 +87,7 @@ BinaryIDMAP::Add(const DatasetPtr& dataset_ptr, const Config& config) {
std::lock_guard<std::mutex> lk(mutex_);
GET_TENSOR_DATA_ID(dataset_ptr)
index_->add_with_ids(rows, (uint8_t*)p_data, p_ids);
index_->add_with_ids(rows, reinterpret_cast<const uint8_t*>(p_data), p_ids);
}
void
......@@ -97,7 +97,7 @@ BinaryIDMAP::Train(const DatasetPtr& dataset_ptr, const Config& config) {
constexpr faiss::MetricType metric_type = faiss::METRIC_Tanimoto;
const char* desc = "BFlat";
int64_t dim = config[meta::DIM].get<int64_t>();
auto dim = config[meta::DIM].get<int64_t>();
auto index = faiss::index_binary_factory(dim, desc, metric_type);
index_.reset(index);
}
......@@ -137,7 +137,7 @@ BinaryIDMAP::AddWithoutIds(const DatasetPtr& dataset_ptr, const Config& config)
new_ids[i] = i;
}
index_->add_with_ids(rows, (uint8_t*)p_data, new_ids.data());
index_->add_with_ids(rows, reinterpret_cast<const uint8_t*>(p_data), new_ids.data());
}
void
......@@ -147,8 +147,8 @@ BinaryIDMAP::QueryImpl(int64_t n, const uint8_t* data, int64_t k, float* distanc
auto bin_flat_index = dynamic_cast<faiss::IndexBinaryIDMap*>(index_.get())->index;
bin_flat_index->metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
int32_t* i_distances = reinterpret_cast<int32_t*>(distances);
bin_flat_index->search(n, (uint8_t*)data, k, i_distances, labels, bitset_);
auto i_distances = reinterpret_cast<int32_t*>(distances);
bin_flat_index->search(n, data, k, i_distances, labels, bitset_);
// if hamming, it need transform int32 to float
if (bin_flat_index->metric_type == faiss::METRIC_Hamming) {
......
......@@ -51,17 +51,18 @@ BinaryIVF::Query(const DatasetPtr& dataset_ptr, const Config& config) {
GET_TENSOR_DATA(dataset_ptr)
try {
int64_t k = config[meta::TOPK].get<int64_t>();
auto k = config[meta::TOPK].get<int64_t>();
auto elems = rows * k;
size_t p_id_size = sizeof(int64_t) * elems;
size_t p_dist_size = sizeof(float) * elems;
auto p_id = (int64_t*)malloc(p_id_size);
auto p_dist = (float*)malloc(p_dist_size);
auto p_id = static_cast<int64_t*>(malloc(p_id_size));
auto p_dist = static_cast<float*>(malloc(p_dist_size));
QueryImpl(rows, (uint8_t*)p_data, k, p_dist, p_id, config);
QueryImpl(rows, reinterpret_cast<const uint8_t*>(p_data), k, p_dist, p_id, config);
auto ret_ds = std::make_shared<Dataset>();
ret_ds->Set(meta::IDS, p_id);
ret_ds->Set(meta::DISTANCE, p_dist);
......@@ -111,8 +112,8 @@ BinaryIVF::Train(const DatasetPtr& dataset_ptr, const Config& config) {
faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
faiss::IndexBinary* coarse_quantizer = new faiss::IndexBinaryFlat(dim, metric_type);
auto index = std::make_shared<faiss::IndexBinaryIVF>(coarse_quantizer, dim, nlist, metric_type);
index->train(rows, (uint8_t*)p_data);
index->add_with_ids(rows, (uint8_t*)p_data, p_ids);
index->train(rows, static_cast<const uint8_t*>(p_data));
index->add_with_ids(rows, static_cast<const uint8_t*>(p_data), p_ids);
index_ = index;
}
......@@ -132,8 +133,8 @@ BinaryIVF::QueryImpl(int64_t n, const uint8_t* data, int64_t k, float* distances
ivf_index->nprobe = params->nprobe;
stdclock::time_point before = stdclock::now();
int32_t* i_distances = reinterpret_cast<int32_t*>(distances);
index_->search(n, (uint8_t*)data, k, i_distances, labels, bitset_);
auto i_distances = reinterpret_cast<int32_t*>(distances);
index_->search(n, data, k, i_distances, labels, bitset_);
stdclock::time_point after = stdclock::now();
double search_cost = (std::chrono::duration<double, std::micro>(after - before)).count();
......
......@@ -79,8 +79,8 @@ IndexHNSW::Load(const BinarySet& index_binary) {
void
IndexHNSW::Train(const DatasetPtr& dataset_ptr, const Config& config) {
try {
int64_t dim = dataset_ptr->Get<int64_t>(meta::DIM);
int64_t rows = dataset_ptr->Get<int64_t>(meta::ROWS);
auto dim = dataset_ptr->Get<int64_t>(meta::DIM);
auto rows = dataset_ptr->Get<int64_t>(meta::ROWS);
hnswlib::SpaceInterface<float>* space;
std::string metric_type = config[Metric::TYPE];
......@@ -131,7 +131,7 @@ IndexHNSW::Add(const DatasetPtr& dataset_ptr, const Config& config) {
#pragma omp parallel for
for (int i = 1; i < rows; ++i) {
faiss::BuilderSuspend::check_wait();
index_->addPoint(((float*)p_data + Dim() * i), p_ids[i]);
index_->addPoint((reinterpret_cast<const float*>(p_data) + Dim() * i), p_ids[i]);
}
}
......@@ -145,8 +145,8 @@ IndexHNSW::Query(const DatasetPtr& dataset_ptr, const Config& config) {
size_t k = config[meta::TOPK].get<int64_t>();
size_t id_size = sizeof(int64_t) * k;
size_t dist_size = sizeof(float) * k;
auto p_id = (int64_t*)malloc(id_size * rows);
auto p_dist = (float*)malloc(dist_size * rows);
auto p_id = static_cast<int64_t*>(malloc(id_size * rows));
auto p_dist = static_cast<float*>(malloc(dist_size * rows));
index_->setEf(config[IndexParams::ef]);
......@@ -157,7 +157,7 @@ IndexHNSW::Query(const DatasetPtr& dataset_ptr, const Config& config) {
#pragma omp parallel for
for (unsigned int i = 0; i < rows; ++i) {
std::vector<P> ret;
const float* single_query = (float*)p_data + i * Dim();
const float* single_query = reinterpret_cast<const float*>(p_data) + i * Dim();
// if (normalize) {
// std::vector<float> norm_vector(Dim());
......@@ -166,7 +166,7 @@ IndexHNSW::Query(const DatasetPtr& dataset_ptr, const Config& config) {
// } else {
// ret = index_->searchKnn((float*)single_query, config[meta::TOPK].get<int64_t>(), compare);
// }
ret = index_->searchKnn((float*)single_query, k, compare, blacklist);
ret = index_->searchKnn(single_query, k, compare, blacklist);
while (ret.size() < k) {
ret.emplace_back(std::make_pair(-1, -1));
......@@ -207,7 +207,7 @@ IndexHNSW::Dim() {
if (!index_) {
KNOWHERE_THROW_MSG("index not initialize");
}
return (*(size_t*)index_->dist_func_param_);
return (*static_cast<size_t*>(index_->dist_func_param_));
}
void
......
......@@ -59,7 +59,7 @@ IDMAP::Train(const DatasetPtr& dataset_ptr, const Config& config) {
constexpr faiss::MetricType metric_type = faiss::METRIC_L2;
const char* desc = "IDMap,Flat";
int64_t dim = config[meta::DIM].get<int64_t>();
auto dim = config[meta::DIM].get<int64_t>();
auto index = faiss::index_factory(dim, desc, metric_type);
index_.reset(index);
}
......@@ -72,7 +72,7 @@ IDMAP::Add(const DatasetPtr& dataset_ptr, const Config& config) {
std::lock_guard<std::mutex> lk(mutex_);
GET_TENSOR_DATA_ID(dataset_ptr)
index_->add_with_ids(rows, (float*)p_data, p_ids);
index_->add_with_ids(rows, reinterpret_cast<const float*>(p_data), p_ids);
}
void
......@@ -91,7 +91,7 @@ IDMAP::AddWithoutIds(const DatasetPtr& dataset_ptr, const Config& config) {
new_ids[i] = i;
}
index_->add_with_ids(rows, (float*)p_data, new_ids.data());
index_->add_with_ids(rows, reinterpret_cast<const float*>(p_data), new_ids.data());
}
DatasetPtr
......@@ -101,14 +101,14 @@ IDMAP::Query(const DatasetPtr& dataset_ptr, const Config& config) {
}
GET_TENSOR_DATA(dataset_ptr)
int64_t k = config[meta::TOPK].get<int64_t>();
auto k = config[meta::TOPK].get<int64_t>();
auto elems = rows * k;
size_t p_id_size = sizeof(int64_t) * elems;
size_t p_dist_size = sizeof(float) * elems;
auto p_id = (int64_t*)malloc(p_id_size);
auto p_dist = (float*)malloc(p_dist_size);
auto p_id = static_cast<int64_t*>(malloc(p_id_size));
auto p_dist = static_cast<float*>(malloc(p_dist_size));
QueryImpl(rows, (float*)p_data, k, p_dist, p_id, config);
QueryImpl(rows, reinterpret_cast<const float*>(p_data), k, p_dist, p_id, config);
auto ret_ds = std::make_shared<Dataset>();
ret_ds->Set(meta::IDS, p_id);
......@@ -227,7 +227,7 @@ IDMAP::QueryImpl(int64_t n, const float* data, int64_t k, float* distances, int6
// assign the metric type
auto flat_index = dynamic_cast<faiss::IndexIDMap*>(index_.get())->index;
flat_index->metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
index_->search(n, (float*)data, k, distances, labels, bitset_);
index_->search(n, data, k, distances, labels, bitset_);
}
} // namespace knowhere
......
......@@ -68,9 +68,9 @@ IVF::Train(const DatasetPtr& dataset_ptr, const Config& config) {
faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
faiss::Index* coarse_quantizer = new faiss::IndexFlat(dim, metric_type);
int64_t nlist = config[IndexParams::nlist].get<int64_t>();
auto nlist = config[IndexParams::nlist].get<int64_t>();
index_ = std::shared_ptr<faiss::Index>(new faiss::IndexIVFFlat(coarse_quantizer, dim, nlist, metric_type));
index_->train(rows, (float*)p_data);
index_->train(rows, reinterpret_cast<const float*>(p_data));
}
void
......@@ -81,7 +81,7 @@ IVF::Add(const DatasetPtr& dataset_ptr, const Config& config) {
std::lock_guard<std::mutex> lk(mutex_);
GET_TENSOR_DATA_ID(dataset_ptr)
index_->add_with_ids(rows, (float*)p_data, p_ids);
index_->add_with_ids(rows, reinterpret_cast<const float*>(p_data), p_ids);
}
void
......@@ -92,7 +92,7 @@ IVF::AddWithoutIds(const DatasetPtr& dataset_ptr, const Config& config) {
std::lock_guard<std::mutex> lk(mutex_);
GET_TENSOR_DATA(dataset_ptr)
index_->add(rows, (float*)p_data);
index_->add(rows, reinterpret_cast<const float*>(p_data));
}
DatasetPtr
......@@ -106,15 +106,15 @@ IVF::Query(const DatasetPtr& dataset_ptr, const Config& config) {
try {
fiu_do_on("IVF.Search.throw_std_exception", throw std::exception());
fiu_do_on("IVF.Search.throw_faiss_exception", throw faiss::FaissException(""));
int64_t k = config[meta::TOPK].get<int64_t>();
auto k = config[meta::TOPK].get<int64_t>();
auto elems = rows * k;
size_t p_id_size = sizeof(int64_t) * elems;
size_t p_dist_size = sizeof(float) * elems;
auto p_id = (int64_t*)malloc(p_id_size);
auto p_dist = (float*)malloc(p_dist_size);
auto p_id = static_cast<int64_t*>(malloc(p_id_size));
auto p_dist = static_cast<float*>(malloc(p_dist_size));
QueryImpl(rows, (float*)p_data, k, p_dist, p_id, config);
QueryImpl(rows, reinterpret_cast<const float*>(p_data), k, p_dist, p_id, config);
// std::stringstream ss_res_id, ss_res_dist;
// for (int i = 0; i < 10; ++i) {
......@@ -294,8 +294,8 @@ IVF::GenGraph(const float* data, const int64_t k, GraphType& graph, const Config
auto& res = res_vec[i];
res.resize(K * b_size);
auto xq = data + batch_size * dim * i;
QueryImpl(b_size, (float*)xq, K, res_dis.data(), res.data(), config);
const float* xq = data + batch_size * dim * i;
QueryImpl(b_size, xq, K, res_dis.data(), res.data(), config);
for (int j = 0; j < b_size; ++j) {
auto& node = graph[batch_size * i + j];
......@@ -327,7 +327,7 @@ IVF::QueryImpl(int64_t n, const float* data, int64_t k, float* distances, int64_
} else {
ivf_index->parallel_mode = 0;
}
ivf_index->search(n, (float*)data, k, distances, labels, bitset_);
ivf_index->search(n, data, k, distances, labels, bitset_);
stdclock::time_point after = stdclock::now();
double search_cost = (std::chrono::duration<double, std::micro>(after - before)).count();
LOG_KNOWHERE_DEBUG_ << "IVF search cost: " << search_cost
......
......@@ -41,7 +41,7 @@ IVFPQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
coarse_quantizer, dim, config[IndexParams::nlist].get<int64_t>(), config[IndexParams::m].get<int64_t>(),
config[IndexParams::nbits].get<int64_t>(), metric_type));
index_->train(rows, (float*)p_data);
index_->train(rows, reinterpret_cast<const float*>(p_data));
}
VecIndexPtr
......
......@@ -48,7 +48,7 @@ IVFSQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
index_ = std::shared_ptr<faiss::Index>(new faiss::IndexIVFScalarQuantizer(
coarse_quantizer, dim, config[IndexParams::nlist].get<int64_t>(), faiss::QuantizerType::QT_8bit, metric_type));
index_->train(rows, (float*)p_data);
index_->train(rows, reinterpret_cast<const float*>(p_data));
}
VecIndexPtr
......
......@@ -53,7 +53,7 @@ IndexRHNSW::Load(const BinarySet& index_binary) {
reader.name = this->index_type() + "_Index";
auto binary = index_binary.GetByName(reader.name);
reader.total = (size_t)binary->size;
reader.total = static_cast<size_t>(binary->size);
reader.data_ = binary->data.get();
auto idx = faiss::read_index(&reader);
......@@ -75,7 +75,7 @@ IndexRHNSW::Add(const DatasetPtr& dataset_ptr, const Config& config) {
}
GET_TENSOR_DATA(dataset_ptr)
index_->add(rows, (float*)p_data);
index_->add(rows, reinterpret_cast<const float*>(p_data));
}
DatasetPtr
......@@ -85,11 +85,11 @@ IndexRHNSW::Query(const DatasetPtr& dataset_ptr, const Config& config) {
}
GET_TENSOR_DATA(dataset_ptr)
int64_t k = config[meta::TOPK].get<int64_t>();
auto k = config[meta::TOPK].get<int64_t>();
int64_t id_size = sizeof(int64_t) * k;
int64_t dist_size = sizeof(float) * k;
auto p_id = (int64_t*)malloc(id_size * rows);
auto p_dist = (float*)malloc(dist_size * rows);
auto p_id = static_cast<int64_t*>(malloc(id_size * rows));
auto p_dist = static_cast<float*>(malloc(dist_size * rows));
for (auto i = 0; i < k * rows; ++i) {
p_id[i] = -1;
p_dist[i] = -1;
......@@ -99,7 +99,7 @@ IndexRHNSW::Query(const DatasetPtr& dataset_ptr, const Config& config) {
faiss::ConcurrentBitsetPtr blacklist = GetBlacklist();
real_index->hnsw.efSearch = (config[IndexParams::ef]);
real_index->search(rows, (float*)p_data, k, p_dist, p_id, blacklist);
real_index->search(rows, reinterpret_cast<const float*>(p_data), k, p_dist, p_id, blacklist);
auto ret_ds = std::make_shared<Dataset>();
ret_ds->Set(meta::IDS, p_id);
......
......@@ -66,7 +66,7 @@ IndexRHNSWFlat::Load(const BinarySet& index_binary) {
reader.name = this->index_type() + "_Data";
auto binary = index_binary.GetByName(reader.name);
reader.total = (size_t)binary->size;
reader.total = static_cast<size_t>(binary->size);
reader.data_ = binary->data.get();
auto real_idx = dynamic_cast<faiss::IndexRHNSWFlat*>(index_.get());
......
......@@ -62,7 +62,7 @@ IndexRHNSWPQ::Load(const BinarySet& index_binary) {
reader.name = this->index_type() + "_Data";
auto binary = index_binary.GetByName(reader.name);
reader.total = (size_t)binary->size;
reader.total = static_cast<size_t>(binary->size);
reader.data_ = binary->data.get();
auto real_idx = dynamic_cast<faiss::IndexRHNSWPQ*>(index_.get());
......@@ -84,7 +84,7 @@ IndexRHNSWPQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
auto idx = new faiss::IndexRHNSWPQ(int(dim), config[IndexParams::PQM], config[IndexParams::M]);
idx->hnsw.efConstruction = config[IndexParams::efConstruction];
index_ = std::shared_ptr<faiss::Index>(idx);
index_->train(rows, (float*)p_data);
index_->train(rows, reinterpret_cast<const float*>(p_data));
} catch (std::exception& e) {
KNOWHERE_THROW_MSG(e.what());
}
......
......@@ -65,7 +65,7 @@ IndexRHNSWSQ::Load(const BinarySet& index_binary) {
reader.name = this->index_type() + "_Data";
auto binary = index_binary.GetByName(reader.name);
reader.total = (size_t)binary->size;
reader.total = static_cast<size_t>(binary->size);
reader.data_ = binary->data.get();
auto real_idx = dynamic_cast<faiss::IndexRHNSWSQ*>(index_.get());
......@@ -89,7 +89,7 @@ IndexRHNSWSQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
new faiss::IndexRHNSWSQ(int(dim), faiss::QuantizerType::QT_8bit, config[IndexParams::M], metric_type);
idx->hnsw.efConstruction = config[IndexParams::efConstruction];
index_ = std::shared_ptr<faiss::Index>(idx);
index_->train(rows, (float*)p_data);
index_->train(rows, static_cast<const float*>(p_data));
} catch (std::exception& e) {
KNOWHERE_THROW_MSG(e.what());
}
......
......@@ -110,7 +110,7 @@ GPUIDMAP::QueryImpl(int64_t n, const float* data, int64_t k, float* distances, i
// assign the metric type
auto flat_index = dynamic_cast<faiss::IndexIDMap*>(index_.get())->index;
flat_index->metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
index_->search(n, (float*)data, k, distances, labels, bitset_);
index_->search(n, data, k, distances, labels, bitset_);
}
void
......@@ -133,8 +133,8 @@ GPUIDMAP::GenGraph(const float* data, const int64_t k, GraphType& graph, const C
auto& res = res_vec[i];
res.resize(K * b_size);
auto xq = data + batch_size * dim * i;
QueryImpl(b_size, (float*)xq, K, res_dis.data(), res.data(), config);
const float* xq = data + batch_size * dim * i;
QueryImpl(b_size, xq, K, res_dis.data(), res.data(), config);
for (int j = 0; j < b_size; ++j) {
auto& node = graph[batch_size * i + j];
......
......@@ -45,6 +45,8 @@ class GPUIDMAP : public IDMAP, public GPUIndex {
void
GenGraph(const float*, const int64_t, GraphType&, const Config&);
virtual ~GPUIDMAP() = default;
protected:
BinarySet
SerializeImpl(const IndexType&) override;
......
......@@ -42,7 +42,7 @@ GPUIVF::Train(const DatasetPtr& dataset_ptr, const Config& config) {
faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
auto device_index =
new faiss::gpu::GpuIndexIVFFlat(gpu_res->faiss_res.get(), dim, nlist, metric_type, idx_config);
device_index->train(rows, (float*)p_data);
device_index->train(rows, reinterpret_cast<const float*>(p_data));
index_.reset(device_index);
res_ = gpu_res;
......@@ -150,7 +150,8 @@ GPUIVF::QueryImpl(int64_t n, const float* data, int64_t k, float* distances, int
int64_t dim = device_index->d;
for (int64_t i = 0; i < n; i += block_size) {
int64_t search_size = (n - i > block_size) ? block_size : (n - i);
device_index->search(search_size, (float*)data + i * dim, k, distances + i * k, labels + i * k, bitset_);
device_index->search(search_size, reinterpret_cast<const float*>(data) + i * dim, k, distances + i * k,
labels + i * k, bitset_);
}
} else {
KNOWHERE_THROW_MSG("Not a GpuIndexIVF type.");
......
......@@ -37,7 +37,7 @@ GPUIVFPQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
new faiss::gpu::GpuIndexIVFPQ(gpu_res->faiss_res.get(), dim, config[IndexParams::nlist].get<int64_t>(),
config[IndexParams::m], config[IndexParams::nbits],
GetMetricType(config[Metric::TYPE].get<std::string>())); // IP not support
device_index->train(rows, (float*)p_data);
device_index->train(rows, reinterpret_cast<const float*>(p_data));
index_.reset(device_index);
res_ = gpu_res;
......
......@@ -36,6 +36,8 @@ class GPUIVFPQ : public GPUIVF {
VecIndexPtr
CopyGpuToCpu(const Config&) override;
virtual ~GPUIVFPQ() = default;
protected:
std::shared_ptr<faiss::IVFSearchParameters>
GenParams(const Config& config) override;
......
......@@ -46,7 +46,7 @@ GPUIVFSQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
if (gpu_res != nullptr) {
ResScope rs(gpu_res, gpu_id_, true);
auto device_index = faiss::gpu::index_cpu_to_gpu(gpu_res->faiss_res.get(), gpu_id_, build_index);
device_index->train(rows, (float*)p_data);
device_index->train(rows, reinterpret_cast<const float*>(p_data));
index_.reset(device_index);
res_ = gpu_res;
......
......@@ -35,6 +35,8 @@ class GPUIVFSQ : public GPUIVF {
VecIndexPtr
CopyGpuToCpu(const Config&) override;
virtual ~GPUIVFSQ() = default;
};
using GPUIVFSQPtr = std::shared_ptr<GPUIVFSQ>;
......
......@@ -44,7 +44,7 @@ IVFSQHybrid::Train(const DatasetPtr& dataset_ptr, const Config& config) {
if (gpu_res != nullptr) {
ResScope rs(gpu_res, gpu_id_, true);
auto device_index = faiss::gpu::index_cpu_to_gpu(gpu_res->faiss_res.get(), gpu_id_, build_index);
device_index->train(rows, (float*)p_data);
device_index->train(rows, reinterpret_cast<const float*>(p_data));
index_.reset(device_index);
res_ = gpu_res;
......@@ -132,8 +132,9 @@ IVFSQHybrid::LoadData(const knowhere::QuantizerPtr& quantizer_ptr, const Config&
option.allInGpu = true;
auto ivf_quantizer = std::dynamic_pointer_cast<FaissIVFQuantizer>(quantizer_ptr);
if (ivf_quantizer == nullptr)
if (ivf_quantizer == nullptr) {
KNOWHERE_THROW_MSG("quantizer type not faissivfquantizer");
}
auto index_composition = new faiss::IndexComposition;
index_composition->index = index_.get();
......@@ -188,9 +189,9 @@ IVFSQHybrid::SetQuantizer(const QuantizerPtr& quantizer_ptr) {
KNOWHERE_THROW_MSG("Quantizer type error");
}
faiss::IndexIVF* ivf_index = dynamic_cast<faiss::IndexIVF*>(index_.get());
auto ivf_index = dynamic_cast<faiss::IndexIVF*>(index_.get());
faiss::gpu::GpuIndexFlat* is_gpu_flat_index = dynamic_cast<faiss::gpu::GpuIndexFlat*>(ivf_index->quantizer);
auto is_gpu_flat_index = dynamic_cast<faiss::gpu::GpuIndexFlat*>(ivf_index->quantizer);
if (is_gpu_flat_index == nullptr) {
// delete ivf_index->quantizer;
ivf_index->quantizer = ivf_quantizer->quantizer;
......
......@@ -232,12 +232,12 @@ DistanceIP::Compare(const float* a, const float* b, unsigned size) const {
float
DistanceL2::Compare(const float* a, const float* b, unsigned size) const {
return faiss::fvec_L2sqr(a, b, (size_t)size);
return faiss::fvec_L2sqr(a, b, static_cast<size_t>(size));
}
float
DistanceIP::Compare(const float* a, const float* b, unsigned size) const {
return -(faiss::fvec_inner_product(a, b, (size_t)size));
return -(faiss::fvec_inner_product(a, b, static_cast<size_t>(size)));
}
#endif
......
......@@ -63,12 +63,12 @@ IVF_NM::Load(const BinarySet& binary_set) {
// Construct arranged data from original data
auto binary = binary_set.GetByName(RAW_DATA);
const float* original_data = (const float*)binary->data.get();
auto original_data = reinterpret_cast<const float*>(binary->data.get());
auto ivf_index = dynamic_cast<faiss::IndexIVF*>(index_.get());
auto invlists = ivf_index->invlists;
auto d = ivf_index->d;
auto nb = (size_t)(binary->size / invlists->code_size);
auto arranged_data = new uint8_t[d * sizeof(float) * nb];
size_t nb = binary->size / invlists->code_size;
auto arranged_data = new float[d * nb];
prefix_sum.resize(invlists->nlist);
size_t curr_index = 0;
......@@ -77,8 +77,7 @@ IVF_NM::Load(const BinarySet& binary_set) {
for (size_t i = 0; i < invlists->nlist; i++) {
auto list_size = ails->ids[i].size();
for (size_t j = 0; j < list_size; j++) {
memcpy(arranged_data + d * sizeof(float) * (curr_index + j), original_data + d * ails->ids[i][j],
d * sizeof(float));
memcpy(arranged_data + d * (curr_index + j), original_data + d * ails->ids[i][j], d * sizeof(float));
}
prefix_sum[i] = curr_index;
curr_index += list_size;
......@@ -86,18 +85,18 @@ IVF_NM::Load(const BinarySet& binary_set) {
#else
auto rol = dynamic_cast<faiss::ReadOnlyArrayInvertedLists*>(invlists);
auto lengths = rol->readonly_length;
auto rol_ids = (const int64_t*)rol->pin_readonly_ids->data;
auto rol_ids = reinterpret_cast<const int64_t*>(rol->pin_readonly_ids->data);
for (size_t i = 0; i < invlists->nlist; i++) {
auto list_size = lengths[i];
for (size_t j = 0; j < list_size; j++) {
memcpy(arranged_data + d * sizeof(float) * (curr_index + j), original_data + d * rol_ids[curr_index + j],
memcpy(arranged_data + d * (curr_index + j), original_data + d * rol_ids[curr_index + j],
d * sizeof(float));
}
prefix_sum[i] = curr_index;
curr_index += list_size;
}
#endif
data_ = std::shared_ptr<uint8_t[]>(arranged_data);
data_ = std::shared_ptr<uint8_t[]>(reinterpret_cast<uint8_t*>(arranged_data));
}
void
......@@ -106,9 +105,9 @@ IVF_NM::Train(const DatasetPtr& dataset_ptr, const Config& config) {
faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
faiss::Index* coarse_quantizer = new faiss::IndexFlat(dim, metric_type);
int64_t nlist = config[IndexParams::nlist].get<int64_t>();
auto nlist = config[IndexParams::nlist].get<int64_t>();
index_ = std::shared_ptr<faiss::Index>(new faiss::IndexIVFFlat(coarse_quantizer, dim, nlist, metric_type));
index_->train(rows, (float*)p_data);
index_->train(rows, reinterpret_cast<const float*>(p_data));
}
void
......@@ -119,7 +118,7 @@ IVF_NM::Add(const DatasetPtr& dataset_ptr, const Config& config) {
std::lock_guard<std::mutex> lk(mutex_);
GET_TENSOR_DATA_ID(dataset_ptr)
index_->add_with_ids_without_codes(rows, (float*)p_data, p_ids);
index_->add_with_ids_without_codes(rows, reinterpret_cast<const float*>(p_data), p_ids);
}
void
......@@ -130,7 +129,7 @@ IVF_NM::AddWithoutIds(const DatasetPtr& dataset_ptr, const Config& config) {
std::lock_guard<std::mutex> lk(mutex_);
GET_TENSOR_DATA(dataset_ptr)
index_->add_without_codes(rows, (float*)p_data);
index_->add_without_codes(rows, reinterpret_cast<const float*>(p_data));
}
DatasetPtr
......@@ -144,15 +143,15 @@ IVF_NM::Query(const DatasetPtr& dataset_ptr, const Config& config) {
try {
fiu_do_on("IVF_NM.Search.throw_std_exception", throw std::exception());
fiu_do_on("IVF_NM.Search.throw_faiss_exception", throw faiss::FaissException(""));
int64_t k = config[meta::TOPK].get<int64_t>();
auto k = config[meta::TOPK].get<int64_t>();
auto elems = rows * k;
size_t p_id_size = sizeof(int64_t) * elems;
size_t p_dist_size = sizeof(float) * elems;
auto p_id = (int64_t*)malloc(p_id_size);
auto p_dist = (float*)malloc(p_dist_size);
auto p_id = static_cast<int64_t*>(malloc(p_id_size));
auto p_dist = static_cast<float*>(malloc(p_dist_size));
QueryImpl(rows, (float*)p_data, k, p_dist, p_id, config);
QueryImpl(rows, reinterpret_cast<const float*>(p_data), k, p_dist, p_id, config);
auto ret_ds = std::make_shared<Dataset>();
ret_ds->Set(meta::IDS, p_id);
......@@ -278,8 +277,8 @@ IVF_NM::GenGraph(const float* data, const int64_t k, GraphType& graph, const Con
auto& res = res_vec[i];
res.resize(K * b_size);
auto xq = data + batch_size * dim * i;
QueryImpl(b_size, (float*)xq, K, res_dis.data(), res.data(), config);
const float* xq = data + batch_size * dim * i;
QueryImpl(b_size, xq, K, res_dis.data(), res.data(), config);
for (int j = 0; j < b_size; ++j) {
auto& node = graph[batch_size * i + j];
......@@ -312,8 +311,8 @@ IVF_NM::QueryImpl(int64_t n, const float* data, int64_t k, float* distances, int
ivf_index->parallel_mode = 0;
}
bool is_sq8 = (index_type_ == IndexEnum::INDEX_FAISS_IVFSQ8) ? true : false;
ivf_index->search_without_codes(n, (float*)data, (const uint8_t*)data_.get(), prefix_sum, is_sq8, k, distances,
labels, bitset_);
ivf_index->search_without_codes(n, reinterpret_cast<const float*>(data), data_.get(), prefix_sum, is_sq8, k,
distances, labels, bitset_);
stdclock::time_point after = stdclock::now();
double search_cost = (std::chrono::duration<double, std::micro>(after - before)).count();
LOG_KNOWHERE_DEBUG_ << "IVF_NM search cost: " << search_cost
......
......@@ -36,7 +36,7 @@ class IVF_NM : public VecIndex, public OffsetBaseIndex {
}
BinarySet
Serialize(const Config& config = Config()) override;
Serialize(const Config& config) override;
void
Load(const BinarySet&) override;
......
......@@ -86,8 +86,8 @@ NSG_NM::Query(const DatasetPtr& dataset_ptr, const Config& config) {
auto elems = rows * topK;
size_t p_id_size = sizeof(int64_t) * elems;
size_t p_dist_size = sizeof(float) * elems;
auto p_id = (int64_t*)malloc(p_id_size);
auto p_dist = (float*)malloc(p_dist_size);
auto p_id = static_cast<int64_t*>(malloc(p_id_size));
auto p_dist = static_cast<float*>(malloc(p_dist_size));
faiss::ConcurrentBitsetPtr blacklist = GetBlacklist();
......@@ -97,7 +97,8 @@ NSG_NM::Query(const DatasetPtr& dataset_ptr, const Config& config) {
{
std::lock_guard<std::mutex> lk(mutex_);
// index_->ori_data_ = (float*) data_.get();
index_->Search((float*)p_data, (float*)data_.get(), rows, dim, topK, p_dist, p_id, s_params, blacklist);
index_->Search(reinterpret_cast<const float*>(p_data), reinterpret_cast<float*>(data_.get()), rows, dim,
topK, p_dist, p_id, s_params, blacklist);
}
auto ret_ds = std::make_shared<Dataset>();
......@@ -116,9 +117,9 @@ NSG_NM::Train(const DatasetPtr& dataset_ptr, const Config& config) {
idmap->AddWithoutIds(dataset_ptr, config);
impl::Graph knng;
const float* raw_data = idmap->GetRawVectors();
const int64_t k = config[IndexParams::knng].get<int64_t>();
auto k = config[IndexParams::knng].get<int64_t>();
#ifdef MILVUS_GPU_VERSION
const int64_t device_id = config[knowhere::meta::DEVICEID].get<int64_t>();
const auto device_id = config[knowhere::meta::DEVICEID].get<int64_t>();
if (device_id == -1) {
auto preprocess_index = std::make_shared<IVF>();
preprocess_index->Train(dataset_ptr, config);
......@@ -154,7 +155,8 @@ NSG_NM::Train(const DatasetPtr& dataset_ptr, const Config& config) {
}
index_ = std::make_shared<impl::NsgIndex>(dim, rows, metric_type_nsg);
index_->SetKnnGraph(knng);
index_->Build_with_ids(rows, (float*)p_data, (int64_t*)p_ids, b_params);
index_->Build_with_ids(rows, reinterpret_cast<float*>(const_cast<void*>(p_data)),
reinterpret_cast<const int64_t*>(p_ids), b_params);
}
int64_t
......
......@@ -35,7 +35,7 @@ class NSG_NM : public VecIndex {
}
BinarySet
Serialize(const Config& config = Config()) override;
Serialize(const Config& config) override;
void
Load(const BinarySet&) override;
......
......@@ -43,7 +43,7 @@ GPUIVF_NM::Train(const DatasetPtr& dataset_ptr, const Config& config) {
faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
auto device_index =
new faiss::gpu::GpuIndexIVFFlat(gpu_res->faiss_res.get(), dim, nlist, metric_type, idx_config);
device_index->train(rows, (float*)p_data);
device_index->train(rows, reinterpret_cast<const float*>(p_data));
index_.reset(device_index);
res_ = gpu_res;
......@@ -132,7 +132,7 @@ GPUIVF_NM::QueryImpl(int64_t n, const float* data, int64_t k, float* distances,
int64_t dim = device_index->d;
for (int64_t i = 0; i < n; i += block_size) {
int64_t search_size = (n - i > block_size) ? block_size : (n - i);
device_index->search(search_size, (float*)data + i * dim, k, distances + i * k, labels + i * k, bitset_);
device_index->search(search_size, data + i * dim, k, distances + i * k, labels + i * k, bitset_);
}
} else {
KNOWHERE_THROW_MSG("Not a GpuIndexIVF type.");
......
......@@ -79,7 +79,7 @@ TEST_F(NSGInterfaceTest, basic_test) {
fiu_init(0);
// untrained index
{
ASSERT_ANY_THROW(index_->Serialize());
ASSERT_ANY_THROW(index_->Serialize(search_conf));
ASSERT_ANY_THROW(index_->Query(query_dataset, search_conf));
ASSERT_ANY_THROW(index_->Add(base_dataset, search_conf));
ASSERT_ANY_THROW(index_->AddWithoutIds(base_dataset, search_conf));
......@@ -89,7 +89,7 @@ TEST_F(NSGInterfaceTest, basic_test) {
index_->BuildAll(base_dataset, train_conf);
// Serialize and Load before Query
milvus::knowhere::BinarySet bs = index_->Serialize();
milvus::knowhere::BinarySet bs = index_->Serialize(search_conf);
int64_t dim = base_dataset->Get<int64_t>(milvus::knowhere::meta::DIM);
int64_t rows = base_dataset->Get<int64_t>(milvus::knowhere::meta::ROWS);
......@@ -110,7 +110,7 @@ TEST_F(NSGInterfaceTest, basic_test) {
new_index_1->BuildAll(base_dataset, train_conf);
// Serialize and Load before Query
bs = new_index_1->Serialize();
bs = new_index_1->Serialize(search_conf);
dim = base_dataset->Get<int64_t>(milvus::knowhere::meta::DIM);
rows = base_dataset->Get<int64_t>(milvus::knowhere::meta::ROWS);
......@@ -151,7 +151,7 @@ TEST_F(NSGInterfaceTest, delete_test) {
index_->Train(base_dataset, train_conf);
// Serialize and Load before Query
milvus::knowhere::BinarySet bs = index_->Serialize();
milvus::knowhere::BinarySet bs = index_->Serialize(search_conf);
int64_t dim = base_dataset->Get<int64_t>(milvus::knowhere::meta::DIM);
int64_t rows = base_dataset->Get<int64_t>(milvus::knowhere::meta::ROWS);
......@@ -180,7 +180,7 @@ TEST_F(NSGInterfaceTest, delete_test) {
index_->SetBlacklist(bitset);
// Serialize and Load before Query
bs = index_->Serialize();
bs = index_->Serialize(search_conf);
dim = base_dataset->Get<int64_t>(milvus::knowhere::meta::DIM);
rows = base_dataset->Get<int64_t>(milvus::knowhere::meta::ROWS);
......
......@@ -183,7 +183,7 @@ SystemInfo::getTotalCpuTime(std::vector<int64_t>& work_time_array) {
std::vector<int64_t> total_time_array;
try {
FILE* file = fopen("/proc/stat", "r");
fiu_do_on("SystemInfo.getTotalCpuTime.open_proc", file = NULL);
fiu_do_on("SystemInfo.getTotalCpuTime.open_proc", file = nullptr);
if (file == nullptr) {
LOG_SERVER_ERROR_ << "Failed to read /proc/stat";
return total_time_array;
......@@ -195,7 +195,7 @@ SystemInfo::getTotalCpuTime(std::vector<int64_t>& work_time_array) {
for (int i = 0; i < num_processors_; i++) {
char buffer[1024];
char* ret = fgets(buffer, sizeof(buffer) - 1, file);
fiu_do_on("SystemInfo.getTotalCpuTime.read_proc", ret = NULL);
fiu_do_on("SystemInfo.getTotalCpuTime.read_proc", ret = nullptr);
if (ret == nullptr) {
LOG_SERVER_ERROR_ << "Could not read stat file";
fclose(file);
......@@ -293,7 +293,7 @@ SystemInfo::CPUTemperature() {
std::string path = "/sys/class/hwmon/";
try {
DIR* dir = opendir(path.c_str());
fiu_do_on("SystemInfo.CPUTemperature.opendir", dir = NULL);
fiu_do_on("SystemInfo.CPUTemperature.opendir", dir = nullptr);
if (!dir) {
LOG_SERVER_ERROR_ << "Could not open hwmon directory";
return result;
......@@ -311,7 +311,7 @@ SystemInfo::CPUTemperature() {
std::string object = filename;
object += "/temp1_input";
FILE* file = fopen(object.c_str(), "r");
fiu_do_on("SystemInfo.CPUTemperature.openfile", file = NULL);
fiu_do_on("SystemInfo.CPUTemperature.openfile", file = nullptr);
if (file == nullptr) {
LOG_SERVER_ERROR_ << "Could not open temperature file";
return result;
......
......@@ -97,7 +97,7 @@ PrometheusMetrics::GPUPercentGaugeSet() {
for (int i = 0; i < numDevice; ++i) {
prometheus::Gauge& GPU_percent = GPU_percent_.Add({{"DeviceNum", std::to_string(i)}});
double percent = (double)used_memory[i] / (double)used_total[i];
double percent = static_cast<double>(used_memory[i]) / static_cast<double>(used_total[i]);
GPU_percent.Set(percent * 100);
}
}
......@@ -232,8 +232,8 @@ PrometheusMetrics::CPUTemperature() {
std::vector<float> CPU_temperatures = server::SystemInfo::GetInstance().CPUTemperature();
float avg_cpu_temp = 0;
for (size_t i = 0; i < CPU_temperatures.size(); ++i) {
avg_cpu_temp += CPU_temperatures[i];
for (float CPU_temperature : CPU_temperatures) {
avg_cpu_temp += CPU_temperature;
}
avg_cpu_temp /= CPU_temperatures.size();
......
......@@ -61,7 +61,7 @@ SegmentReader::Initialize() {
for (auto& iter : field_map) {
const engine::snapshot::FieldPtr& field = iter.second->GetField();
std::string name = field->GetName();
engine::DataType ftype = static_cast<engine::DataType>(field->GetFtype());
auto ftype = static_cast<engine::DataType>(field->GetFtype());
if (engine::IsVectorField(field)) {
json params = field->GetParams();
if (params.find(knowhere::meta::DIM) == params.end()) {
......
......@@ -62,7 +62,7 @@ SegmentWriter::Initialize() {
for (auto& iter : field_map) {
const engine::snapshot::FieldPtr& field = iter.second->GetField();
std::string name = field->GetName();
engine::DataType ftype = static_cast<engine::DataType>(field->GetFtype());
auto ftype = static_cast<engine::DataType>(field->GetFtype());
if (engine::IsVectorField(field)) {
json params = field->GetParams();
if (params.find(knowhere::meta::DIM) == params.end()) {
......
......@@ -64,8 +64,9 @@ ReqScheduler::Stop() {
}
for (auto& iter : execute_threads_) {
if (iter == nullptr)
if (iter == nullptr) {
continue;
}
iter->join();
}
req_groups_.clear();
......
......@@ -67,9 +67,9 @@ CreateIndexReq::OnExecute() {
// pick up field
engine::snapshot::FieldPtr field;
for (auto field_it = fields_schema.begin(); field_it != fields_schema.end(); field_it++) {
if (field_it->first->GetName() == field_name_) {
field = field_it->first;
for (auto& field_it : fields_schema) {
if (field_it.first->GetName() == field_name_) {
field = field_it.first;
break;
}
}
......@@ -87,7 +87,7 @@ CreateIndexReq::OnExecute() {
engine::CollectionIndex index;
if (engine::IsVectorField(field)) {
auto params = field->GetParams();
int64_t dimension = params[engine::PARAM_DIMENSION].get<int64_t>();
auto dimension = params[engine::PARAM_DIMENSION].get<int64_t>();
// validate metric type
std::string metric_type;
......
......@@ -82,8 +82,9 @@ GetEntityByIDReq::OnExecute() {
if (field_names_.empty()) {
for (const auto& schema : field_mappings) {
if (schema.first->GetName() == engine::FIELD_UID)
if (schema.first->GetName() == engine::FIELD_UID) {
continue;
}
field_mappings_.insert(schema);
field_names_.emplace_back(schema.first->GetName());
}
......
......@@ -168,7 +168,7 @@ void
DeSerialization(const ::milvus::grpc::GeneralQuery& general_query, query::BooleanQueryPtr& boolean_clause,
query::QueryPtr& query_ptr) {
if (general_query.has_boolean_query()) {
boolean_clause->SetOccur((query::Occur)general_query.boolean_query().occur());
boolean_clause->SetOccur(static_cast<query::Occur>(general_query.boolean_query().occur()));
for (uint64_t i = 0; i < general_query.boolean_query().general_query_size(); ++i) {
if (general_query.boolean_query().general_query(i).has_boolean_query()) {
query::BooleanQueryPtr query = std::make_shared<query::BooleanQuery>();
......@@ -272,8 +272,9 @@ CopyDataChunkToEntity(const engine::DataChunkPtr& data_chunk,
// judge whether data exists
engine::BinaryDataPtr data = data_chunk->fixed_fields_[name];
if (data == nullptr || data->data_.empty())
if (data == nullptr || data->data_.empty()) {
continue;
}
auto single_size = (id_size != 0) ? (data->data_.size() / id_size) : 0;
......@@ -371,7 +372,7 @@ ConstructEntityResults(const std::vector<engine::AttrsData>& attrs, const std::v
if (attrs[0].attr_type_.find(field_name) != attrs[0].attr_type_.end()) {
auto grpc_field = response->add_fields();
grpc_field->set_field_name(field_name);
grpc_field->set_type((::milvus::grpc::DataType)attrs[0].attr_type_.at(field_name));
grpc_field->set_type(static_cast<::milvus::grpc::DataType>(attrs[0].attr_type_.at(field_name)));
auto grpc_attr_data = grpc_field->mutable_attr_record();
std::vector<int32_t> int32_data;
......@@ -1025,7 +1026,7 @@ GrpcRequestHandler::DescribeCollection(::grpc::ServerContext* context, const ::m
auto& field_schema = field_kv.second;
field->set_name(field_name);
field->set_type((milvus::grpc::DataType)field_schema.field_type_);
field->set_type(static_cast<milvus::grpc::DataType>(field_schema.field_type_));
auto grpc_field_param = field->add_extra_params();
grpc_field_param->set_key(EXTRA_PARAM_KEY);
......
......@@ -139,7 +139,7 @@ using FloatJson = nlohmann::basic_json<std::map, std::vector, std::string, bool,
/////////////////////////////////// Private methods ///////////////////////////////////////
void
WebRequestHandler::AddStatusToJson(nlohmann::json& json, int64_t code, const std::string& msg) {
json["code"] = (int64_t)code;
json["code"] = code;
json["message"] = msg;
}
......@@ -201,12 +201,13 @@ WebRequestHandler::CopyData2Json(const milvus::engine::DataChunkPtr& data_chunk,
std::string name = it.first->GetName();
engine::BinaryDataPtr data = data_chunk->fixed_fields_[name];
if (data == nullptr || data->data_.empty())
if (data == nullptr || data->data_.empty()) {
continue;
}
auto single_size = data->data_.size() / id_size;
switch (type) {
switch (static_cast<engine::DataType>(type)) {
case engine::DataType::INT32: {
int32_t int32_value;
int64_t offset = sizeof(int32_t) * i;
......@@ -253,6 +254,8 @@ WebRequestHandler::CopyData2Json(const milvus::engine::DataChunkPtr& data_chunk,
entity_json[name] = float_vector;
break;
}
default:
break;
}
}
one_json["entity"] = entity_json;
......@@ -342,7 +345,7 @@ WebRequestHandler::GetPageEntities(const std::string& collection_name, const int
engine::IDNumbers temp_ids;
STATUS_CHECK(req_handler_.ListIDInSegment(context_ptr_, collection_name, seg_id, temp_ids));
auto ids_begin = real_offset;
auto ids_end = std::min(temp_ids.size(), (size_t)(real_offset + real_page_size));
auto ids_end = std::min(temp_ids.size(), static_cast<size_t>(real_offset + real_page_size));
auto new_ids = std::vector<int64_t>(temp_ids.begin() + ids_begin, temp_ids.begin() + ids_end);
auto cur_size = entity_ids.size();
auto new_size = new_ids.size();
......@@ -357,16 +360,17 @@ WebRequestHandler::GetPageEntities(const std::string& collection_name, const int
}
std::vector<std::string> field_names;
STATUS_CHECK(GetEntityByIDs(collection_name, entity_ids, field_names, json_out));
return Status::OK();
}
Status
WebRequestHandler::GetSegmentVectors(const std::string& collection_name, int64_t segment_id, int64_t page_size,
int64_t offset, nlohmann::json& json_out) {
engine::IDNumbers vector_ids;
STATUS_CHECK(req_handler_.ListIDInSegment(context_ptr_, 0, segment_id, vector_ids));
STATUS_CHECK(req_handler_.ListIDInSegment(context_ptr_, nullptr, segment_id, vector_ids));
auto ids_begin = std::min(vector_ids.size(), (size_t)offset);
auto ids_end = std::min(vector_ids.size(), (size_t)(offset + page_size));
auto ids_begin = std::min(vector_ids.size(), static_cast<size_t>(offset));
auto ids_end = std::min(vector_ids.size(), static_cast<size_t>(offset + page_size));
auto new_ids = std::vector<int64_t>(vector_ids.begin() + ids_begin, vector_ids.begin() + ids_end);
nlohmann::json vectors_json;
......@@ -391,8 +395,8 @@ WebRequestHandler::GetSegmentIds(const std::string& collection_name, int64_t seg
std::vector<int64_t> ids;
auto status = req_handler_.ListIDInSegment(context_ptr_, collection_name, segment_id, ids);
if (status.ok()) {
auto ids_begin = std::min(ids.size(), (size_t)offset);
auto ids_end = std::min(ids.size(), (size_t)(offset + page_size));
auto ids_begin = std::min(ids.size(), static_cast<size_t>(offset));
auto ids_end = std::min(ids.size(), static_cast<size_t>(offset + page_size));
if (ids_begin >= ids_end) {
json_out["ids"] = std::vector<int64_t>();
......@@ -892,17 +896,17 @@ WebRequestHandler::DeleteByIDs(const std::string& collection_name, const nlohman
std::string& result_str) {
std::vector<int64_t> entity_ids;
if (!json.contains("ids")) {
return Status(BODY_FIELD_LOSS, "Field \"delete\" must contains \"ids\"");
return Status(BODY_FIELD_LOSS, R"(Field "delete" must contains "ids")");
}
auto ids = json["ids"];
if (!ids.is_array()) {
return Status(BODY_FIELD_LOSS, "\"ids\" must be an array");
return Status(BODY_FIELD_LOSS, R"("ids" must be an array)");
}
for (auto& id : ids) {
auto id_str = id.get<std::string>();
if (!ValidateStringIsNumber(id_str).ok()) {
return Status(ILLEGAL_BODY, "Members in \"ids\" must be integer string");
return Status(ILLEGAL_BODY, R"(Members in "ids" must be integer string)");
}
entity_ids.emplace_back(std::stol(id_str));
}
......@@ -1287,8 +1291,8 @@ WebRequestHandler::ShowCollections(const OQueryParams& query_params, OString& re
offset = 0;
page_size = collections.size();
} else {
offset = std::min((size_t)offset, collections.size());
page_size = std::min(collections.size() - offset, (size_t)page_size);
offset = std::min(static_cast<size_t>(offset), collections.size());
page_size = std::min(collections.size() - offset, static_cast<size_t>(page_size));
}
nlohmann::json collections_json;
......@@ -1429,14 +1433,14 @@ WebRequestHandler::ShowPartitions(const OString& collection_name, const OQueryPa
offset = 0;
page_size = partition_names.size();
} else {
offset = std::min((size_t)offset, partition_names.size());
page_size = std::min(partition_names.size() - offset, (size_t)page_size);
offset = std::min(static_cast<size_t>(offset), partition_names.size());
page_size = std::min(partition_names.size() - offset, static_cast<size_t>(page_size));
}
partition_list_dto->count = partition_names.size();
partition_list_dto->partitions = partition_list_dto->partitions.createShared();
if (offset < (int64_t)(partition_names.size())) {
if (offset < static_cast<int64_t>(partition_names.size())) {
for (int64_t i = offset; i < page_size + offset; i++) {
auto partition_dto = PartitionFieldsDto::createShared();
partition_dto->partition_tag = partition_names.at(i).c_str();
......@@ -1624,7 +1628,7 @@ WebRequestHandler::InsertEntity(const OString& collection_name, const milvus::se
if (ids.empty()) {
ids.resize(row_num * sizeof(int64_t));
}
int64_t id = entity.value().get<int64_t>();
auto id = entity.value().get<int64_t>();
int64_t id_offset = offset * sizeof(int64_t);
memcpy(ids.data() + id_offset, &id, sizeof(int64_t));
continue;
......@@ -1729,7 +1733,7 @@ WebRequestHandler::InsertEntity(const OString& collection_name, const milvus::se
auto pair = chunk_data.find(engine::FIELD_UID);
if (pair != chunk_data.end()) {
int64_t count = pair->second.size() / 8;
int64_t* pdata = reinterpret_cast<int64_t*>(pair->second.data());
auto pdata = reinterpret_cast<int64_t*>(pair->second.data());
ids_dto->ids = ids_dto->ids.createShared();
for (int64_t i = 0; i < count; ++i) {
ids_dto->ids->push_back(std::to_string(pdata[i]).c_str());
......
......@@ -51,7 +51,7 @@ DiskOperation::GetDirectory() const {
void
DiskOperation::ListDirectory(std::vector<std::string>& file_paths) {
std::experimental::filesystem::path target_path(dir_path_);
typedef std::experimental::filesystem::directory_iterator d_it;
using d_it = std::experimental::filesystem::directory_iterator;
d_it it_end;
d_it it(target_path);
if (std::experimental::filesystem::is_directory(dir_path_)) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册