Unverified commit 2a21d2cb authored by shengjun.li, committed by GitHub

fix leak (#4324)

Signed-off-by: shengjun.li <shengjun.li@zilliz.com>
Parent: b6dbbccd
@@ -323,7 +323,7 @@ class GetCollectionIDsOperation : public Operations {
     GetIDs() const;

  protected:
-    bool reversed_;
+    bool reversed_ = true;
     IDS_TYPE ids_;
 };
@@ -199,7 +199,7 @@ class Operations : public std::enable_shared_from_this<Operations> {
     OperationContext context_;
     ScopedSnapshotT prev_ss_;
     StepsHolderT holders_;
-    size_t last_pos_;
+    size_t last_pos_ = 0;
     std::vector<ID_TYPE> ids_;
     bool done_ = false;
     Status status_;
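Note: the two hunks above are the simplest part of the fix. `reversed_` and `last_pos_` previously relied on every constructor remembering to set them, and reading an indeterminate `bool` or `size_t` is undefined behavior. A default member initializer gives each field a defined value on every construction path. A minimal standalone sketch of the idiom (the class below is illustrative, not the Milvus Operations hierarchy):

#include <cstddef>
#include <iostream>

// Default member initializers: any constructor that does not explicitly
// set a field falls back to the in-class default.
class Cursor {
 public:
    Cursor() = default;                              // reversed_ == true, last_pos_ == 0
    explicit Cursor(size_t pos) : last_pos_(pos) {}  // reversed_ still defaults to true

    bool reversed() const { return reversed_; }
    size_t last_pos() const { return last_pos_; }

 private:
    bool reversed_ = true;  // defined even if a constructor forgets it
    size_t last_pos_ = 0;
};

int main() {
    Cursor a;      // both members have well-defined values
    Cursor b(42);  // last_pos_ == 42, reversed_ == true
    std::cout << a.reversed() << " " << a.last_pos() << " " << b.last_pos() << "\n";  // prints: 1 0 42
    return 0;
}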
@@ -44,6 +44,7 @@ class ReferenceProxy : public std::enable_shared_from_this<ReferenceProxy> {
             for (auto& cb : on_no_ref_cbs_) {
                 cb(this->shared_from_this());
             }
+            on_no_ref_cbs_.clear();
         }
     }
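Note: the `on_no_ref_cbs_.clear()` line is one of the actual leak fixes. Each callback receives `shared_from_this()` and may itself capture `shared_ptr`s, so leaving the callbacks stored after they have fired keeps whatever they captured alive for as long as the proxy exists. Clearing the vector once the callbacks have run drops those captured references. A self-contained sketch of the pattern, with invented names rather than the real ReferenceProxy interface:

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

struct Resource {
    ~Resource() { std::cout << "Resource released\n"; }
};

class Proxy {
 public:
    using Callback = std::function<void()>;

    void RegisterOnNoRefCB(Callback cb) { on_no_ref_cbs_.push_back(std::move(cb)); }

    void OnNoRef() {
        for (auto& cb : on_no_ref_cbs_) {
            cb();
        }
        // Without this clear(), any shared_ptr captured by the callbacks
        // stays stored in the vector for the proxy's whole lifetime.
        on_no_ref_cbs_.clear();
    }

 private:
    std::vector<Callback> on_no_ref_cbs_;
};

int main() {
    Proxy proxy;
    auto res = std::make_shared<Resource>();
    proxy.RegisterOnNoRefCB([res] { std::cout << "use_count=" << res.use_count() << "\n"; });
    res.reset();      // the stored callback still holds one reference
    proxy.OnNoRef();  // after clear(), "Resource released" prints here
    return 0;
}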
@@ -273,6 +273,7 @@ class Snapshot : public ReferenceProxy {
         ReferenceProxy::UnRef();
         if (ref_count_ == 0) {
             UnRefAll();
+            std::apply([](auto&... holder) { ((holder.clear()), ...); }, resources_);
         }
     }
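Note: `resources_` is a tuple of holder containers, so a plain loop cannot iterate over it. `std::apply` expands the tuple into the lambda's parameter pack, and the C++17 comma fold expression `((holder.clear()), ...)` calls `clear()` on every element regardless of its type, so the snapshot drops all cached resource references once its own refcount reaches zero. A standalone sketch of the idiom (the container types below are made up for illustration):

#include <iostream>
#include <map>
#include <string>
#include <tuple>
#include <vector>

int main() {
    // Stand-in for a heterogeneous "tuple of holders".
    std::vector<int> ints{1, 2, 3};
    std::map<int, std::string> names{{1, "one"}};
    std::vector<double> scores{0.5, 0.7};
    auto resources = std::make_tuple(ints, names, scores);

    // std::apply unpacks the tuple into holder...; the comma fold expression
    // (C++17) invokes clear() on each holder in turn.
    std::apply([](auto&... holder) { ((holder.clear()), ...); }, resources);

    std::cout << std::get<0>(resources).size() << " "    // 0
              << std::get<1>(resources).size() << " "    // 0
              << std::get<2>(resources).size() << "\n";  // 0
    return 0;
}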
@@ -112,6 +112,7 @@ BinaryIVF::Train(const DatasetPtr& dataset_ptr, const Config& config) {
     faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
     faiss::IndexBinary* coarse_quantizer = new faiss::IndexBinaryFlat(dim, metric_type);
     auto index = std::make_shared<faiss::IndexBinaryIVF>(coarse_quantizer, dim, nlist, metric_type);
+    index->own_fields = true;
     index->train(rows, static_cast<const uint8_t*>(p_data));
     index->add_with_ids(rows, static_cast<const uint8_t*>(p_data), p_ids);
     index_ = index;
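Note: this hunk and every `Train()` hunk below fix the same leak in the same way. The coarse quantizer is allocated with `new` and handed to a faiss IVF-family index, but faiss only deletes the quantizer in the index destructor when `own_fields` is true; with it left at its default of false, the flat quantizer (and its centroid storage) leaks every time an index is trained and later destroyed. Setting `index->own_fields = true` transfers ownership of the quantizer to the index. A minimal sketch of that ownership rule, assuming the faiss headers are available (dimensions and training data are made up):

#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFFlat.h>

#include <memory>
#include <random>
#include <vector>

int main() {
    const int d = 16;     // vector dimension (made up for the sketch)
    const int nlist = 4;  // number of IVF buckets
    const int n = 1000;   // number of training vectors

    std::vector<float> data(static_cast<size_t>(n) * d);
    std::mt19937 gen(42);
    std::uniform_real_distribution<float> dist(0.f, 1.f);
    for (auto& x : data) x = dist(gen);

    // The coarse quantizer is heap-allocated and handed to the IVF index.
    auto* coarse_quantizer = new faiss::IndexFlatL2(d);
    auto index = std::make_shared<faiss::IndexIVFFlat>(coarse_quantizer, d, nlist, faiss::METRIC_L2);

    // Without this, ~IndexIVFFlat never deletes coarse_quantizer, so the flat
    // index leaks when `index` goes out of scope.
    index->own_fields = true;

    index->train(n, data.data());
    index->add(n, data.data());
    return 0;  // index is destroyed here; own_fields makes it free the quantizer too
}

The IVF, IVFPQ, IVFSQ and IVF_NM hunks that follow are the same change spelled out per index type: build the index with `std::make_shared`, set `own_fields = true` before training, then publish it through `index_`.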
@@ -67,11 +67,13 @@ void
 IVF::Train(const DatasetPtr& dataset_ptr, const Config& config) {
     GET_TENSOR_DATA_DIM(dataset_ptr)

+    int64_t nlist = config[IndexParams::nlist].get<int64_t>();
     faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
     faiss::Index* coarse_quantizer = new faiss::IndexFlat(dim, metric_type);
-    auto nlist = config[IndexParams::nlist].get<int64_t>();
-    index_ = std::shared_ptr<faiss::Index>(new faiss::IndexIVFFlat(coarse_quantizer, dim, nlist, metric_type));
-    index_->train(rows, reinterpret_cast<const float*>(p_data));
+    auto index = std::make_shared<faiss::IndexIVFFlat>(coarse_quantizer, dim, nlist, metric_type);
+    index->own_fields = true;
+    index->train(rows, reinterpret_cast<const float*>(p_data));
+    index_ = index;
 }

 void
@@ -38,11 +38,12 @@ IVFPQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
     faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
     faiss::Index* coarse_quantizer = new faiss::IndexFlat(dim, metric_type);
-    index_ = std::shared_ptr<faiss::Index>(new faiss::IndexIVFPQ(
-        coarse_quantizer, dim, config[IndexParams::nlist].get<int64_t>(), config[IndexParams::m].get<int64_t>(),
-        config[IndexParams::nbits].get<int64_t>(), metric_type));
-    index_->train(rows, reinterpret_cast<const float*>(p_data));
+    auto index = std::make_shared<faiss::IndexIVFPQ>(coarse_quantizer, dim, config[IndexParams::nlist].get<int64_t>(),
+                                                     config[IndexParams::m].get<int64_t>(),
+                                                     config[IndexParams::nbits].get<int64_t>(), metric_type);
+    index->own_fields = true;
+    index->train(rows, reinterpret_cast<const float*>(p_data));
+    index_ = index;
 }

 VecIndexPtr
@@ -37,18 +37,13 @@ void
 IVFSQ::Train(const DatasetPtr& dataset_ptr, const Config& config) {
     GET_TENSOR_DATA_DIM(dataset_ptr)

-    // std::stringstream index_type;
-    // index_type << "IVF" << config[IndexParams::nlist] << ","
-    //            << "SQ" << config[IndexParams::nbits];
-    // index_ = std::shared_ptr<faiss::Index>(
-    //     faiss::index_factory(dim, index_type.str().c_str(), GetMetricType(config[Metric::TYPE].get<std::string>())));
     faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
     faiss::Index* coarse_quantizer = new faiss::IndexFlat(dim, metric_type);
-    index_ = std::shared_ptr<faiss::Index>(new faiss::IndexIVFScalarQuantizer(
-        coarse_quantizer, dim, config[IndexParams::nlist].get<int64_t>(), faiss::QuantizerType::QT_8bit, metric_type));
-    index_->train(rows, reinterpret_cast<const float*>(p_data));
+    auto index = std::make_shared<faiss::IndexIVFScalarQuantizer>(
+        coarse_quantizer, dim, config[IndexParams::nlist].get<int64_t>(), faiss::QuantizerType::QT_8bit, metric_type);
+    index->own_fields = true;
+    index->train(rows, reinterpret_cast<const float*>(p_data));
+    index_ = index;
 }

 VecIndexPtr
@@ -108,11 +108,13 @@ void
 IVF_NM::Train(const DatasetPtr& dataset_ptr, const Config& config) {
     GET_TENSOR_DATA_DIM(dataset_ptr)

+    int64_t nlist = config[IndexParams::nlist].get<int64_t>();
     faiss::MetricType metric_type = GetMetricType(config[Metric::TYPE].get<std::string>());
-    faiss::Index* coarse_quantizer = new faiss::IndexFlat(dim, metric_type);
-    auto nlist = config[IndexParams::nlist].get<int64_t>();
-    index_ = std::shared_ptr<faiss::Index>(new faiss::IndexIVFFlat(coarse_quantizer, dim, nlist, metric_type));
-    index_->train(rows, reinterpret_cast<const float*>(p_data));
+    auto coarse_quantizer = new faiss::IndexFlat(dim, metric_type);
+    auto index = std::make_shared<faiss::IndexIVFFlat>(coarse_quantizer, dim, nlist, metric_type);
+    index->own_fields = true;
+    index->train(rows, reinterpret_cast<const float*>(p_data));
+    index_ = index;
 }

 void