From c16ab5b35c2fbc108b366339f2d2948399751513 Mon Sep 17 00:00:00 2001
From: Cai Yudong
Date: Wed, 15 Sep 2021 10:06:00 +0800
Subject: [PATCH] Use better segcore variable name (#7869)

Signed-off-by: yudong.cai
---
 internal/core/src/segcore/InsertRecord.h        |  8 ++++----
 internal/core/src/segcore/SegmentSealedImpl.cpp | 16 ++++++++--------
 internal/core/src/segcore/SegmentSealedImpl.h   |  2 +-
 3 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/internal/core/src/segcore/InsertRecord.h b/internal/core/src/segcore/InsertRecord.h
index d9c800450..a05ba982b 100644
--- a/internal/core/src/segcore/InsertRecord.h
+++ b/internal/core/src/segcore/InsertRecord.h
@@ -29,7 +29,7 @@ struct InsertRecord {
     // get field data without knowing the type
     VectorBase*
     get_field_data_base(FieldOffset field_offset) const {
-        auto ptr = field_datas_[field_offset.get()].get();
+        auto ptr = fields_data_[field_offset.get()].get();
         return ptr;
     }
 
@@ -58,7 +58,7 @@ struct InsertRecord {
     void
     append_field_data(int64_t size_per_chunk) {
         static_assert(std::is_fundamental_v<Type>);
-        field_datas_.emplace_back(std::make_unique<ConcurrentVector<Type>>(size_per_chunk));
+        fields_data_.emplace_back(std::make_unique<ConcurrentVector<Type>>(size_per_chunk));
     }
 
     // append a column of vector type
@@ -66,10 +66,10 @@
     void
     append_field_data(int64_t dim, int64_t size_per_chunk) {
         static_assert(std::is_base_of_v<VectorTrait, VectorType>);
-        field_datas_.emplace_back(std::make_unique<ConcurrentVector<VectorType>>(dim, size_per_chunk));
+        fields_data_.emplace_back(std::make_unique<ConcurrentVector<VectorType>>(dim, size_per_chunk));
     }
 
 private:
-    std::vector<std::unique_ptr<VectorBase>> field_datas_;
+    std::vector<std::unique_ptr<VectorBase>> fields_data_;
 };
 }  // namespace milvus::segcore
diff --git a/internal/core/src/segcore/SegmentSealedImpl.cpp b/internal/core/src/segcore/SegmentSealedImpl.cpp
index e44fb99aa..685edbf2b 100644
--- a/internal/core/src/segcore/SegmentSealedImpl.cpp
+++ b/internal/core/src/segcore/SegmentSealedImpl.cpp
@@ -139,14 +139,14 @@ SegmentSealedImpl::LoadFieldData(const LoadFieldDataInfo& info) {
         // write data under lock
         std::unique_lock lck(mutex_);
         update_row_count(info.row_count);
-        AssertInfo(field_datas_[field_offset.get()].empty(), "field data already exists");
+        AssertInfo(fields_data_[field_offset.get()].empty(), "field data already exists");
 
         if (field_meta.is_vector()) {
             AssertInfo(!vecindexs_.is_ready(field_offset), "field data can't be loaded when indexing exists");
-            field_datas_[field_offset.get()] = std::move(vec_data);
+            fields_data_[field_offset.get()] = std::move(vec_data);
         } else {
             AssertInfo(!scalar_indexings_[field_offset.get()], "scalar indexing not cleared");
-            field_datas_[field_offset.get()] = std::move(vec_data);
+            fields_data_[field_offset.get()] = std::move(vec_data);
             scalar_indexings_[field_offset.get()] = std::move(index);
         }
 
@@ -180,7 +180,7 @@ SegmentSealedImpl::chunk_data_impl(FieldOffset field_offset, int64_t chunk_id) const {
                "Can't get bitset element at " + std::to_string(field_offset.get()));
     auto& field_meta = schema_->operator[](field_offset);
     auto element_sizeof = field_meta.get_sizeof();
-    SpanBase base(field_datas_[field_offset.get()].data(), row_count_opt_.value(), element_sizeof);
+    SpanBase base(fields_data_[field_offset.get()].data(), row_count_opt_.value(), element_sizeof);
     return base;
 }
 
@@ -245,7 +245,7 @@ SegmentSealedImpl::vector_search(int64_t vec_count,
                "Can't get bitset element at " + std::to_string(field_offset.get()));
     AssertInfo(row_count_opt_.has_value(), "Can't get row count value");
     auto row_count = row_count_opt_.value();
-    auto chunk_data = field_datas_[field_offset.get()].data();
+    auto chunk_data = fields_data_[field_offset.get()].data();
 
     auto sub_qr = [&] {
         if (field_meta.get_data_type() == DataType::VECTOR_FLOAT) {
@@ -283,7 +283,7 @@ SegmentSealedImpl::DropFieldData(const FieldId field_id) {
 
         std::unique_lock lck(mutex_);
         set_bit(field_data_ready_bitset_, field_offset, false);
-        auto vec = std::move(field_datas_[field_offset.get()]);
+        auto vec = std::move(fields_data_[field_offset.get()]);
         lck.unlock();
 
         vec.clear();
@@ -328,7 +328,7 @@ SegmentSealedImpl::check_search(const query::Plan* plan) const {
 
 SegmentSealedImpl::SegmentSealedImpl(SchemaPtr schema)
     : schema_(schema),
-      field_datas_(schema->size()),
+      fields_data_(schema->size()),
       field_data_ready_bitset_(schema->size()),
       vecindex_ready_bitset_(schema->size()),
       scalar_indexings_(schema->size()) {
@@ -384,7 +384,7 @@ SegmentSealedImpl::bulk_subscript(FieldOffset field_offset,
         return;
     }
     auto& field_meta = schema_->operator[](field_offset);
-    auto src_vec = field_datas_[field_offset.get()].data();
+    auto src_vec = fields_data_[field_offset.get()].data();
     switch (field_meta.get_data_type()) {
         case DataType::BOOL: {
             bulk_subscript_impl<bool>(src_vec, seg_offsets, count, output);
diff --git a/internal/core/src/segcore/SegmentSealedImpl.h b/internal/core/src/segcore/SegmentSealedImpl.h
index 5d44e4c18..77e3a0d73 100644
--- a/internal/core/src/segcore/SegmentSealedImpl.h
+++ b/internal/core/src/segcore/SegmentSealedImpl.h
@@ -147,7 +147,7 @@ class SegmentSealedImpl : public SegmentSealed {
     std::vector<knowhere::IndexPtr> scalar_indexings_;
     std::unique_ptr<ScalarIndexBase> primary_key_index_;
 
-    std::vector<aligned_vector<char>> field_datas_;
+    std::vector<aligned_vector<char>> fields_data_;
 
     SealedIndexingRecord vecindexs_;
     aligned_vector<idx_t> row_ids_;
--
GitLab
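The rename touches the member that backs segcore's per-field column storage: one type-erased entry per field, indexed by field offset, with typed append helpers choosing the concrete container. Below is a minimal, self-contained sketch of that pattern; ColumnBase, Column, and Record are hypothetical stand-ins, not the real VectorBase, ConcurrentVector, or InsertRecord types.

#include <cassert>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <vector>

// Type-erased base class, playing the role of a VectorBase-like type.
struct ColumnBase {
    virtual ~ColumnBase() = default;
};

// Concrete typed column, playing the role of a ConcurrentVector<Type>-like type.
template <typename Type>
struct Column : ColumnBase {
    explicit Column(int64_t size_per_chunk) : size_per_chunk_(size_per_chunk) {}
    int64_t size_per_chunk_;
    std::vector<Type> data_;
};

// One type-erased column per field, indexed by field offset,
// mirroring the fields_data_ member renamed by this patch.
struct Record {
    // Untyped access for callers that only know the field offset.
    ColumnBase*
    get_field_data_base(int64_t field_offset) const {
        return fields_data_[field_offset].get();
    }

    // Typed append: the element type is fixed at compile time.
    template <typename Type>
    void
    append_field_data(int64_t size_per_chunk) {
        static_assert(std::is_fundamental_v<Type>);
        fields_data_.emplace_back(std::make_unique<Column<Type>>(size_per_chunk));
    }

 private:
    std::vector<std::unique_ptr<ColumnBase>> fields_data_;
};

int main() {
    Record record;
    record.append_field_data<int64_t>(/*size_per_chunk=*/1024);  // field offset 0
    record.append_field_data<float>(/*size_per_chunk=*/1024);    // field offset 1

    // A caller that does not know the static type goes through the base pointer
    // and downcasts only when it needs the concrete column.
    assert(dynamic_cast<Column<float>*>(record.get_field_data_base(1)) != nullptr);
    return 0;
}

Keeping every column behind a base pointer lets offset-only accessors such as get_field_data_base stay untemplated, while the append helpers still enforce the element type at compile time.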