Commit 6bee36a7 authored by Abhishek Madan, committed by Facebook GitHub Bot

Modify FragmentedRangeTombstoneList member layout (#4632)

Summary:
Rather than storing a `vector<RangeTombstone>`, we now store a
`vector<RangeTombstoneStack>` and a `vector<SequenceNumber>`. A
`RangeTombstoneStack` contains the start and end keys of a range tombstone
fragment, and indices into the seqnum vector to indicate which sequence
numbers the fragment is located at. The diagram below illustrates an
example:

```
tombstones_:     [a, b) [c, e) [h, k)
                   | \   /  \   /  |
                   |  \ /    \ /   |
                   v   v      v    v
tombstone_seqs_: [ 5 3 10 7 2 8 6  ]
```

This format allows binary searching the tombstone list to use fewer key
comparisons, which helps in cases where there are many overlapping
tombstones. Also, this format makes it easier to add DBIter-like
semantics to `FragmentedRangeTombstoneIterator` in the future.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4632

Differential Revision: D13053103

Pulled By: abhimadan

fbshipit-source-id: e8220cc712fcf5be4d602913bb23ace8ea5f8ef0
Parent commit: f5c8cf5f
......@@ -741,12 +741,11 @@ bool MemTable::Get(const LookupKey& key, std::string* value, Status* s,
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
comparator_.comparator,
true /* one_time_use */, snapshot);
FragmentedRangeTombstoneIterator fragment_iter(&fragment_list,
FragmentedRangeTombstoneIterator fragment_iter(&fragment_list, snapshot,
comparator_.comparator);
*max_covering_tombstone_seq = std::max(
*max_covering_tombstone_seq,
MaxCoveringTombstoneSeqnum(&fragment_iter, key.internal_key(),
comparator_.comparator.user_comparator()));
*max_covering_tombstone_seq =
std::max(*max_covering_tombstone_seq,
fragment_iter.MaxCoveringTombstoneSeqnum(key.user_key()));
Slice user_key = key.user_key();
bool found_final_value = false;
......
......@@ -109,8 +109,8 @@ void FragmentedRangeTombstoneList::FragmentTombstones(
// Flush a range tombstone fragment [cur_start_key, cur_end_key), which
// should not overlap with the last-flushed tombstone fragment.
assert(tombstones_.empty() ||
icmp.user_comparator()->Compare(tombstones_.back().end_key_,
cur_start_key) <= 0);
icmp.user_comparator()->Compare(tombstones_.back().end_key,
cur_start_key) <= 0);
if (one_time_use) {
SequenceNumber max_seqnum = 0;
......@@ -118,9 +118,10 @@ void FragmentedRangeTombstoneList::FragmentTombstones(
max_seqnum = std::max(max_seqnum, flush_it->sequence);
}
// Flush only the tombstone fragment with the highest sequence number.
tombstones_.push_back(
RangeTombstone(cur_start_key, cur_end_key, max_seqnum));
size_t start_idx = tombstone_seqs_.size();
tombstone_seqs_.push_back(max_seqnum);
tombstones_.emplace_back(cur_start_key, cur_end_key, start_idx,
start_idx + 1);
} else {
// Sort the sequence numbers of the tombstones being fragmented in
// descending order, and then flush them in that order.
......@@ -130,10 +131,12 @@ void FragmentedRangeTombstoneList::FragmentTombstones(
}
std::sort(seqnums_to_flush.begin(), seqnums_to_flush.end(),
std::greater<SequenceNumber>());
for (const auto seq : seqnums_to_flush) {
tombstones_.push_back(
RangeTombstone(cur_start_key, cur_end_key, seq));
}
size_t start_idx = tombstone_seqs_.size();
size_t end_idx = start_idx + seqnums_to_flush.size();
tombstone_seqs_.insert(tombstone_seqs_.end(), seqnums_to_flush.begin(),
seqnums_to_flush.end());
tombstones_.emplace_back(cur_start_key, cur_end_key, start_idx,
end_idx);
}
cur_start_key = cur_end_key;
}
......@@ -195,12 +198,13 @@ void FragmentedRangeTombstoneList::FragmentTombstones(
}
FragmentedRangeTombstoneIterator::FragmentedRangeTombstoneIterator(
const FragmentedRangeTombstoneList* tombstones,
const FragmentedRangeTombstoneList* tombstones, SequenceNumber snapshot,
const InternalKeyComparator& icmp)
: tombstone_cmp_(icmp.user_comparator()),
icmp_(&icmp),
: tombstone_start_cmp_(icmp.user_comparator()),
tombstone_end_cmp_(icmp.user_comparator()),
ucmp_(icmp.user_comparator()),
tombstones_(tombstones) {
tombstones_(tombstones),
snapshot_(snapshot) {
assert(tombstones_ != nullptr);
pos_ = tombstones_->end();
pinned_pos_ = tombstones_->end();
......@@ -208,94 +212,129 @@ FragmentedRangeTombstoneIterator::FragmentedRangeTombstoneIterator(
FragmentedRangeTombstoneIterator::FragmentedRangeTombstoneIterator(
const std::shared_ptr<const FragmentedRangeTombstoneList>& tombstones,
const InternalKeyComparator& icmp)
: tombstone_cmp_(icmp.user_comparator()),
icmp_(&icmp),
SequenceNumber snapshot, const InternalKeyComparator& icmp)
: tombstone_start_cmp_(icmp.user_comparator()),
tombstone_end_cmp_(icmp.user_comparator()),
ucmp_(icmp.user_comparator()),
tombstones_ref_(tombstones),
tombstones_(tombstones_ref_.get()) {
tombstones_(tombstones_ref_.get()),
snapshot_(snapshot) {
assert(tombstones_ != nullptr);
pos_ = tombstones_->end();
seq_pos_ = tombstones_->seq_end();
pinned_pos_ = tombstones_->end();
pinned_seq_pos_ = tombstones_->seq_end();
}
// Positions the iterator on the first tombstone fragment stack and on the
// first sequence number stored in tombstone_seqs_.
void FragmentedRangeTombstoneIterator::SeekToFirst() {
  // The two cursors are independent here: each simply rewinds to the start
  // of its backing vector.
  seq_pos_ = tombstones_->seq_begin();
  pos_ = tombstones_->begin();
}
// Positions the iterator on the last sequence number of the last tombstone
// fragment stack.
void FragmentedRangeTombstoneIterator::SeekToLast() {
  // Move both cursors to their one-past-the-end positions, then step back
  // once; Prev() takes care of landing both cursors on the final entry.
  seq_pos_ = tombstones_->seq_end();
  pos_ = tombstones_->end();
  Prev();
}
void FragmentedRangeTombstoneIterator::Seek(const Slice& target) {
if (tombstones_->empty()) {
pos_ = tombstones_->end();
Invalidate();
return;
}
RangeTombstone search(ExtractUserKey(target), ExtractUserKey(target),
GetInternalKeySeqno(target));
pos_ = std::lower_bound(tombstones_->begin(), tombstones_->end(), search,
tombstone_cmp_);
SeekToCoveringTombstone(target);
while (pos_ != tombstones_->end() &&
seq_pos_ == tombstones_->seq_iter(pos_->seq_end_idx)) {
++pos_;
if (pos_ == tombstones_->end()) {
return;
}
seq_pos_ = std::lower_bound(tombstones_->seq_iter(pos_->seq_start_idx),
tombstones_->seq_iter(pos_->seq_end_idx),
snapshot_, std::greater<SequenceNumber>());
}
}
void FragmentedRangeTombstoneIterator::SeekForPrev(const Slice& target) {
Seek(target);
if (!Valid()) {
SeekToLast();
}
ParsedInternalKey parsed_target;
if (!ParseInternalKey(target, &parsed_target)) {
assert(false);
if (tombstones_->empty()) {
Invalidate();
return;
}
ParsedInternalKey parsed_start_key;
ParseKey(&parsed_start_key);
while (Valid() && icmp_->Compare(parsed_target, parsed_start_key) < 0) {
Prev();
ParseKey(&parsed_start_key);
SeekForPrevToCoveringTombstone(target);
while (pos_ != tombstones_->end() &&
seq_pos_ == tombstones_->seq_iter(pos_->seq_end_idx)) {
if (pos_ == tombstones_->begin()) {
Invalidate();
return;
}
--pos_;
seq_pos_ = std::lower_bound(tombstones_->seq_iter(pos_->seq_start_idx),
tombstones_->seq_iter(pos_->seq_end_idx),
snapshot_, std::greater<SequenceNumber>());
}
}
void FragmentedRangeTombstoneIterator::Next() { ++pos_; }
// Seeks pos_ to the first tombstone fragment stack whose end key is strictly
// after target -- the only fragment that could cover target, since fragments
// are non-overlapping and sorted. seq_pos_ is then placed on the newest
// sequence number in that stack that is visible at snapshot_.
void FragmentedRangeTombstoneIterator::SeekToCoveringTombstone(
const Slice& target) {
// Binary search over fragment end keys; non-overlapping sorted fragments
// imply the end keys are sorted as well.
pos_ = std::upper_bound(tombstones_->begin(), tombstones_->end(), target,
tombstone_end_cmp_);
if (pos_ == tombstones_->end()) {
// All tombstones end before target.
seq_pos_ = tombstones_->seq_end();
return;
}
// Each stack's seqnums are stored in descending order, so lower_bound with
// std::greater finds the first (i.e. largest) seqnum <= snapshot_.
seq_pos_ = std::lower_bound(tombstones_->seq_iter(pos_->seq_start_idx),
tombstones_->seq_iter(pos_->seq_end_idx),
snapshot_, std::greater<SequenceNumber>());
}
void FragmentedRangeTombstoneIterator::Prev() {
// Seeks pos_ to the last tombstone fragment stack that starts at or before
// target, and positions seq_pos_ on the newest sequence number in that stack
// that is visible at snapshot_. Invalidates the iterator if no fragment
// starts at or before target.
void FragmentedRangeTombstoneIterator::SeekForPrevToCoveringTombstone(
const Slice& target) {
if (tombstones_->empty()) {
Invalidate();
return;
}
// Find the first fragment whose start key is strictly greater than target;
// the candidate covering fragment is the one just before it.
pos_ = std::upper_bound(tombstones_->begin(), tombstones_->end(), target,
tombstone_start_cmp_);
if (pos_ == tombstones_->begin()) {
// All tombstones start after target. Invalidate() already resets both
// pos_ and seq_pos_, so no separate assignment to pos_ is needed (the
// previous `pos_ = tombstones_->end();` here was redundant).
Invalidate();
return;
}
--pos_;
// Seqnums within a stack are sorted descending; lower_bound with
// std::greater finds the first seqnum <= snapshot_.
seq_pos_ = std::lower_bound(tombstones_->seq_iter(pos_->seq_start_idx),
tombstones_->seq_iter(pos_->seq_end_idx),
snapshot_, std::greater<SequenceNumber>());
}
bool FragmentedRangeTombstoneIterator::Valid() const {
return tombstones_ != nullptr && pos_ != tombstones_->end();
// Advances to the next tombstone in sequence-number order within the current
// fragment stack, moving on to the next stack once this one is exhausted.
void FragmentedRangeTombstoneIterator::Next() {
++seq_pos_;
// Stacks occupy contiguous, adjacent ranges of tombstone_seqs_, so reaching
// this stack's seq_end_idx means seq_pos_ now sits on the first seqnum of
// the following stack; keep pos_ in step with it.
if (seq_pos_ == tombstones_->seq_iter(pos_->seq_end_idx)) {
++pos_;
}
}
SequenceNumber MaxCoveringTombstoneSeqnum(
FragmentedRangeTombstoneIterator* tombstone_iter, const Slice& lookup_key,
const Comparator* ucmp) {
if (tombstone_iter == nullptr) {
return 0;
// Steps back to the previous tombstone in sequence-number order, retreating
// pos_ by one fragment stack whenever seq_pos_ crosses a stack boundary.
// Invalidates the iterator when already at the very first sequence number.
void FragmentedRangeTombstoneIterator::Prev() {
if (seq_pos_ == tombstones_->seq_begin()) {
pos_ = tombstones_->end();
seq_pos_ = tombstones_->seq_end();
return;
}
--seq_pos_;
// Move pos_ back when the iterator was past the end, or when seq_pos_
// landed on index seq_start_idx - 1, which is the last seqnum of the
// previous stack (stacks are stored contiguously in tombstone_seqs_).
if (pos_ == tombstones_->end() ||
seq_pos_ == tombstones_->seq_iter(pos_->seq_start_idx - 1)) {
--pos_;
}
}
SequenceNumber snapshot = GetInternalKeySeqno(lookup_key);
Slice user_key = ExtractUserKey(lookup_key);
// The iterator is valid iff it is backed by a tombstone list and currently
// points at a fragment stack (i.e. has not run off the end).
bool FragmentedRangeTombstoneIterator::Valid() const {
  if (tombstones_ == nullptr) {
    return false;
  }
  return pos_ != tombstones_->end();
}
tombstone_iter->Seek(lookup_key);
SequenceNumber highest_covering_seqnum = 0;
if (!tombstone_iter->Valid()) {
// Seeked past the last tombstone
tombstone_iter->Prev();
}
while (tombstone_iter->Valid() &&
ucmp->Compare(user_key, tombstone_iter->value()) < 0) {
if (tombstone_iter->seq() <= snapshot &&
ucmp->Compare(tombstone_iter->user_key(), user_key) <= 0) {
highest_covering_seqnum =
std::max(highest_covering_seqnum, tombstone_iter->seq());
}
tombstone_iter->Prev();
}
return highest_covering_seqnum;
// Returns the sequence number of the newest tombstone visible at this
// iterator's snapshot that covers user_key, or 0 if none covers it.
SequenceNumber FragmentedRangeTombstoneIterator::MaxCoveringTombstoneSeqnum(
const Slice& user_key) {
SeekToCoveringTombstone(user_key);
// SeekToCoveringTombstone guarantees the fragment (if any) ends after
// user_key; to actually cover it, the fragment must also start at or
// before user_key.
if (!ValidPos() || ucmp_->Compare(start_key(), user_key) > 0) {
return 0;
}
return seq();
}
} // namespace rocksdb
......@@ -19,31 +19,61 @@ namespace rocksdb {
struct FragmentedRangeTombstoneList {
public:
// A compact representation of a "stack" of range tombstone fragments, which
// start and end at the same user keys but have different sequence numbers.
// The members seq_start_idx and seq_end_idx are intended to be parameters to
// seq_iter().
struct RangeTombstoneStack {
RangeTombstoneStack(const Slice& start, const Slice& end, size_t start_idx,
size_t end_idx)
: start_key(start),
end_key(end),
seq_start_idx(start_idx),
seq_end_idx(end_idx) {}
// User key where every fragment in this stack starts (inclusive).
Slice start_key;
// User key where every fragment in this stack ends (exclusive).
Slice end_key;
// Index into tombstone_seqs_ of this stack's first sequence number.
size_t seq_start_idx;
// One past the index of this stack's last sequence number; suitable as an
// argument to seq_iter().
size_t seq_end_idx;
};
FragmentedRangeTombstoneList(
std::unique_ptr<InternalIterator> unfragmented_tombstones,
const InternalKeyComparator& icmp, bool one_time_use,
SequenceNumber snapshot = kMaxSequenceNumber);
std::vector<RangeTombstone>::const_iterator begin() const {
std::vector<RangeTombstoneStack>::const_iterator begin() const {
return tombstones_.begin();
}
std::vector<RangeTombstone>::const_iterator end() const {
std::vector<RangeTombstoneStack>::const_iterator end() const {
return tombstones_.end();
}
// Returns an iterator into tombstone_seqs_ at position idx; pass a stack's
// seq_start_idx / seq_end_idx to bound iteration over its sequence numbers.
std::vector<SequenceNumber>::const_iterator seq_iter(size_t idx) const {
return std::next(tombstone_seqs_.begin(), idx);
}
// Iterator to the first stored sequence number.
std::vector<SequenceNumber>::const_iterator seq_begin() const {
return tombstone_seqs_.begin();
}
// One-past-the-end iterator over the stored sequence numbers.
std::vector<SequenceNumber>::const_iterator seq_end() const {
return tombstone_seqs_.end();
}
// True if the list contains no tombstone fragments.
bool empty() const { return tombstones_.size() == 0; }
private:
// Given an ordered range tombstone iterator unfragmented_tombstones,
// "fragment" the tombstones into non-overlapping pieces, and store them in
// tombstones_.
// tombstones_ and tombstone_seqs_.
void FragmentTombstones(
std::unique_ptr<InternalIterator> unfragmented_tombstones,
const InternalKeyComparator& icmp, bool one_time_use,
SequenceNumber snapshot = kMaxSequenceNumber);
std::vector<RangeTombstone> tombstones_;
std::vector<RangeTombstoneStack> tombstones_;
std::vector<SequenceNumber> tombstone_seqs_;
std::list<std::string> pinned_slices_;
PinnedIteratorsManager pinned_iters_mgr_;
};
......@@ -60,15 +90,28 @@ struct FragmentedRangeTombstoneList {
class FragmentedRangeTombstoneIterator : public InternalIterator {
public:
FragmentedRangeTombstoneIterator(
const FragmentedRangeTombstoneList* tombstones,
const FragmentedRangeTombstoneList* tombstones, SequenceNumber snapshot,
const InternalKeyComparator& icmp);
FragmentedRangeTombstoneIterator(
const std::shared_ptr<const FragmentedRangeTombstoneList>& tombstones,
const InternalKeyComparator& icmp);
SequenceNumber snapshot, const InternalKeyComparator& icmp);
void SeekToFirst() override;
void SeekToLast() override;
// NOTE: Seek and SeekForPrev do not behave in the way InternalIterator
// seeking should behave. This is OK because they are not currently used, but
// eventually FragmentedRangeTombstoneIterator should no longer implement
// InternalIterator.
//
// Seeks to the range tombstone that covers target at a seqnum in the
// snapshot. If no such tombstone exists, seek to the earliest tombstone in
// the snapshot that ends after target.
void Seek(const Slice& target) override;
// Seeks to the range tombstone that covers target at a seqnum in the
// snapshot. If no such tombstone exists, seek to the latest tombstone in the
// snapshot that starts before target.
void SeekForPrev(const Slice& target) override;
void Next() override;
void Prev() override;
bool Valid() const override;
......@@ -76,55 +119,88 @@ class FragmentedRangeTombstoneIterator : public InternalIterator {
MaybePinKey();
return current_start_key_.Encode();
}
Slice value() const override { return pos_->end_key_; }
Slice value() const override { return pos_->end_key; }
bool IsKeyPinned() const override { return false; }
bool IsValuePinned() const override { return true; }
Status status() const override { return Status::OK(); }
Slice user_key() const { return pos_->start_key_; }
SequenceNumber seq() const { return pos_->seq_; }
Slice start_key() const { return pos_->start_key; }
Slice end_key() const { return pos_->end_key; }
SequenceNumber seq() const { return *seq_pos_; }
SequenceNumber MaxCoveringTombstoneSeqnum(const Slice& user_key);
private:
struct FragmentedRangeTombstoneComparator {
explicit FragmentedRangeTombstoneComparator(const Comparator* c) : cmp(c) {}
bool operator()(const RangeTombstone& a, const RangeTombstone& b) const {
int user_key_cmp = cmp->Compare(a.start_key_, b.start_key_);
if (user_key_cmp != 0) {
return user_key_cmp < 0;
}
return a.seq_ > b.seq_;
using RangeTombstoneStack = FragmentedRangeTombstoneList::RangeTombstoneStack;
// Orders fragment stacks by start key. The Slice overloads allow
// heterogeneous binary searches (e.g. std::upper_bound probed with a bare
// user key) without constructing a temporary RangeTombstoneStack.
struct RangeTombstoneStackStartComparator {
explicit RangeTombstoneStackStartComparator(const Comparator* c) : cmp(c) {}
bool operator()(const RangeTombstoneStack& a,
const RangeTombstoneStack& b) const {
return cmp->Compare(a.start_key, b.start_key) < 0;
}
bool operator()(const RangeTombstoneStack& a, const Slice& b) const {
return cmp->Compare(a.start_key, b) < 0;
}
bool operator()(const Slice& a, const RangeTombstoneStack& b) const {
return cmp->Compare(a, b.start_key) < 0;
}
// User-key comparator; raw non-owning pointer.
const Comparator* cmp;
};
// Orders fragment stacks by end key. The Slice overloads allow heterogeneous
// binary searches (e.g. std::upper_bound probed with a bare user key)
// without constructing a temporary RangeTombstoneStack.
struct RangeTombstoneStackEndComparator {
explicit RangeTombstoneStackEndComparator(const Comparator* c) : cmp(c) {}
bool operator()(const RangeTombstoneStack& a,
const RangeTombstoneStack& b) const {
return cmp->Compare(a.end_key, b.end_key) < 0;
}
bool operator()(const RangeTombstoneStack& a, const Slice& b) const {
return cmp->Compare(a.end_key, b) < 0;
}
bool operator()(const Slice& a, const RangeTombstoneStack& b) const {
return cmp->Compare(a, b.end_key) < 0;
}
// User-key comparator; raw non-owning pointer.
const Comparator* cmp;
};
void MaybePinKey() const {
if (pos_ != tombstones_->end() && pinned_pos_ != pos_) {
current_start_key_.Set(pos_->start_key_, pos_->seq_, kTypeRangeDeletion);
if (pos_ != tombstones_->end() && seq_pos_ != tombstones_->seq_end() &&
(pinned_pos_ != pos_ || pinned_seq_pos_ != seq_pos_)) {
current_start_key_.Set(pos_->start_key, *seq_pos_, kTypeRangeDeletion);
pinned_pos_ = pos_;
pinned_seq_pos_ = seq_pos_;
}
}
void ParseKey(ParsedInternalKey* parsed) const {
parsed->user_key = pos_->start_key_;
parsed->sequence = pos_->seq_;
parsed->type = kTypeRangeDeletion;
// Positions the iterator at the fragment stack that could cover key, looking
// forward (first stack ending after key).
void SeekToCoveringTombstone(const Slice& key);
// Positions the iterator at the fragment stack that could cover key, looking
// backward (last stack starting at or before key).
void SeekForPrevToCoveringTombstone(const Slice& key);
// Moves both cursors to their one-past-the-end positions, making the
// iterator invalid.
void Invalidate() {
pos_ = tombstones_->end();
seq_pos_ = tombstones_->seq_end();
}
// Like Valid(), but additionally requires seq_pos_ to lie strictly inside
// the current stack's sequence-number range.
bool ValidPos() const {
return Valid() && seq_pos_ != tombstones_->seq_iter(pos_->seq_end_idx);
}
const FragmentedRangeTombstoneComparator tombstone_cmp_;
const InternalKeyComparator* icmp_;
const RangeTombstoneStackStartComparator tombstone_start_cmp_;
const RangeTombstoneStackEndComparator tombstone_end_cmp_;
const Comparator* ucmp_;
std::shared_ptr<const FragmentedRangeTombstoneList> tombstones_ref_;
const FragmentedRangeTombstoneList* tombstones_;
std::vector<RangeTombstone>::const_iterator pos_;
mutable std::vector<RangeTombstone>::const_iterator pinned_pos_;
SequenceNumber snapshot_;
std::vector<RangeTombstoneStack>::const_iterator pos_;
std::vector<SequenceNumber>::const_iterator seq_pos_;
mutable std::vector<RangeTombstoneStack>::const_iterator pinned_pos_;
mutable std::vector<SequenceNumber>::const_iterator pinned_seq_pos_;
mutable InternalKey current_start_key_;
PinnedIteratorsManager pinned_iters_mgr_;
};
SequenceNumber MaxCoveringTombstoneSeqnum(
FragmentedRangeTombstoneIterator* tombstone_iter, const Slice& key,
const Comparator* ucmp);
} // namespace rocksdb
......@@ -377,14 +377,14 @@ Status TableCache::Get(const ReadOptions& options,
get_context->max_covering_tombstone_seq();
if (s.ok() && max_covering_tombstone_seq != nullptr &&
!options.ignore_range_deletions) {
std::unique_ptr<InternalIterator> range_del_iter(
t->NewRangeTombstoneIterator(options));
*max_covering_tombstone_seq =
std::max(*max_covering_tombstone_seq,
MaxCoveringTombstoneSeqnum(
static_cast<FragmentedRangeTombstoneIterator*>(
range_del_iter.get()),
k, internal_comparator.user_comparator()));
std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
static_cast<FragmentedRangeTombstoneIterator*>(
t->NewRangeTombstoneIterator(options)));
if (range_del_iter != nullptr) {
*max_covering_tombstone_seq = std::max(
*max_covering_tombstone_seq,
range_del_iter->MaxCoveringTombstoneSeqnum(ExtractUserKey(k)));
}
}
if (s.ok()) {
get_context->SetReplayLog(row_cache_entry); // nullptr if no cache.
......
......@@ -2314,12 +2314,16 @@ InternalIterator* BlockBasedTable::NewIterator(
}
InternalIterator* BlockBasedTable::NewRangeTombstoneIterator(
const ReadOptions& /* read_options */) {
const ReadOptions& read_options) {
if (rep_->fragmented_range_dels == nullptr) {
return nullptr;
}
return new FragmentedRangeTombstoneIterator(rep_->fragmented_range_dels,
rep_->internal_comparator);
SequenceNumber snapshot = kMaxSequenceNumber;
if (read_options.snapshot != nullptr) {
snapshot = read_options.snapshot->GetSequenceNumber();
}
return new FragmentedRangeTombstoneIterator(
rep_->fragmented_range_dels, snapshot, rep_->internal_comparator);
}
InternalIterator* BlockBasedTable::NewUnfragmentedRangeTombstoneIterator(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.