Commit 5cd8240b authored by Nikhil Benesch, committed by Facebook Github Bot

Test range deletions with more configurations (#4021)

Summary:
Run the basic range deletion tests against the standard set of
configurations. This testing exposed that files with hash indexes and
partitioned indexes mishandled the case where the file contained
only range deletions, i.e., where the index was empty.

Additionally, file a TODO noting that range deletions are broken
when allow_mmap_reads = true is set.
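
Concretely, each test body is wrapped in DBTestBase's config-sweep loop, as in
the minimal sketch below. The test name and keys are illustrative; the
DestroyAndReopen/CurrentOptions/ChangeOptions helpers and kRangeDelSkipConfigs
are the ones used in the diff that follows:

    // Sketch of the pattern applied to each test: run the body once per
    // option configuration, skipping configs that cannot support DeleteRange
    // (plain tables, mmap reads).
    TEST_F(DBRangeDelTest, ConfigSweepSketch) {  // illustrative test name
      do {
        // Fresh DB per config so state from the previous iteration cannot leak.
        DestroyAndReopen(CurrentOptions());
        ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
                                   "begin", "end"));
        // Flushing now writes an SST holding only a range tombstone, which is
        // the empty-index case described above.
        ASSERT_OK(db_->Flush(FlushOptions()));
      } while (ChangeOptions(kRangeDelSkipConfigs));
    }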

/cc ajkr nvanbenschoten

Best viewed with ?w=1: https://github.com/facebook/rocksdb/pull/4021/files?w=1
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4021

Differential Revision: D8811860

Pulled By: ajkr

fbshipit-source-id: 3cc07e6d6210a2a00b932866481b3d5c59775343
Parent cfee7fb5
@@ -23,51 +23,63 @@ class DBRangeDelTest : public DBTestBase {
   }
 };
 
+const int kRangeDelSkipConfigs =
+    // Plain tables do not support range deletions.
+    DBRangeDelTest::kSkipPlainTable |
+    // MmapReads disables the iterator pinning that RangeDelAggregator requires.
+    DBRangeDelTest::kSkipMmapReads;
+
 // PlainTableFactory and NumTableFilesAtLevel() are not supported in
 // ROCKSDB_LITE
 #ifndef ROCKSDB_LITE
 TEST_F(DBRangeDelTest, NonBlockBasedTableNotSupported) {
-  if (!IsMemoryMappedAccessSupported()) {
-    return;
-  }
-  Options opts = CurrentOptions();
-  opts.table_factory.reset(new PlainTableFactory());
-  opts.prefix_extractor.reset(NewNoopTransform());
-  opts.allow_mmap_reads = true;
-  opts.max_sequential_skip_in_iterations = 999999;
-  Reopen(opts);
-  ASSERT_TRUE(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1", "dr1")
-          .IsNotSupported());
+  // TODO: figure out why MmapReads trips the iterator pinning assertion in
+  // RangeDelAggregator. Ideally it would be supported; otherwise it should at
+  // least be explicitly unsupported.
+  for (auto config : {kPlainTableAllBytesPrefix, /* kWalDirAndMmapReads */}) {
+    option_config_ = config;
+    DestroyAndReopen(CurrentOptions());
+    ASSERT_TRUE(
+        db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1", "dr1")
+            .IsNotSupported());
+  }
 }
 
 TEST_F(DBRangeDelTest, FlushOutputHasOnlyRangeTombstones) {
-  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1",
-                             "dr2"));
-  ASSERT_OK(db_->Flush(FlushOptions()));
-  ASSERT_EQ(1, NumTableFilesAtLevel(0));
+  do {
+    DestroyAndReopen(CurrentOptions());
+    ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1",
+                               "dr2"));
+    ASSERT_OK(db_->Flush(FlushOptions()));
+    ASSERT_EQ(1, NumTableFilesAtLevel(0));
+  } while (ChangeOptions(kRangeDelSkipConfigs));
 }
 
 TEST_F(DBRangeDelTest, CompactionOutputHasOnlyRangeTombstone) {
+  do {
     Options opts = CurrentOptions();
     opts.disable_auto_compactions = true;
     opts.statistics = CreateDBStatistics();
-  Reopen(opts);
+    DestroyAndReopen(opts);
 
     // snapshot protects range tombstone from dropping due to becoming obsolete.
     const Snapshot* snapshot = db_->GetSnapshot();
     db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z");
     db_->Flush(FlushOptions());
 
     ASSERT_EQ(1, NumTableFilesAtLevel(0));
     ASSERT_EQ(0, NumTableFilesAtLevel(1));
     dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
                                 true /* disallow_trivial_move */);
     ASSERT_EQ(0, NumTableFilesAtLevel(0));
     ASSERT_EQ(1, NumTableFilesAtLevel(1));
     ASSERT_EQ(0, TestGetTickerCount(opts, COMPACTION_RANGE_DEL_DROP_OBSOLETE));
     db_->ReleaseSnapshot(snapshot);
+    // Skip cuckoo memtables, which do not support snapshots. Skip non-leveled
+    // compactions as the above assertions about the number of files in a level
+    // do not hold true.
+  } while (ChangeOptions(kRangeDelSkipConfigs | kSkipHashCuckoo |
+                         kSkipUniversalCompaction | kSkipFIFOCompaction));
 }
 
 TEST_F(DBRangeDelTest, CompactionOutputFilesExactlyFilled) {
@@ -590,48 +602,57 @@ TEST_F(DBRangeDelTest, TableEvictedDuringScan) {
 }
 
 TEST_F(DBRangeDelTest, GetCoveredKeyFromMutableMemtable) {
-  db_->Put(WriteOptions(), "key", "val");
-  ASSERT_OK(
-      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
+  do {
+    DestroyAndReopen(CurrentOptions());
+    db_->Put(WriteOptions(), "key", "val");
+    ASSERT_OK(
+        db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
 
     ReadOptions read_opts;
     std::string value;
     ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
+  } while (ChangeOptions(kRangeDelSkipConfigs));
 }
 
 TEST_F(DBRangeDelTest, GetCoveredKeyFromImmutableMemtable) {
+  do {
     Options opts = CurrentOptions();
     opts.max_write_buffer_number = 3;
     opts.min_write_buffer_number_to_merge = 2;
     // SpecialSkipListFactory lets us specify maximum number of elements the
     // memtable can hold. It switches the active memtable to immutable (flush is
     // prevented by the above options) upon inserting an element that would
     // overflow the memtable.
     opts.memtable_factory.reset(new SpecialSkipListFactory(1));
-  Reopen(opts);
+    DestroyAndReopen(opts);
 
     db_->Put(WriteOptions(), "key", "val");
     ASSERT_OK(
         db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
     db_->Put(WriteOptions(), "blah", "val");
 
     ReadOptions read_opts;
     std::string value;
     ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
+  } while (ChangeOptions(kRangeDelSkipConfigs));
 }
 
 TEST_F(DBRangeDelTest, GetCoveredKeyFromSst) {
+  do {
+    DestroyAndReopen(CurrentOptions());
     db_->Put(WriteOptions(), "key", "val");
     // snapshot prevents key from being deleted during flush
     const Snapshot* snapshot = db_->GetSnapshot();
     ASSERT_OK(
         db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
     ASSERT_OK(db_->Flush(FlushOptions()));
 
     ReadOptions read_opts;
     std::string value;
     ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
     db_->ReleaseSnapshot(snapshot);
+    // Cuckoo memtables do not support snapshots.
+  } while (ChangeOptions(kRangeDelSkipConfigs | kSkipHashCuckoo));
 }
 
 TEST_F(DBRangeDelTest, GetCoveredMergeOperandFromMemtable) {
...
@@ -268,6 +268,10 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
     // Index partitions are assumed to be consecutive. Prefetch them all.
     // Read the first block offset
     biter.SeekToFirst();
+    if (!biter.Valid()) {
+      // Empty index.
+      return;
+    }
     Slice input = biter.value();
     Status s = handle.DecodeFrom(&input);
     assert(s.ok());
@@ -280,6 +284,10 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
     // Read the last block's offset
     biter.SeekToLast();
+    if (!biter.Valid()) {
+      // Empty index.
+      return;
+    }
     input = biter.value();
     s = handle.DecodeFrom(&input);
     assert(s.ok());
...
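Context for this hunk: an SST that contains only range tombstones has an empty
top-level index, so SeekToFirst()/SeekToLast() leave biter invalid; without
these guards the partition-prefetch path would read from an invalid iterator.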
@@ -143,7 +143,6 @@ void PartitionedIndexBuilder::AddIndexEntry(
 Status PartitionedIndexBuilder::Finish(
     IndexBlocks* index_blocks, const BlockHandle& last_partition_block_handle) {
-  assert(!entries_.empty());
   // It must be set to null after last key is added
   assert(sub_index_builder_ == nullptr);
   if (finishing_indexes == true) {
...
@@ -261,7 +261,9 @@ class HashIndexBuilder : public IndexBuilder {
   virtual Status Finish(
       IndexBlocks* index_blocks,
       const BlockHandle& last_partition_block_handle) override {
-    FlushPendingPrefix();
+    if (pending_block_num_ != 0) {
+      FlushPendingPrefix();
+    }
     primary_index_builder_.Finish(index_blocks, last_partition_block_handle);
     index_blocks->meta_blocks.insert(
         {kHashIndexPrefixesBlock.c_str(), prefix_block_});
...
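These two builder changes are the write-side counterpart: a file holding only
range deletions legitimately produces a partitioned index with zero entries
(hence the dropped assert), and the hash index builder must not flush a pending
prefix when no keys were ever added (hence the pending_block_num_ guard).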