Commit 345fd73f authored by sdong

Fix flaky DBTestDynamicLevel.DynamicLevelMaxBytesBase2

Summary: We added more table properties to each SST file, so with a 2KB SST file size the estimated size of the SST files is off by almost half, which leaves the LSM tree structure different from what the test expects. Fix it by making the file size 4x what it was before, and scaling the LSM base size accordingly. Also replace the sleep-based synchronization with sync points.
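For context, here is a minimal sketch of the sync-point pattern the new test code relies on. The point names ("Sketch:...") and the function name are made up for illustration; the sketch assumes a debug build inside the RocksDB source tree of this era, where the test-only utility lives in util/sync_point.h. LoadDependency takes {predecessor, successor} pairs, and a thread reaching TEST_SYNC_POINT(successor) blocks until some thread has passed TEST_SYNC_POINT(predecessor):

// Minimal sketch of the sync-point pattern (illustrative names only).
// Assumes a debug build inside the RocksDB source tree (util/sync_point.h).
#include <thread>
#include "util/sync_point.h"

void WaitWithoutSleeping() {
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      // {predecessor, successor}: the successor point blocks until the
      // predecessor point has been reached by some thread.
      {"Sketch:background_done", "Sketch:foreground_check"},
  });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  std::thread background([] {
    // ... do the work the foreground thread must wait for ...
    TEST_SYNC_POINT("Sketch:background_done");
  });

  // Blocks until the background thread has passed "Sketch:background_done",
  // replacing a sleep-and-retry loop with a deterministic ordering.
  TEST_SYNC_POINT("Sketch:foreground_check");

  background.join();
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
}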

Test Plan: Run the parallel unit tests multiple times and make sure they always pass.

Reviewers: IslamAbdelRahman, kradhakrishnan

Reviewed By: kradhakrishnan

Subscribers: leveldb, andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D58749
Parent 8fc75de3
@@ -119,21 +119,21 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) {
 }
 // Test specific cases in dynamic max bytes
-TEST_F(DBTestDynamicLevel, DISABLED_DynamicLevelMaxBytesBase2) {
+TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
   Random rnd(301);
   int kMaxKey = 1000000;
   Options options = CurrentOptions();
   options.create_if_missing = true;
-  options.db_write_buffer_size = 2048;
-  options.write_buffer_size = 2048;
+  options.db_write_buffer_size = 204800;
+  options.write_buffer_size = 20480;
   options.max_write_buffer_number = 2;
   options.level0_file_num_compaction_trigger = 2;
   options.level0_slowdown_writes_trigger = 9999;
   options.level0_stop_writes_trigger = 9999;
-  options.target_file_size_base = 2048;
+  options.target_file_size_base = 9102;
   options.level_compaction_dynamic_level_bytes = true;
-  options.max_bytes_for_level_base = 10240;
+  options.max_bytes_for_level_base = 40960;
   options.max_bytes_for_level_multiplier = 4;
   options.max_background_compactions = 2;
   options.num_levels = 5;
@@ -154,10 +154,10 @@ TEST_F(DBTestDynamicLevel, DISABLED_DynamicLevelMaxBytesBase2) {
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(4U, int_prop);
-  // Put about 7K to L0
+  // Put about 28K to L0
   for (int i = 0; i < 70; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
+                  RandomString(&rnd, 380)));
   }
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
@@ -167,14 +167,14 @@ TEST_F(DBTestDynamicLevel, DISABLED_DynamicLevelMaxBytesBase2) {
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(4U, int_prop);
-  // Insert extra about 3.5K to L0. After they are compacted to L4, base level
+  // Insert extra about 28K to L0. After they are compacted to L4, base level
   // should be changed to L3.
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "true"},
   }));
   for (int i = 0; i < 70; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
+                  RandomString(&rnd, 380)));
   }
   ASSERT_OK(dbfull()->SetOptions({
@@ -199,10 +199,10 @@ TEST_F(DBTestDynamicLevel, DISABLED_DynamicLevelMaxBytesBase2) {
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "true"},
   }));
-  // Write about 10K more
+  // Write about 40K more
   for (int i = 0; i < 100; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
+                  RandomString(&rnd, 380)));
   }
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
@@ -218,15 +218,15 @@ TEST_F(DBTestDynamicLevel, DISABLED_DynamicLevelMaxBytesBase2) {
   // Trigger a condition that the compaction changes base level and L0->Lbase
   // happens at the same time.
-  // We try to make last levels' targets to be 10K, 40K, 160K, add triggers
+  // We try to make last levels' targets to be 40K, 160K, 640K, add triggers
   // another compaction from 40K->160K.
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "true"},
   }));
-  // Write about 150K more
-  for (int i = 0; i < 1350; i++) {
+  // Write about 600K more
+  for (int i = 0; i < 1500; i++) {
     ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                  RandomString(&rnd, 80)));
+                  RandomString(&rnd, 380)));
   }
   ASSERT_OK(dbfull()->SetOptions({
       {"disable_auto_compactions", "false"},
@@ -236,30 +236,40 @@ TEST_F(DBTestDynamicLevel, DISABLED_DynamicLevelMaxBytesBase2) {
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(2U, int_prop);
   // A manual compaction will trigger the base level to become L2
   // Keep Writing data until base level changed 2->1. There will be L0->L2
   // compaction going on at the same time.
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({
+      {"CompactionJob::Run():Start", "DynamicLevelMaxBytesBase2:0"},
+      {"DynamicLevelMaxBytesBase2:1", "CompactionJob::Run():End"},
+      {"DynamicLevelMaxBytesBase2:compact_range_finish",
+       "FlushJob::WriteLevel0Table"},
+  });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
-  for (int attempt = 0; attempt <= 20; attempt++) {
-    // Write about 5K more data with two flushes. It should be flush to level 2
-    // but when it is applied, base level is already 1.
-    for (int i = 0; i < 50; i++) {
-      ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
-                    RandomString(&rnd, 80)));
-    }
-    Flush();
-    ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
-    if (int_prop == 2U) {
-      env_->SleepForMicroseconds(50000);
-    } else {
-      break;
-    }
-  }
+  std::thread thread([this] {
+    TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_start");
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+    TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_finish");
+  });
+  TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:0");
+  for (int i = 0; i < 2; i++) {
+    ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
+                  RandomString(&rnd, 380)));
+  }
+  TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:1");
+  Flush();
+  thread.join();
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
-  env_->SleepForMicroseconds(200000);
   ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop));
   ASSERT_EQ(1U, int_prop);
 }
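As a reading aid (not part of the commit), the dependency pairs added above can be annotated as follows; the pair strings are taken verbatim from the diff, and the reading assumes the sync-point semantics sketched earlier, where the second point of each pair waits for the first:

// Annotated copy of the LoadDependency call from the new test code above.
rocksdb::SyncPoint::GetInstance()->LoadDependency({
    // The test's extra writes (issued after point :0) begin only once the
    // compaction launched by the background CompactRange has started running.
    {"CompactionJob::Run():Start", "DynamicLevelMaxBytesBase2:0"},
    // That compaction cannot finish until the test has written those keys and
    // reached point :1, keeping the writes and the compaction overlapped.
    {"DynamicLevelMaxBytesBase2:1", "CompactionJob::Run():End"},
    // The flush writes its table only after CompactRange has returned, so the
    // flush result is applied after the manual compaction has completed.
    {"DynamicLevelMaxBytesBase2:compact_range_finish",
     "FlushJob::WriteLevel0Table"},
});

This ordering deterministically produces the overlap that the old code tried to hit with repeated writes and SleepForMicroseconds calls.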