diff --git a/db/db_sst_test.cc b/db/db_sst_test.cc index 8a6a81764b6bdfb5742e31204ac5c1f22b7f89f4..fbbbcfdf774fde0ff0c4860bcb0183a83dbe9a9c 100644 --- a/db/db_sst_test.cc +++ b/db/db_sst_test.cc @@ -461,13 +461,15 @@ TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) { sfm->WaitForEmptyTrash(); ASSERT_EQ(bg_delete_file, 8); + // Compaction will delete and regenerate a file from L1 in second db path. It + // should still be cleaned up via delete scheduler. compact_options.bottommost_level_compaction = BottommostLevelCompaction::kForce; ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); ASSERT_EQ("0,1", FilesPerLevel(0)); sfm->WaitForEmptyTrash(); - ASSERT_EQ(bg_delete_file, 8); + ASSERT_EQ(bg_delete_file, 9); rocksdb::SyncPoint::GetInstance()->DisableProcessing(); } diff --git a/util/delete_scheduler_test.cc b/util/delete_scheduler_test.cc index 0ac7972e400604c3151060533be366153508aea9..936e4d7caf075cbc31d7384f998458fd3095734f 100644 --- a/util/delete_scheduler_test.cc +++ b/util/delete_scheduler_test.cc @@ -28,15 +28,23 @@ namespace rocksdb { class DeleteSchedulerTest : public testing::Test { public: DeleteSchedulerTest() : env_(Env::Default()) { - dummy_files_dir_ = test::TmpDir(env_) + "/delete_scheduler_dummy_data_dir"; - DestroyAndCreateDir(dummy_files_dir_); + const int kNumDataDirs = 3; + dummy_files_dirs_.reserve(kNumDataDirs); + for (size_t i = 0; i < kNumDataDirs; ++i) { + dummy_files_dirs_.emplace_back(test::TmpDir(env_) + + "/delete_scheduler_dummy_data_dir" + + ToString(i)); + DestroyAndCreateDir(dummy_files_dirs_.back()); + } } ~DeleteSchedulerTest() { rocksdb::SyncPoint::GetInstance()->DisableProcessing(); rocksdb::SyncPoint::GetInstance()->LoadDependency({}); rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); - test::DestroyDir(env_, dummy_files_dir_); + for (const auto& dummy_files_dir : dummy_files_dirs_) { + test::DestroyDir(env_, dummy_files_dir); + } } void DestroyAndCreateDir(const std::string& dir) { @@ -44,23 +52,24 @@ class 
DeleteSchedulerTest : public testing::Test { EXPECT_OK(env_->CreateDir(dir)); } - int CountNormalFiles() { + int CountNormalFiles(size_t dummy_files_dirs_idx = 0) { std::vector<std::string> files_in_dir; - EXPECT_OK(env_->GetChildren(dummy_files_dir_, &files_in_dir)); + EXPECT_OK(env_->GetChildren(dummy_files_dirs_[dummy_files_dirs_idx], + &files_in_dir)); int normal_cnt = 0; for (auto& f : files_in_dir) { if (!DeleteScheduler::IsTrashFile(f) && f != "." && f != "..") { - printf("%s\n", f.c_str()); normal_cnt++; } } return normal_cnt; } - int CountTrashFiles() { + int CountTrashFiles(size_t dummy_files_dirs_idx = 0) { std::vector<std::string> files_in_dir; - EXPECT_OK(env_->GetChildren(dummy_files_dir_, &files_in_dir)); + EXPECT_OK(env_->GetChildren(dummy_files_dirs_[dummy_files_dirs_idx], + &files_in_dir)); int trash_cnt = 0; for (auto& f : files_in_dir) { @@ -71,8 +80,10 @@ class DeleteSchedulerTest : public testing::Test { return trash_cnt; } - std::string NewDummyFile(const std::string& file_name, uint64_t size = 1024) { - std::string file_path = dummy_files_dir_ + "/" + file_name; + std::string NewDummyFile(const std::string& file_name, uint64_t size = 1024, + size_t dummy_files_dirs_idx = 0) { + std::string file_path = + dummy_files_dirs_[dummy_files_dirs_idx] + "/" + file_name; std::unique_ptr<WritableFile> f; env_->NewWritableFile(file_path, &f, EnvOptions()); std::string data(size, 'A'); @@ -93,7 +104,7 @@ class DeleteSchedulerTest : public testing::Test { } Env* env_; - std::string dummy_files_dir_; + std::vector<std::string> dummy_files_dirs_; int64_t rate_bytes_per_sec_; DeleteScheduler* delete_scheduler_; std::unique_ptr<SstFileManagerImpl> sst_file_mgr_; @@ -126,7 +137,7 @@ TEST_F(DeleteSchedulerTest, BasicRateLimiting) { rocksdb::SyncPoint::GetInstance()->ClearTrace(); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); - DestroyAndCreateDir(dummy_files_dir_); + DestroyAndCreateDir(dummy_files_dirs_[0]); rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024; NewDeleteScheduler(); @@ -166,6 +177,42 @@
TEST_F(DeleteSchedulerTest, BasicRateLimiting) { } } +TEST_F(DeleteSchedulerTest, MultiDirectoryDeletionsScheduled) { + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"DeleteSchedulerTest::MultiDbPathDeletionsScheduled:1", + "DeleteScheduler::BackgroundEmptyTrash"}, + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + rate_bytes_per_sec_ = 1 << 20; // 1MB + NewDeleteScheduler(); + + // Generate dummy files in multiple directories + const size_t kNumFiles = dummy_files_dirs_.size(); + const size_t kFileSize = 1 << 10; // 1KB + std::vector<std::string> generated_files; + for (size_t i = 0; i < kNumFiles; i++) { + generated_files.push_back(NewDummyFile("file", kFileSize, i)); + ASSERT_EQ(1, CountNormalFiles(i)); + } + + // Mark dummy files as trash + for (size_t i = 0; i < kNumFiles; i++) { + ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[i])); + ASSERT_EQ(0, CountNormalFiles(i)); + ASSERT_EQ(1, CountTrashFiles(i)); + } + TEST_SYNC_POINT("DeleteSchedulerTest::MultiDbPathDeletionsScheduled:1"); + delete_scheduler_->WaitForEmptyTrash(); + + // Verify dummy files eventually got deleted + for (size_t i = 0; i < kNumFiles; i++) { + ASSERT_EQ(0, CountNormalFiles(i)); + ASSERT_EQ(0, CountTrashFiles(i)); + } + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + // Same as the BasicRateLimiting test but delete files in multiple threads.
// 1- Create 100 dummy files // 2- Delete the 100 dummy files using DeleteScheduler using 10 threads @@ -194,7 +241,7 @@ TEST_F(DeleteSchedulerTest, RateLimitingMultiThreaded) { rocksdb::SyncPoint::GetInstance()->ClearTrace(); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); - DestroyAndCreateDir(dummy_files_dir_); + DestroyAndCreateDir(dummy_files_dirs_[0]); rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024; NewDeleteScheduler(); @@ -342,7 +389,7 @@ TEST_F(DeleteSchedulerTest, BackgroundError) { // goind to delete for (int i = 0; i < 10; i++) { std::string file_name = "data_" + ToString(i) + ".data.trash"; - ASSERT_OK(env_->DeleteFile(dummy_files_dir_ + "/" + file_name)); + ASSERT_OK(env_->DeleteFile(dummy_files_dirs_[0] + "/" + file_name)); } // Hold BackgroundEmptyTrash @@ -454,7 +501,7 @@ TEST_F(DeleteSchedulerTest, DISABLED_DynamicRateLimiting1) { rocksdb::SyncPoint::GetInstance()->ClearTrace(); rocksdb::SyncPoint::GetInstance()->EnableProcessing(); - DestroyAndCreateDir(dummy_files_dir_); + DestroyAndCreateDir(dummy_files_dirs_[0]); rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024; delete_scheduler_->SetRateBytesPerSecond(rate_bytes_per_sec_); diff --git a/util/file_util.cc b/util/file_util.cc index 80376b6dfb67c3c2809eb9f9083cd6ef9024b3b7..8a1adf2bd780e59fd03063e6f4ff77d8ec3c52f1 100644 --- a/util/file_util.cc +++ b/util/file_util.cc @@ -84,11 +84,10 @@ Status CreateFile(Env* env, const std::string& destination, Status DeleteSSTFile(const ImmutableDBOptions* db_options, const std::string& fname, uint32_t path_id) { - // TODO(tec): support sst_file_manager for multiple path_ids #ifndef ROCKSDB_LITE auto sfm = static_cast<SstFileManagerImpl*>(db_options->sst_file_manager.get()); - if (sfm && path_id == 0) { + if (sfm) { return sfm->ScheduleFileDeletion(fname); } else { return db_options->env->DeleteFile(fname);