From 7bc18e272791b8b23e20e62967d4c1d1040b16b9 Mon Sep 17 00:00:00 2001
From: Maysam Yabandeh
Date: Tue, 20 Aug 2019 11:38:15 -0700
Subject: [PATCH] Disable snapshot refresh feature when snap_refresh_nanos is 0 (#5724)

Summary:
The comments of snap_refresh_nanos advertise that the snapshot refresh feature will be disabled when the option is set to 0. This contract, however, is not honored by the code introduced in https://github.com/facebook/rocksdb/pull/5278
The patch fixes that and also adds an assert to ensure that the feature is not used when the option is zero.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5724

Differential Revision: D16918185

Pulled By: maysamyabandeh

fbshipit-source-id: fec167287df7d85093e087fc39c0eb243e3bbd7e
---
 HISTORY.md                             |  1 +
 db/compaction/compaction_iterator.h    |  1 +
 db/compaction/compaction_job_test.cc   |  2 +-
 db/db_impl/db_impl_compaction_flush.cc | 12 ++++++++----
 4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index 23bdccfb1..740210e4f 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -2,6 +2,7 @@
 ## Unreleased
 ### Bug Fixes
 * Fixed a number of data races in BlobDB.
+* Fix a bug where the compaction snapshot refresh feature is not disabled as advertised when `snap_refresh_nanos` is set to 0.
 
 ### New Features
 * VerifyChecksum() by default will issue readahead. Allow ReadOptions to be passed in to those functions to override the readhead size. For checksum verifying before external SST file ingestion, a new option IngestExternalFileOptions.verify_checksums_readahead_size, is added for this readahead setting.

diff --git a/db/compaction/compaction_iterator.h b/db/compaction/compaction_iterator.h
index 86a2b87b2..2bf847e2e 100644
--- a/db/compaction/compaction_iterator.h
+++ b/db/compaction/compaction_iterator.h
@@ -39,6 +39,7 @@ class SnapshotListFetchCallback {
   virtual void Refresh(std::vector<SequenceNumber>* snapshots,
                        SequenceNumber max) = 0;
   inline bool TimeToRefresh(const size_t key_index) {
+    assert(snap_refresh_nanos_ != 0);
     // skip the key if key_index % every_nth_key (which is of power 2) is not 0.
     if ((key_index & every_nth_key_minus_one_) != 0) {
       return false;

diff --git a/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc
index add491189..b813966bc 100644
--- a/db/compaction/compaction_job_test.cc
+++ b/db/compaction/compaction_job_test.cc
@@ -964,7 +964,7 @@ TEST_F(CompactionJobTest, SnapshotRefresh) {
    public:
     SnapshotListFetchCallbackTest(Env* env, Random64& rand,
                                   std::vector<SequenceNumber>* snapshots)
-        : SnapshotListFetchCallback(env, 0 /*no time delay*/,
+        : SnapshotListFetchCallback(env, 1 /*short time delay*/,
                                     1 /*fetch after each key*/),
           rand_(rand),
           snapshots_(snapshots) {}

diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 672924016..ffb6b2b60 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -1008,8 +1008,10 @@ Status DBImpl::CompactFilesImpl(
       c->mutable_cf_options()->paranoid_file_checks,
       c->mutable_cf_options()->report_bg_io_stats, dbname_,
       &compaction_job_stats, Env::Priority::USER,
-      immutable_db_options_.max_subcompactions <= 1 ? &fetch_callback
-                                                    : nullptr);
+      immutable_db_options_.max_subcompactions <= 1 &&
+              c->mutable_cf_options()->snap_refresh_nanos > 0
+          ? &fetch_callback
+          : nullptr);
 
   // Creating a compaction influences the compaction score because the score
   // takes running compactions into account (by skipping files that are already
@@ -2737,8 +2739,10 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
       &event_logger_, c->mutable_cf_options()->paranoid_file_checks,
       c->mutable_cf_options()->report_bg_io_stats, dbname_,
       &compaction_job_stats, thread_pri,
-      immutable_db_options_.max_subcompactions <= 1 ? &fetch_callback
-                                                    : nullptr);
+      immutable_db_options_.max_subcompactions <= 1 &&
+              c->mutable_cf_options()->snap_refresh_nanos > 0
+          ? &fetch_callback
+          : nullptr);
   compaction_job.Prepare();
 
   NotifyOnCompactionBegin(c->column_family_data(), c.get(), status,
--
GitLab
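For readers who want the behavior in isolation, below is a minimal standalone C++ sketch of the condition this patch adds. The SnapshotListFetchCallback struct is a simplified stand-in, not the real RocksDB class, and the placeholder refresh cadence is made up; the sketch only assumes, as the diff suggests, that passing nullptr instead of a callback is what disables the snapshot refresh feature.

#include <cassert>
#include <cstdint>
#include <iostream>

// Simplified stand-in for the RocksDB SnapshotListFetchCallback (hypothetical).
struct SnapshotListFetchCallback {
  explicit SnapshotListFetchCallback(uint64_t snap_refresh_nanos)
      : snap_refresh_nanos_(snap_refresh_nanos) {}

  bool TimeToRefresh(size_t key_index) const {
    // Mirrors the assert added in compaction_iterator.h: this path must not
    // be reached when the feature is disabled via snap_refresh_nanos == 0.
    assert(snap_refresh_nanos_ != 0);
    return (key_index & 1023) == 0;  // placeholder refresh cadence
  }

  uint64_t snap_refresh_nanos_;
};

int main() {
  const uint32_t max_subcompactions = 1u;  // callback is only used without subcompactions
  const uint64_t snap_refresh_nanos = 0u;  // 0 means "feature disabled" per the option docs

  SnapshotListFetchCallback fetch_callback(snap_refresh_nanos);

  // The fix: in addition to requiring max_subcompactions <= 1, also require
  // snap_refresh_nanos > 0 before handing the callback to the compaction job.
  SnapshotListFetchCallback* cb =
      (max_subcompactions <= 1u && snap_refresh_nanos > 0u) ? &fetch_callback
                                                            : nullptr;

  std::cout << (cb != nullptr ? "snapshot refresh enabled"
                              : "snapshot refresh disabled")
            << std::endl;
  return 0;
}

Compiled and run as written, with snap_refresh_nanos set to 0 the sketch prints "snapshot refresh disabled", matching the contract the option's comments advertise and that the patch now enforces.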