diff --git a/HISTORY.md b/HISTORY.md
index 70c70e80173ffce74944147c023cade69b1fade2..c41f9b6bf32fc52fb8e12f818a3a6929871a752d 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -15,6 +15,7 @@
 
 ### Performance Improvements
 * Reduce thread number for multiple DB instances by re-using one global thread for statistics dumping and persisting.
+* Reduce write-amp in heavy write bursts in `kCompactionStyleLevel` compaction style with `level_compaction_dynamic_level_bytes` set.
 
 ### Public API Change
 * Expose kTypeDeleteWithTimestamp in EntryType and update GetEntryType() accordingly.
diff --git a/db/version_set.cc b/db/version_set.cc
index 1a0793e2d7d3520cf7ad6a206d0edd92fa53367c..4530b689ae62dcfb4a454c9447e09878be6858b0 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -2477,9 +2477,21 @@ void VersionStorageInfo::ComputeCompactionScore(
           // Level-based involves L0->L0 compactions that can lead to oversized
           // L0 files. Take into account size as well to avoid later giant
           // compactions to the base level.
-          score = std::max(
-              score, static_cast<double>(total_size) /
-                         mutable_cf_options.max_bytes_for_level_base);
+          uint64_t l0_target_size = mutable_cf_options.max_bytes_for_level_base;
+          if (immutable_cf_options.level_compaction_dynamic_level_bytes &&
+              level_multiplier_ != 0.0) {
+            // Prevent L0 to Lbase fanout from growing larger than
+            // `level_multiplier_`. This prevents us from getting stuck picking
+            // L0 forever even when it is hurting write-amp. That could happen
+            // in dynamic level compaction's write-burst mode where the base
+            // level's target size can grow to be enormous.
+            l0_target_size =
+                std::max(l0_target_size,
+                         static_cast<uint64_t>(level_max_bytes_[base_level_] /
+                                               level_multiplier_));
+          }
+          score =
+              std::max(score, static_cast<double>(total_size) / l0_target_size);
         }
       }
     } else {
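For intuition, here is a small standalone sketch of the arithmetic the patch changes; it is not part of the patch, and the `ComputeL0Score` helper, constants, and example sizes are all hypothetical. It only illustrates how capping the L0 target size at `level_max_bytes_[base_level_] / level_multiplier_` keeps the L0 score from staying inflated after a write burst has grown the base level's target.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Hypothetical helper mirroring the patched logic: compute the size-based L0
// score, optionally capping the implied L0 -> Lbase fanout at the level
// multiplier (as the patch does when level_compaction_dynamic_level_bytes is
// set and the multiplier is nonzero).
double ComputeL0Score(uint64_t l0_total_size, uint64_t max_bytes_for_level_base,
                      uint64_t base_level_target_size, double level_multiplier,
                      bool dynamic_level_bytes) {
  uint64_t l0_target_size = max_bytes_for_level_base;
  if (dynamic_level_bytes && level_multiplier != 0.0) {
    // Raise the L0 target when the base level's target has grown very large,
    // so the L0 -> Lbase fanout stays near level_multiplier.
    l0_target_size = std::max(
        l0_target_size,
        static_cast<uint64_t>(base_level_target_size / level_multiplier));
  }
  return static_cast<double>(l0_total_size) / l0_target_size;
}

int main() {
  // Example numbers (assumptions, not from the patch): a write burst has
  // pushed the base level's target to 100 GiB while max_bytes_for_level_base
  // stays at a 256 MiB default.
  const uint64_t kMiB = 1ULL << 20;
  const uint64_t kGiB = 1ULL << 30;
  uint64_t l0_size = 2 * kGiB;
  uint64_t base = 256 * kMiB;
  uint64_t base_level_target = 100 * kGiB;
  double multiplier = 10.0;

  // Previous behavior: the score is always relative to
  // max_bytes_for_level_base, so L0 scores ~8.0 and keeps getting picked.
  std::cout << "uncapped score: "
            << ComputeL0Score(l0_size, base, base_level_target, multiplier,
                              /*dynamic_level_bytes=*/false)
            << "\n";

  // Patched behavior: the L0 target is raised to 100 GiB / 10 = 10 GiB, so
  // the score drops to ~0.2 and other levels can be compacted first.
  std::cout << "capped score:   "
            << ComputeL0Score(l0_size, base, base_level_target, multiplier,
                              /*dynamic_level_bytes=*/true)
            << "\n";
  return 0;
}
```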