diff --git a/CMakeLists.txt b/CMakeLists.txt
index 80cab6f91c9df6920cd534bef8e6356b53098443..97268b3ef81924a1ae3231d88fe4a54016362974 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -222,7 +222,6 @@ set(SOURCES
         util/murmurhash.cc
         util/mutable_cf_options.cc
         util/options.cc
-        util/options_builder.cc
         util/options_helper.cc
         util/options_parser.cc
         util/options_sanity_check.cc
diff --git a/HISTORY.md b/HISTORY.md
index 6a7026d9a25884602099c076a61ac53d5e87fe19..1a1807a27fff31627f03c35a908ca73cce586977 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,7 +1,7 @@
 # Unreleased
 ### Public API Change
 * Deprecate BlockBaseTableOptions.hash_index_allow_collision=false
-
+* Deprecate options builder (GetOptions()).
 
 # Rocksdb Change Log
 ## Unreleased
diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h
index c7afd1a8434c56e2b42f9c7f9ccfc296d3417db0..fa937e1419f7bc05ae242d08550385e2a3cb1805 100644
--- a/include/rocksdb/options.h
+++ b/include/rocksdb/options.h
@@ -1552,17 +1552,6 @@ struct FlushOptions {
   FlushOptions() : wait(true) {}
 };
 
-// Get options based on some guidelines. Now only tune parameter based on
-// flush/compaction and fill default parameters for other parameters.
-// total_write_buffer_limit: budget for memory spent for mem tables
-// read_amplification_threshold: comfortable value of read amplification
-// write_amplification_threshold: comfortable value of write amplification.
-// target_db_size: estimated total DB size.
-extern Options GetOptions(size_t total_write_buffer_limit,
-                          int read_amplification_threshold = 8,
-                          int write_amplification_threshold = 32,
-                          uint64_t target_db_size = 68719476736 /* 64GB */);
-
 // Create a Logger from provided DBOptions
 extern Status CreateLoggerFromOptions(const std::string& dbname,
                                       const DBOptions& options,
diff --git a/src.mk b/src.mk
index 0577491710a1ec8031a48929f8010e2cc9428a3b..5e8cab4ba6d09eadba55a9e06e01ff6ee47939bf 100644
--- a/src.mk
+++ b/src.mk
@@ -155,7 +155,6 @@ LIB_SOURCES = \
   util/murmurhash.cc \
   util/mutable_cf_options.cc \
   util/options.cc \
-  util/options_builder.cc \
   util/options_helper.cc \
   util/options_parser.cc \
   util/options_sanity_check.cc \
diff --git a/util/options_builder.cc b/util/options_builder.cc
deleted file mode 100644
index f2677ab379640d32c77dcca1c7ff018b999d482b..0000000000000000000000000000000000000000
--- a/util/options_builder.cc
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under the BSD-style license found in the
-// LICENSE file in the root directory of this source tree. An additional grant
-// of patent rights can be found in the PATENTS file in the same directory.
-
-#include
-#include
-#include
-#include "rocksdb/options.h"
-
-namespace rocksdb {
-
-namespace {
-
-// For now, always use 1-0 as level bytes multiplier.
-const int kBytesForLevelMultiplier = 10;
-const size_t kBytesForOneMb = 1024 * 1024;
-
-// Pick compaction style
-CompactionStyle PickCompactionStyle(size_t write_buffer_size,
-                                    int read_amp_threshold,
-                                    int write_amp_threshold,
-                                    uint64_t target_db_size) {
-#ifndef ROCKSDB_LITE
-  // Estimate read amplification and write amplification of two compaction
-  // styles. If there is hard limit to force a choice, make the choice.
-  // Otherwise, calculate a score based on threshold and expected value of
-  // two styles, weighing reads 4X important than writes.
-  int expected_levels = static_cast<int>(ceil(
-      std::log(target_db_size / write_buffer_size) / std::log(kBytesForLevelMultiplier)));
-
-  int expected_max_files_universal =
-      static_cast<int>(ceil(log2(target_db_size / write_buffer_size)));
-
-  const int kEstimatedLevel0FilesInLevelStyle = 2;
-  // Estimate write amplification:
-  // (1) 1 for every L0 file
-  // (2) 2 for L1
-  // (3) kBytesForLevelMultiplier for the last level. It's really hard to
-  //     predict.
-  // (3) kBytesForLevelMultiplier for other levels.
-  int expected_write_amp_level = kEstimatedLevel0FilesInLevelStyle + 2
-      + (expected_levels - 2) * kBytesForLevelMultiplier
-      + kBytesForLevelMultiplier;
-  int expected_read_amp_level =
-      kEstimatedLevel0FilesInLevelStyle + expected_levels;
-
-  int max_read_amp_uni = expected_max_files_universal;
-  if (read_amp_threshold <= max_read_amp_uni) {
-    return kCompactionStyleLevel;
-  } else if (write_amp_threshold <= expected_write_amp_level) {
-    return kCompactionStyleUniversal;
-  }
-
-  const double kReadWriteWeight = 4;
-
-  double level_ratio =
-      static_cast<double>(read_amp_threshold) / expected_read_amp_level *
-          kReadWriteWeight +
-      static_cast<double>(write_amp_threshold) / expected_write_amp_level;
-
-  int expected_write_amp_uni = expected_max_files_universal / 2 + 2;
-  int expected_read_amp_uni = expected_max_files_universal / 2 + 1;
-
-  double uni_ratio =
-      static_cast<double>(read_amp_threshold) / expected_read_amp_uni *
-          kReadWriteWeight +
-      static_cast<double>(write_amp_threshold) / expected_write_amp_uni;
-
-  if (level_ratio > uni_ratio) {
-    return kCompactionStyleLevel;
-  } else {
-    return kCompactionStyleUniversal;
-  }
-#else
-  return kCompactionStyleLevel;
-#endif  // !ROCKSDB_LITE
-}
-
-// Pick mem table size
-void PickWriteBufferSize(size_t total_write_buffer_limit, Options* options) {
-  const size_t kMaxWriteBufferSize = 128 * kBytesForOneMb;
-  const size_t kMinWriteBufferSize = 4 * kBytesForOneMb;
-
-  // Try to pick up a buffer size between 4MB and 128MB.
-  // And try to pick 4 as the total number of write buffers.
-  size_t write_buffer_size = total_write_buffer_limit / 4;
-  if (write_buffer_size > kMaxWriteBufferSize) {
-    write_buffer_size = kMaxWriteBufferSize;
-  } else if (write_buffer_size < kMinWriteBufferSize) {
-    write_buffer_size = std::min(static_cast<size_t>(kMinWriteBufferSize),
-                                 total_write_buffer_limit / 2);
-  }
-
-  // Truncate to multiple of 1MB.
-  if (write_buffer_size % kBytesForOneMb != 0) {
-    write_buffer_size =
-        (write_buffer_size / kBytesForOneMb + 1) * kBytesForOneMb;
-  }
-
-  options->write_buffer_size = write_buffer_size;
-  options->max_write_buffer_number =
-      static_cast<int>(total_write_buffer_limit / write_buffer_size);
-  options->min_write_buffer_number_to_merge = 1;
-}
-
-#ifndef ROCKSDB_LITE
-void OptimizeForUniversal(Options* options) {
-  options->level0_file_num_compaction_trigger = 2;
-  options->level0_slowdown_writes_trigger = 30;
-  options->level0_stop_writes_trigger = 40;
-  options->max_open_files = -1;
-}
-#endif
-
-// Optimize parameters for level-based compaction
-void OptimizeForLevel(int read_amplification_threshold,
-                      int write_amplification_threshold,
-                      uint64_t target_db_size, Options* options) {
-  int expected_levels_one_level0_file =
-      static_cast<int>(ceil(std::log(target_db_size / options->write_buffer_size) /
-                            std::log(kBytesForLevelMultiplier)));
-
-  int level0_stop_writes_trigger =
-      read_amplification_threshold - expected_levels_one_level0_file;
-
-  const size_t kInitialLevel0TotalSize = 128 * kBytesForOneMb;
-  const int kMaxFileNumCompactionTrigger = 4;
-  const int kMinLevel0StopTrigger = 3;
-
-  int file_num_buffer = static_cast<int>(
-      kInitialLevel0TotalSize / options->write_buffer_size + 1);
-
-  if (level0_stop_writes_trigger > file_num_buffer) {
-    // Have sufficient room for multiple level 0 files
-    // Try enlarge the buffer up to 1GB
-
-    // Try to enlarge the buffer up to 1GB, if still have sufficient headroom.
-    file_num_buffer *=
-        1 << std::max(0, std::min(3, level0_stop_writes_trigger -
-                                         file_num_buffer - 2));
-
-    options->level0_stop_writes_trigger = level0_stop_writes_trigger;
-    options->level0_slowdown_writes_trigger = level0_stop_writes_trigger - 2;
-    options->level0_file_num_compaction_trigger =
-        std::min(kMaxFileNumCompactionTrigger, file_num_buffer / 2);
-  } else {
-    options->level0_stop_writes_trigger =
-        std::max(kMinLevel0StopTrigger, file_num_buffer);
-    options->level0_slowdown_writes_trigger =
-        options->level0_stop_writes_trigger - 1;
-    options->level0_file_num_compaction_trigger = 1;
-  }
-
-  // This doesn't consider compaction and overheads of mem tables. But usually
-  // it is in the same order of magnitude.
-  size_t expected_level0_compaction_size =
-      options->level0_file_num_compaction_trigger * options->write_buffer_size;
-  // Enlarge level1 target file size if level0 compaction size is larger.
-  uint64_t max_bytes_for_level_base = 10 * kBytesForOneMb;
-  if (expected_level0_compaction_size > max_bytes_for_level_base) {
-    max_bytes_for_level_base = expected_level0_compaction_size;
-  }
-  options->max_bytes_for_level_base = max_bytes_for_level_base;
-  // Now always set level multiplier to be 10
-  options->max_bytes_for_level_multiplier = kBytesForLevelMultiplier;
-
-  const uint64_t kMinFileSize = 2 * kBytesForOneMb;
-  // Allow at least 3-way parallelism for compaction between level 1 and 2.
-  uint64_t max_file_size = max_bytes_for_level_base / 3;
-  if (max_file_size < kMinFileSize) {
-    options->target_file_size_base = kMinFileSize;
-  } else {
-    if (max_file_size % kBytesForOneMb != 0) {
-      max_file_size = (max_file_size / kBytesForOneMb + 1) * kBytesForOneMb;
-    }
-    options->target_file_size_base = max_file_size;
-  }
-
-  // TODO: consider to tune num_levels too.
-}
-
-}  // namespace
-
-Options GetOptions(size_t total_write_buffer_limit,
-                   int read_amplification_threshold,
-                   int write_amplification_threshold, uint64_t target_db_size) {
-  Options options;
-  PickWriteBufferSize(total_write_buffer_limit, &options);
-  size_t write_buffer_size = options.write_buffer_size;
-  options.compaction_style =
-      PickCompactionStyle(write_buffer_size, read_amplification_threshold,
-                          write_amplification_threshold, target_db_size);
-#ifndef ROCKSDB_LITE
-  if (options.compaction_style == kCompactionStyleUniversal) {
-    OptimizeForUniversal(&options);
-  } else {
-#else
-  {
-#endif  // !ROCKSDB_LITE
-    OptimizeForLevel(read_amplification_threshold,
-                     write_amplification_threshold, target_db_size, &options);
-  }
-  return options;
-}
-
-}  // namespace rocksdb
diff --git a/util/options_test.cc b/util/options_test.cc
index 52bc31a3d8d058b472faf55420fdccf6cc66b5a3..a4c259de2a1c6907eeff76dd33683c691f59ef65 100644
--- a/util/options_test.cc
+++ b/util/options_test.cc
@@ -38,54 +38,8 @@ DEFINE_bool(enable_print, false, "Print options generated to console.");
 
 namespace rocksdb {
 
-Options PrintAndGetOptions(size_t total_write_buffer_limit,
-                           int read_amplification_threshold,
-                           int write_amplification_threshold,
-                           uint64_t target_db_size = 68719476736) {
-  StderrLogger logger;
-
-  if (FLAGS_enable_print) {
-    printf("---- total_write_buffer_limit: %" ROCKSDB_PRIszt
-           " "
-           "read_amplification_threshold: %d write_amplification_threshold: %d "
-           "target_db_size %" PRIu64 " ----\n",
-           total_write_buffer_limit, read_amplification_threshold,
-           write_amplification_threshold, target_db_size);
-  }
-
-  Options options =
-      GetOptions(total_write_buffer_limit, read_amplification_threshold,
-                 write_amplification_threshold, target_db_size);
-  if (FLAGS_enable_print) {
-    options.Dump(&logger);
-    printf("-------------------------------------\n\n\n");
-  }
-  return options;
-}
-
 class OptionsTest : public testing::Test {};
 
-TEST_F(OptionsTest, LooseCondition) {
-  Options options;
-  PrintAndGetOptions(static_cast<size_t>(10) * 1024 * 1024 * 1024, 100, 100);
-
-  // Less mem table memory budget
-  PrintAndGetOptions(32 * 1024 * 1024, 100, 100);
-
-  // Tight read amplification
-  options = PrintAndGetOptions(128 * 1024 * 1024, 8, 100);
-  ASSERT_EQ(options.compaction_style, kCompactionStyleLevel);
-
-#ifndef ROCKSDB_LITE  // Universal compaction is not supported in ROCKSDB_LITE
-  // Tight write amplification
-  options = PrintAndGetOptions(128 * 1024 * 1024, 64, 10);
-  ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
-#endif  // !ROCKSDB_LITE
-
-  // Both tight amplifications
-  PrintAndGetOptions(128 * 1024 * 1024, 4, 8);
-}
-
 #ifndef ROCKSDB_LITE  // GetOptionsFromMap is not supported in ROCKSDB_LITE
 TEST_F(OptionsTest, GetOptionsFromMapTest) {
   std::unordered_map<std::string, std::string> cf_options_map = {