From 24f7983b1f33668ed1836c750bb2404d9987e003 Mon Sep 17 00:00:00 2001
From: Asad K Awan
Date: Wed, 3 Oct 2012 09:58:45 -0700
Subject: [PATCH] [tools] Add a tool to stress test concurrent writing to levelDB

Summary:
Created a tool that runs multiple threads that concurrently read and write
to levelDB. All writes to the DB are also recorded in an in-memory hash table
and verified at the end of the test. All writes for a given key are
serialized.

Test Plan:
 - Verified by writing only a few keys, logging all writes, and checking that
   the values read and written are correct.
 - Verified correctness of the value generator.
 - Ran with various settings for the number of keys, locks, and threads.

(See the notes after the patch for a brief standalone sketch of the
verification scheme and an example invocation.)

Reviewers: dhruba, MarkCallaghan, heyongqiang

Reviewed By: dhruba

Differential Revision: https://reviews.facebook.net/D5829
---
 Makefile           |   4 +
 tools/db_stress.cc | 805 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 809 insertions(+)
 create mode 100644 tools/db_stress.cc

diff --git a/Makefile b/Makefile
index 3582dfdda..56b042776 100644
--- a/Makefile
+++ b/Makefile
@@ -57,6 +57,7 @@ TESTS = \
 TOOLS = \
 	manifest_dump \
 	sst_dump \
+	db_stress \
 	ldb
 
 PROGRAMS = db_bench $(TESTS) $(TOOLS)
@@ -112,6 +113,9 @@ $(LIBRARY): $(LIBOBJECTS)
 db_bench: db/db_bench.o $(LIBOBJECTS) $(TESTUTIL)
 	$(CXX) db/db_bench.o $(LIBOBJECTS) $(TESTUTIL) $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
 
+db_stress: tools/db_stress.o $(LIBOBJECTS) $(TESTUTIL)
+	$(CXX) tools/db_stress.o $(LIBOBJECTS) $(TESTUTIL) $(EXEC_LDFLAGS) -o $@ $(LDFLAGS)
+
 db_bench_sqlite3: doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL)
 	$(CXX) doc/bench/db_bench_sqlite3.o $(LIBOBJECTS) $(TESTUTIL) -o $@ $(LDFLAGS) -lsqlite3

diff --git a/tools/db_stress.cc b/tools/db_stress.cc
new file mode 100644
index 000000000..909a20387
--- /dev/null
+++ b/tools/db_stress.cc
@@ -0,0 +1,805 @@
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include "db/db_impl.h"
#include "db/version_set.h"
#include "db/db_statistics.h"
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/env.h"
#include "leveldb/write_batch.h"
#include "leveldb/statistics.h"
#include "port/port.h"
#include "util/crc32c.h"
#include "util/histogram.h"
#include "util/mutexlock.h"
#include "util/random.h"
#include "util/testutil.h"
#include "hdfs/env_hdfs.h"

static const long KB = 1024;

// Seed for PRNG
static uint32_t FLAGS_seed = 2341234;

// Max number of key/values to place in database
static long FLAGS_max_key = 2 * KB * KB * KB;

// Number of concurrent threads to run.
static int FLAGS_threads = 32;

// Size of each value will be this number times rand_int(1,3) bytes
static int FLAGS_value_size_mult = 8;

// If true, verify a key's current value against the in-memory state
// before every write (under that key's lock).
static bool FLAGS_verify_before_write = false;

// Print histogram of operation timings
static bool FLAGS_histogram = false;

// If true, print every key/value that is written or verified (very noisy)
static bool FLAGS_verbose = false;

// Number of bytes to buffer in memtable before compacting
// (initialized to default value by "main")
static int FLAGS_write_buffer_size = 0;

// Number of bytes to use as a cache of uncompressed data.
static long FLAGS_cache_size = 2 * KB * KB * KB;

// Number of bytes in a block.
static int FLAGS_block_size = 4 * KB;

// Maximum number of files to keep open at the same time (use default if == 0)
static int FLAGS_open_files = 0;

// Bloom filter bits per key.
// Negative means use default settings.
static int FLAGS_bloom_bits = 10;

// Use the db with the following name.
static const char* FLAGS_db = NULL;

// Verify checksum for every block read from storage
static bool FLAGS_verify_checksum = false;

// Database statistics
static class leveldb::DBStatistics* dbstats;

// Sync all writes to disk
static bool FLAGS_sync = false;

// If true, do not wait until data is synced to disk.
static bool FLAGS_disable_data_sync = false;

// If true, issue fsync instead of fdatasync
static bool FLAGS_use_fsync = false;

// If true, do not write WAL for write.
static bool FLAGS_disable_wal = false;

// Target level-0 file size for compaction
static int FLAGS_target_file_size_base = 64 * KB;

// A multiplier to compute target level-N file size
static int FLAGS_target_file_size_multiplier = 1;

// Max bytes for level-0
static int FLAGS_max_bytes_for_level_base = 256 * KB;

// A multiplier to compute max bytes for level-N
static int FLAGS_max_bytes_for_level_multiplier = 2;

// Number of files in level-0 that will trigger a stop of writes.
static int FLAGS_level0_stop_writes_trigger = 12;

// Number of files in level-0 that will slow down writes.
static int FLAGS_level0_slowdown_writes_trigger = 8;

// Percentage of operations that are reads (the rest are writes)
static int FLAGS_readwritepercent = 10;

// Option to disable compaction triggered by reads.
static int FLAGS_disable_seek_compaction = false;

// Algorithm to use to compress the database
static enum leveldb::CompressionType FLAGS_compression_type =
    leveldb::kSnappyCompression;

// posix or hdfs environment
static leveldb::Env* FLAGS_env = leveldb::Env::Default();

// Number of operations per thread.
static uint32_t FLAGS_ops_per_thread = 600000;

// Log2 of number of keys per lock
static uint32_t FLAGS_log2_keys_per_lock = 2;  // implies 2^2 keys per lock

extern bool useOsBuffer;
extern bool useFsReadAhead;
extern bool useMmapRead;

namespace leveldb {

class StressTest;
namespace {

class Stats {
 private:
  double start_;
  double finish_;
  double seconds_;
  long done_;
  long writes_;
  int next_report_;
  size_t bytes_;
  double last_op_finish_;
  Histogram hist_;

 public:
  Stats() { }

  void Start() {
    next_report_ = 100;
    hist_.Clear();
    done_ = 0;
    writes_ = 0;
    bytes_ = 0;
    seconds_ = 0;
    start_ = FLAGS_env->NowMicros();
    last_op_finish_ = start_;
    finish_ = start_;
  }

  void Merge(const Stats& other) {
    hist_.Merge(other.hist_);
    done_ += other.done_;
    writes_ += other.writes_;
    bytes_ += other.bytes_;
    seconds_ += other.seconds_;
    if (other.start_ < start_) start_ = other.start_;
    if (other.finish_ > finish_) finish_ = other.finish_;
  }

  void Stop() {
    finish_ = FLAGS_env->NowMicros();
    seconds_ = (finish_ - start_) * 1e-6;
  }

  void FinishedSingleOp() {
    if (FLAGS_histogram) {
      double now = FLAGS_env->NowMicros();
      double micros = now - last_op_finish_;
      hist_.Add(micros);
      if (micros > 20000) {
        fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
        fflush(stderr);
      }
      last_op_finish_ = now;
    }

    done_++;
    if (done_ >= next_report_) {
      if      (next_report_ < 1000)   next_report_ += 100;
      else if (next_report_ < 5000)   next_report_ += 500;
      else if (next_report_ < 10000)  next_report_ += 1000;
      else if (next_report_ < 50000)  next_report_ += 5000;
      else if (next_report_ < 100000) next_report_ += 10000;
      else if (next_report_ < 500000) next_report_ += 50000;
      else next_report_ +=
100000; + fprintf(stderr, "... finished %ld ops%30s\r", done_, ""); + fflush(stderr); + } + } + + void AddBytesForOneWrite(size_t n) { + writes_ ++; + bytes_ += n; + } + + void Report(const char* name) { + std::string extra; + if (bytes_ < 1 || done_ < 1) { + fprintf(stderr, "No writes or ops?\n"); + return; + } + + double elapsed = (finish_ - start_) * 1e-6; + double bytes_mb = bytes_ / 1048576.0; + double rate = bytes_mb / elapsed; + double throughput = (double)done_/elapsed; + long percent_writes = (writes_ * 100) / done_; + + fprintf(stdout, "%-12s: ", name); + fprintf(stdout, "%.3f micros/op %ld ops/sec\n", + seconds_ * 1e6 / done_, (long)throughput); + fprintf(stdout, "%-12s: Wrote %.2f MB (%.2f MB/sec) (%ld%% of %ld ops)\n", + "", bytes_mb, rate, (100*writes_)/done_, done_); + + if (FLAGS_histogram) { + fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str()); + } + fflush(stdout); + } +}; + +// State shared by all concurrent executions of the same benchmark. +class SharedState { + public: + static const uint32_t SENTINEL = 0xffffffff; + + SharedState(StressTest* stress_test) : + cv_(&mu_), + seed_(FLAGS_seed), + max_key_(FLAGS_max_key), + log2_keys_per_lock_(FLAGS_log2_keys_per_lock), + num_threads_(FLAGS_threads), + num_initialized_(0), + num_populated_(0), + num_done_(0), + start_(false), + start_verify_(false), + stress_test_(stress_test) { + values_ = new uint32_t[max_key_]; + for (long i = 0; i < max_key_; i++) { + values_[i] = SENTINEL; + } + long num_locks = (max_key_ >> log2_keys_per_lock_); + if (max_key_ & ((1 << log2_keys_per_lock_) - 1)) { + num_locks ++; + } + fprintf(stdout, "Creating %ld locks\n", num_locks); + key_locks_ = new port::Mutex[num_locks]; + } + + ~SharedState() { + delete[] values_; + delete[] key_locks_; + } + + port::Mutex* GetMutex() { + return &mu_; + } + + port::CondVar* GetCondVar() { + return &cv_; + } + + StressTest* GetStressTest() const { + return stress_test_; + } + + long GetMaxKey() const { + return max_key_; + } + + uint32_t GetNumThreads() const { + return num_threads_; + } + + void IncInitialized() { + num_initialized_++; + } + + void IncPopulated() { + num_populated_++; + } + + void IncDone() { + num_done_++; + } + + bool AllInitialized() const { + return num_initialized_ >= num_threads_; + } + + bool AllPopulated() const { + return num_populated_ >= num_threads_; + } + + bool AllDone() const { + return num_done_ >= num_threads_; + } + + void SetStart() { + start_ = true; + } + + void SetStartVerify() { + start_verify_ = true; + } + + bool Started() const { + return start_; + } + + bool VerifyStarted() const { + return start_verify_; + } + + port::Mutex* GetMutexForKey(long key) { + return &key_locks_[key >> log2_keys_per_lock_]; + } + + void Put(long key, uint32_t value_base) { + values_[key] = value_base; + } + + uint32_t Get(long key) const { + return values_[key]; + } + + uint32_t GetSeed() const { + return seed_; + } + + private: + port::Mutex mu_; + port::CondVar cv_; + const uint32_t seed_; + const long max_key_; + const uint32_t log2_keys_per_lock_; + const int num_threads_; + long num_initialized_; + long num_populated_; + long num_done_; + bool start_; + bool start_verify_; + StressTest* stress_test_; + + uint32_t *values_; + port::Mutex *key_locks_; + +}; + +// Per-thread state for concurrent executions of the same benchmark. 
struct ThreadState {
  uint32_t tid;  // 0..n-1
  Random rand;   // Has different seeds for different threads
  SharedState* shared;
  Stats stats;

  ThreadState(uint32_t index, SharedState *shared)
      : tid(index),
        rand(1000 + index + shared->GetSeed()),
        shared(shared) {
  }
};

}  // namespace

class StressTest {
 public:
  StressTest()
      : cache_(NewLRUCache(FLAGS_cache_size)),
        filter_policy_(FLAGS_bloom_bits >= 0
                       ? NewBloomFilterPolicy(FLAGS_bloom_bits)
                       : NULL),
        db_(NULL) {
    std::vector<std::string> files;
    FLAGS_env->GetChildren(FLAGS_db, &files);
    for (unsigned int i = 0; i < files.size(); i++) {
      if (Slice(files[i]).starts_with("heap-")) {
        FLAGS_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
      }
    }
    DestroyDB(FLAGS_db, Options());
  }

  ~StressTest() {
    delete db_;
    delete cache_;
    delete filter_policy_;
  }

  void Run() {
    PrintEnv();
    Open();
    SharedState shared(this);
    uint32_t n = shared.GetNumThreads();

    std::vector<ThreadState*> threads(n);
    for (uint32_t i = 0; i < n; i++) {
      threads[i] = new ThreadState(i, &shared);
      FLAGS_env->StartThread(ThreadBody, threads[i]);
    }
    // Each thread goes through the following states:
    // initializing -> wait for others to init -> populate
    // wait for others to populate -> verify -> done

    {
      MutexLock l(shared.GetMutex());
      while (!shared.AllInitialized()) {
        shared.GetCondVar()->Wait();
      }

      fprintf(stdout, "Starting to populate db\n");
      shared.SetStart();
      shared.GetCondVar()->SignalAll();
      while (!shared.AllPopulated()) {
        shared.GetCondVar()->Wait();
      }

      fprintf(stdout, "Starting verification\n");
      shared.SetStartVerify();
      shared.GetCondVar()->SignalAll();
      while (!shared.AllDone()) {
        shared.GetCondVar()->Wait();
      }
    }

    for (uint32_t i = 1; i < n; i++) {
      threads[0]->stats.Merge(threads[i]->stats);
    }
    threads[0]->stats.Report("Stress Test");

    for (uint32_t i = 0; i < n; i++) {
      delete threads[i];
      threads[i] = NULL;
    }
    fprintf(stdout, "Verification successful\n");
    PrintStatistics();
  }

 private:

  static void ThreadBody(void* v) {
    ThreadState* thread = reinterpret_cast<ThreadState*>(v);
    SharedState* shared = thread->shared;

    {
      MutexLock l(shared->GetMutex());
      shared->IncInitialized();
      if (shared->AllInitialized()) {
        shared->GetCondVar()->SignalAll();
      }
      while (!shared->Started()) {
        shared->GetCondVar()->Wait();
      }
    }

    thread->shared->GetStressTest()->PopulateDb(thread);

    {
      MutexLock l(shared->GetMutex());
      shared->IncPopulated();
      if (shared->AllPopulated()) {
        shared->GetCondVar()->SignalAll();
      }
      while (!shared->VerifyStarted()) {
        shared->GetCondVar()->Wait();
      }
    }

    thread->shared->GetStressTest()->VerifyDb(*(thread->shared), thread->tid);

    {
      MutexLock l(shared->GetMutex());
      shared->IncDone();
      if (shared->AllDone()) {
        shared->GetCondVar()->SignalAll();
      }
    }
  }

  void PopulateDb(ThreadState* thread) {
    ReadOptions read_opts(FLAGS_verify_checksum, true);
    WriteOptions write_opts;
    char value[100], prev_value[100];
    long max_key = thread->shared->GetMaxKey();
    std::string from_db;
    if (FLAGS_sync) {
      write_opts.sync = true;
    }
    write_opts.disableWAL = FLAGS_disable_wal;

    thread->stats.Start();
    for (long i = 0; i < FLAGS_ops_per_thread; i++) {
      long rand_key = thread->rand.Next() % max_key;
      Slice key((char*)&rand_key, sizeof(rand_key));
      if (FLAGS_readwritepercent > thread->rand.Uniform(100)) {
        // introduce some read load.
        db_->Get(read_opts, key, &from_db);
      } else {
        uint32_t value_base = thread->rand.Next();
        size_t sz = GenerateValue(value_base, value, sizeof(value));
        Slice v(value, sz);
        {
          MutexLock l(thread->shared->GetMutexForKey(rand_key));
          if (FLAGS_verify_before_write) {
            VerifyValue(rand_key, read_opts, *(thread->shared), prev_value,
                        sizeof(prev_value), &from_db, true);
          }
          thread->shared->Put(rand_key, value_base);
          db_->Put(write_opts, key, v);
        }
        PrintKeyValue(rand_key, value, sz);
        thread->stats.AddBytesForOneWrite(sz);
      }
      thread->stats.FinishedSingleOp();
    }
    thread->stats.Stop();
  }

  void VerifyDb(const SharedState &shared, long start) const {
    ReadOptions options(FLAGS_verify_checksum, true);
    char value[100];
    long max_key = shared.GetMaxKey();
    long step = shared.GetNumThreads();
    for (long i = start; i < max_key; i += step) {
      std::string from_db;
      VerifyValue(i, options, shared, value, sizeof(value), &from_db);
      if (from_db.length()) {
        PrintKeyValue(i, from_db.data(), from_db.length());
      }
    }
  }

  void VerifyValue(long key, const ReadOptions &opts, const SharedState &shared,
                   char *value, size_t value_sz,
                   std::string *value_from_db, bool strict = false) const {
    Slice k((char*)&key, sizeof(key));
    uint32_t value_base = shared.Get(key);
    if (value_base == SharedState::SENTINEL && !strict) {
      return;
    }

    if (db_->Get(opts, k, value_from_db).ok()) {
      if (value_base == SharedState::SENTINEL) {
        VerificationAbort("Unexpected value found", key);
      }
      size_t sz = GenerateValue(value_base, value, value_sz);
      if (value_from_db->length() != sz) {
        VerificationAbort("Length of value read is not equal", key);
      }
      if (memcmp(value_from_db->data(), value, sz) != 0) {
        VerificationAbort("Contents of value read don't match", key);
      }
    } else {
      if (value_base != SharedState::SENTINEL) {
        VerificationAbort("Value not found", key);
      }
    }
  }

  static void PrintKeyValue(uint32_t key, const char *value, size_t sz) {
    if (!FLAGS_verbose) return;
    fprintf(stdout, "%u ==> (%u) ", key, (unsigned int)sz);
    for (size_t i = 0; i < sz; i++) {
      fprintf(stdout, "%X", value[i]);
    }
    fprintf(stdout, "\n");
  }

  static size_t GenerateValue(uint32_t rand, char *v, size_t max_sz) {
    size_t value_sz = (rand % 3 + 1) * FLAGS_value_size_mult;
    assert(value_sz <= max_sz && value_sz >= sizeof(uint32_t));
    *((uint32_t*)v) = rand;
    for (size_t i = sizeof(uint32_t); i < value_sz; i++) {
      v[i] = (char)(rand ^ i);
    }
    return value_sz;  // the size of the value set.
+ } + + void VerificationAbort(char *msg, long key) const { + fprintf(stderr, "Verification failed for key %ld: %s\n", key, msg); + exit(1); + } + + void PrintEnv() const { + fprintf(stdout, "LevelDB version : %d.%d\n", + kMajorVersion, kMinorVersion); + fprintf(stdout, "Number of threads : %d\n", FLAGS_threads); + fprintf(stdout, "Ops per thread : %ld\n", FLAGS_ops_per_thread); + fprintf(stdout, "Read percentage : %ld\n", FLAGS_readwritepercent); + fprintf(stdout, "Max key : %ld\n", FLAGS_max_key); + fprintf(stdout, "Num keys per lock : %ld\n", + 1 << FLAGS_log2_keys_per_lock); + + char* compression; + switch (FLAGS_compression_type) { + case leveldb::kNoCompression: + compression = "none"; + break; + case leveldb::kSnappyCompression: + compression = "snappy"; + break; + case leveldb::kZlibCompression: + compression = "zlib"; + break; + case leveldb::kBZip2Compression: + compression = "bzip2"; + break; + } + + fprintf(stdout, "Compression : %s\n", compression); + fprintf(stdout, "------------------------------------------------\n"); + } + + void Open() { + assert(db_ == NULL); + Options options; + options.block_cache = cache_; + options.write_buffer_size = FLAGS_write_buffer_size; + options.block_size = FLAGS_block_size; + options.filter_policy = filter_policy_; + options.max_open_files = FLAGS_open_files; + options.statistics = dbstats; + options.env = FLAGS_env; + options.disableDataSync = FLAGS_disable_data_sync; + options.use_fsync = FLAGS_use_fsync; + options.target_file_size_base = FLAGS_target_file_size_base; + options.target_file_size_multiplier = FLAGS_target_file_size_multiplier; + options.max_bytes_for_level_base = FLAGS_max_bytes_for_level_base; + options.max_bytes_for_level_multiplier = + FLAGS_max_bytes_for_level_multiplier; + options.level0_stop_writes_trigger = FLAGS_level0_stop_writes_trigger; + options.level0_slowdown_writes_trigger = + FLAGS_level0_slowdown_writes_trigger; + options.compression = FLAGS_compression_type; + options.create_if_missing = true; + options.disable_seek_compaction = FLAGS_disable_seek_compaction; + Status s = DB::Open(options, FLAGS_db, &db_); + if (!s.ok()) { + fprintf(stderr, "open error: %s\n", s.ToString().c_str()); + exit(1); + } + } + + void PrintStatistics() { + if (dbstats) { + fprintf(stdout, "File opened:%ld closed:%ld errors:%ld\n", + dbstats->getNumFileOpens(), + dbstats->getNumFileCloses(), + dbstats->getNumFileErrors()); + } + } + + private: + Cache* cache_; + const FilterPolicy* filter_policy_; + DB* db_; +}; + +} // namespace leveldb + +int main(int argc, char** argv) { + FLAGS_write_buffer_size = leveldb::Options().write_buffer_size; + FLAGS_open_files = leveldb::Options().max_open_files; + // Compression test code above refers to FLAGS_block_size + FLAGS_block_size = leveldb::Options().block_size; + std::string default_db_path; + + for (int i = 1; i < argc; i++) { + double d; + int n; + uint32_t u; + long l; + char junk; + char hdfsname[2048]; + + if (sscanf(argv[i], "--seed=%uf%c", &u, &junk) == 1) { + FLAGS_seed = u; + } else if (sscanf(argv[i], "--max_key=%ld%c", &l, &junk) == 1) { + FLAGS_max_key = l; + } else if (sscanf(argv[i], "--log2_keys_per_lock=%u%c", &u, &junk) == 1) { + FLAGS_log2_keys_per_lock = u; + } else if (sscanf(argv[i], "--ops_per_thread=%u%c", &u, &junk) == 1) { + FLAGS_ops_per_thread = u; + } else if (sscanf(argv[i], "--verbose=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_verbose = n; + } else if (sscanf(argv[i], "--histogram=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_histogram 
          = n;
    } else if (sscanf(argv[i], "--verify_before_write=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_verify_before_write = n;
    } else if (sscanf(argv[i], "--threads=%d%c", &n, &junk) == 1) {
      FLAGS_threads = n;
    } else if (sscanf(argv[i], "--value_size_mult=%d%c", &n, &junk) == 1) {
      FLAGS_value_size_mult = n;
    } else if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) {
      FLAGS_write_buffer_size = n;
    } else if (sscanf(argv[i], "--cache_size=%ld%c", &l, &junk) == 1) {
      FLAGS_cache_size = l;
    } else if (sscanf(argv[i], "--block_size=%d%c", &n, &junk) == 1) {
      FLAGS_block_size = n;
    } else if (sscanf(argv[i], "--bloom_bits=%d%c", &n, &junk) == 1) {
      FLAGS_bloom_bits = n;
    } else if (sscanf(argv[i], "--open_files=%d%c", &n, &junk) == 1) {
      FLAGS_open_files = n;
    } else if (strncmp(argv[i], "--db=", 5) == 0) {
      FLAGS_db = argv[i] + 5;
    } else if (sscanf(argv[i], "--verify_checksum=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_verify_checksum = n;
    } else if (sscanf(argv[i], "--bufferedio=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      useOsBuffer = n;
    } else if (sscanf(argv[i], "--mmap_read=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      useMmapRead = n;
    } else if (sscanf(argv[i], "--readhead=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      useFsReadAhead = n;
    } else if (sscanf(argv[i], "--statistics=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      if (n == 1) {
        dbstats = new leveldb::DBStatistics();
      }
    } else if (sscanf(argv[i], "--sync=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_sync = n;
    } else if (sscanf(argv[i], "--readwritepercent=%d%c", &n, &junk) == 1 &&
               (n >= 0 && n <= 100)) {
      FLAGS_readwritepercent = n;
    } else if (sscanf(argv[i], "--disable_data_sync=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_disable_data_sync = n;
    } else if (sscanf(argv[i], "--use_fsync=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_use_fsync = n;
    } else if (sscanf(argv[i], "--disable_wal=%d%c", &n, &junk) == 1 &&
               (n == 0 || n == 1)) {
      FLAGS_disable_wal = n;
    } else if (sscanf(argv[i], "--hdfs=%s", hdfsname) == 1) {
      FLAGS_env = new leveldb::HdfsEnv(hdfsname);
    } else if (sscanf(argv[i], "--target_file_size_base=%d%c",
                      &n, &junk) == 1) {
      FLAGS_target_file_size_base = n;
    } else if (sscanf(argv[i], "--target_file_size_multiplier=%d%c",
                      &n, &junk) == 1) {
      FLAGS_target_file_size_multiplier = n;
    } else if (sscanf(argv[i], "--max_bytes_for_level_base=%d%c",
                      &n, &junk) == 1) {
      FLAGS_max_bytes_for_level_base = n;
    } else if (sscanf(argv[i], "--max_bytes_for_level_multiplier=%d%c",
                      &n, &junk) == 1) {
      FLAGS_max_bytes_for_level_multiplier = n;
    } else if (sscanf(argv[i], "--level0_stop_writes_trigger=%d%c",
                      &n, &junk) == 1) {
      FLAGS_level0_stop_writes_trigger = n;
    } else if (sscanf(argv[i], "--level0_slowdown_writes_trigger=%d%c",
                      &n, &junk) == 1) {
      FLAGS_level0_slowdown_writes_trigger = n;
    } else if (strncmp(argv[i], "--compression_type=", 19) == 0) {
      const char* ctype = argv[i] + 19;
      if (!strcasecmp(ctype, "none"))
        FLAGS_compression_type = leveldb::kNoCompression;
      else if (!strcasecmp(ctype, "snappy"))
        FLAGS_compression_type = leveldb::kSnappyCompression;
      else if (!strcasecmp(ctype, "zlib"))
        FLAGS_compression_type = leveldb::kZlibCompression;
      else if (!strcasecmp(ctype, "bzip2"))
        FLAGS_compression_type = leveldb::kBZip2Compression;
      else {
        fprintf(stdout, "Cannot parse %s\n", argv[i]);
      }
    } else if (sscanf(argv[i],
"--disable_seek_compaction=%d%c", &n, &junk) == 1 + && (n == 0 || n == 1)) { + FLAGS_disable_seek_compaction = n; + } else { + fprintf(stderr, "Invalid flag '%s'\n", argv[i]); + exit(1); + } + } + + // Choose a location for the test database if none given with --db= + if (FLAGS_db == NULL) { + leveldb::Env::Default()->GetTestDirectory(&default_db_path); + default_db_path += "/dbstress"; + FLAGS_db = default_db_path.c_str(); + } + + leveldb::StressTest stress; + stress.Run(); + if (dbstats) { + delete dbstats; + } + return 0; +} -- GitLab