//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// The test uses an array to compare against values written to the database.
// Keys written to the array are in 1:1 correspondence to the actual values in
// the database according to the formula in the function GenerateValue.

// Space is reserved in the array from 0 to FLAGS_max_key and values are
// randomly written/deleted/read from those positions. During verification we
// compare all the positions in the array. To shorten/elongate the running
// time, you could change the settings: FLAGS_max_key, FLAGS_ops_per_thread,
// (sometimes also FLAGS_threads).
//
// NOTE that if FLAGS_test_batches_snapshots is set, the test will have
// different behavior. See comment of the flag for details.
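//
// A typical invocation (the flag values here are purely illustrative; every
// flag used below is defined later in this file) might look like:
//
//   ./db_stress --max_key=100000 --ops_per_thread=600000 --threads=32 \
//               --readpercent=10 --prefixpercent=20 --writepercent=45 \
//               --delpercent=15 --iterpercent=10 --reopen=10
//
// The five *percent flags must sum to 100; main() checks this and exits
// otherwise.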

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <gflags/gflags.h>
#include "db/db_impl.h"
#include "db/version_set.h"
#include "db/db_statistics.h"
#include "rocksdb/cache.h"
#include "utilities/utility_db.h"
#include "rocksdb/env.h"
#include "rocksdb/write_batch.h"
#include "rocksdb/statistics.h"
#include "port/port.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/histogram.h"
#include "util/mutexlock.h"
#include "util/random.h"
#include "util/testutil.h"
#include "util/logging.h"
#include "utilities/ttl/db_ttl.h"
#include "hdfs/env_hdfs.h"
#include "utilities/merge_operators.h"

static const long KB = 1024;


static bool ValidateUint32Range(const char* flagname, uint64_t value) {
  if (value > std::numeric_limits<uint32_t>::max()) {
    fprintf(stderr, "Invalid value for --%s: %lu, overflow\n", flagname, value);
    return false;
  }
  return true;
}
DEFINE_uint64(seed, 2341234, "Seed for PRNG");
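// Assigning the result of RegisterFlagValidator() to a static bool makes the
// registration run during static initialization, i.e. before main() parses
// the command line. The same "dummy" pattern is repeated for the other
// validated flags below.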
static const bool FLAGS_seed_dummy =
  google::RegisterFlagValidator(&FLAGS_seed, &ValidateUint32Range);

DEFINE_int64(max_key, 1 * KB * KB * KB,
             "Max number of key/values to place in database");

DEFINE_bool(test_batches_snapshots, false,
            "If set, the test uses MultiGet(), MultiPut() and MultiDelete()"
            " which read/write/delete multiple keys in a batch. In this mode,"
            " we do not verify db content by comparing the content with the "
            "pre-allocated array. Instead, we do partial verification inside"
            " MultiGet() by checking various values in a batch. Benefit of"
            " this mode:\n"
            "\t(a) No need to acquire mutexes during writes (less cache "
            "flushes in multi-core leading to speed up)\n"
            "\t(b) No long validation at the end (more speed up)\n"
            "\t(c) Test snapshot and atomicity of batch writes");

DEFINE_int32(threads, 32, "Number of concurrent threads to run.");

DEFINE_int32(ttl, -1,
             "Opens the db with this ttl value if this is not -1. "
             "Carefully specify a large value such that verifications on "
             "deleted values don't fail");

DEFINE_int32(value_size_mult, 8,
             "Size of value will be this number times rand_int(1,3) bytes");

DEFINE_bool(verify_before_write, false, "Verify before write");

DEFINE_bool(histogram, false, "Print histogram of operation timings");

DEFINE_bool(destroy_db_initially, true,
            "Destroys the database dir before start if this is true");

DEFINE_bool(verbose, false, "Verbose");

DEFINE_int32(write_buffer_size, rocksdb::Options().write_buffer_size,
             "Number of bytes to buffer in memtable before compacting");

DEFINE_int32(max_write_buffer_number,
             rocksdb::Options().max_write_buffer_number,
             "The number of in-memory memtables. "
             "Each memtable is of size FLAGS_write_buffer_size.");

DEFINE_int32(min_write_buffer_number_to_merge,
             rocksdb::Options().min_write_buffer_number_to_merge,
             "The minimum number of write buffers that will be merged together "
             "before writing to storage. This is cheap because it is an "
             "in-memory merge. If this feature is not enabled, then all these "
             "write buffers are flushed to L0 as separate files and this "
             "increases read amplification because a get request has to check "
             "in all of these files. Also, an in-memory merge may result in "
             "writing less data to storage if there are duplicate records in"
             " each of these individual write buffers.");

DEFINE_int32(open_files, rocksdb::Options().max_open_files,
             "Maximum number of files to keep open at the same time "
             "(use default if == 0)");

DEFINE_int32(compaction_style, rocksdb::Options().compaction_style, "");

DEFINE_int32(level0_file_num_compaction_trigger,
             rocksdb::Options().level0_file_num_compaction_trigger,
             "Level0 compaction start trigger");

DEFINE_int32(level0_slowdown_writes_trigger,
             rocksdb::Options().level0_slowdown_writes_trigger,
             "Number of files in level-0 that will slow down writes");

DEFINE_int32(level0_stop_writes_trigger,
             rocksdb::Options().level0_stop_writes_trigger,
             "Number of files in level-0 that will trigger put stop.");

DEFINE_int32(block_size, rocksdb::Options().block_size,
             "Number of bytes in a block.");

DEFINE_int32(max_background_compactions,
             rocksdb::Options().max_background_compactions,
             "The maximum number of concurrent background compactions "
             "that can occur in parallel.");

DEFINE_int32(universal_size_ratio, 0, "The ratio of file sizes that trigger"
             " compaction in universal style");

DEFINE_int32(universal_min_merge_width, 0, "The minimum number of files to "
             "compact in universal style compaction");

DEFINE_int32(universal_max_merge_width, 0, "The max number of files to compact"
             " in universal style compaction");

DEFINE_int32(universal_max_size_amplification_percent, 0,
             "The max size amplification for universal style compaction");

DEFINE_int64(cache_size, 2 * KB * KB * KB,
             "Number of bytes to use as a cache of uncompressed data.");

static bool ValidateInt32Positive(const char* flagname, int32_t value) {
  if (value < 0) {
    fprintf(stderr, "Invalid value for --%s: %d, must be >=0\n",
            flagname, value);
    return false;
  }
  return true;
}
DEFINE_int32(reopen, 10, "Number of times database reopens");
static const bool FLAGS_reopen_dummy =
  google::RegisterFlagValidator(&FLAGS_reopen, &ValidateInt32Positive);

DEFINE_int32(bloom_bits, 10, "Bloom filter bits per key. "
             "Negative means use default settings.");

DEFINE_string(db, "", "Use the db with the following name.");

DEFINE_bool(verify_checksum, false,
            "Verify checksum for every block read from storage");

DEFINE_bool(mmap_read, rocksdb::EnvOptions().use_mmap_reads,
            "Allow reads to occur via mmap-ing files");

// Database statistics
static std::shared_ptr<rocksdb::Statistics> dbstats;
DEFINE_bool(statistics, false, "Create database statistics");

DEFINE_bool(sync, false, "Sync all writes to disk");

DEFINE_bool(disable_data_sync, false,
            "If true, do not wait until data is synced to disk.");

DEFINE_bool(use_fsync, false, "If true, issue fsync instead of fdatasync");

DEFINE_int32(kill_random_test, 0,
             "If non-zero, kill at various points in source code with "
             "probability 1/this");
static const bool FLAGS_kill_random_test_dummy =
  google::RegisterFlagValidator(&FLAGS_kill_random_test,
                                &ValidateInt32Positive);
extern int rocksdb_kill_odds;

DEFINE_bool(disable_wal, false, "If true, do not write WAL for writes.");

DEFINE_int32(target_file_size_base, 64 * KB,
             "Target level-1 file size for compaction");

DEFINE_int32(target_file_size_multiplier, 1,
             "A multiplier to compute targe level-N file size (N >= 2)");

DEFINE_uint64(max_bytes_for_level_base, 256 * KB, "Max bytes for level-1");

DEFINE_int32(max_bytes_for_level_multiplier, 2,
             "A multiplier to compute max bytes for level-N (N >= 2)");

static bool ValidateInt32Percent(const char* flagname, int32_t value) {
  if (value < 0 || value>100) {
    fprintf(stderr, "Invalid value for --%s: %d, 0<= pct <=100 \n",
            flagname, value);
    return false;
  }
  return true;
}
DEFINE_int32(readpercent, 10,
             "Ratio of reads to total workload (expressed as a percentage)");
static const bool FLAGS_readpercent_dummy =
  google::RegisterFlagValidator(&FLAGS_readpercent, &ValidateInt32Percent);

DEFINE_int32(prefixpercent, 20,
             "Ratio of prefix iterators to total workload (expressed as a"
             " percentage)");
static const bool FLAGS_prefixpercent_dummy =
  google::RegisterFlagValidator(&FLAGS_prefixpercent, &ValidateInt32Percent);

DEFINE_int32(writepercent, 45,
             " Ratio of deletes to total workload (expressed as a percentage)");
static const bool FLAGS_writepercent_dummy =
  google::RegisterFlagValidator(&FLAGS_writepercent, &ValidateInt32Percent);

DEFINE_int32(delpercent, 15,
             "Ratio of deletes to total workload (expressed as a percentage)");
static const bool FLAGS_delpercent_dummy =
  google::RegisterFlagValidator(&FLAGS_delpercent, &ValidateInt32Percent);

DEFINE_int32(iterpercent, 10, "Ratio of iterations to total workload"
             " (expressed as a percentage)");
static const bool FLAGS_iterpercent_dummy =
  google::RegisterFlagValidator(&FLAGS_iterpercent, &ValidateInt32Percent);

DEFINE_uint64(num_iterations, 10, "Number of iterations per MultiIterate run");
static const bool FLAGS_num_iterations_dummy =
  google::RegisterFlagValidator(&FLAGS_num_iterations, &ValidateUint32Range);

DEFINE_bool(disable_seek_compaction, false,
            "Option to disable compation triggered by read.");

DEFINE_uint64(delete_obsolete_files_period_micros, 0,
              "Option to delete obsolete files periodically"
              "0 means that obsolete files are "
              " deleted after every compaction run.");

enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
  assert(ctype);

  if (!strcasecmp(ctype, "none"))
    return rocksdb::kNoCompression;
  else if (!strcasecmp(ctype, "snappy"))
    return rocksdb::kSnappyCompression;
  else if (!strcasecmp(ctype, "zlib"))
    return rocksdb::kZlibCompression;
  else if (!strcasecmp(ctype, "bzip2"))
    return rocksdb::kBZip2Compression;

  fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
  return rocksdb::kSnappyCompression; //default value
}
DEFINE_string(compression_type, "snappy",
              "Algorithm to use to compress the database");
static enum rocksdb::CompressionType FLAGS_compression_type_e =
    rocksdb::kSnappyCompression;

DEFINE_string(hdfs, "", "Name of hdfs environment");
// posix or hdfs environment
static rocksdb::Env* FLAGS_env = rocksdb::Env::Default();

DEFINE_uint64(ops_per_thread, 600000, "Number of operations per thread.");
static const bool FLAGS_ops_per_thread_dummy =
  google::RegisterFlagValidator(&FLAGS_ops_per_thread, &ValidateUint32Range);

DEFINE_uint64(log2_keys_per_lock, 2, "Log2 of number of keys per lock");
static const bool FLAGS_log2_keys_per_lock_dummy =
  google::RegisterFlagValidator(&FLAGS_log2_keys_per_lock,
                                &ValidateUint32Range);

DEFINE_int32(purge_redundant_percent, 50,
             "Percentage of times we want to purge redundant keys in memory "
             "before flushing");
static const bool FLAGS_purge_redundant_percent_dummy =
  google::RegisterFlagValidator(&FLAGS_purge_redundant_percent,
                                &ValidateInt32Percent);

DEFINE_bool(filter_deletes, false, "If true, deletes use KeyMayExist to drop"
            " the delete if the key is not present");

enum RepFactory {
  kSkipList,
  kPrefixHash,
  kUnsorted,
  kVectorRep
};
enum RepFactory StringToRepFactory(const char* ctype) {
  assert(ctype);

  if (!strcasecmp(ctype, "skip_list"))
    return kSkipList;
  else if (!strcasecmp(ctype, "prefix_hash"))
    return kPrefixHash;
  else if (!strcasecmp(ctype, "unsorted"))
    return kUnsorted;
  else if (!strcasecmp(ctype, "vector"))
    return kVectorRep;

  fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
  return kSkipList;
}
static enum RepFactory FLAGS_rep_factory;
DEFINE_string(memtablerep, "skip_list", "");

static bool ValidatePrefixSize(const char* flagname, int32_t value) {
  if (value < 0 || value>=2000000000) {
    fprintf(stderr, "Invalid value for --%s: %d. 0<= PrefixSize <=2000000000\n",
            flagname, value);
    return false;
  }
  return true;
}
DEFINE_int32(prefix_size, 0, "Control the prefix size for PrefixHashRep");
static const bool FLAGS_prefix_size_dummy =
  google::RegisterFlagValidator(&FLAGS_prefix_size, &ValidatePrefixSize);

DEFINE_bool(use_merge, false, "If true, replaces all writes with a Merge "
            "that behaves like a Put");


namespace rocksdb {

// convert long to a big-endian slice key
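// (PutFixed64 encodes little-endian, so the bytes are reversed here; e.g.
// Key(0x0102) yields "\x00\x00\x00\x00\x00\x00\x01\x02", which keeps keys
// in numeric order under the default bytewise comparator.)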
static std::string Key(long val) {
  std::string little_endian_key;
  std::string big_endian_key;
  PutFixed64(&little_endian_key, val);
  assert(little_endian_key.size() == sizeof(val));
  big_endian_key.resize(sizeof(val));
  for (int i=0; i<(int)sizeof(val); i++) {
    big_endian_key[i] = little_endian_key[sizeof(val) - 1 - i];
  }
  return big_endian_key;
}

class StressTest;
namespace {

class Stats {
 private:
  double start_;
  double finish_;
  double seconds_;
  long done_;
  long gets_;
  long prefixes_;
  long writes_;
  long deletes_;
  long iterator_size_sums_;
  long founds_;
  long iterations_;
  long errors_;
  int next_report_;
  size_t bytes_;
  double last_op_finish_;
  HistogramImpl hist_;

 public:
  Stats() { }

  void Start() {
    next_report_ = 100;
    hist_.Clear();
    done_ = 0;
    gets_ = 0;
    prefixes_ = 0;
    writes_ = 0;
    deletes_ = 0;
    iterator_size_sums_ = 0;
    founds_ = 0;
    iterations_ = 0;
    errors_ = 0;
    bytes_ = 0;
    seconds_ = 0;
    start_ = FLAGS_env->NowMicros();
    last_op_finish_ = start_;
    finish_ = start_;
  }

  void Merge(const Stats& other) {
    hist_.Merge(other.hist_);
    done_ += other.done_;
    gets_ += other.gets_;
    prefixes_ += other.prefixes_;
    writes_ += other.writes_;
    deletes_ += other.deletes_;
    iterator_size_sums_ += other.iterator_size_sums_;
    founds_ += other.founds_;
    iterations_ += other.iterations_;
    errors_ += other.errors_;
    bytes_ += other.bytes_;
    seconds_ += other.seconds_;
    if (other.start_ < start_) start_ = other.start_;
    if (other.finish_ > finish_) finish_ = other.finish_;
  }

  void Stop() {
    finish_ = FLAGS_env->NowMicros();
    seconds_ = (finish_ - start_) * 1e-6;
  }

  void FinishedSingleOp() {
    if (FLAGS_histogram) {
      double now = FLAGS_env->NowMicros();
      double micros = now - last_op_finish_;
      hist_.Add(micros);
      if (micros > 20000) {
        fprintf(stdout, "long op: %.1f micros%30s\r", micros, "");
      }
      last_op_finish_ = now;
    }

    done_++;
    if (done_ >= next_report_) {
      if      (next_report_ < 1000)   next_report_ += 100;
      else if (next_report_ < 5000)   next_report_ += 500;
      else if (next_report_ < 10000)  next_report_ += 1000;
      else if (next_report_ < 50000)  next_report_ += 5000;
      else if (next_report_ < 100000) next_report_ += 10000;
      else if (next_report_ < 500000) next_report_ += 50000;
      else                            next_report_ += 100000;
      fprintf(stdout, "... finished %ld ops%30s\r", done_, "");
    }
  }

  void AddBytesForWrites(int nwrites, size_t nbytes) {
    writes_ += nwrites;
    bytes_ += nbytes;
  }

  void AddGets(int ngets, int nfounds) {
    founds_ += nfounds;
    gets_ += ngets;
  }

  void AddPrefixes(int nprefixes, int count) {
    prefixes_ += nprefixes;
    iterator_size_sums_ += count;
  }

  void AddIterations(int n) {
    iterations_ += n;
  }

  void AddDeletes(int n) {
    deletes_ += n;
  }

  void AddErrors(int n) {
    errors_ += n;
  }

  void Report(const char* name) {
    std::string extra;
    if (bytes_ < 1 || done_ < 1) {
      fprintf(stderr, "No writes or ops?\n");
      return;
    }

    double elapsed = (finish_ - start_) * 1e-6;
    double bytes_mb = bytes_ / 1048576.0;
    double rate = bytes_mb / elapsed;
    double throughput = (double)done_/elapsed;

    fprintf(stdout, "%-12s: ", name);
    fprintf(stdout, "%.3f micros/op %ld ops/sec\n",
            seconds_ * 1e6 / done_, (long)throughput);
    fprintf(stdout, "%-12s: Wrote %.2f MB (%.2f MB/sec) (%ld%% of %ld ops)\n",
            "", bytes_mb, rate, (100*writes_)/done_, done_);
    fprintf(stdout, "%-12s: Wrote %ld times\n", "", writes_);
    fprintf(stdout, "%-12s: Deleted %ld times\n", "", deletes_);
    fprintf(stdout, "%-12s: %ld read and %ld found the key\n", "",
            gets_, founds_);
    fprintf(stdout, "%-12s: Prefix scanned %ld times\n", "", prefixes_);
    fprintf(stdout, "%-12s: Iterator size sum is %ld\n", "",
            iterator_size_sums_);
    fprintf(stdout, "%-12s: Iterated %ld times\n", "", iterations_);
    fprintf(stdout, "%-12s: Got errors %ld times\n", "", errors_);

    if (FLAGS_histogram) {
      fprintf(stdout, "Microseconds per op:\n%s\n", hist_.ToString().c_str());
    }
    fflush(stdout);
  }
};

// State shared by all concurrent executions of the same benchmark.
class SharedState {
 public:
  static const uint32_t SENTINEL = 0xffffffff;

  explicit SharedState(StressTest* stress_test) :
      cv_(&mu_),
      seed_(FLAGS_seed),
      max_key_(FLAGS_max_key),
      log2_keys_per_lock_(FLAGS_log2_keys_per_lock),
      num_threads_(FLAGS_threads),
      num_initialized_(0),
      num_populated_(0),
      vote_reopen_(0),
      num_done_(0),
      start_(false),
      start_verify_(false),
      stress_test_(stress_test) {
    if (FLAGS_test_batches_snapshots) {
      key_locks_ = nullptr;
      values_ = nullptr;
      fprintf(stdout, "No lock creation because test_batches_snapshots set\n");
      return;
    }
    values_ = new uint32_t[max_key_];
    for (long i = 0; i < max_key_; i++) {
      values_[i] = SENTINEL;
    }

    long num_locks = (max_key_ >> log2_keys_per_lock_);
    if (max_key_ & ((1 << log2_keys_per_lock_) - 1)) {
      num_locks ++;
    }
    fprintf(stdout, "Creating %ld locks\n", num_locks);
    key_locks_ = new port::Mutex[num_locks];
  }

  ~SharedState() {
    delete[] values_;
    delete[] key_locks_;
  }

  port::Mutex* GetMutex() {
    return &mu_;
  }

  port::CondVar* GetCondVar() {
    return &cv_;
  }

  StressTest* GetStressTest() const {
    return stress_test_;
  }

  long GetMaxKey() const {
    return max_key_;
  }

  uint32_t GetNumThreads() const {
    return num_threads_;
  }

  void IncInitialized() {
    num_initialized_++;
  }

  void IncOperated() {
    num_populated_++;
  }

  void IncDone() {
    num_done_++;
  }

  void IncVotedReopen() {
    vote_reopen_ = (vote_reopen_ + 1) % num_threads_;
  }

  bool AllInitialized() const {
    return num_initialized_ >= num_threads_;
  }

  bool AllOperated() const {
    return num_populated_ >= num_threads_;
  }

  bool AllDone() const {
    return num_done_ >= num_threads_;
  }

  bool AllVotedReopen() {
    return (vote_reopen_ == 0);
  }

  void SetStart() {
    start_ = true;
  }

  void SetStartVerify() {
    start_verify_ = true;
  }

  bool Started() const {
    return start_;
  }

  bool VerifyStarted() const {
    return start_verify_;
  }

  port::Mutex* GetMutexForKey(long key) {
    return &key_locks_[key >> log2_keys_per_lock_];
  }

  void Put(long key, uint32_t value_base) {
    values_[key] = value_base;
  }

  uint32_t Get(long key) const {
    return values_[key];
  }

  void Delete(long key) const {
    values_[key] = SENTINEL;
  }

  uint32_t GetSeed() const {
    return seed_;
  }

 private:
  port::Mutex mu_;
  port::CondVar cv_;
  const uint32_t seed_;
  const long max_key_;
  const uint32_t log2_keys_per_lock_;
  const int num_threads_;
  long num_initialized_;
  long num_populated_;
  long vote_reopen_;
  long num_done_;
  bool start_;
  bool start_verify_;
  StressTest* stress_test_;

  uint32_t *values_;
  port::Mutex *key_locks_;

};

// Per-thread state for concurrent executions of the same benchmark.
struct ThreadState {
  uint32_t tid; // 0..n-1
  Random rand;  // Has different seeds for different threads
  SharedState* shared;
  Stats stats;

  ThreadState(uint32_t index, SharedState *shared)
      : tid(index),
        rand(1000 + index + shared->GetSeed()),
        shared(shared) {
  }
};

}  // namespace

class StressTest {
 public:
  StressTest()
      : cache_(NewLRUCache(FLAGS_cache_size)),
        filter_policy_(FLAGS_bloom_bits >= 0
                       ? NewBloomFilterPolicy(FLAGS_bloom_bits)
                       : nullptr),
        prefix_extractor_(NewFixedPrefixTransform(
                          FLAGS_test_batches_snapshots ?
                          sizeof(long) : sizeof(long)-1)),
        db_(nullptr),
        num_times_reopened_(0) {
    if (FLAGS_destroy_db_initially) {
      std::vector<std::string> files;
      FLAGS_env->GetChildren(FLAGS_db, &files);
      for (unsigned int i = 0; i < files.size(); i++) {
        if (Slice(files[i]).starts_with("heap-")) {
          FLAGS_env->DeleteFile(FLAGS_db + "/" + files[i]);
        }
      }
      DestroyDB(FLAGS_db, Options());
    }
  }

  ~StressTest() {
    delete db_;
    delete filter_policy_;
    delete prefix_extractor_;
  }

  void Run() {
    PrintEnv();
    Open();
    SharedState shared(this);
    uint32_t n = shared.GetNumThreads();

    std::vector<ThreadState*> threads(n);
    for (uint32_t i = 0; i < n; i++) {
      threads[i] = new ThreadState(i, &shared);
      FLAGS_env->StartThread(ThreadBody, threads[i]);
    }
    // Each thread goes through the following states:
    // initializing -> wait for others to init -> read/populate/depopulate
    // wait for others to operate -> verify -> done

    {
      MutexLock l(shared.GetMutex());
      while (!shared.AllInitialized()) {
        shared.GetCondVar()->Wait();
      }

      double now = FLAGS_env->NowMicros();
      fprintf(stdout, "%s Starting database operations\n",
              FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());

      shared.SetStart();
      shared.GetCondVar()->SignalAll();
      while (!shared.AllOperated()) {
        shared.GetCondVar()->Wait();
      }

      now = FLAGS_env->NowMicros();
      if (FLAGS_test_batches_snapshots) {
        fprintf(stdout, "%s Limited verification already done during gets\n",
                FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
      } else {
        fprintf(stdout, "%s Starting verification\n",
                FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
      }

      shared.SetStartVerify();
      shared.GetCondVar()->SignalAll();
      while (!shared.AllDone()) {
        shared.GetCondVar()->Wait();
      }
    }

    for (unsigned int i = 1; i < n; i++) {
      threads[0]->stats.Merge(threads[i]->stats);
    }
    threads[0]->stats.Report("Stress Test");

    for (unsigned int i = 0; i < n; i++) {
      delete threads[i];
      threads[i] = nullptr;
    }
    double now = FLAGS_env->NowMicros();
    if (!FLAGS_test_batches_snapshots) {
      fprintf(stdout, "%s Verification successful\n",
              FLAGS_env->TimeToString((uint64_t) now/1000000).c_str());
    }
    PrintStatistics();
  }

 private:

  static void ThreadBody(void* v) {
    ThreadState* thread = reinterpret_cast<ThreadState*>(v);
    SharedState* shared = thread->shared;

    {
      MutexLock l(shared->GetMutex());
      shared->IncInitialized();
      if (shared->AllInitialized()) {
        shared->GetCondVar()->SignalAll();
      }
      while (!shared->Started()) {
        shared->GetCondVar()->Wait();
      }
    }
    thread->shared->GetStressTest()->OperateDb(thread);

    {
      MutexLock l(shared->GetMutex());
      shared->IncOperated();
      if (shared->AllOperated()) {
        shared->GetCondVar()->SignalAll();
      }
      while (!shared->VerifyStarted()) {
        shared->GetCondVar()->Wait();
      }
    }

    if (!FLAGS_test_batches_snapshots) {
      thread->shared->GetStressTest()->VerifyDb(thread);
    }

    {
      MutexLock l(shared->GetMutex());
      shared->IncDone();
      if (shared->AllDone()) {
        shared->GetCondVar()->SignalAll();
      }
    }

  }

  // Given a key K and value V, this puts ("0"+K, "0"+V), ("1"+K, "1"+V), ...
  // ("9"+K, "9"+V) in DB atomically i.e in a single batch.
  // Also refer MultiGet.
  Status MultiPut(ThreadState* thread,
                  const WriteOptions& writeoptions,
                  const Slice& key, const Slice& value, size_t sz) {
    std::string keys[10] = {"9", "8", "7", "6", "5",
                            "4", "3", "2", "1", "0"};
    std::string values[10] = {"9", "8", "7", "6", "5",
                              "4", "3", "2", "1", "0"};
    Slice value_slices[10];
    WriteBatch batch;
    Status s;
    for (int i = 0; i < 10; i++) {
      keys[i] += key.ToString();
      values[i] += value.ToString();
      value_slices[i] = values[i];
      if (FLAGS_use_merge) {
        batch.Merge(keys[i], value_slices[i]);
      } else {
        batch.Put(keys[i], value_slices[i]);
      }
    }

    s = db_->Write(writeoptions, &batch);
    if (!s.ok()) {
      fprintf(stderr, "multiput error: %s\n", s.ToString().c_str());
      thread->stats.AddErrors(1);
    } else {
      // we did 10 writes each of size sz + 1
      thread->stats.AddBytesForWrites(10, (sz + 1) * 10);
    }

    return s;
  }

  // Given a key K, this deletes ("0"+K), ("1"+K),... ("9"+K)
  // into the DB atomically, i.e. in a single batch. Also see MultiGet.
  Status MultiDelete(ThreadState* thread,
                     const WriteOptions& writeoptions,
                     const Slice& key) {
    std::string keys[10] = {"9", "7", "5", "3", "1",
                            "8", "6", "4", "2", "0"};

    WriteBatch batch;
    Status s;
    for (int i = 0; i < 10; i++) {
      keys[i] += key.ToString();
      batch.Delete(keys[i]);
    }

    s = db_->Write(writeoptions, &batch);
    if (!s.ok()) {
      fprintf(stderr, "multidelete error: %s\n", s.ToString().c_str());
      thread->stats.AddErrors(1);
    } else {
      thread->stats.AddDeletes(10);
    }

    return s;
  }

  // Given a key K, this gets values for "0"+K, "1"+K,..."9"+K
  // in the same snapshot, and verifies that all the values are of the form
  // "0"+V, "1"+V,..."9"+V.
  // ASSUMES that MultiPut was used to put (K, V) into the DB.
  Status MultiGet(ThreadState* thread,
                  const ReadOptions& readoptions,
                  const Slice& key, std::string* value) {
    std::string keys[10] = {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"};
    Slice key_slices[10];
    std::string values[10];
    ReadOptions readoptionscopy = readoptions;
    readoptionscopy.snapshot = db_->GetSnapshot();
    Status s;
    for (int i = 0; i < 10; i++) {
      keys[i] += key.ToString();
      key_slices[i] = keys[i];
      s = db_->Get(readoptionscopy, key_slices[i], value);
      if (!s.ok() && !s.IsNotFound()) {
        fprintf(stderr, "get error: %s\n", s.ToString().c_str());
        values[i] = "";
        thread->stats.AddErrors(1);
        // we continue after error rather than exiting so that we can
        // find more errors if any
      } else if (s.IsNotFound()) {
        values[i] = "";
        thread->stats.AddGets(1, 0);
      } else {
        values[i] = *value;

        char expected_prefix = (keys[i])[0];
        char actual_prefix = (values[i])[0];
        if (actual_prefix != expected_prefix) {
          fprintf(stderr, "error expected prefix = %c actual = %c\n",
                  expected_prefix, actual_prefix);
        }
        (values[i])[0] = ' '; // blank out the differing character
        thread->stats.AddGets(1, 1);
      }
    }
    db_->ReleaseSnapshot(readoptionscopy.snapshot);

    // Now that we retrieved all values, check that they all match
    for (int i = 1; i < 10; i++) {
      if (values[i] != values[0]) {
        fprintf(stderr, "error : inconsistent values for key %s: %s, %s\n",
                key.ToString().c_str(), values[0].c_str(),
                values[i].c_str());
      // we continue after error rather than exiting so that we can
      // find more errors if any
      }
    }

    return s;
  }

  // Given a prefix P, this does prefix scans for "0"+P, "1"+P,..."9"+P
  // in the same snapshot.  Each of these 10 scans returns a series of
  // values; each series should be the same length, and it is verified
  // for each index i that all the i'th values are of the form "0"+V,
  // "1"+V,..."9"+V.
  // ASSUMES that MultiPut was used to put (K, V)
  Status MultiPrefixScan(ThreadState* thread,
                         const ReadOptions& readoptions,
                         const Slice& prefix) {
    std::string prefixes[10] = {"0", "1", "2", "3", "4",
                                "5", "6", "7", "8", "9"};
    Slice prefix_slices[10];
    ReadOptions readoptionscopy[10];
    const Snapshot* snapshot = db_->GetSnapshot();
    Iterator* iters[10];
    Status s = Status::OK();
    for (int i = 0; i < 10; i++) {
      prefixes[i] += prefix.ToString();
      prefix_slices[i] = prefixes[i];
      readoptionscopy[i] = readoptions;
      readoptionscopy[i].prefix = &prefix_slices[i];
      readoptionscopy[i].snapshot = snapshot;
      iters[i] = db_->NewIterator(readoptionscopy[i]);
      iters[i]->SeekToFirst();
    }

    int count = 0;
    while (iters[0]->Valid()) {
      count++;
      std::string values[10];
      // get list of all values for this iteration
      for (int i = 0; i < 10; i++) {
        // no iterator should finish before the first one
        assert(iters[i]->Valid());
        values[i] = iters[i]->value().ToString();

        char expected_first = (prefixes[i])[0];
        char actual_first = (values[i])[0];

        if (actual_first != expected_first) {
          fprintf(stderr, "error expected first = %c actual = %c\n",
                  expected_first, actual_first);
        }
        (values[i])[0] = ' '; // blank out the differing character
      }
      // make sure all values are equivalent
      for (int i = 0; i < 10; i++) {
        if (values[i] != values[0]) {
          fprintf(stderr, "error : inconsistent values for prefix %s: %s, %s\n",
                  prefix.ToString().c_str(), values[0].c_str(),
                  values[i].c_str());
          // we continue after error rather than exiting so that we can
          // find more errors if any
        }
        iters[i]->Next();
      }
    }

    // cleanup iterators and snapshot
    for (int i = 0; i < 10; i++) {
      // if the first iterator finished, they should have all finished
      assert(!iters[i]->Valid());
      assert(iters[i]->status().ok());
      delete iters[i];
    }
    db_->ReleaseSnapshot(snapshot);

    if (s.ok()) {
      thread->stats.AddPrefixes(1, count);
    } else {
      thread->stats.AddErrors(1);
    }

    return s;
  }

  // Given a key K, this creates an iterator which scans to K and then
  // does a random sequence of Next/Prev operations.
  Status MultiIterate(ThreadState* thread,
                      const ReadOptions& readoptions,
                      const Slice& key) {
    Status s;
    const Snapshot* snapshot = db_->GetSnapshot();
    ReadOptions readoptionscopy = readoptions;
    readoptionscopy.snapshot = snapshot;
    unique_ptr<Iterator> iter(db_->NewIterator(readoptionscopy));

    iter->Seek(key);
    for (uint64_t i = 0; i < FLAGS_num_iterations && iter->Valid(); i++) {
      if (thread->rand.OneIn(2)) {
        iter->Next();
      } else {
        iter->Prev();
      }
    }

    if (s.ok()) {
      thread->stats.AddIterations(1);
    } else {
      thread->stats.AddErrors(1);
    }

    db_->ReleaseSnapshot(snapshot);

    return s;
  }

  void OperateDb(ThreadState* thread) {
    ReadOptions read_opts(FLAGS_verify_checksum, true);
    WriteOptions write_opts;
    char value[100];
    long max_key = thread->shared->GetMaxKey();
    std::string from_db;
    if (FLAGS_sync) {
      write_opts.sync = true;
    }
    write_opts.disableWAL = FLAGS_disable_wal;
    const int prefixBound = (int)FLAGS_readpercent + (int)FLAGS_prefixpercent;
    const int writeBound = prefixBound + (int)FLAGS_writepercent;
    const int delBound = writeBound + (int)FLAGS_delpercent;

    thread->stats.Start();
    for (uint64_t i = 0; i < FLAGS_ops_per_thread; i++) {
      if(i != 0 && (i % (FLAGS_ops_per_thread / (FLAGS_reopen + 1))) == 0) {
        {
          thread->stats.FinishedSingleOp();
          MutexLock l(thread->shared->GetMutex());
          thread->shared->IncVotedReopen();
          if (thread->shared->AllVotedReopen()) {
            thread->shared->GetStressTest()->Reopen();
            thread->shared->GetCondVar()->SignalAll();
          }
          else {
            thread->shared->GetCondVar()->Wait();
          }
          // Commenting this out as we don't want to reset stats on each open.
          // thread->stats.Start();
        }
      }

      long rand_key = thread->rand.Next() % max_key;
      std::string keystr = Key(rand_key);
      Slice key = keystr;
      int prob_op = thread->rand.Uniform(100);

      if (prob_op >= 0 && prob_op < (int)FLAGS_readpercent) {
        // OPERATION read
        if (!FLAGS_test_batches_snapshots) {
          Status s = db_->Get(read_opts, key, &from_db);
          if (s.ok()) {
            // found case
            thread->stats.AddGets(1, 1);
          } else if (s.IsNotFound()) {
            // not found case
            thread->stats.AddGets(1, 0);
          } else {
            // errors case
            thread->stats.AddErrors(1);
          }
        } else {
          MultiGet(thread, read_opts, key, &from_db);
        }
      } else if ((int)FLAGS_readpercent <= prob_op && prob_op < prefixBound) {
        // OPERATION prefix scan
        // keys are longs (e.g., 8 bytes), so we let prefixes be
        // everything except the last byte.  So there will be 2^8=256
        // keys per prefix.
        Slice prefix = Slice(key.data(), key.size() - 1);
        if (!FLAGS_test_batches_snapshots) {
          read_opts.prefix = &prefix;
          Iterator* iter = db_->NewIterator(read_opts);
          int count = 0;
          for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
            assert(iter->key().starts_with(prefix));
            count++;
          }
          assert(count <= 256);
          if (iter->status().ok()) {
            thread->stats.AddPrefixes(1, count);
          } else {
            thread->stats.AddErrors(1);
          }
          delete iter;
        } else {
          MultiPrefixScan(thread, read_opts, prefix);
        }
      } else if (prefixBound <= prob_op && prob_op < writeBound) {
        // OPERATION write
        uint32_t value_base = thread->rand.Next();
        size_t sz = GenerateValue(value_base, value, sizeof(value));
        Slice v(value, sz);
        if (!FLAGS_test_batches_snapshots) {
          MutexLock l(thread->shared->GetMutexForKey(rand_key));
          if (FLAGS_verify_before_write) {
            std::string keystr = Key(rand_key);
            Slice k = keystr;
            Status s = db_->Get(read_opts, k, &from_db);
            VerifyValue(rand_key,
                        read_opts,
                        *(thread->shared),
                        from_db,
                        s,
                        true);
          }
          thread->shared->Put(rand_key, value_base);
          if (FLAGS_use_merge) {
            db_->Merge(write_opts, key, v);
          } else {
            db_->Put(write_opts, key, v);
          }
          thread->stats.AddBytesForWrites(1, sz);
        } else {
          MultiPut(thread, write_opts, key, v, sz);
        }
        PrintKeyValue(rand_key, value, sz);
      } else if (writeBound <= prob_op && prob_op < delBound) {
        // OPERATION delete
        if (!FLAGS_test_batches_snapshots) {
          MutexLock l(thread->shared->GetMutexForKey(rand_key));
          thread->shared->Delete(rand_key);
          db_->Delete(write_opts, key);
          thread->stats.AddDeletes(1);
        } else {
          MultiDelete(thread, write_opts, key);
        }
      } else {
        // OPERATION iterate
        MultiIterate(thread, read_opts, key);
      }
      thread->stats.FinishedSingleOp();
    }

    thread->stats.Stop();
  }

  void VerifyDb(ThreadState* thread) const {
    ReadOptions options(FLAGS_verify_checksum, true);
    const SharedState& shared = *(thread->shared);
    static const long max_key = shared.GetMaxKey();
    static const long keys_per_thread = max_key / shared.GetNumThreads();
    long start = keys_per_thread * thread->tid;
    long end = start + keys_per_thread;
    if (thread->tid == shared.GetNumThreads() - 1) {
      end = max_key;
    }
    if (!thread->rand.OneIn(2)) {
      // Use iterator to verify this range
      unique_ptr<Iterator> iter(db_->NewIterator(options));
      iter->Seek(Key(start));
      for (long i = start; i < end; i++) {
        std::string from_db;
        std::string keystr = Key(i);
        Slice k = keystr;
        Status s = iter->status();
        if (iter->Valid()) {
          if (iter->key().compare(k) > 0) {
            s = Status::NotFound(Slice());
          } else if (iter->key().compare(k) == 0) {
            from_db = iter->value().ToString();
            iter->Next();
          } else if (iter->key().compare(k) < 0) {
            VerificationAbort("An out of range key was found", i);
          }
        } else {
          // The iterator found no value for the key in question, so do not
          // move to the next item in the iterator
          s = Status::NotFound(Slice());
        }
        VerifyValue(i, options, shared, from_db, s, true);
        if (from_db.length()) {
          PrintKeyValue(i, from_db.data(), from_db.length());
        }
      }
    }
    else {
      // Use Get to verify this range
      for (long i = start; i < end; i++) {
        std::string from_db;
        std::string keystr = Key(i);
        Slice k = keystr;
        Status s = db_->Get(options, k, &from_db);
        VerifyValue(i, options, shared, from_db, s, true);
        if (from_db.length()) {
          PrintKeyValue(i, from_db.data(), from_db.length());
        }
      }
    }
  }

  void VerificationAbort(std::string msg, long key) const {
    fprintf(stderr, "Verification failed for key %ld: %s\n",
            key, msg.c_str());
    exit(1);
  }

  void VerifyValue(long key,
                   const ReadOptions &opts,
                   const SharedState &shared,
                   const std::string &value_from_db,
                   Status s,
                   bool strict=false) const {
    // compare value_from_db with the value in the shared state
    char value[100];
    uint32_t value_base = shared.Get(key);
    if (value_base == SharedState::SENTINEL && !strict) {
      return;
    }

    if (s.ok()) {
      if (value_base == SharedState::SENTINEL) {
        VerificationAbort("Unexpected value found", key);
      }
      size_t sz = GenerateValue(value_base, value, sizeof(value));
      if (value_from_db.length() != sz) {
        VerificationAbort("Length of value read is not equal", key);
      }
      if (memcmp(value_from_db.data(), value, sz) != 0) {
        VerificationAbort("Contents of value read don't match", key);
      }
    } else {
      if (value_base != SharedState::SENTINEL) {
        VerificationAbort("Value not found", key);
      }
    }
  }

  static void PrintKeyValue(uint32_t key, const char *value, size_t sz) {
    if (!FLAGS_verbose) return;
    fprintf(stdout, "%u ==> (%u) ", key, (unsigned int)sz);
    for (size_t i=0; i<sz; i++) {
      fprintf(stdout, "%X", value[i]);
    }
    fprintf(stdout, "\n");
  }

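  // The value is derived deterministically from a 32-bit seed: the seed is
  // stored in the first four bytes and every following byte is (seed ^ i),
  // so VerifyValue() can recompute the expected contents from the seed
  // recorded in SharedState.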
  static size_t GenerateValue(uint32_t rand, char *v, size_t max_sz) {
    size_t value_sz = ((rand % 3) + 1) * FLAGS_value_size_mult;
    assert(value_sz <= max_sz && value_sz >= sizeof(uint32_t));
    *((uint32_t*)v) = rand;
    for (size_t i=sizeof(uint32_t); i < value_sz; i++) {
      v[i] = (char)(rand ^ i);
    }
    v[value_sz] = '\0';
    return value_sz; // the size of the value set.
  }

  void PrintEnv() const {
    fprintf(stdout, "LevelDB version     : %d.%d\n",
            kMajorVersion, kMinorVersion);
    fprintf(stdout, "Number of threads   : %d\n", FLAGS_threads);
    fprintf(stdout, "Ops per thread      : %lu\n", FLAGS_ops_per_thread);
    std::string ttl_state("unused");
    if (FLAGS_ttl > 0) {
      ttl_state = NumberToString(FLAGS_ttl);
    }
    fprintf(stdout, "Time to live(sec)   : %s\n", ttl_state.c_str());
    fprintf(stdout, "Read percentage     : %d\n", FLAGS_readpercent);
    fprintf(stdout, "Prefix percentage   : %d\n", FLAGS_prefixpercent);
    fprintf(stdout, "Write percentage    : %d\n", FLAGS_writepercent);
    fprintf(stdout, "Delete percentage   : %d\n", FLAGS_delpercent);
    fprintf(stdout, "Iterate percentage  : %d\n", FLAGS_iterpercent);
    fprintf(stdout, "Write-buffer-size   : %d\n", FLAGS_write_buffer_size);
    fprintf(stdout, "Iterations          : %lu\n", FLAGS_num_iterations);
    fprintf(stdout, "Max key             : %ld\n", FLAGS_max_key);
    fprintf(stdout, "Ratio #ops/#keys    : %f\n",
            (1.0 * FLAGS_ops_per_thread * FLAGS_threads)/FLAGS_max_key);
    fprintf(stdout, "Num times DB reopens: %d\n", FLAGS_reopen);
    fprintf(stdout, "Batches/snapshots   : %d\n",
            FLAGS_test_batches_snapshots);
    fprintf(stdout, "Purge redundant %%   : %d\n",
            FLAGS_purge_redundant_percent);
    fprintf(stdout, "Deletes use filter  : %d\n",
            FLAGS_filter_deletes);
    fprintf(stdout, "Num keys per lock   : %d\n",
            1 << FLAGS_log2_keys_per_lock);

    const char* compression = "";
    switch (FLAGS_compression_type_e) {
      case rocksdb::kNoCompression:
        compression = "none";
        break;
      case rocksdb::kSnappyCompression:
        compression = "snappy";
        break;
      case rocksdb::kZlibCompression:
        compression = "zlib";
        break;
      case rocksdb::kBZip2Compression:
        compression = "bzip2";
        break;
    }

    fprintf(stdout, "Compression         : %s\n", compression);

    const char* memtablerep = "";
    switch (FLAGS_rep_factory) {
      case kSkipList:
        memtablerep = "skip_list";
        break;
      case kPrefixHash:
        memtablerep = "prefix_hash";
        break;
      case kUnsorted:
        memtablerep = "unsorted";
        break;
      case kVectorRep:
        memtablerep = "vector";
        break;
    }

    fprintf(stdout, "Memtablerep         : %s\n", memtablerep);

    fprintf(stdout, "------------------------------------------------\n");
  }

  void Open() {
    assert(db_ == nullptr);
    Options options;
    options.block_cache = cache_;
    options.write_buffer_size = FLAGS_write_buffer_size;
    options.max_write_buffer_number = FLAGS_max_write_buffer_number;
    options.min_write_buffer_number_to_merge =
      FLAGS_min_write_buffer_number_to_merge;
    options.max_background_compactions = FLAGS_max_background_compactions;
    options.compaction_style =
      static_cast<rocksdb::CompactionStyle>(FLAGS_compaction_style);
    options.block_size = FLAGS_block_size;
    options.filter_policy = filter_policy_;
    options.prefix_extractor = prefix_extractor_;
    options.max_open_files = FLAGS_open_files;
    options.statistics = dbstats;
    options.env = FLAGS_env;
    options.disableDataSync = FLAGS_disable_data_sync;
    options.use_fsync = FLAGS_use_fsync;
    options.allow_mmap_reads = FLAGS_mmap_read;
    rocksdb_kill_odds = FLAGS_kill_random_test;
    options.target_file_size_base = FLAGS_target_file_size_base;
    options.target_file_size_multiplier = FLAGS_target_file_size_multiplier;
    options.max_bytes_for_level_base = FLAGS_max_bytes_for_level_base;
    options.max_bytes_for_level_multiplier =
        FLAGS_max_bytes_for_level_multiplier;
    options.level0_stop_writes_trigger = FLAGS_level0_stop_writes_trigger;
    options.level0_slowdown_writes_trigger =
      FLAGS_level0_slowdown_writes_trigger;
    options.level0_file_num_compaction_trigger =
      FLAGS_level0_file_num_compaction_trigger;
    options.compression = FLAGS_compression_type_e;
    options.create_if_missing = true;
    options.disable_seek_compaction = FLAGS_disable_seek_compaction;
    options.delete_obsolete_files_period_micros =
      FLAGS_delete_obsolete_files_period_micros;
    options.max_manifest_file_size = 1024;
    options.filter_deletes = FLAGS_filter_deletes;
    if ((FLAGS_prefix_size == 0) == (FLAGS_rep_factory == kPrefixHash)) {
      fprintf(stderr,
            "prefix_size should be non-zero iff memtablerep == prefix_hash\n");
      exit(1);
    }
    switch (FLAGS_rep_factory) {
      case kPrefixHash:
        options.memtable_factory.reset(
          new PrefixHashRepFactory(NewFixedPrefixTransform(FLAGS_prefix_size))
        );
        break;
      case kUnsorted:
        options.memtable_factory.reset(
          new UnsortedRepFactory()
        );
        break;
      case kSkipList:
        // no need to do anything
        break;
      case kVectorRep:
        options.memtable_factory.reset(
          new VectorRepFactory()
        );
        break;
    }
    static Random purge_percent(1000); // no benefit from non-determinism here
    if (static_cast<int32_t>(purge_percent.Uniform(100)) <
        FLAGS_purge_redundant_percent - 1) {
      options.purge_redundant_kvs_while_flush = false;
    }

    if (FLAGS_use_merge) {
      options.merge_operator = MergeOperators::CreatePutOperator();
    }

    // set universal style compaction configurations, if applicable
    if (FLAGS_universal_size_ratio != 0) {
      options.compaction_options_universal.size_ratio =
        FLAGS_universal_size_ratio;
    }
    if (FLAGS_universal_min_merge_width != 0) {
      options.compaction_options_universal.min_merge_width =
        FLAGS_universal_min_merge_width;
    }
    if (FLAGS_universal_max_merge_width != 0) {
      options.compaction_options_universal.max_merge_width =
        FLAGS_universal_max_merge_width;
    }
    if (FLAGS_universal_max_size_amplification_percent != 0) {
      options.compaction_options_universal.max_size_amplification_percent =
        FLAGS_universal_max_size_amplification_percent;
    }

    fprintf(stdout, "DB path: [%s]\n", FLAGS_db.c_str());

    Status s;
    if (FLAGS_ttl == -1) {
      s = DB::Open(options, FLAGS_db, &db_);
    } else {
      s = UtilityDB::OpenTtlDB(options, FLAGS_db, &sdb_, FLAGS_ttl);
      db_ = sdb_;
    }
    if (!s.ok()) {
      fprintf(stderr, "open error: %s\n", s.ToString().c_str());
      exit(1);
    }
  }

  void Reopen() {
    // do not close the db. Just delete the lock file. This
    // simulates a crash-recovery kind of situation.
    if (FLAGS_ttl != -1) {
      ((DBWithTTL*) db_)->TEST_Destroy_DBWithTtl();
    } else {
      ((DBImpl*) db_)->TEST_Destroy_DBImpl();
    }
    db_ = nullptr;

    num_times_reopened_++;
    double now = FLAGS_env->NowMicros();
    fprintf(stdout, "%s Reopening database for the %dth time\n",
            FLAGS_env->TimeToString((uint64_t) now/1000000).c_str(),
            num_times_reopened_);
    Open();
  }

  void PrintStatistics() {
    if (dbstats) {
      fprintf(stdout, "STATISTICS:\n%s\n", dbstats->ToString().c_str());
    }
  }

 private:
  shared_ptr<Cache> cache_;
  const FilterPolicy* filter_policy_;
  const SliceTransform* prefix_extractor_;
  DB* db_;
  StackableDB* sdb_;
  int num_times_reopened_;
};

}  // namespace rocksdb



int main(int argc, char** argv) {
  google::SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                          " [OPTIONS]...");
  google::ParseCommandLineFlags(&argc, &argv, true);

  if (FLAGS_statistics) {
    dbstats = rocksdb::CreateDBStatistics();
  }
  FLAGS_compression_type_e =
    StringToCompressionType(FLAGS_compression_type.c_str());
  if (!FLAGS_hdfs.empty()) {
    FLAGS_env  = new rocksdb::HdfsEnv(FLAGS_hdfs);
  }
  FLAGS_rep_factory = StringToRepFactory(FLAGS_memtablerep.c_str());

  // The number of background threads should be at least as large as the
  // max number of concurrent compactions.
  FLAGS_env->SetBackgroundThreads(FLAGS_max_background_compactions);

  if ((FLAGS_readpercent + FLAGS_prefixpercent +
       FLAGS_writepercent + FLAGS_delpercent + FLAGS_iterpercent) != 100) {
      fprintf(stderr,
              "Error: Read+Prefix+Write+Delete+Iterate percents != 100!\n");
      exit(1);
  }
  if (FLAGS_disable_wal == 1 && FLAGS_reopen > 0) {
      fprintf(stderr, "Error: Db cannot reopen safely with disable_wal set!\n");
      exit(1);
  }
  if ((unsigned)FLAGS_reopen >= FLAGS_ops_per_thread) {
      fprintf(stderr, "Error: #DB-reopens should be < ops_per_thread\n"
        "Provided reopens = %d and ops_per_thread = %lu\n", FLAGS_reopen,
        FLAGS_ops_per_thread);
      exit(1);
  }

  // Choose a location for the test database if none given with --db=<path>
  if (FLAGS_db.empty()) {
      std::string default_db_path;
      rocksdb::Env::Default()->GetTestDirectory(&default_db_path);
      default_db_path += "/dbstress";
      FLAGS_db = default_db_path;
  }

  rocksdb::StressTest stress;
  stress.Run();
  return 0;
}