db_impl.cc 152.5 KB
Newer Older
1 2 3 4 5
//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
J
jorlow@chromium.org 已提交
6 7 8 9 10 11
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

12 13
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
J
jorlow@chromium.org 已提交
14
#include <algorithm>
15 16
#include <climits>
#include <cstdio>
J
jorlow@chromium.org 已提交
17
#include <set>
18
#include <stdexcept>
19 20
#include <stdint.h>
#include <string>
21
#include <unordered_set>
T
Tomislav Novak 已提交
22
#include <utility>
23
#include <vector>
24

J
jorlow@chromium.org 已提交
25
#include "db/builder.h"
26
#include "db/db_iter.h"
K
kailiu 已提交
27
#include "db/dbformat.h"
J
jorlow@chromium.org 已提交
28 29 30 31
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
K
kailiu 已提交
32
#include "db/memtable_list.h"
33
#include "db/merge_context.h"
34
#include "db/merge_helper.h"
T
Tyler Harter 已提交
35
#include "db/prefix_filter_iterator.h"
J
jorlow@chromium.org 已提交
36
#include "db/table_cache.h"
K
kailiu 已提交
37
#include "db/table_properties_collector.h"
T
Tomislav Novak 已提交
38
#include "db/tailing_iter.h"
39
#include "db/transaction_log_impl.h"
J
jorlow@chromium.org 已提交
40 41
#include "db/version_set.h"
#include "db/write_batch_internal.h"
42
#include "port/port.h"
43
#include "port/likely.h"
44 45 46 47 48 49
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
S
Siying Dong 已提交
50
#include "rocksdb/table.h"
J
jorlow@chromium.org 已提交
51
#include "table/block.h"
52
#include "table/block_based_table_factory.h"
J
jorlow@chromium.org 已提交
53
#include "table/merger.h"
K
kailiu 已提交
54
#include "table/table_builder.h"
J
jorlow@chromium.org 已提交
55
#include "table/two_level_iterator.h"
56
#include "util/auto_roll_logger.h"
K
kailiu 已提交
57
#include "util/autovector.h"
58
#include "util/build_version.h"
J
jorlow@chromium.org 已提交
59
#include "util/coding.h"
I
Igor Canadi 已提交
60
#include "util/hash_skiplist_rep.h"
61
#include "util/hash_linklist_rep.h"
J
jorlow@chromium.org 已提交
62
#include "util/logging.h"
H
Haobo Xu 已提交
63
#include "util/log_buffer.h"
J
jorlow@chromium.org 已提交
64
#include "util/mutexlock.h"
65
#include "util/perf_context_imp.h"
66
#include "util/stop_watch.h"
67
#include "util/sync_point.h"
J
jorlow@chromium.org 已提交
68

69
namespace rocksdb {
J
jorlow@chromium.org 已提交
70

71 72 73 74
int DBImpl::SuperVersion::dummy = 0;
void* const DBImpl::SuperVersion::kSVInUse = &DBImpl::SuperVersion::dummy;
void* const DBImpl::SuperVersion::kSVObsolete = nullptr;

K
kailiu 已提交
75
void DumpLeveldbBuildVersion(Logger * log);
76

77 78 79 80 81
// Per-writer bookkeeping for a thread waiting in the write queue.
struct DBImpl::Writer {
  Status status;      // outcome handed back to this writer
  WriteBatch* batch;  // batch this writer wants applied
  bool sync;          // sync the log for this write?
  bool disableWAL;    // bypass the WAL for this write?
  bool done;          // set when another thread completed this write
  port::CondVar cv;   // signalled when this writer may proceed

  explicit Writer(port::Mutex* mu) : cv(mu) {}
};

J
jorlow@chromium.org 已提交
89 90 91
struct DBImpl::CompactionState {
  Compaction* const compaction;

  // If there were two snapshots with seq numbers s1 and s2, s1 < s2, and a
  // key k1 has two versions lying entirely between s1 and s2, then the older
  // version of k1 is invisible to every snapshot and can safely be dropped.
  std::vector<SequenceNumber> existing_snapshots;

  // Files produced by this compaction.
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
    SequenceNumber smallest_seqno, largest_seqno;
  };
  std::vector<Output> outputs;
  std::list<uint64_t> allocated_file_numbers;

  // State of the output file currently being generated.
  unique_ptr<WritableFile> outfile;
  unique_ptr<TableBuilder> builder;

  uint64_t total_bytes;

  Output* current_output() { return &outputs.back(); }

  explicit CompactionState(Compaction* c)
      : compaction(c), total_bytes(0) {}

  // Build the client-visible context for a V1 compaction filter.
  CompactionFilter::Context GetFilterContextV1() {
    CompactionFilter::Context context;
    context.is_full_compaction = compaction->IsFullCompaction();
    context.is_manual_compaction = compaction->IsManualCompaction();
    return context;
  }

  // Build the client-visible context for a (V2) compaction filter.
  CompactionFilterContext GetFilterContext() {
    CompactionFilterContext context;
    context.is_full_compaction = compaction->IsFullCompaction();
    context.is_manual_compaction = compaction->IsManualCompaction();
    return context;
  }

  std::vector<Slice> key_buf_;
  std::vector<Slice> existing_value_buf_;
  std::vector<std::string> key_str_buf_;
  std::vector<std::string> existing_value_str_buf_;
  // new_value_buf_ only receives an entry when a value actually changes
  std::vector<std::string> new_value_buf_;
  // if value_changed_buf_[i] is true,
  // new_value_buf_ gains a new entry holding the changed value
  std::vector<bool> value_changed_buf_;
  // to_delete_buf_[i] is true iff key_buf_[i] is deleted
  std::vector<bool> to_delete_buf_;
  // parsed internal keys; the backing bytes live in key_str_buf_
  std::vector<ParsedInternalKey> ikey_buf_;

  std::vector<Slice> other_key_buf_;
  std::vector<Slice> other_value_buf_;
  std::vector<std::string> other_key_str_buf_;
  std::vector<std::string> other_value_str_buf_;

  std::vector<Slice> combined_key_buf_;
  std::vector<Slice> combined_value_buf_;

  std::string cur_prefix_;

  // Queue a kv-pair destined for compaction filter V2.
  // NOTE(review): the Slices reference strings stored by value inside the
  // *_str_buf_ vectors; a reallocation that moves short (SSO) strings would
  // leave earlier Slices dangling -- confirm callers bound these buffers.
  void BufferKeyValueSlices(const Slice& key, const Slice& value) {
    key_str_buf_.emplace_back(key.ToString());
    existing_value_str_buf_.emplace_back(value.ToString());
    key_buf_.emplace_back(Slice(key_str_buf_.back()));
    existing_value_buf_.emplace_back(Slice(existing_value_str_buf_.back()));

    ParsedInternalKey ikey;
    ParseInternalKey(key_buf_.back(), &ikey);
    ikey_buf_.emplace_back(ikey);
  }

  // Queue a kv-pair that bypasses compaction filter V2.
  void BufferOtherKeyValueSlices(const Slice& key, const Slice& value) {
    other_key_str_buf_.emplace_back(key.ToString());
    other_value_str_buf_.emplace_back(value.ToString());
    other_key_buf_.emplace_back(Slice(other_key_str_buf_.back()));
    other_value_buf_.emplace_back(Slice(other_value_str_buf_.back()));
  }

  // Append one kv-pair to the merged output; the real bytes stay in the
  // batch buffers above.
  void AddToCombinedKeyValueSlices(const Slice& key, const Slice& value) {
    combined_key_buf_.emplace_back(key);
    combined_value_buf_.emplace_back(value);
  }

  // Merge the filtered and the bypassed buffers in comparator order.
  void MergeKeyValueSliceBuffer(const InternalKeyComparator* comparator) {
    size_t filtered_idx = 0;
    size_t other_idx = 0;
    size_t total_size = key_buf_.size() + other_key_buf_.size();
    combined_key_buf_.reserve(total_size);
    combined_value_buf_.reserve(total_size);

    while (filtered_idx + other_idx < total_size) {
      int comp_res = 0;
      if (filtered_idx < key_buf_.size() &&
          other_idx < other_key_buf_.size()) {
        comp_res = comparator->Compare(key_buf_[filtered_idx],
                                       other_key_buf_[other_idx]);
      } else if (filtered_idx >= key_buf_.size() &&
                 other_idx < other_key_buf_.size()) {
        comp_res = 1;
      } else if (other_idx >= other_key_buf_.size() &&
                 filtered_idx < key_buf_.size()) {
        comp_res = -1;
      }
      // NOTE(review): comp_res == 0 advances neither index; this relies on
      // internal keys being unique across the two buffers -- confirm.
      if (comp_res > 0) {
        AddToCombinedKeyValueSlices(other_key_buf_[other_idx],
                                    other_value_buf_[other_idx]);
        other_idx++;
      } else if (comp_res < 0) {
        AddToCombinedKeyValueSlices(key_buf_[filtered_idx],
                                    existing_value_buf_[filtered_idx]);
        filtered_idx++;
      }
    }
  }

  // Drop the batch buffers and release their capacity.
  void CleanupBatchBuffer() {
    to_delete_buf_.clear();
    key_buf_.clear();
    existing_value_buf_.clear();
    key_str_buf_.clear();
    existing_value_str_buf_.clear();
    new_value_buf_.clear();
    value_changed_buf_.clear();
    ikey_buf_.clear();

    to_delete_buf_.shrink_to_fit();
    key_buf_.shrink_to_fit();
    existing_value_buf_.shrink_to_fit();
    key_str_buf_.shrink_to_fit();
    existing_value_str_buf_.shrink_to_fit();
    new_value_buf_.shrink_to_fit();
    value_changed_buf_.shrink_to_fit();
    ikey_buf_.shrink_to_fit();

    other_key_buf_.clear();
    other_value_buf_.clear();
    other_key_str_buf_.clear();
    other_value_str_buf_.clear();
    other_key_buf_.shrink_to_fit();
    other_value_buf_.shrink_to_fit();
    other_key_str_buf_.shrink_to_fit();
    other_value_str_buf_.shrink_to_fit();
  }

  // Drop the merged buffer and release its capacity.
  void CleanupMergedBuffer() {
    combined_key_buf_.clear();
    combined_value_buf_.clear();
    combined_key_buf_.shrink_to_fit();
    combined_value_buf_.shrink_to_fit();
  }
};

// Fix user-supplied options to be reasonable
256
// Clamp *ptr (viewed as a V) into [minvalue, maxvalue]. The upper bound is
// applied first, so if minvalue > maxvalue the lower bound wins.
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) {
    *ptr = maxvalue;
  }
  if (static_cast<V>(*ptr) < minvalue) {
    *ptr = minvalue;
  }
}
// Fix user-supplied options to be reasonable: clamp numeric knobs into sane
// ranges, fall back to defaults for missing logger/cache, and wrap any
// user-supplied table-properties collectors so they see user keys only.
// `icmp` is accepted for interface compatibility; `ipolicy` replaces the
// user's filter policy when one is set.
Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src) {
  Options result = src;
  result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
  // max_open_files == -1 means an "infinite" number of open files.
  if (result.max_open_files != -1) {
    ClipToRange(&result.max_open_files,            20,     1000000);
  }
  ClipToRange(&result.write_buffer_size,         ((size_t)64)<<10,
                                                 ((size_t)64)<<30);
  ClipToRange(&result.block_size,                1<<10,  4<<20);

  // if user sets arena_block_size, we trust user to use this value. Otherwise,
  // calculate a proper value from write_buffer_size;
  if (result.arena_block_size <= 0) {
    result.arena_block_size = result.write_buffer_size / 10;
  }

  result.min_write_buffer_number_to_merge = std::min(
    result.min_write_buffer_number_to_merge, result.max_write_buffer_number-1);
  if (result.info_log == nullptr) {
    Status s = CreateLoggerFromOptions(dbname, result.db_log_dir, src.env,
                                       result, &result.info_log);
    if (!s.ok()) {
      // No place suitable for logging
      result.info_log = nullptr;
    }
  }
  if (result.block_cache == nullptr && !result.no_block_cache) {
    result.block_cache = NewLRUCache(8 << 20);
  }
  result.compression_per_level = src.compression_per_level;
  if (result.block_size_deviation < 0 || result.block_size_deviation > 100) {
    result.block_size_deviation = 0;
  }
  if (result.max_mem_compaction_level >= result.num_levels) {
    result.max_mem_compaction_level = result.num_levels - 1;
  }
  if (result.soft_rate_limit > result.hard_rate_limit) {
    result.soft_rate_limit = result.hard_rate_limit;
  }
  if (result.compaction_filter) {
    Log(result.info_log, "Compaction filter specified, ignore factory");
  }
  if (result.prefix_extractor) {
    Log(result.info_log, "prefix extractor %s in use.",
        result.prefix_extractor->Name());
  } else {
    // Hash-based memtables require a prefix extractor; without one, fall
    // back to the plain skiplist memtable.
    assert(result.memtable_factory);
    Slice name = result.memtable_factory->Name();
    if (name.compare("HashSkipListRepFactory") == 0 ||
        name.compare("HashLinkListRepFactory") == 0) {
      Log(result.info_log, "prefix extractor is not provided while using %s. "
          "fallback to skiplist", name.ToString().c_str());
      result.memtable_factory = std::make_shared<SkipListFactory>();
    }
  }

  if (result.wal_dir.empty()) {
    // Use dbname as default
    result.wal_dir = dbname;
  }
  if (result.wal_dir.back() == '/') {
    // BUG FIX: the original used substr(size() - 1), which keeps ONLY the
    // trailing '/' and throws away the rest of the path. substr(0, size() - 1)
    // strips the trailing separator while preserving the directory path.
    result.wal_dir = result.wal_dir.substr(0, result.wal_dir.size() - 1);
  }

  // -- Sanitize the table properties collectors
  // All user defined properties collectors will be wrapped by
  // UserKeyTablePropertiesCollector since for them they only have the
  // knowledge of the user keys; internal keys are invisible to them.
  auto& collectors = result.table_properties_collectors;
  for (size_t i = 0; i < result.table_properties_collectors.size(); ++i) {
    assert(collectors[i]);
    collectors[i] =
      std::make_shared<UserKeyTablePropertiesCollector>(collectors[i]);
  }

  // Add collector to collect internal key statistics
  collectors.push_back(
      std::make_shared<InternalKeyPropertiesCollector>()
  );

  return result;
}

S
Siying Dong 已提交
348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368
// Pick the compression for `level`. When enable_compression is false the
// answer is always kNoCompression; otherwise use the per-level table if the
// user supplied one, falling back to the global setting.
CompressionType GetCompressionType(const Options& options, int level,
                                   const bool enable_compression) {
  if (!enable_compression) {
    // disable compression
    return kNoCompression;
  }
  if (options.compression_per_level.empty()) {
    return options.compression;
  }
  // The user specified a compression per level. level may be -1 (the builder
  // doesn't always know which level a file belongs to), in which case level
  // 0's entry is used; levels past the end of the table use the last entry.
  const int last = static_cast<int>(options.compression_per_level.size()) - 1;
  const int idx = std::max(0, std::min(level, last));
  return options.compression_per_level[idx];
}

369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390
// Decide whether memtable flushes should be compressed. Compressing flushes
// mostly helps when the sequential-load optimization applies; otherwise the
// CPU/latency cost is not worth the space saved.
CompressionType GetCompressionFlush(const Options& options) {
  bool can_compress;
  if (options.compaction_style == kCompactionStyleUniversal) {
    // Universal compaction opts in only when size-percent compression is off.
    can_compress =
        (options.compaction_options_universal.compression_size_percent < 0);
  } else {
    // For leveled: compress when min_level_to_compress == 0.
    can_compress = (GetCompressionType(options, 0, true) != kNoCompression);
  }
  return can_compress ? options.compression : kNoCompression;
}

J
jorlow@chromium.org 已提交
391 392
// Construct the DB object: sanitize options, set up the table cache and
// version set, and log build/option information. Does NOT open any files
// beyond the table cache/version-set plumbing; Recover() does that later.
DBImpl::DBImpl(const Options& options, const std::string& dbname)
    : env_(options.env),
      dbname_(dbname),
      internal_comparator_(options.comparator),
      options_(SanitizeOptions(dbname, &internal_comparator_,
                               &internal_filter_policy_, options)),
      internal_filter_policy_(options.filter_policy),
      owns_info_log_(options_.info_log != options.info_log),
      db_lock_(nullptr),
      mutex_(options.use_adaptive_mutex),
      shutting_down_(nullptr),
      bg_cv_(&mutex_),
      mem_(new MemTable(internal_comparator_, options_)),
      imm_(options_.min_write_buffer_number_to_merge),
      logfile_number_(0),
      super_version_(nullptr),
      super_version_number_(0),
      local_sv_(new ThreadLocalPtr(&SuperVersionUnrefHandle)),
      tmp_batch_(),
      bg_schedule_needed_(false),
      bg_compaction_scheduled_(0),
      bg_manual_only_(0),
      bg_flush_scheduled_(0),
      bg_logstats_scheduled_(false),
      manual_compaction_(nullptr),
      logger_(nullptr),
      disable_delete_obsolete_files_(0),
      delete_obsolete_files_last_run_(options.env->NowMicros()),
      purge_wal_files_last_run_(0),
      last_stats_dump_time_microsec_(0),
      default_interval_to_delete_obsolete_WAL_(600),
      flush_on_destroy_(false),
      internal_stats_(options.num_levels, options.env,
                      options.statistics.get()),
      delayed_writes_(0),
      storage_options_(options),
      bg_work_gate_closed_(false),
      refitting_level_(false),
      opened_successfully_(false) {
  mem_->Ref();
  env_->GetAbsolutePath(dbname, &db_absolute_path_);

  // Reserve ten files or so for other uses and give the rest to TableCache.
  // max_open_files == -1 means "infinite": use a very large cache instead.
  const int table_cache_size =
      (options_.max_open_files == -1) ?
          4194304 : options_.max_open_files - 10;
  table_cache_.reset(new TableCache(dbname_, &options_,
                                    storage_options_, table_cache_size));
  versions_.reset(new VersionSet(dbname_, &options_, storage_options_,
                                 table_cache_.get(), &internal_comparator_));

  DumpLeveldbBuildVersion(options_.info_log.get());
  options_.Dump(options_.info_log.get());

  // Resolve the host name once, for stats reporting.
  char hostname_buf[100];
  Status host_status = env_->GetHostName(hostname_buf, 100L);
  if (host_status.ok()) {
    host_name_ = hostname_buf;
  } else {
    Log(options_.info_log, "Can't get hostname, use localhost as host name.");
    host_name_ = "localhost";
  }
  last_log_ts = 0;

  LogFlush(options_.info_log);
}

// Tear down the DB: optionally flush, wait out background work, release the
// thread-local superversions, clean up obsolete files, then drop all
// remaining references. The lock/unlock ordering here is deliberate.
DBImpl::~DBImpl() {
  // Flush first if requested and the memtable is non-empty.
  if (flush_on_destroy_ && mem_->GetFirstSequenceNumber() != 0) {
    FlushMemTable(FlushOptions());
  }

  mutex_.Lock();
  shutting_down_.Release_Store(this);  // Any non-nullptr value is ok
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  // Release SuperVersion reference kept in ThreadLocalPtr.
  // This must be done outside of mutex_ since unref handler can lock mutex.
  // It also needs to be done after FlushMemTable, which can trigger local_sv_
  // access.
  delete local_sv_;

  mutex_.Lock();
  if (options_.allow_thread_local) {
    // Clean up obsolete files due to SuperVersion release.
    // (1) Need to delete obsolete files before closing because RepairDB()
    // scans all existing files in the file system and builds manifest file.
    // Keeping obsolete files confuses the repair process.
    // (2) Need to check if we Open()/Recover() the DB successfully before
    // deleting because if VersionSet recover fails (may be due to corrupted
    // manifest file), it is not able to identify live files correctly. As a
    // result, all "live" files can get deleted by accident. However, corrupted
    // manifest is recoverable by RepairDB().
    if (opened_successfully_) {
      DeletionState deletion_state;
      FindObsoleteFiles(deletion_state, true);
      // manifest number starting from 2
      deletion_state.manifest_file_number = 1;
      if (deletion_state.HaveSomethingToDelete()) {
        PurgeObsoleteFiles(deletion_state);
      }
    }
  }

  if (super_version_ != nullptr) {
    bool is_last_reference __attribute__((unused));
    is_last_reference = super_version_->Unref();
    assert(is_last_reference);
    super_version_->Cleanup();
    delete super_version_;
  }
  mutex_.Unlock();

  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  if (mem_ != nullptr) {
    delete mem_->Unref();
  }

  autovector<MemTable*> pending_memtables;
  imm_.current()->Unref(&pending_memtables);
  for (MemTable* memtable : pending_memtables) {
    delete memtable;
  }
  // versions need to be destroyed before table_cache since it can hold
  // references to table_cache.
  versions_.reset();
  LogFlush(options_.info_log);
}

A
Abhishek Kona 已提交
530
// Do not flush and close database elegantly. Simulate a crash.
531 532 533 534 535 536
void DBImpl::TEST_Destroy_DBImpl() {
  // ensure that no new memtable flushes can occur
  flush_on_destroy_ = false;

  // wait till all background compactions are done.
  mutex_.Lock();
537 538 539
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
540 541
    bg_cv_.Wait();
  }
542 543 544 545 546 547 548 549 550
  mutex_.Unlock();

  // Release SuperVersion reference kept in ThreadLocalPtr.
  // This must be done outside of mutex_ since unref handler can lock mutex.
  // It also needs to be done after FlushMemTable, which can trigger local_sv_
  // access.
  delete local_sv_;

  mutex_.Lock();
I
Igor Canadi 已提交
551 552 553 554 555 556 557
  if (super_version_ != nullptr) {
    bool is_last_reference __attribute__((unused));
    is_last_reference = super_version_->Unref();
    assert(is_last_reference);
    super_version_->Cleanup();
    delete super_version_;
  }
558 559

  // Prevent new compactions from occuring.
560
  bg_work_gate_closed_ = true;
561 562
  const int LargeNumber = 10000000;
  bg_compaction_scheduled_ += LargeNumber;
563

564
  mutex_.Unlock();
I
Igor Canadi 已提交
565
  LogFlush(options_.info_log);
566 567

  // force release the lock file.
568
  if (db_lock_ != nullptr) {
569 570
    env_->UnlockFile(db_lock_);
  }
571 572 573 574

  log_.reset();
  versions_.reset();
  table_cache_.reset();
575 576
}

A
Abhishek Kona 已提交
577 578 579
// Test hook: expose the current manifest file number.
uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
  const uint64_t manifest_number = versions_->ManifestFileNumber();
  return manifest_number;
}
580

J
jorlow@chromium.org 已提交
581
// Seed a brand-new database: write an empty VersionEdit into manifest file
// #1 and point CURRENT at it. On failure the half-written manifest is
// removed.
Status DBImpl::NewDB() {
  VersionEdit genesis;
  genesis.SetComparatorName(user_comparator()->Name());
  genesis.SetLogNumber(0);
  genesis.SetNextFile(2);
  genesis.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  unique_ptr<WritableFile> manifest_file;
  Status s = env_->NewWritableFile(
      manifest, &manifest_file,
      env_->OptimizeForManifestWrite(storage_options_));
  if (!s.ok()) {
    return s;
  }
  manifest_file->SetPreallocationBlockSize(
      options_.manifest_preallocation_size);
  {
    // Scope the writer so the file is released before CURRENT is updated.
    log::Writer manifest_log(std::move(manifest_file));
    std::string record;
    genesis.EncodeTo(&record);
    s = manifest_log.AddRecord(record);
  }
  if (s.ok()) {
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
  } else {
    env_->DeleteFile(manifest);
  }
  return s;
}

// Downgrade an error to OK (with a log line) unless paranoid checks are on.
void DBImpl::MaybeIgnoreError(Status* s) const {
  if (!s->ok() && !options_.paranoid_checks) {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
    *s = Status::OK();
  }
}

620
// Create the WAL archival directory, but only when WAL archiving is enabled
// (a TTL or a size limit is configured).
const Status DBImpl::CreateArchivalDirectory() {
  const bool archiving_enabled =
      options_.WAL_ttl_seconds > 0 || options_.WAL_size_limit_MB > 0;
  if (!archiving_enabled) {
    return Status::OK();
  }
  std::string archivalPath = ArchivalDirectory(options_.wal_dir);
  return env_->CreateDirIfMissing(archivalPath);
}

628
void DBImpl::PrintStatistics() {
629
  auto dbstats = options_.statistics.get();
630 631
  if (dbstats) {
    Log(options_.info_log,
632 633
        "STATISTCS:\n %s",
        dbstats->ToString().c_str());
634 635 636
  }
}

637
void DBImpl::MaybeDumpStats() {
H
Haobo Xu 已提交
638 639 640 641 642 643 644 645 646 647 648 649 650
  if (options_.stats_dump_period_sec == 0) return;

  const uint64_t now_micros = env_->NowMicros();

  if (last_stats_dump_time_microsec_ +
      options_.stats_dump_period_sec * 1000000
      <= now_micros) {
    // Multiple threads could race in here simultaneously.
    // However, the last one will update last_stats_dump_time_microsec_
    // atomically. We could see more than one dump during one dump
    // period in rare cases.
    last_stats_dump_time_microsec_ = now_micros;
    std::string stats;
651
    GetProperty("rocksdb.stats", &stats);
H
Haobo Xu 已提交
652
    Log(options_.info_log, "%s", stats.c_str());
653
    PrintStatistics();
654 655 656
  }
}

I
Igor Canadi 已提交
657 658 659 660 661 662 663 664 665 666 667 668 669 670
// DBImpl::SuperVersion methods
DBImpl::SuperVersion::~SuperVersion() {
  for (auto td : to_delete) {
    delete td;
  }
}

DBImpl::SuperVersion* DBImpl::SuperVersion::Ref() {
  refs.fetch_add(1, std::memory_order_relaxed);
  return this;
}

bool DBImpl::SuperVersion::Unref() {
  assert(refs > 0);
K
Kai Liu 已提交
671
  // fetch_sub returns the previous value of yoeref
I
Igor Canadi 已提交
672 673 674 675
  return refs.fetch_sub(1, std::memory_order_relaxed) == 1;
}

void DBImpl::SuperVersion::Cleanup() {
676
  db->mutex_.AssertHeld();
I
Igor Canadi 已提交
677
  assert(refs.load(std::memory_order_relaxed) == 0);
I
Igor Canadi 已提交
678
  imm->Unref(&to_delete);
I
Igor Canadi 已提交
679 680 681 682 683 684 685
  MemTable* m = mem->Unref();
  if (m != nullptr) {
    to_delete.push_back(m);
  }
  current->Unref();
}

I
Igor Canadi 已提交
686
void DBImpl::SuperVersion::Init(MemTable* new_mem, MemTableListVersion* new_imm,
I
Igor Canadi 已提交
687
                                Version* new_current) {
688
  db->mutex_.AssertHeld();
I
Igor Canadi 已提交
689 690 691 692
  mem = new_mem;
  imm = new_imm;
  current = new_current;
  mem->Ref();
I
Igor Canadi 已提交
693
  imm->Ref();
I
Igor Canadi 已提交
694 695 696 697
  current->Ref();
  refs.store(1, std::memory_order_relaxed);
}

698
// Returns the list of live files in 'sst_live' and the list
K
kailiu 已提交
699
// of all files in the filesystem in 'candidate_files'.
I
Igor Canadi 已提交
700 701 702 703 704 705 706
// no_full_scan = true -- never do the full scan using GetChildren()
// force = false -- don't force the full scan, except every
//  options_.delete_obsolete_files_period_micros
// force = true -- force the full scan
void DBImpl::FindObsoleteFiles(DeletionState& deletion_state,
                               bool force,
                               bool no_full_scan) {
D
Dhruba Borthakur 已提交
707 708
  mutex_.AssertHeld();

709
  // if deletion is disabled, do nothing
710
  if (disable_delete_obsolete_files_ > 0) {
711 712 713
    return;
  }

714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729
  bool doing_the_full_scan = false;

  // logic for figurint out if we're doing the full scan
  if (no_full_scan) {
    doing_the_full_scan = false;
  } else if (force || options_.delete_obsolete_files_period_micros == 0) {
    doing_the_full_scan = true;
  } else {
    const uint64_t now_micros = env_->NowMicros();
    if (delete_obsolete_files_last_run_ +
        options_.delete_obsolete_files_period_micros < now_micros) {
      doing_the_full_scan = true;
      delete_obsolete_files_last_run_ = now_micros;
    }
  }

I
Igor Canadi 已提交
730 731 732
  // get obsolete files
  versions_->GetObsoleteFiles(&deletion_state.sst_delete_files);

I
Igor Canadi 已提交
733 734
  // store the current filenum, lognum, etc
  deletion_state.manifest_file_number = versions_->ManifestFileNumber();
735 736
  deletion_state.pending_manifest_file_number =
      versions_->PendingManifestFileNumber();
I
Igor Canadi 已提交
737 738 739
  deletion_state.log_number = versions_->LogNumber();
  deletion_state.prev_log_number = versions_->PrevLogNumber();

740 741 742 743 744 745 746 747
  if (!doing_the_full_scan && !deletion_state.HaveSomethingToDelete()) {
    // avoid filling up sst_live if we're sure that we
    // are not going to do the full scan and that we don't have
    // anything to delete at the moment
    return;
  }

  // don't delete live files
I
Igor Canadi 已提交
748 749 750 751
  deletion_state.sst_live.assign(pending_outputs_.begin(),
                                 pending_outputs_.end());
  versions_->AddLiveFiles(&deletion_state.sst_live);

752
  if (doing_the_full_scan) {
K
kailiu 已提交
753 754 755 756 757
    // set of all files in the directory. We'll exclude files that are still
    // alive in the subsequent processings.
    env_->GetChildren(
        dbname_, &deletion_state.candidate_files
    ); // Ignore errors
758 759 760 761 762

    //Add log files in wal_dir
    if (options_.wal_dir != dbname_) {
      std::vector<std::string> log_files;
      env_->GetChildren(options_.wal_dir, &log_files); // Ignore errors
K
kailiu 已提交
763 764
      deletion_state.candidate_files.insert(
        deletion_state.candidate_files.end(),
765 766 767
        log_files.begin(),
        log_files.end()
      );
768
    }
769
  }
770 771
}

D
Dhruba Borthakur 已提交
772
// Diffs the files listed in filenames and those that do not
I
Igor Canadi 已提交
773
// belong to live files are possibly removed. Also, removes all the
774
// files in sst_delete_files and log_delete_files.
775
// It is not necessary to hold the mutex when invoking this method.
D
Dhruba Borthakur 已提交
776
void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
777 778
  // we'd better have sth to delete
  assert(state.HaveSomethingToDelete());
779

I
Igor Canadi 已提交
780 781 782 783
  // this checks if FindObsoleteFiles() was run before. If not, don't do
  // PurgeObsoleteFiles(). If FindObsoleteFiles() was run, we need to also
  // run PurgeObsoleteFiles(), even if disable_delete_obsolete_files_ is true
  if (state.manifest_file_number == 0) {
I
Igor Canadi 已提交
784 785
    return;
  }
786

787 788
  // Now, convert live list to an unordered set, WITHOUT mutex held;
  // set is slow.
789 790
  std::unordered_set<uint64_t> sst_live(state.sst_live.begin(),
                                        state.sst_live.end());
I
Igor Canadi 已提交
791

K
kailiu 已提交
792 793 794 795 796 797 798
  auto& candidate_files = state.candidate_files;
  candidate_files.reserve(
      candidate_files.size() +
      state.sst_delete_files.size() +
      state.log_delete_files.size());
  // We may ignore the dbname when generating the file names.
  const char* kDumbDbName = "";
799
  for (auto file : state.sst_delete_files) {
K
kailiu 已提交
800 801 802
    candidate_files.push_back(
        TableFileName(kDumbDbName, file->number).substr(1)
    );
803
    delete file;
I
Igor Canadi 已提交
804 805
  }

K
kailiu 已提交
806 807
  for (auto file_num : state.log_delete_files) {
    if (file_num > 0) {
808
      candidate_files.push_back(LogFileName(kDumbDbName, file_num).substr(1));
I
Igor Canadi 已提交
809 810
    }
  }
811

K
kailiu 已提交
812
  // dedup state.candidate_files so we don't try to delete the same
I
Igor Canadi 已提交
813
  // file twice
K
kailiu 已提交
814
  sort(candidate_files.begin(), candidate_files.end());
815 816
  candidate_files.erase(unique(candidate_files.begin(), candidate_files.end()),
                        candidate_files.end());
J
jorlow@chromium.org 已提交
817

818 819
  std::vector<std::string> old_info_log_files;

K
kailiu 已提交
820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835
  for (const auto& to_delete : candidate_files) {
    uint64_t number;
    FileType type;
    // Ignore file if we cannot recognize it.
    if (!ParseFileName(to_delete, &number, &type)) {
      continue;
    }

    bool keep = true;
    switch (type) {
      case kLogFile:
        keep = ((number >= state.log_number) ||
                (number == state.prev_log_number));
        break;
      case kDescriptorFile:
        // Keep my manifest file, and any newer incarnations'
836
        // (can happen during manifest roll)
K
kailiu 已提交
837 838 839 840 841 842 843
        keep = (number >= state.manifest_file_number);
        break;
      case kTableFile:
        keep = (sst_live.find(number) != sst_live.end());
        break;
      case kTempFile:
        // Any temp files that are currently being written to must
844 845 846 847 848 849
        // be recorded in pending_outputs_, which is inserted into "live".
        // Also, SetCurrentFile creates a temp file when writing out new
        // manifest, which is equal to state.pending_manifest_file_number. We
        // should not delete that file
        keep = (sst_live.find(number) != sst_live.end()) ||
               (number == state.pending_manifest_file_number);
K
kailiu 已提交
850 851 852 853
        break;
      case kInfoLogFile:
        keep = true;
        if (number != 0) {
854
          old_info_log_files.push_back(to_delete);
J
jorlow@chromium.org 已提交
855
        }
K
kailiu 已提交
856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872
        break;
      case kCurrentFile:
      case kDBLockFile:
      case kIdentityFile:
      case kMetaDatabase:
        keep = true;
        break;
    }

    if (keep) {
      continue;
    }

    if (type == kTableFile) {
      // evict from cache
      table_cache_->Evict(number);
    }
873

K
kailiu 已提交
874 875 876 877
    std::string fname = ((type == kLogFile) ? options_.wal_dir : dbname_) +
        "/" + to_delete;
    if (type == kLogFile &&
        (options_.WAL_ttl_seconds > 0 || options_.WAL_size_limit_MB > 0)) {
878
      auto archived_log_name = ArchivedLogFileName(options_.wal_dir, number);
879 880
      // The sync point below is used in (DBTest,TransactionLogIteratorRace)
      TEST_SYNC_POINT("DBImpl::PurgeObsoleteFiles:1");
881
      Status s = env_->RenameFile(fname, archived_log_name);
882 883
      // The sync point below is used in (DBTest,TransactionLogIteratorRace)
      TEST_SYNC_POINT("DBImpl::PurgeObsoleteFiles:2");
884 885 886
      Log(options_.info_log,
          "Move log file %s to %s -- %s\n",
          fname.c_str(), archived_log_name.c_str(), s.ToString().c_str());
K
kailiu 已提交
887 888
    } else {
      Status s = env_->DeleteFile(fname);
889 890 891
      Log(options_.info_log, "Delete %s type=%d #%lu -- %s\n",
          fname.c_str(), type, (unsigned long)number,
          s.ToString().c_str());
J
jorlow@chromium.org 已提交
892 893
    }
  }
H
heyongqiang 已提交
894

895
  // Delete old info log files.
896
  size_t old_info_log_file_count = old_info_log_files.size();
K
Kai Liu 已提交
897 898
  // NOTE: Currently we only support log purge when options_.db_log_dir is
  // located in `dbname` directory.
899
  if (old_info_log_file_count >= options_.keep_log_file_num &&
K
Kai Liu 已提交
900
      options_.db_log_dir.empty()) {
901 902
    std::sort(old_info_log_files.begin(), old_info_log_files.end());
    size_t end = old_info_log_file_count - options_.keep_log_file_num;
903
    for (unsigned int i = 0; i <= end; i++) {
904 905 906 907 908 909 910
      std::string& to_delete = old_info_log_files.at(i);
      Log(options_.info_log, "Delete info log file %s\n", to_delete.c_str());
      Status s = env_->DeleteFile(dbname_ + "/" + to_delete);
      if (!s.ok()) {
        Log(options_.info_log, "Delete info log file %s FAILED -- %s\n",
            to_delete.c_str(), s.ToString().c_str());
      }
H
heyongqiang 已提交
911 912
    }
  }
913
  PurgeObsoleteWALFiles();
I
Igor Canadi 已提交
914
  LogFlush(options_.info_log);
D
Dhruba Borthakur 已提交
915 916 917 918 919
}

void DBImpl::DeleteObsoleteFiles() {
  mutex_.AssertHeld();
  DeletionState deletion_state;
I
Igor Canadi 已提交
920
  FindObsoleteFiles(deletion_state, true);
921 922 923
  if (deletion_state.HaveSomethingToDelete()) {
    PurgeObsoleteFiles(deletion_state);
  }
924 925
}

926 927 928 929 930 931 932 933
// 1. Go through all archived files and
//    a. if ttl is enabled, delete outdated files
//    b. if archive size limit is enabled, delete empty files,
//        compute file number and size.
// 2. If size limit is enabled:
//    a. compute how many files should be deleted
//    b. get sorted non-empty archived logs
//    c. delete what should be deleted
934
void DBImpl::PurgeObsoleteWALFiles() {
935 936 937 938 939 940
  bool const ttl_enabled = options_.WAL_ttl_seconds > 0;
  bool const size_limit_enabled =  options_.WAL_size_limit_MB > 0;
  if (!ttl_enabled && !size_limit_enabled) {
    return;
  }

941 942
  int64_t current_time;
  Status s = env_->GetCurrentTime(&current_time);
943 944 945 946 947 948 949 950
  if (!s.ok()) {
    Log(options_.info_log, "Can't get current time: %s", s.ToString().c_str());
    assert(false);
    return;
  }
  uint64_t const now_seconds = static_cast<uint64_t>(current_time);
  uint64_t const time_to_check = (ttl_enabled && !size_limit_enabled) ?
    options_.WAL_ttl_seconds / 2 : default_interval_to_delete_obsolete_WAL_;
951

952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991
  if (purge_wal_files_last_run_ + time_to_check > now_seconds) {
    return;
  }

  purge_wal_files_last_run_ = now_seconds;

  std::string archival_dir = ArchivalDirectory(options_.wal_dir);
  std::vector<std::string> files;
  s = env_->GetChildren(archival_dir, &files);
  if (!s.ok()) {
    Log(options_.info_log, "Can't get archive files: %s", s.ToString().c_str());
    assert(false);
    return;
  }

  size_t log_files_num = 0;
  uint64_t log_file_size = 0;

  for (auto& f : files) {
    uint64_t number;
    FileType type;
    if (ParseFileName(f, &number, &type) && type == kLogFile) {
      std::string const file_path = archival_dir + "/" + f;
      if (ttl_enabled) {
        uint64_t file_m_time;
        Status const s = env_->GetFileModificationTime(file_path,
          &file_m_time);
        if (!s.ok()) {
          Log(options_.info_log, "Can't get file mod time: %s: %s",
              file_path.c_str(), s.ToString().c_str());
          continue;
        }
        if (now_seconds - file_m_time > options_.WAL_ttl_seconds) {
          Status const s = env_->DeleteFile(file_path);
          if (!s.ok()) {
            Log(options_.info_log, "Can't delete file: %s: %s",
                file_path.c_str(), s.ToString().c_str());
            continue;
          }
          continue;
992
        }
993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030
      }

      if (size_limit_enabled) {
        uint64_t file_size;
        Status const s = env_->GetFileSize(file_path, &file_size);
        if (!s.ok()) {
          Log(options_.info_log, "Can't get file size: %s: %s",
              file_path.c_str(), s.ToString().c_str());
          return;
        } else {
          if (file_size > 0) {
            log_file_size = std::max(log_file_size, file_size);
            ++log_files_num;
          } else {
            Status s = env_->DeleteFile(file_path);
            if (!s.ok()) {
              Log(options_.info_log, "Can't delete file: %s: %s",
                  file_path.c_str(), s.ToString().c_str());
              continue;
            }
          }
        }
      }
    }
  }

  if (0 == log_files_num || !size_limit_enabled) {
    return;
  }

  size_t const files_keep_num = options_.WAL_size_limit_MB *
    1024 * 1024 / log_file_size;
  if (log_files_num <= files_keep_num) {
    return;
  }

  size_t files_del_num = log_files_num - files_keep_num;
  VectorLogPtr archived_logs;
1031
  GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045

  if (files_del_num > archived_logs.size()) {
    Log(options_.info_log, "Trying to delete more archived log files than "
        "exist. Deleting all");
    files_del_num = archived_logs.size();
  }

  for (size_t i = 0; i < files_del_num; ++i) {
    std::string const file_path = archived_logs[i]->PathName();
    Status const s = DeleteFile(file_path);
    if (!s.ok()) {
      Log(options_.info_log, "Can't delete file: %s: %s",
          file_path.c_str(), s.ToString().c_str());
      continue;
1046 1047
    }
  }
D
Dhruba Borthakur 已提交
1048 1049
}

I
Igor Canadi 已提交
1050
// Recovers the DB state from disk: sets up/locks the database directory
// (unless read_only), recovers the MANIFEST via VersionSet, then replays
// every WAL newer than the one recorded in the descriptor. Requires mutex_
// to be held by the caller.
Status DBImpl::Recover(bool read_only, bool error_if_log_file_exist) {
  mutex_.AssertHeld();

  assert(db_lock_ == nullptr);
  if (!read_only) {
    // We call CreateDirIfMissing() as the directory may already exist (if we
    // are reopening a DB), when this happens we don't want creating the
    // directory to cause an error. However, we need to check if creating the
    // directory fails or else we may get an obscure message about the lock
    // file not existing. One real-world example of this occurring is if
    // env->CreateDirIfMissing() doesn't create intermediate directories, e.g.
    // when dbname_ is "dir/db" but when "dir" doesn't exist.
    Status s = env_->CreateDirIfMissing(dbname_);
    if (!s.ok()) {
      return s;
    }

    s = env_->NewDirectory(dbname_, &db_directory_);
    if (!s.ok()) {
      return s;
    }

    s = env_->LockFile(LockFileName(dbname_), &db_lock_);
    if (!s.ok()) {
      return s;
    }

    // A missing CURRENT file means the DB does not exist yet.
    if (!env_->FileExists(CurrentFileName(dbname_))) {
      if (options_.create_if_missing) {
        // TODO: add merge_operator name check
        s = NewDB();
        if (!s.ok()) {
          return s;
        }
      } else {
        return Status::InvalidArgument(
            dbname_, "does not exist (create_if_missing is false)");
      }
    } else {
      if (options_.error_if_exists) {
        return Status::InvalidArgument(
            dbname_, "exists (error_if_exists is true)");
      }
    }
    // Check for the IDENTITY file and create it if not there
    if (!env_->FileExists(IdentityFileName(dbname_))) {
      s = SetIdentityFile(env_, dbname_);
      if (!s.ok()) {
        return s;
      }
    }
  }

  Status s = versions_->Recover();
  if (options_.paranoid_checks && s.ok()) {
    s = CheckConsistency();
  }
  if (s.ok()) {
    SequenceNumber max_sequence(0);

    // Recover from all newer log files than the ones named in the
    // descriptor (new log files may have been added by the previous
    // incarnation without registering them in the descriptor).
    //
    // Note that PrevLogNumber() is no longer used, but we pay
    // attention to it in case we are recovering a database
    // produced by an older version of rocksdb.
    const uint64_t min_log = versions_->LogNumber();
    const uint64_t prev_log = versions_->PrevLogNumber();
    std::vector<std::string> filenames;
    s = env_->GetChildren(options_.wal_dir, &filenames);
    if (!s.ok()) {
      return s;
    }

    // Collect the numbers of all WALs that still need replaying.
    std::vector<uint64_t> logs;
    for (size_t i = 0; i < filenames.size(); i++) {
      uint64_t number;
      FileType type;
      if (ParseFileName(filenames[i], &number, &type)
          && type == kLogFile
          && ((number >= min_log) || (number == prev_log))) {
        logs.push_back(number);
      }
    }

    if (logs.size() > 0 && error_if_log_file_exist) {
      return Status::Corruption(""
          "The db was opened in readonly mode with error_if_log_file_exist"
          "flag but a log file already exists");
    }

    // Recover in the order in which the logs were generated
    std::sort(logs.begin(), logs.end());
    for (const auto& log : logs) {
      // The previous incarnation may not have written any MANIFEST
      // records after allocating this log number.  So we manually
      // update the file number allocation counter in VersionSet.
      versions_->MarkFileNumberUsed(log);
      s = RecoverLogFile(log, &max_sequence, read_only);
    }

    if (s.ok()) {
      // Never roll the sequence counter backwards.
      if (versions_->LastSequence() < max_sequence) {
        versions_->SetLastSequence(max_sequence);
      }
      SetTickerCount(options_.statistics.get(), SEQUENCE_NUMBER,
                     versions_->LastSequence());
    }
  }

  return s;
}

I
Igor Canadi 已提交
1164 1165
// Replays one WAL into the active memtable, flushing to level-0 tables
// whenever the memtable fills (unless read_only). Updates *max_sequence
// with the highest sequence number observed. Requires mutex_ held.
Status DBImpl::RecoverLogFile(uint64_t log_number, SequenceNumber* max_sequence,
                              bool read_only) {
  // Forwards log::Reader corruption reports into the info log and,
  // when paranoid checks are on, into the recovery status.
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false or
                     //            options_.skip_log_error_on_recovery==true
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      // Only the first corruption is recorded.
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  mutex_.AssertHeld();

  VersionEdit edit;

  // Open the log file
  std::string fname = LogFileName(options_.wal_dir, log_number);
  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);
  if (!status.ok()) {
    MaybeIgnoreError(&status);
    return status;
  }

  // Create the log reader.
  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks &&
                     !options_.skip_log_error_on_recovery ? &status : nullptr);
  // We intentially make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%lu",
      (unsigned long) log_number);

  // Read all the records and add to a memtable
  std::string scratch;
  Slice record;
  WriteBatch batch;
  bool memtable_empty = true;
  while (reader.ReadRecord(&record, &scratch)) {
    // 12 = WriteBatch header (8-byte sequence + 4-byte count).
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      continue;
    }
    WriteBatchInternal::SetContents(&batch, record);

    status = WriteBatchInternal::InsertInto(&batch, mem_, &options_);
    memtable_empty = false;
    MaybeIgnoreError(&status);
    if (!status.ok()) {
      return status;
    }
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;
    }

    if (!read_only && mem_->ShouldFlush()) {
      status = WriteLevel0TableForRecovery(mem_, &edit);
      // we still want to clear memtable, even if the recovery failed
      delete mem_->Unref();
      mem_ = new MemTable(internal_comparator_, options_);
      mem_->Ref();
      memtable_empty = true;
      if (!status.ok()) {
        // Reflect errors immediately so that conditions like full
        // file-systems cause the DB::Open() to fail.
        return status;
      }
    }
  }

  // Flush whatever is left in the memtable after the log is exhausted.
  if (!memtable_empty && !read_only) {
    status = WriteLevel0TableForRecovery(mem_, &edit);
    delete mem_->Unref();
    mem_ = new MemTable(internal_comparator_, options_);
    mem_->Ref();
    if (!status.ok()) {
      return status;
    }
  }

  if (edit.NumEntries() > 0) {
    // if read_only, NumEntries() will be 0
    assert(!read_only);
    // writing log number in the manifest means that any log file
    // with number strongly less than (log_number + 1) is already
    // recovered and should be ignored on next reincarnation.
    // Since we already recovered log_number, we want all logs
    // with numbers `<= log_number` (includes this one) to be ignored
    edit.SetLogNumber(log_number + 1);
    // we must mark the next log number as used, even though it's
    // not actually used. that is because VersionSet assumes
    // VersionSet::next_file_number_ always to be strictly greater than any log
    // number
    versions_->MarkFileNumberUsed(log_number + 1);
    status = versions_->LogAndApply(&edit, &mutex_);
  }

  return status;
}

1280
// Builds a level-0 SST from the given memtable during WAL recovery and
// records it in *edit. The mutex is released while the table is built and
// re-acquired afterwards; meta.number is protected from concurrent deletion
// via pending_outputs_ for the duration. Requires mutex_ held on entry.
Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  // Keep the new file alive while we build it without the mutex.
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mem->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 table #%lu: started",
      (unsigned long) meta.number);

  Status s;
  {
    // Table building does file I/O; drop the mutex for its duration.
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta, internal_comparator_,
                   newest_snapshot, earliest_seqno_in_memtable,
                   GetCompressionFlush(options_));
    LogFlush(options_.info_log);
    mutex_.Lock();
  }

  Log(options_.info_log, "Level-0 table #%lu: %lu bytes %s",
      (unsigned long) meta.number,
      (unsigned long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  InternalStats::CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats.files_out_levelnp1 = 1;
  internal_stats_.AddCompactionStats(level, stats);
  RecordTick(options_.statistics.get(), COMPACT_WRITE_BYTES, meta.file_size);
  return s;
}

1330

K
Kai Liu 已提交
1331
// Flushes a batch of immutable memtables into a single SST file, recording
// it in *edit and returning its number via *filenumber. The mutex is
// released while the table is built. The output may be placed above level 0
// when level-style compaction runs single-threaded. Requires mutex_ held.
Status DBImpl::WriteLevel0Table(autovector<MemTable*>& mems, VersionEdit* edit,
                                uint64_t* filenumber,
                                LogBuffer* log_buffer) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  *filenumber = meta.number;
  // Keep the new file alive while we build it without the mutex.
  pending_outputs_.insert(meta.number);

  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mems[0]->GetFirstSequenceNumber();
  Version* base = versions_->current();
  base->Ref();          // it is likely that we do not need this reference
  Status s;
  {
    // Table building does file I/O; drop the mutex for its duration.
    mutex_.Unlock();
    log_buffer->FlushBufferToLog();
    // Merge all the memtables' iterators so their entries land in one file.
    std::vector<Iterator*> memtables;
    for (MemTable* m : mems) {
      Log(options_.info_log,
          "Flushing memtable with log file: %lu\n",
          (unsigned long)m->GetLogNumber());
      memtables.push_back(m->NewIterator());
    }
    Iterator* iter = NewMergingIterator(
        env_, &internal_comparator_, &memtables[0], memtables.size());
    Log(options_.info_log,
        "Level-0 flush table #%lu: started",
        (unsigned long)meta.number);

    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta, internal_comparator_,
                   newest_snapshot, earliest_seqno_in_memtable,
                   GetCompressionFlush(options_));
    LogFlush(options_.info_log);
    delete iter;
    Log(options_.info_log, "Level-0 flush table #%lu: %lu bytes %s",
        (unsigned long) meta.number,
        (unsigned long) meta.file_size,
        s.ToString().c_str());
    if (!options_.disableDataSync) {
      db_directory_->Fsync();
    }
    mutex_.Lock();
  }
  base->Unref();

  // re-acquire the most current version
  base = versions_->current();

  // There could be multiple threads writing to its own level-0 file.
  // The pending_outputs cannot be cleared here, otherwise this newly
  // created file might not be considered as a live-file by another
  // compaction thread that is concurrently deleting obselete files.
  // The pending_outputs can be cleared only after the new version is
  // committed so that other threads can recognize this file as a
  // valid one.
  // pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
    // if we have more than 1 background thread, then we cannot
    // insert files directly into higher levels because some other
    // threads could be concurrently producing compacted files for
    // that key range.
    if (base != nullptr && options_.max_background_compactions <= 1 &&
        options_.compaction_style == kCompactionStyleLevel) {
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    }
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  InternalStats::CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  internal_stats_.AddCompactionStats(level, stats);
  RecordTick(options_.statistics.get(), COMPACT_WRITE_BYTES, meta.file_size);
  return s;
}

I
Igor Canadi 已提交
1419
// Picks the pending immutable memtables, writes them out as a level-0 table
// (via WriteLevel0Table, which releases/re-acquires the mutex), installs the
// result in the version set, and schedules the now-obsolete WALs for
// deletion. Sets *madeProgress on success. Requires mutex_ held.
Status DBImpl::FlushMemTableToOutputFile(bool* madeProgress,
                                         DeletionState& deletion_state,
                                         LogBuffer* log_buffer) {
  mutex_.AssertHeld();
  assert(imm_.size() != 0);
  assert(imm_.IsFlushPending());

  // Save the contents of the earliest memtable as a new Table
  uint64_t file_number;
  autovector<MemTable*> mems;
  imm_.PickMemtablesToFlush(&mems);
  if (mems.empty()) {
    LogToBuffer(log_buffer, "Nothing in memstore to flush");
    return Status::OK();
  }

  // record the logfile_number_ before we release the mutex
  // entries mems are (implicitly) sorted in ascending order by their created
  // time. We will use the first memtable's `edit` to keep the meta info for
  // this flush.
  MemTable* m = mems[0];
  VersionEdit* edit = m->GetEdits();
  edit->SetPrevLogNumber(0);
  // SetLogNumber(log_num) indicates logs with number smaller than log_num
  // will no longer be picked up for recovery.
  edit->SetLogNumber(mems.back()->GetNextLogNumber());

  // WALs backing the flushed memtables become obsolete once the flush
  // result is installed.
  std::vector<uint64_t> logs_to_delete;
  for (auto mem : mems) {
    logs_to_delete.push_back(mem->GetLogNumber());
  }

  // This will release and re-acquire the mutex.
  Status s = WriteLevel0Table(mems, edit, &file_number, log_buffer);

  if (s.ok() && shutting_down_.Acquire_Load()) {
    s = Status::ShutdownInProgress(
        "Database shutdown started during memtable compaction");
  }

  if (!s.ok()) {
    imm_.RollbackMemtableFlush(mems, file_number, &pending_outputs_);
  } else {
    // Replace immutable memtable with the generated Table
    s = imm_.InstallMemtableFlushResults(
        mems, versions_.get(), &mutex_, options_.info_log.get(), file_number,
        pending_outputs_, &deletion_state.memtables_to_free,
        db_directory_.get(), log_buffer);
  }

  if (s.ok()) {
    InstallSuperVersion(deletion_state);
    if (madeProgress) {
      *madeProgress = 1;
    }

    MaybeScheduleLogDBDeployStats();

    if (disable_delete_obsolete_files_ == 0) {
      // add to deletion state
      deletion_state.log_delete_files.insert(
          deletion_state.log_delete_files.end(),
          logs_to_delete.begin(),
          logs_to_delete.end());
    }
  }

  if (!s.ok() && !s.IsShutdownInProgress() && options_.paranoid_checks &&
      bg_error_.ok()) {
    // if a bad error happened (not ShutdownInProgress) and paranoid_checks is
    // true, mark DB read-only
    bg_error_ = s;
  }
  return s;
}

L
Lei Jin 已提交
1495 1496 1497 1498 1499 1500 1501 1502 1503 1504
// Manually compacts the key range [begin, end]: flushes the memtable first,
// then runs a manual compaction for every level that overlaps the range.
// When reduce_level is set, finally tries to move the result down to
// target_level (or the lowest fitting level if target_level < 0).
Status DBImpl::CompactRange(const Slice* begin,
                            const Slice* end,
                            bool reduce_level,
                            int target_level) {
  Status s = FlushMemTable(FlushOptions());
  if (!s.ok()) {
    LogFlush(options_.info_log);
    return s;
  }

  // Find the deepest level that overlaps the requested range.
  int max_level_with_files = 1;
  {
    MutexLock l(&mutex_);
    Version* base = versions_->current();
    for (int level = 1; level < NumberLevels(); level++) {
      if (base->OverlapInLevel(level, begin, end)) {
        max_level_with_files = level;
      }
    }
  }
  for (int level = 0; level <= max_level_with_files; level++) {
    // in case the compaction is universal or if we're compacting the
    // bottom-most level, the output level will be the same as input one
    if (options_.compaction_style == kCompactionStyleUniversal ||
        level == max_level_with_files) {
      s = RunManualCompaction(level, level, begin, end);
    } else {
      s = RunManualCompaction(level, level + 1, begin, end);
    }
    if (!s.ok()) {
      LogFlush(options_.info_log);
      return s;
    }
  }

  if (reduce_level) {
    s = ReFitLevel(max_level_with_files, target_level);
  }
  LogFlush(options_.info_log);

  return s;
}

// return the same level if it cannot be moved
int DBImpl::FindMinimumEmptyLevelFitting(int level) {
  mutex_.AssertHeld();
1541
  Version* current = versions_->current();
1542
  int minimum_level = level;
1543
  for (int i = level - 1; i > 0; --i) {
1544
    // stop if level i is not empty
1545
    if (current->NumLevelFiles(i) > 0) break;
1546
    // stop if level i is too small (cannot fit the level files)
1547
    if (versions_->MaxBytesForLevel(i) < current->NumLevelBytes(level)) break;
1548 1549 1550 1551 1552 1553

    minimum_level = i;
  }
  return minimum_level;
}

L
Lei Jin 已提交
1554
// Move every file in `level` to `target_level` (or, when target_level < 0,
// to the lowest empty level that can hold them).  Only one thread may
// refit at a time; all background work is drained first by closing the
// background-work gate.  Returns NotSupported if another refit is in
// progress, otherwise the status of the version edit.
Status DBImpl::ReFitLevel(int level, int target_level) {
  assert(level < NumberLevels());

  // Allocate the replacement SuperVersion before taking the mutex so the
  // allocation does not happen under the lock.
  SuperVersion* superversion_to_free = nullptr;
  SuperVersion* new_superversion = new SuperVersion();

  mutex_.Lock();

  // only allow one thread refitting
  if (refitting_level_) {
    mutex_.Unlock();
    Log(options_.info_log, "ReFitLevel: another thread is refitting");
    delete new_superversion;
    return Status::NotSupported("another thread is refitting");
  }
  refitting_level_ = true;

  // wait for all background threads to stop
  bg_work_gate_closed_ = true;
  while (bg_compaction_scheduled_ > 0 || bg_flush_scheduled_) {
    Log(options_.info_log,
        "RefitLevel: waiting for background threads to stop: %d %d",
        bg_compaction_scheduled_, bg_flush_scheduled_);
    bg_cv_.Wait();
  }

  // move to a smaller level
  int to_level = target_level;
  if (target_level < 0) {
    to_level = FindMinimumEmptyLevelFitting(level);
  }

  assert(to_level <= level);

  Status status;
  if (to_level < level) {
    Log(options_.info_log, "Before refitting:\n%s",
        versions_->current()->DebugString().data());

    // Build an edit that deletes each file from `level` and re-adds the
    // same file (same number/size/keys/seqnos) at `to_level`.
    VersionEdit edit;
    for (const auto& f : versions_->current()->files_[level]) {
      edit.DeleteFile(level, f->number);
      edit.AddFile(to_level, f->number, f->file_size, f->smallest, f->largest,
                   f->smallest_seqno, f->largest_seqno);
    }
    Log(options_.info_log, "Apply version edit:\n%s",
        edit.DebugString().data());

    status = versions_->LogAndApply(&edit, &mutex_, db_directory_.get());
    // Publish the new version; the old SuperVersion is freed after the
    // mutex is released below.
    superversion_to_free = InstallSuperVersion(new_superversion);
    new_superversion = nullptr;

    Log(options_.info_log, "LogAndApply: %s\n", status.ToString().data());

    if (status.ok()) {
      Log(options_.info_log, "After refitting:\n%s",
          versions_->current()->DebugString().data());
    }
  }

  // Re-open the gate so background work can resume.
  refitting_level_ = false;
  bg_work_gate_closed_ = false;

  mutex_.Unlock();
  // Deletions are done outside the lock.  One of the two pointers is
  // always nullptr here, so the double delete is safe.
  delete superversion_to_free;
  delete new_superversion;
  return status;
}

1623
// Number of LSM levels this DB is configured with (Options::num_levels).
int DBImpl::NumberLevels() {
  return options_.num_levels;
}

// Configured maximum level to which a new memtable flush may be pushed
// (Options::max_mem_compaction_level).
int DBImpl::MaxMemCompactionLevel() {
  return options_.max_mem_compaction_level;
}

// Configured number of level-0 files at which writes are stopped
// (Options::level0_stop_writes_trigger).
int DBImpl::Level0StopWriteTrigger() {
  return options_.level0_stop_writes_trigger;
}

T
Tomislav Novak 已提交
1635 1636 1637 1638
// Current value of the super-version counter; it changes whenever a new
// SuperVersion is installed, so callers can detect version changes.
uint64_t DBImpl::CurrentVersionNumber() const {
  return super_version_number_.load();
}

H
heyongqiang 已提交
1639
// Public DB::Flush entry point; simply forwards to FlushMemTable().
Status DBImpl::Flush(const FlushOptions& options) {
  return FlushMemTable(options);
}

1643
// Latest sequence number recorded by the version set.
SequenceNumber DBImpl::GetLatestSequenceNumber() const {
  return versions_->LastSequence();
}

1647 1648 1649
// Create an iterator over all write batches with sequence number >= seq,
// backed by the WAL files.  Returns NotFound if seq is newer than
// anything written yet; otherwise *iter owns the new iterator and its
// status is returned.
Status DBImpl::GetUpdatesSince(
    SequenceNumber seq, unique_ptr<TransactionLogIterator>* iter,
    const TransactionLogIterator::ReadOptions& read_options) {

  RecordTick(options_.statistics.get(), GET_UPDATES_SINCE_CALLS);
  if (seq > versions_->LastSequence()) {
    return Status::NotFound(
        "Requested sequence not yet written in the db");
  }
  //  Get all sorted Wal Files.
  //  Do binary search and open files and find the seq number.

  std::unique_ptr<VectorLogPtr> wal_files(new VectorLogPtr);
  Status s = GetSortedWalFiles(*wal_files);
  if (!s.ok()) {
    return s;
  }

  // Trim the list down to files that could contain seq.
  s = RetainProbableWalFiles(*wal_files, seq);
  if (!s.ok()) {
    return s;
  }
  // The iterator takes ownership of the remaining WAL file list.
  iter->reset(new TransactionLogIteratorImpl(options_.wal_dir, &options_,
                                             read_options, storage_options_,
                                             seq, std::move(wal_files), this));
  return (*iter)->status();
}

1675 1676
// Drop from the front of `all_logs` every WAL file that cannot contain
// `target`: binary-search for the last file whose starting sequence
// number is <= target and erase everything before it.  The last file is
// always retained.  `all_logs` must be sorted by start sequence.
Status DBImpl::RetainProbableWalFiles(VectorLogPtr& all_logs,
                                      const SequenceNumber target) {
  // Signed indices: `hi` legitimately reaches -1 when target precedes
  // the first file's start sequence.
  long lo = 0;
  long hi = static_cast<long>(all_logs.size()) - 1;
  // Binary search so we never have to open the files themselves.
  while (lo <= hi) {
    const long mid = lo + (hi - lo) / 2;  // written this way to avoid overflow
    const SequenceNumber mid_seq = all_logs.at(mid)->StartSequence();
    if (mid_seq == target) {
      hi = mid;
      break;
    }
    if (mid_seq < target) {
      lo = mid + 1;
    } else {
      hi = mid - 1;
    }
  }
  // hi may be negative; clamp to 0 so the last wal file is always included.
  const size_t first_retained = std::max(0l, hi);
  all_logs.erase(all_logs.begin(), all_logs.begin() + first_retained);
  return Status::OK();
}

1698 1699 1700
bool DBImpl::CheckWalFileExistsAndEmpty(const WalFileType type,
                                        const uint64_t number) {
  const std::string fname = (type == kAliveLogFile) ?
1701 1702
    LogFileName(options_.wal_dir, number) :
    ArchivedLogFileName(options_.wal_dir, number);
1703 1704
  uint64_t file_size;
  Status s = env_->GetFileSize(fname, &file_size);
1705
  return (s.ok() && (file_size == 0));
1706 1707
}

1708 1709
// Read the first write batch from the WAL file (type, number) into
// *result.  For a live log that has disappeared, falls back to the
// archived copy of the same file before giving up with NotFound.
Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number,
                               WriteBatch* const result) {

  if (type == kAliveLogFile) {
    std::string fname = LogFileName(options_.wal_dir, number);
    Status status = ReadFirstLine(fname, result);
    if (status.ok() || env_->FileExists(fname)) {
      // return OK or any error that is not caused by a non-existing file
      return status;
    }

    //  check if the file got moved to archive.
    std::string archived_file =
      ArchivedLogFileName(options_.wal_dir, number);
    Status s = ReadFirstLine(archived_file, result);
    if (s.ok() || env_->FileExists(archived_file)) {
      return s;
    }
    return Status::NotFound("Log File has been deleted: " + archived_file);
  } else if (type == kArchivedLogFile) {
    // Archived logs have no fallback location.
    std::string fname = ArchivedLogFileName(options_.wal_dir, number);
    Status status = ReadFirstLine(fname, result);
    return status;
  }
  return Status::NotSupported("File Type Not Known: " + std::to_string(type));
}

// Read the first log record of `fname` into *batch.  Corruption is
// reported through a local log::Reader reporter; with paranoid_checks
// off, corruption is logged but tolerated.  An empty file (immediate
// EOF) is reported as Corruption("eof reached") so callers can
// distinguish it from success.
Status DBImpl::ReadFirstLine(const std::string& fname,
                             WriteBatch* const batch) {
  // Reporter that records only the first corruption error into *status.
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;

    Status* status;
    bool ignore_error;  // true if options_.paranoid_checks==false
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->ignore_error ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status->ok()) {
        // only keep the first error
        *this->status = s;
      }
    }
  };

  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);

  if (!status.ok()) {
    return status;
  }


  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  // The reporter writes corruption errors into `status` below.
  reporter.status = &status;
  reporter.ignore_error = !options_.paranoid_checks;
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  std::string scratch;
  Slice record;

  // With paranoid checks off, a corrupted-but-readable record is still
  // accepted as long as it is large enough to be a write batch header.
  if (reader.ReadRecord(&record, &scratch) &&
      (status.ok() || !options_.paranoid_checks)) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      //  TODO read record's till the first no corrupt entry?
    } else {
      WriteBatchInternal::SetContents(batch, record);
      return Status::OK();
    }
  }

  // ReadRecord returns false on EOF, which is deemed as OK() by Reader
  if (status.ok()) {
    status = Status::Corruption("eof reached");
  }
  return status;
}

1793 1794 1795 1796 1797 1798 1799 1800 1801
// Strict-weak ordering for WAL file listings: orders two LogFile handles
// by delegating to LogFileImpl::operator<.  Used with std::sort in
// GetSortedWalsOfType.
struct CompareLogByPointer {
  // Const call operator: comparators must be invocable on const objects,
  // and this one has no state to mutate.
  bool operator() (const unique_ptr<LogFile>& a,
                   const unique_ptr<LogFile>& b) const {
    LogFileImpl* a_impl = dynamic_cast<LogFileImpl*>(a.get());
    LogFileImpl* b_impl = dynamic_cast<LogFileImpl*>(b.get());
    // All elements are created as LogFileImpl (see GetSortedWalsOfType),
    // so the downcast must succeed; fail loudly instead of dereferencing
    // a null pointer if that invariant is ever broken.
    assert(a_impl != nullptr && b_impl != nullptr);
    return *a_impl < *b_impl;
  }
};

1802
// List every WAL file of `log_type` under `path` into `log_files`,
// sorted by the sequence number of its first record.  Files whose first
// record cannot be read are skipped when they are verifiably empty,
// otherwise the read error is returned.
Status DBImpl::GetSortedWalsOfType(const std::string& path,
    VectorLogPtr& log_files, WalFileType log_type) {
  std::vector<std::string> all_files;
  const Status status = env_->GetChildren(path, &all_files);
  if (!status.ok()) {
    return status;
  }
  log_files.reserve(all_files.size());
  for (const auto& f : all_files) {
    uint64_t number;
    FileType type;
    if (ParseFileName(f, &number, &type) && type == kLogFile){

      // The first batch gives us the file's starting sequence number.
      WriteBatch batch;
      Status s = ReadFirstRecord(log_type, number, &batch);
      if (!s.ok()) {
        if (CheckWalFileExistsAndEmpty(log_type, number)) {
          // Empty log files carry no updates; ignore them.
          continue;
        }
        return s;
      }

      uint64_t size_bytes;
      s = env_->GetFileSize(LogFileName(path, number), &size_bytes);
      if (!s.ok()) {
        return s;
      }

      log_files.push_back(std::move(unique_ptr<LogFile>(new LogFileImpl(
        number, log_type, WriteBatchInternal::Sequence(&batch), size_bytes))));
    }
  }
  // Sort by start sequence (see CompareLogByPointer).
  CompareLogByPointer compare_log_files;
  std::sort(log_files.begin(), log_files.end(), compare_log_files);
  return status;
}

L
Lei Jin 已提交
1839 1840 1841 1842
// Run one manual compaction of [begin, end] from input_level to
// output_level and block until it finishes (or the DB shuts down / hits
// a background error).  For universal compaction the range is widened to
// all files.  Returns the status recorded by the background worker.
Status DBImpl::RunManualCompaction(int input_level,
                                   int output_level,
                                   const Slice* begin,
                                   const Slice* end) {
  assert(input_level >= 0);

  // Storage for the internal-key copies of the user range; must outlive
  // `manual`, which may point into them.
  InternalKey begin_storage, end_storage;

  ManualCompaction manual;
  manual.input_level = input_level;
  manual.output_level = output_level;
  manual.done = false;
  manual.in_progress = false;
  // For universal compaction, we enforce every manual compaction to compact
  // all files.
  if (begin == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
    manual.begin = nullptr;
  } else {
    // Seek key: max seqno so the range starts at the first entry >= *begin.
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
  }
  if (end == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
    manual.end = nullptr;
  } else {
    // Seqno 0 makes the end key inclusive of every entry for *end.
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;
  }

  MutexLock l(&mutex_);

  // When a manual compaction arrives, temporarily disable scheduling of
  // non-manual compactions and wait until the number of scheduled compaction
  // jobs drops to zero. This is needed to ensure that this manual compaction
  // can compact any range of keys/files.
  //
  // bg_manual_only_ is non-zero when at least one thread is inside
  // RunManualCompaction(), i.e. during that time no other compaction will
  // get scheduled (see MaybeScheduleFlushOrCompaction).
  //
  // Note that the following loop doesn't stop more than one thread calling
  // RunManualCompaction() from getting to the second while loop below.
  // However, only one of them will actually schedule compaction, while
  // others will wait on a condition variable until it completes.

  ++bg_manual_only_;
  while (bg_compaction_scheduled_ > 0) {
    Log(options_.info_log,
        "Manual compaction waiting for all other scheduled background "
        "compactions to finish");
    bg_cv_.Wait();
  }

  Log(options_.info_log, "Manual compaction starting");

  // Hand `manual` to the background worker via manual_compaction_ and
  // wait for it to report completion.  The condvar wakes us both when
  // the slot frees up and when our compaction makes progress.
  while (!manual.done && !shutting_down_.Acquire_Load() && bg_error_.ok()) {
    assert(bg_manual_only_ > 0);
    if (manual_compaction_ != nullptr) {
      // Running either this or some other manual compaction
      bg_cv_.Wait();
    } else {
      manual_compaction_ = &manual;
      MaybeScheduleFlushOrCompaction();
    }
  }

  assert(!manual.in_progress);
  assert(bg_manual_only_ > 0);
  --bg_manual_only_;
  return manual.status;
}

L
Lei Jin 已提交
1912 1913 1914
// Test hook: manually compact [begin, end] at `level`.  Universal
// compaction writes back to the same level; level-style compaction
// writes to the next level down.
Status DBImpl::TEST_CompactRange(int level,
                                 const Slice* begin,
                                 const Slice* end) {
  int output_level = level;
  if (options_.compaction_style != kCompactionStyleUniversal) {
    output_level = level + 1;
  }
  return RunManualCompaction(level, output_level, begin, end);
}

H
heyongqiang 已提交
1921
// Trigger a flush of the current memtable.  When options.wait is set,
// also block until the flush has completed (or a background error stops
// it).
Status DBImpl::FlushMemTable(const FlushOptions& options) {
  // nullptr batch means just wait for earlier writes to be done
  Status s = Write(WriteOptions(), nullptr);
  if (s.ok() && options.wait) {
    // Wait until the flush completes (see WaitForFlushMemTable).
    s = WaitForFlushMemTable();
  }
  return s;
}

1931
// Block until every immutable memtable has been flushed, or until a
// background error interrupts flushing.  Returns the background error
// in that case, OK otherwise.
Status DBImpl::WaitForFlushMemTable() {
  MutexLock guard(&mutex_);
  // Sleep on the condvar while flush work remains and no error occurred.
  while (imm_.size() > 0 && bg_error_.ok()) {
    bg_cv_.Wait();
  }
  // Memtables still pending means we stopped because of bg_error_.
  if (imm_.size() != 0) {
    return bg_error_;
  }
  return Status();
}

1944 1945 1946 1947
// Test hook: flush the memtable, optionally waiting for completion.
Status DBImpl::TEST_FlushMemTable(bool wait) {
  FlushOptions flush_opts;
  flush_opts.wait = wait;
  return FlushMemTable(flush_opts);
}

1950 1951
// Test hook: block until pending memtable flushes finish.
Status DBImpl::TEST_WaitForFlushMemTable() {
  return WaitForFlushMemTable();
}

// Test hook: block until no background compaction or flush is scheduled,
// then return the accumulated background error (OK if none).
Status DBImpl::TEST_WaitForCompact() {
  // Wait until the compaction completes

  // TODO: a bug here. This function actually does not necessarily
  // wait for compact. It actually waits for scheduled compaction
  // OR flush to finish.

  MutexLock l(&mutex_);
  while ((bg_compaction_scheduled_ || bg_flush_scheduled_) &&
         bg_error_.ok()) {
    bg_cv_.Wait();
  }
  return bg_error_;
}

1969
// Schedule background flush and/or compaction jobs if work is pending
// and the per-pool job limits permit.  Sets bg_schedule_needed_ when
// work exists but could not be scheduled, so callers re-invoke this
// once a slot frees up.  REQUIRES: mutex_ held.
void DBImpl::MaybeScheduleFlushOrCompaction() {
  mutex_.AssertHeld();
  bg_schedule_needed_ = false;
  if (bg_work_gate_closed_) {
    // gate closed for background work (see ReFitLevel)
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else {
    bool is_flush_pending = imm_.IsFlushPending();
    if (is_flush_pending) {
      if (bg_flush_scheduled_ < options_.max_background_flushes) {
        // memtable flush needed
        bg_flush_scheduled_++;
        env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::HIGH);
      } else if (options_.max_background_flushes > 0) {
        // Flush pool is saturated; remember to reschedule later.
        bg_schedule_needed_ = true;
      }
    }

    // Schedule BGWorkCompaction if there's a compaction pending (or a memtable
    // flush, but the HIGH pool is not enabled). Do it only if
    // max_background_compactions hasn't been reached and, in case
    // bg_manual_only_ > 0, if it's a manual compaction.
    if ((manual_compaction_ ||
         versions_->current()->NeedsCompaction() ||
         (is_flush_pending && (options_.max_background_flushes <= 0))) &&
        (!bg_manual_only_ || manual_compaction_)) {
      if (bg_compaction_scheduled_ < options_.max_background_compactions) {
        bg_compaction_scheduled_++;
        env_->Schedule(&DBImpl::BGWorkCompaction, this, Env::Priority::LOW);
      } else {
        bg_schedule_needed_ = true;
      }
    }
  }
}

// Env::Schedule callback (HIGH-priority pool); `db` is the DBImpl* that
// was passed to Schedule().  Forwards to the flush driver.
void DBImpl::BGWorkFlush(void* db) {
  // static_cast is the correct named cast for a void* -> T* round-trip;
  // reinterpret_cast is unnecessary here.
  static_cast<DBImpl*>(db)->BackgroundCallFlush();
}

// Env::Schedule callback (LOW-priority pool); `db` is the DBImpl* that
// was passed to Schedule().  Forwards to the compaction driver.
void DBImpl::BGWorkCompaction(void* db) {
  // static_cast is the correct named cast for a void* -> T* round-trip.
  static_cast<DBImpl*>(db)->BackgroundCallCompaction();
}

I
Igor Canadi 已提交
2014
// Flush immutable memtables to SST files until none are pending or an
// error occurs.  *madeProgress is set by FlushMemTableToOutputFile.
// REQUIRES: mutex_ held (per the callers in this file).
Status DBImpl::BackgroundFlush(bool* madeProgress,
                               DeletionState& deletion_state,
                               LogBuffer* log_buffer) {
  Status stat;
  while (stat.ok() && imm_.IsFlushPending()) {
    LogToBuffer(log_buffer,
                "BackgroundCallFlush doing FlushMemTableToOutputFile, "
                "flush slots available %d",
                options_.max_background_flushes - bg_flush_scheduled_);
    stat = FlushMemTableToOutputFile(madeProgress, deletion_state, log_buffer);
  }
  return stat;
}

2028
// Entry point of a scheduled background flush job (see BGWorkFlush).
// Runs the flush, retries after a delay on error, purges obsolete files,
// decrements the scheduled counter, and wakes waiters.
void DBImpl::BackgroundCallFlush() {
  bool madeProgress = false;
  DeletionState deletion_state(true);
  assert(bg_flush_scheduled_);

  LogBuffer log_buffer(INFO, options_.info_log.get());
  {
    MutexLock l(&mutex_);

    Status s;
    if (!shutting_down_.Acquire_Load()) {
      s = BackgroundFlush(&madeProgress, deletion_state, &log_buffer);
      if (!s.ok()) {
        // Wait a little bit before retrying background compaction in
        // case this is an environmental problem and we do not want to
        // chew up resources for failed compactions for the duration of
        // the problem.
        uint64_t error_cnt = internal_stats_.BumpAndGetBackgroundErrorCount();
        bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
        // Logging and sleeping happen with the mutex released.
        mutex_.Unlock();
        Log(options_.info_log,
            "Waiting after background flush error: %s"
            "Accumulated background error counts: %" PRIu64,
            s.ToString().c_str(), error_cnt);
        log_buffer.FlushBufferToLog();
        LogFlush(options_.info_log);
        env_->SleepForMicroseconds(1000000);
        mutex_.Lock();
      }
    }

    // If !s.ok(), this means that Flush failed. In that case, we want
    // to delete all obsolete files and we force FindObsoleteFiles()
    FindObsoleteFiles(deletion_state, !s.ok());
    // delete unnecessary files if any, this is done outside the mutex
    if (deletion_state.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
      mutex_.Unlock();
      // Have to flush the info logs before bg_flush_scheduled_--
      // because if bg_flush_scheduled_ becomes 0 and the lock is
      // released, the deconstructor of DB can kick in and destroy all the
      // states of DB so info_log might not be available after that point.
      // It also applies to access other states that DB owns.
      log_buffer.FlushBufferToLog();
      if (deletion_state.HaveSomethingToDelete()) {
        PurgeObsoleteFiles(deletion_state);
      }
      mutex_.Lock();
    }

    bg_flush_scheduled_--;
    // Any time the mutex is released After finding the work to do, another
    // thread might execute MaybeScheduleFlushOrCompaction(). It is possible
    // that there is a pending job but it is not scheduled because of the
    // max thread limit.
    if (madeProgress || bg_schedule_needed_) {
      MaybeScheduleFlushOrCompaction();
    }
    bg_cv_.SignalAll();
    // IMPORTANT: there should be no code after calling SignalAll. This call may
    // signal the DB destructor that it's OK to proceed with destruction. In
    // that case, all DB variables will be dealloacated and referencing them
    // will cause trouble.
  }
}

2093

2094 2095 2096 2097
// Test hook exposing PurgeObsoleteWALFiles().
// NOTE(review): the name contains a typo ("Obsoleteete"), but it is part
// of the test interface, so it is kept unchanged here.
void DBImpl::TEST_PurgeObsoleteteWAL() {
  PurgeObsoleteWALFiles();
}

2098 2099 2100 2101 2102
// Test hook: total bytes currently stored in level 0.
uint64_t DBImpl::TEST_GetLevel0TotalSize() {
  MutexLock l(&mutex_);
  return versions_->current()->NumLevelBytes(0);
}

2103
// Entry point of a scheduled background compaction job (see
// BGWorkCompaction).  Runs one compaction round, backs off on error,
// purges obsolete files, reschedules follow-up work, and wakes waiters.
void DBImpl::BackgroundCallCompaction() {
  bool madeProgress = false;
  DeletionState deletion_state(true);

  MaybeDumpStats();
  LogBuffer log_buffer(INFO, options_.info_log.get());
  {
    MutexLock l(&mutex_);
    // Log(options_.info_log, "XXX BG Thread %llx process new work item",
    //     pthread_self());
    assert(bg_compaction_scheduled_);
    Status s;
    if (!shutting_down_.Acquire_Load()) {
      s = BackgroundCompaction(&madeProgress, deletion_state, &log_buffer);
      if (!s.ok()) {
        // Wait a little bit before retrying background compaction in
        // case this is an environmental problem and we do not want to
        // chew up resources for failed compactions for the duration of
        // the problem.
        uint64_t error_cnt = internal_stats_.BumpAndGetBackgroundErrorCount();
        bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
        // Logging and sleeping happen with the mutex released.
        mutex_.Unlock();
        log_buffer.FlushBufferToLog();
        Log(options_.info_log,
            "Waiting after background compaction error: %s, "
            "Accumulated background error counts: %" PRIu64,
            s.ToString().c_str(), error_cnt);
        LogFlush(options_.info_log);
        env_->SleepForMicroseconds(1000000);
        mutex_.Lock();
      }
    }

    // If !s.ok(), this means that Compaction failed. In that case, we want
    // to delete all obsolete files we might have created and we force
    // FindObsoleteFiles(). This is because deletion_state does not catch
    // all created files if compaction failed.
    FindObsoleteFiles(deletion_state, !s.ok());

    // delete unnecessary files if any, this is done outside the mutex
    if (deletion_state.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
      mutex_.Unlock();
      // Have to flush the info logs before bg_compaction_scheduled_--
      // because if bg_flush_scheduled_ becomes 0 and the lock is
      // released, the deconstructor of DB can kick in and destroy all the
      // states of DB so info_log might not be available after that point.
      // It also applies to access other states that DB owns.
      log_buffer.FlushBufferToLog();
      if (deletion_state.HaveSomethingToDelete()) {
        PurgeObsoleteFiles(deletion_state);
      }
      mutex_.Lock();
    }

    bg_compaction_scheduled_--;

    MaybeScheduleLogDBDeployStats();

    // Previous compaction may have produced too many files in a level,
    // So reschedule another compaction if we made progress in the
    // last compaction.
    //
    // Also, any time the mutex is released After finding the work to do,
    // another thread might execute MaybeScheduleFlushOrCompaction(). It is
    // possible  that there is a pending job but it is not scheduled because of
    // the max thread limit.
    if (madeProgress || bg_schedule_needed_) {
      MaybeScheduleFlushOrCompaction();
    }
    bg_cv_.SignalAll();
    // IMPORTANT: there should be no code after calling SignalAll. This call may
    // signal the DB destructor that it's OK to proceed with destruction. In
    // that case, all DB variables will be dealloacated and referencing them
    // will cause trouble.
  }
}

A
Abhishek Kona 已提交
2180
// Perform one round of background compaction work: flush any pending
// immutable memtables first, then either run the pending manual
// compaction, perform a trivial file move, or do a full compaction via
// DoCompactionWork.  Updates manual-compaction bookkeeping and records
// errors in bg_error_ under paranoid checks.  REQUIRES: mutex_ held.
Status DBImpl::BackgroundCompaction(bool* madeProgress,
                                    DeletionState& deletion_state,
                                    LogBuffer* log_buffer) {
  *madeProgress = false;
  mutex_.AssertHeld();

  // Claim the pending manual compaction, if any, so no other worker
  // picks it up concurrently.
  bool is_manual = (manual_compaction_ != nullptr) &&
                   (manual_compaction_->in_progress == false);
  if (is_manual) {
    // another thread cannot pick up the same work
    manual_compaction_->in_progress = true;
  }

  // TODO: remove memtable flush from formal compaction
  while (imm_.IsFlushPending()) {
    LogToBuffer(log_buffer,
                "BackgroundCompaction doing FlushMemTableToOutputFile, "
                "compaction slots "
                "available %d",
                options_.max_background_compactions - bg_compaction_scheduled_);
    Status stat = FlushMemTableToOutputFile(madeProgress, deletion_state,
                                            log_buffer);
    if (!stat.ok()) {
      if (is_manual) {
        // Report the flush failure to the waiting manual compaction and
        // release the slot.
        manual_compaction_->status = stat;
        manual_compaction_->done = true;
        manual_compaction_->in_progress = false;
        manual_compaction_ = nullptr;
      }
      return stat;
    }
  }

  unique_ptr<Compaction> c;
  // manual_end receives the key where a partial manual compaction stopped.
  InternalKey manual_end_storage;
  InternalKey* manual_end = &manual_end_storage;
  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    assert(m->in_progress);
    c.reset(versions_->CompactRange(
        m->input_level, m->output_level, m->begin, m->end, &manual_end));
    if (!c) {
      // Nothing overlaps the requested range; the compaction is done.
      m->done = true;
    }
    LogToBuffer(
        log_buffer,
        "Manual compaction from level-%d to level-%d from %s .. %s; will stop "
        "at %s\n",
        m->input_level, m->output_level,
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        ((m->done || manual_end == nullptr)
             ? "(end)"
             : manual_end->DebugString().c_str()));
  } else if (!options_.disable_auto_compactions) {
    c.reset(versions_->PickCompaction(log_buffer));
  }

  Status status;
  if (!c) {
    // Nothing to do
    LogToBuffer(log_buffer, "Compaction nothing to do");
  } else if (!is_manual && c->IsTrivialMove()) {
    // Move file to next level: re-link the single input file in the
    // version edit instead of rewriting it.
    assert(c->num_input_files(0) == 1);
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
                       f->smallest, f->largest,
                       f->smallest_seqno, f->largest_seqno);
    status = versions_->LogAndApply(c->edit(), &mutex_, db_directory_.get());
    InstallSuperVersion(deletion_state);
    Version::LevelSummaryStorage tmp;
    LogToBuffer(log_buffer, "Moved #%lld to level-%d %lld bytes %s: %s\n",
                static_cast<unsigned long long>(f->number), c->level() + 1,
                static_cast<unsigned long long>(f->file_size),
                status.ToString().c_str(),
                versions_->current()->LevelSummary(&tmp));
    versions_->ReleaseCompactionFiles(c.get(), status);
    *madeProgress = true;
  } else {
    // Full compaction: merge input files into new output files.
    MaybeScheduleFlushOrCompaction(); // do more compaction work in parallel.
    CompactionState* compact = new CompactionState(c.get());
    status = DoCompactionWork(compact, deletion_state, log_buffer);
    CleanupCompaction(compact, status);
    versions_->ReleaseCompactionFiles(c.get(), status);
    c->ReleaseInputs();
    *madeProgress = true;
  }
  c.reset();

  if (status.ok()) {
    // Done
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutting down
  } else {
    Log(WARN, options_.info_log, "Compaction error: %s",
        status.ToString().c_str());
    if (options_.paranoid_checks && bg_error_.ok()) {
      bg_error_ = status;
    }
  }

  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    if (!status.ok()) {
      m->status = status;
      m->done = true;
    }
    // For universal compaction:
    //   Because universal compaction always happens at level 0, so one
    //   compaction will pick up all overlapped files. No files will be
    //   filtered out due to size limit and left for a successive compaction.
    //   So we can safely conclude the current compaction.
    //
    //   Also note that, if we don't stop here, then the current compaction
    //   writes a new file back to level 0, which will be used in successive
    //   compaction. Hence the manual compaction will never finish.
    //
    // Stop the compaction if manual_end points to nullptr -- this means
    // that we compacted the whole range. manual_end should always point
    // to nullptr in case of universal compaction
    if (manual_end == nullptr) {
      m->done = true;
    }
    if (!m->done) {
      // We only compacted part of the requested range.  Update *m
      // to the range that is left to be compacted.
      // Universal compaction should always compact the whole range
      assert(options_.compaction_style != kCompactionStyleUniversal);
      m->tmp_storage = *manual_end;
      m->begin = &m->tmp_storage;
    }
    m->in_progress = false; // not being processed anymore
    manual_compaction_ = nullptr;
  }
  return status;
}

2319
// Release all state held by a finished (or aborted) compaction and
// delete the CompactionState itself.
// REQUIRES: mutex_ held.
void DBImpl::CleanupCompaction(CompactionState* compact, Status status) {
  mutex_.AssertHeld();
  if (compact->builder != nullptr) {
    // A live builder means we were interrupted (e.g. a shutdown call)
    // in the middle of the compaction; abandon its partial output.
    compact->builder->Abandon();
    compact->builder.reset();
  } else {
    assert(compact->outfile == nullptr);
  }
  for (const auto& out : compact->outputs) {
    pending_outputs_.erase(out.number);

    // On failure the output files were never committed to a version, so
    // evict any table-cache entries that were created for them.
    if (!status.ok()) {
      table_cache_->Evict(out.number);
    }
  }
  delete compact;
}

2341
// Allocate the file numbers for the output file. We allocate as
2342
// many output file numbers as there are files in level+1 (at least one)
2343 2344 2345
// Insert them into pending_outputs so that they do not get deleted.
void DBImpl::AllocateCompactionOutputFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
2346 2347
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
2348
  int filesNeeded = compact->compaction->num_input_files(1);
2349
  for (int i = 0; i < std::max(filesNeeded, 1); i++) {
2350 2351 2352 2353 2354 2355 2356 2357 2358
    uint64_t file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    compact->allocated_file_numbers.push_back(file_number);
  }
}

// Un-register any pre-allocated file numbers that this compaction never
// consumed, so the numbers no longer block file deletion.
// REQUIRES: mutex_ held.
void DBImpl::ReleaseCompactionUnusedFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
  for (auto it = compact->allocated_file_numbers.begin();
       it != compact->allocated_file_numbers.end(); ++it) {
    pending_outputs_.erase(*it);
  }
}

J
jorlow@chromium.org 已提交
2365
// Start the next output table for this compaction: choose a file number
// (preferring pre-allocated ones), record a fresh Output entry, open the
// writable file and attach a new table builder to it.
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
  assert(compact != nullptr);
  assert(compact->builder == nullptr);

  // Prefer a pre-allocated file number: those are already registered in
  // pending_outputs_, so no locking is needed. Only when the pool is
  // exhausted do we take the heavyweight mutex to mint a new number.
  uint64_t number;
  if (compact->allocated_file_numbers.empty()) {
    mutex_.Lock();
    number = versions_->NewFileNumber();
    pending_outputs_.insert(number);
    mutex_.Unlock();
  } else {
    number = compact->allocated_file_numbers.front();
    compact->allocated_file_numbers.pop_front();
  }

  CompactionState::Output out;
  out.number = number;
  out.smallest.Clear();
  out.largest.Clear();
  out.smallest_seqno = out.largest_seqno = 0;
  compact->outputs.push_back(out);

  // Open the physical file that will back this output table.
  std::string fname = TableFileName(dbname_, number);
  Status s = env_->NewWritableFile(fname, &compact->outfile, storage_options_);

  if (s.ok()) {
    // Preallocate slightly more than the target size so we do not end up
    // just barely crossing the allocation threshold.
    compact->outfile->SetPreallocationBlockSize(
        1.1 *
        versions_->MaxFileSizeForLevel(compact->compaction->output_level()));

    CompressionType compression_type = GetCompressionType(
        options_, compact->compaction->output_level(),
        compact->compaction->enable_compression());

    compact->builder.reset(NewTableBuilder(options_, internal_comparator_,
                                           compact->outfile.get(),
                                           compression_type));
  }
  LogFlush(options_.info_log);
  return s;
}

// Finalize the current compaction output file: finish (or, on a prior
// iterator error, abandon) the table builder, sync and close the file,
// and verify that the freshly written table is readable through the
// table cache.
//
// An error reported by 'input' takes precedence over builder success;
// returns the first error encountered, Status::OK otherwise.
Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                          Iterator* input) {
  assert(compact != nullptr);
  assert(compact->outfile);
  assert(compact->builder != nullptr);

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  }
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
  compact->builder.reset();

  // Sync the file contents. The StopWatch is hoisted out of the
  // fsync/sync branches: both were constructing an identical watch, so a
  // single one timing whichever call runs is equivalent and removes the
  // duplication.
  if (s.ok() && !options_.disableDataSync) {
    StopWatch sw(env_, options_.statistics.get(),
                 COMPACTION_OUTFILE_SYNC_MICROS, false);
    if (options_.use_fsync) {
      s = compact->outfile->Fsync();
    } else {
      s = compact->outfile->Sync();
    }
  }
  if (s.ok()) {
    s = compact->outfile->Close();
  }
  compact->outfile.reset();

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
    FileMetaData meta(output_number, current_bytes);
    Iterator* iter = table_cache_->NewIterator(ReadOptions(), storage_options_,
                                               internal_comparator_, meta);
    s = iter->status();
    delete iter;
    if (s.ok()) {
      Log(options_.info_log,
          "Generated table #%lu: %lu keys, %lu bytes",
          (unsigned long) output_number,
          (unsigned long) current_entries,
          (unsigned long) current_bytes);
    }
  }
  return s;
}


2468 2469
// Commit a finished compaction to the version set: record the input-file
// deletions and the output-file additions in the compaction's VersionEdit
// and apply that edit.
// REQUIRES: mutex_ held.
Status DBImpl::InstallCompactionResults(CompactionState* compact,
                                        LogBuffer* log_buffer) {
  mutex_.AssertHeld();

  auto* const c = compact->compaction;

  // Paranoia: the input files must still exist in the current version at
  // their original levels. If not, a concurrent compaction erroneously
  // picked the same files, and installing would corrupt the version state.
  if (!versions_->VerifyCompactionFileConsistency(c)) {
    Log(options_.info_log,  "Compaction %d@%d + %d@%d files aborted",
      c->num_input_files(0),
      c->level(),
      c->num_input_files(1),
      c->level() + 1);
    return Status::Corruption("Compaction input files inconsistent");
  }

  LogToBuffer(
      log_buffer, "Compacted %d@%d + %d@%d files => %lld bytes",
      c->num_input_files(0), c->level(),
      c->num_input_files(1), c->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Drop the inputs and register every output table in the edit.
  c->AddInputDeletions(c->edit());
  for (const auto& out : compact->outputs) {
    c->edit()->AddFile(c->output_level(), out.number, out.file_size,
                       out.smallest, out.largest, out.smallest_seqno,
                       out.largest_seqno);
  }
  return versions_->LogAndApply(c->edit(), &mutex_, db_directory_.get());
}

2503 2504 2505 2506 2507 2508 2509 2510
// Return the sequence number of the earliest snapshot in which a key
// with sequence number 'in' is visible, storing the immediately
// preceding snapshot (or 0 when there is none) in *prev_snapshot.
//
// 'snapshots' must be sorted in ascending order. A linear scan is used
// deliberately: the snapshot list is typically very small.
inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
  SequenceNumber in, std::vector<SequenceNumber>& snapshots,
  SequenceNumber* prev_snapshot) {
  SequenceNumber prev __attribute__((unused)) = 0;
  for (size_t i = 0; i < snapshots.size(); ++i) {
    const SequenceNumber cur = snapshots[i];
    assert(prev <= cur);  // list must be sorted ascending
    if (cur >= in) {
      // 'cur' is the first snapshot that can see sequence 'in'.
      *prev_snapshot = prev;
      return cur;
    }
    prev = cur;
    assert(prev);  // snapshot sequence numbers are never zero
  }
  // No snapshot can see this sequence number -- callers must never ask
  // for a sequence newer than every snapshot.
  Log(options_.info_log,
      "Looking for seqid %lu but maxseqid is %lu",
      (unsigned long)in,
      (unsigned long)snapshots[snapshots.size()-1]);
  assert(0);
  return 0;
}

D
Danny Guo 已提交
2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542
// Core key/value loop of a compaction: walks the merged 'input' iterator
// (or, in V2 mode, the pre-filtered buffers in 'compact'), decides for
// each entry whether it can be dropped (shadowed by a newer entry in the
// same snapshot stripe, an obsolete deletion marker at the base level) or
// must be written out, applies the V1 compaction filter and merge-operator
// handling, and emits surviving entries into the compaction output files.
//
// Parameters:
//   visible_at_tip    - non-zero when there are no external snapshots, in
//                       which case every key is judged against the tip.
//   earliest_snapshot / latest_snapshot - bounds of the snapshot list.
//   deletion_state    - passed through to memtable flushes done inline.
//   bottommost_level  - true when no lower level contains data; enables
//                       seqno-zeroing and deletion-marker dropping.
//   imm_micros        - accumulates time spent flushing immutable
//                       memtables from inside this loop (subtracted from
//                       compaction timing by the caller).
//   input             - merged iterator over all compaction inputs.
//   is_compaction_v2  - selects the buffered (V2 filter) input path.
// Returns the first error encountered, or OK.
Status DBImpl::ProcessKeyValueCompaction(
    SequenceNumber visible_at_tip,
    SequenceNumber earliest_snapshot,
    SequenceNumber latest_snapshot,
    DeletionState& deletion_state,
    bool bottommost_level,
    int64_t& imm_micros,
    Iterator* input,
    CompactionState* compact,
    bool is_compaction_v2,
    LogBuffer* log_buffer) {
  size_t combined_idx = 0;
  Status status;
  std::string compaction_filter_value;
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
  std::vector<char> delete_key;   // for compaction filter
  SequenceNumber last_sequence_for_key __attribute__((unused)) =
    kMaxSequenceNumber;
  SequenceNumber visible_in_snapshot = kMaxSequenceNumber;
  MergeHelper merge(user_comparator(), options_.merge_operator.get(),
                    options_.info_log.get(),
                    options_.min_partial_merge_operands,
                    false /* internal key corruption is expected */);
  // Per-key (V1) filter: either the one set directly in options, or one
  // created from the factory for the duration of this compaction.
  auto compaction_filter = options_.compaction_filter;
  std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr;
  if (!compaction_filter) {
    auto context = compact->GetFilterContextV1();
    compaction_filter_from_factory =
      options_.compaction_filter_factory->CreateCompactionFilter(context);
    compaction_filter = compaction_filter_from_factory.get();
  }

  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
    // Prioritize immutable compaction work
    // TODO: remove memtable flush from normal compaction work
    if (imm_.imm_flush_needed.NoBarrier_Load() != nullptr) {
      const uint64_t imm_start = env_->NowMicros();
      LogFlush(options_.info_log);
      mutex_.Lock();
      if (imm_.IsFlushPending()) {
        FlushMemTableToOutputFile(nullptr, deletion_state, log_buffer);
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
      }
      mutex_.Unlock();
      log_buffer->FlushBufferToLog();
      // Time spent flushing is charged to imm_micros, not the compaction.
      imm_micros += (env_->NowMicros() - imm_start);
    }

    Slice key;
    Slice value;
    // If is_compaction_v2 is on, kv-pairs are reset to the prefix batch.
    // This prefix batch should contain results after calling
    // compaction_filter_v2.
    //
    // If is_compaction_v2 is off, this function will go through all the
    // kv-pairs in input.
    if (!is_compaction_v2) {
      key = input->key();
      value = input->value();
    } else {
      if (combined_idx >= compact->combined_key_buf_.size()) {
        break;
      }
      assert(combined_idx < compact->combined_key_buf_.size());
      key = compact->combined_key_buf_[combined_idx];
      value = compact->combined_value_buf_[combined_idx];

      ++combined_idx;
    }

    // Cut the current output file early if the compaction says this key
    // must start a new file (e.g. to bound grandparent overlap).
    if (compact->compaction->ShouldStopBefore(key) &&
        compact->builder != nullptr) {
      status = FinishCompactionOutputFile(compact, input);
      if (!status.ok()) {
        break;
      }
    }

    // Handle key/value, add to state, etc.
    bool drop = false;
    bool current_entry_is_merging = false;
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
      // TODO: error key stays in db forever? Figure out the intention/rationale
      // v10 error v8 : we cannot hide v8 even though it's pretty obvious.
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
      visible_in_snapshot = kMaxSequenceNumber;
    } else {
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
            Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
        visible_in_snapshot = kMaxSequenceNumber;
        // apply the compaction filter to the first occurrence of the user key
        if (compaction_filter && !is_compaction_v2 &&
            ikey.type == kTypeValue &&
            (visible_at_tip || ikey.sequence > latest_snapshot)) {
          // If the user has specified a compaction filter and the sequence
          // number is greater than any external snapshot, then invoke the
          // filter.
          // If the return value of the compaction filter is true, replace
          // the entry with a delete marker.
          bool value_changed = false;
          compaction_filter_value.clear();
          bool to_delete =
            compaction_filter->Filter(compact->compaction->level(),
                ikey.user_key, value,
                &compaction_filter_value,
                &value_changed);
          if (to_delete) {
            // make a copy of the original key
            delete_key.assign(key.data(), key.data() + key.size());
            // convert it to a delete
            UpdateInternalKey(&delete_key[0], delete_key.size(),
                ikey.sequence, kTypeDeletion);
            // anchor the key again
            key = Slice(&delete_key[0], delete_key.size());
            // needed because ikey is backed by key
            ParseInternalKey(key, &ikey);
            // no value associated with delete
            value.clear();
            RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_USER);
          } else if (value_changed) {
            value = compaction_filter_value;
          }
        }

      }

      // If there are no snapshots, then this kv affect visibility at tip.
      // Otherwise, search though all existing snapshots to find
      // the earlist snapshot that is affected by this kv.
      SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot
      SequenceNumber visible = visible_at_tip ?
        visible_at_tip :
        findEarliestVisibleSnapshot(ikey.sequence,
            compact->existing_snapshots,
            &prev_snapshot);

      if (visible_in_snapshot == visible) {
        // If the earliest snapshot is which this key is visible in
        // is the same as the visibily of a previous instance of the
        // same key, then this kv is not visible in any snapshot.
        // Hidden by an newer entry for same user key
        // TODO: why not > ?
        assert(last_sequence_for_key >= ikey.sequence);
        drop = true;    // (A)
        RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_NEWER_ENTRY);
      } else if (ikey.type == kTypeDeletion &&
          ikey.sequence <= earliest_snapshot &&
          compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
        RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_OBSOLETE);
      } else if (ikey.type == kTypeMerge) {
        // We know the merge type entry is not hidden, otherwise we would
        // have hit (A)
        // We encapsulate the merge related state machine in a different
        // object to minimize change to the existing flow. Turn out this
        // logic could also be nicely re-used for memtable flush purge
        // optimization in BuildTable.
        int steps = 0;
        merge.MergeUntil(input, prev_snapshot, bottommost_level,
            options_.statistics.get(), &steps);
        // Skip the Merge ops
        // (in V2 mode the buffered index must skip the operands MergeUntil
        // consumed; -1 compensates for the entry already taken above)
        combined_idx = combined_idx - 1 + steps;

        current_entry_is_merging = true;
        if (merge.IsSuccess()) {
          // Successfully found Put/Delete/(end-of-key-range) while merging
          // Get the merge result
          key = merge.key();
          ParseInternalKey(key, &ikey);
          value = merge.value();
        } else {
          // Did not find a Put/Delete/(end-of-key-range) while merging
          // We now have some stack of merge operands to write out.
          // NOTE: key,value, and ikey are now referring to old entries.
          //       These will be correctly set below.
          assert(!merge.keys().empty());
          assert(merge.keys().size() == merge.values().size());

          // Hack to make sure last_sequence_for_key is correct
          ParseInternalKey(merge.keys().front(), &ikey);
        }
      }

      last_sequence_for_key = ikey.sequence;
      visible_in_snapshot = visible;
    }
#if 0
    Log(options_.info_log,
        "  Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
        "%d smallest_snapshot: %d level: %d bottommost %d",
        ikey.user_key.ToString().c_str(),
        (int)ikey.sequence, ikey.type, kTypeValue, drop,
        compact->compaction->IsBaseLevelForKey(ikey.user_key),
        (int)last_sequence_for_key, (int)earliest_snapshot,
        compact->compaction->level(), bottommost_level);
#endif

    if (!drop) {
      // We may write a single key (e.g.: for Put/Delete or successful merge).
      // Or we may instead have to write a sequence/list of keys.
      // We have to write a sequence iff we have an unsuccessful merge
      bool has_merge_list = current_entry_is_merging && !merge.IsSuccess();
      const std::deque<std::string>* keys = nullptr;
      const std::deque<std::string>* values = nullptr;
      std::deque<std::string>::const_reverse_iterator key_iter;
      std::deque<std::string>::const_reverse_iterator value_iter;
      if (has_merge_list) {
        keys = &merge.keys();
        values = &merge.values();
        key_iter = keys->rbegin();    // The back (*rbegin()) is the first key
        value_iter = values->rbegin();

        key = Slice(*key_iter);
        value = Slice(*value_iter);
      }

      // If we have a list of keys to write, traverse the list.
      // If we have a single key to write, simply write that key.
      while (true) {
        // Invariant: key,value,ikey will always be the next entry to write
        char* kptr = (char*)key.data();
        std::string kstr;

        // Zeroing out the sequence number leads to better compression.
        // If this is the bottommost level (no files in lower levels)
        // and the earliest snapshot is larger than this seqno
        // then we can squash the seqno to zero.
        if (bottommost_level && ikey.sequence < earliest_snapshot &&
            ikey.type != kTypeMerge) {
          assert(ikey.type != kTypeDeletion);
          // make a copy because updating in place would cause problems
          // with the priority queue that is managing the input key iterator
          kstr.assign(key.data(), key.size());
          kptr = (char *)kstr.c_str();
          UpdateInternalKey(kptr, key.size(), (uint64_t)0, ikey.type);
        }

        Slice newkey(kptr, key.size());
        assert((key.clear(), 1)); // we do not need 'key' anymore

        // Open output file if necessary
        if (compact->builder == nullptr) {
          status = OpenCompactionOutputFile(compact);
          if (!status.ok()) {
            break;
          }
        }

        // Track the smallest/largest key and seqno range of the current
        // output file as entries are appended.
        SequenceNumber seqno = GetInternalKeySeqno(newkey);
        if (compact->builder->NumEntries() == 0) {
          compact->current_output()->smallest.DecodeFrom(newkey);
          compact->current_output()->smallest_seqno = seqno;
        } else {
          compact->current_output()->smallest_seqno =
            std::min(compact->current_output()->smallest_seqno, seqno);
        }
        compact->current_output()->largest.DecodeFrom(newkey);
        compact->builder->Add(newkey, value);
        compact->current_output()->largest_seqno =
          std::max(compact->current_output()->largest_seqno, seqno);

        // Close output file if it is big enough
        if (compact->builder->FileSize() >=
            compact->compaction->MaxOutputFileSize()) {
          status = FinishCompactionOutputFile(compact, input);
          if (!status.ok()) {
            break;
          }
        }

        // If we have a list of entries, move to next element
        // If we only had one entry, then break the loop.
        if (has_merge_list) {
          ++key_iter;
          ++value_iter;

          // If at end of list
          if (key_iter == keys->rend() || value_iter == values->rend()) {
            // Sanity Check: if one ends, then both end
            assert(key_iter == keys->rend() && value_iter == values->rend());
            break;
          }

          // Otherwise not at end of list. Update key, value, and ikey.
          key = Slice(*key_iter);
          value = Slice(*value_iter);
          ParseInternalKey(key, &ikey);

        } else{
          // Only had one item to begin with (Put/Delete)
          break;
        }
      }
    }

    // MergeUntil has moved input to the next entry
    if (!current_entry_is_merging) {
      input->Next();
    }
  }

  return status;
}

void DBImpl::CallCompactionFilterV2(CompactionState* compact,
  CompactionFilterV2* compaction_filter_v2) {
  // Nothing to do without both a compaction state and a filter.
  if (compact == nullptr || compaction_filter_v2 == nullptr) {
    return;
  }

  // Build the list of user keys matching the buffered internal keys.
  std::vector<Slice> user_keys;
  user_keys.reserve(compact->ikey_buf_.size());
  for (const auto& buffered_ikey : compact->ikey_buf_) {
    user_keys.emplace_back(buffered_ikey.user_key);
  }

  // If the user has specified a compaction filter and the sequence
  // number is greater than any external snapshot, then invoke the
  // filter.
  // If the return value of the compaction filter is true, replace
  // the entry with a delete marker.
  compact->to_delete_buf_ = compaction_filter_v2->Filter(
      compact->compaction->level(),
      user_keys, compact->existing_value_buf_,
      &compact->new_value_buf_,
      &compact->value_changed_buf_);

  // new_value_buf_.size() <= to_delete_buf_.size(). "=" iff all
  // kv-pairs in this compaction run needs to be deleted.
  assert(compact->to_delete_buf_.size() ==
      compact->key_buf_.size());
  assert(compact->to_delete_buf_.size() ==
      compact->existing_value_buf_.size());
  assert(compact->to_delete_buf_.size() ==
      compact->value_changed_buf_.size());

  // new_value_buf_ only holds entries for keys whose value actually
  // changed, so it is consumed with its own cursor.
  size_t next_new_value = 0;
  for (size_t idx = 0; idx < compact->to_delete_buf_.size(); ++idx) {
    if (compact->to_delete_buf_[idx]) {
      // Rewrite the internal key in place as a deletion marker; the Slice
      // buffers alias these strings, so they observe the update.
      UpdateInternalKey(&compact->key_str_buf_[idx][0],
          compact->key_str_buf_[idx].size(),
          compact->ikey_buf_[idx].sequence,
          kTypeDeletion);

      // no value associated with delete
      compact->existing_value_buf_[idx].clear();
      RecordTick(options_.statistics.get(), COMPACTION_KEY_DROP_USER);
    } else if (compact->value_changed_buf_[idx]) {
      compact->existing_value_buf_[idx] =
        Slice(compact->new_value_buf_[next_new_value++]);
    }
  }  // for
}

// Runs one compaction job described by compact->compaction: reads the input
// files, optionally applies a CompactionFilterV2 batch-by-batch (grouped by
// key prefix), writes the output files and installs the result in the
// version set. Entered with mutex_ held; the mutex is released while the
// actual I/O work is done and re-acquired before results are installed.
Status DBImpl::DoCompactionWork(CompactionState* compact,
                                DeletionState& deletion_state,
                                LogBuffer* log_buffer) {
  assert(compact);
  compact->CleanupBatchBuffer();
  compact->CleanupMergedBuffer();
  bool prefix_initialized = false;

  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
  LogToBuffer(log_buffer,
              "Compacting %d@%d + %d@%d files, score %.2f slots available %d",
              compact->compaction->num_input_files(0),
              compact->compaction->level(),
              compact->compaction->num_input_files(1),
              compact->compaction->output_level(), compact->compaction->score(),
              options_.max_background_compactions - bg_compaction_scheduled_);
  char scratch[2345];
  compact->compaction->Summary(scratch, sizeof(scratch));
  LogToBuffer(log_buffer, "Compaction start summary: %s\n", scratch);

  assert(versions_->current()->NumLevelFiles(compact->compaction->level()) > 0);
  assert(compact->builder == nullptr);
  assert(!compact->outfile);

  SequenceNumber visible_at_tip = 0;
  SequenceNumber earliest_snapshot;
  SequenceNumber latest_snapshot = 0;
  snapshots_.getAll(compact->existing_snapshots);
  if (compact->existing_snapshots.size() == 0) {
    // optimize for fast path if there are no snapshots
    visible_at_tip = versions_->LastSequence();
    earliest_snapshot = visible_at_tip;
  } else {
    latest_snapshot = compact->existing_snapshots.back();
    // Add the current seqno as the 'latest' virtual
    // snapshot to the end of this list.
    compact->existing_snapshots.push_back(versions_->LastSequence());
    earliest_snapshot = compact->existing_snapshots[0];
  }

  // Is this compaction producing files at the bottommost level?
  bool bottommost_level = compact->compaction->BottomMostLevel();

  // Allocate the output file numbers before we release the lock
  AllocateCompactionOutputFileNumbers(compact);

  // Release mutex while we're actually doing the compaction work
  mutex_.Unlock();
  log_buffer->FlushBufferToLog();

  const uint64_t start_micros = env_->NowMicros();
  unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
  input->SeekToFirst();
  // backup_input walks the same key range ahead of `input` so keys can be
  // buffered per prefix for CompactionFilterV2 before `input` consumes them.
  shared_ptr<Iterator> backup_input(
      versions_->MakeInputIterator(compact->compaction));
  backup_input->SeekToFirst();

  Status status;
  ParsedInternalKey ikey;
  std::unique_ptr<CompactionFilterV2> compaction_filter_from_factory_v2
    = nullptr;
  auto context = compact->GetFilterContext();
  compaction_filter_from_factory_v2 =
    options_.compaction_filter_factory_v2->CreateCompactionFilterV2(context);
  auto compaction_filter_v2 =
    compaction_filter_from_factory_v2.get();

  // temp_backup_input always point to the start of the current buffer
  // temp_backup_input = backup_input;
  // iterate through input,
  // 1) buffer ineligible keys and value keys into 2 separate buffers;
  // 2) send value_buffer to compaction filter and alternate the values;
  // 3) merge value_buffer with ineligible_value_buffer;
  // 4) run the modified "compaction" using the old for loop.
  if (compaction_filter_v2) {
    for (; backup_input->Valid() && !shutting_down_.Acquire_Load(); ) {
      // Prioritize immutable compaction work
      if (imm_.imm_flush_needed.NoBarrier_Load() != nullptr) {
        const uint64_t imm_start = env_->NowMicros();
        LogFlush(options_.info_log);
        mutex_.Lock();
        if (imm_.IsFlushPending()) {
          FlushMemTableToOutputFile(nullptr, deletion_state, log_buffer);
          bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
        }
        mutex_.Unlock();
        imm_micros += (env_->NowMicros() - imm_start);
      }

      Slice key = backup_input->key();
      Slice value = backup_input->value();

      const SliceTransform* transformer =
        options_.compaction_filter_factory_v2->GetPrefixExtractor();
      std::string key_prefix = transformer->Transform(key).ToString();
      if (!prefix_initialized) {
        // First key seen: start the first prefix batch.
        compact->cur_prefix_ = key_prefix;
        prefix_initialized = true;
      }
      if (!ParseInternalKey(key, &ikey)) {
        // log error
        Log(options_.info_log, "Failed to parse key: %s",
            key.ToString().c_str());
        continue;
      } else {
        // If the prefix remains the same, keep buffering
        if (key_prefix == compact->cur_prefix_) {
          // Apply the compaction filter V2 to all the kv pairs sharing
          // the same prefix
          if (ikey.type == kTypeValue &&
              (visible_at_tip || ikey.sequence > latest_snapshot)) {
            // Buffer all keys sharing the same prefix for CompactionFilterV2
            // Iterate through keys to check prefix
            compact->BufferKeyValueSlices(key, value);
          } else {
            // buffer ineligible keys
            compact->BufferOtherKeyValueSlices(key, value);
          }
          backup_input->Next();
          continue;
          // finish changing values for eligible keys
        } else {
          // Now prefix changes, this batch is done.
          // Call compaction filter on the buffered values to change the value
          if (compact->key_buf_.size() > 0) {
            CallCompactionFilterV2(compact, compaction_filter_v2);
          }
          compact->cur_prefix_ = key_prefix;
        }
      }

      // Merge this batch of data (values + ineligible keys)
      compact->MergeKeyValueSliceBuffer(&internal_comparator_);

      // Done buffering for the current prefix. Spit it out to disk
      // Now just iterate through all the kv-pairs
      status = ProcessKeyValueCompaction(
          visible_at_tip,
          earliest_snapshot,
          latest_snapshot,
          deletion_state,
          bottommost_level,
          imm_micros,
          input.get(),
          compact,
          true,
          log_buffer);

      if (!status.ok()) {
        break;
      }

      // After writing the kv-pairs, we can safely remove the reference
      // to the string buffer and clean them up
      compact->CleanupBatchBuffer();
      compact->CleanupMergedBuffer();
      // Buffer the key that triggers the mismatch in prefix
      if (ikey.type == kTypeValue &&
        (visible_at_tip || ikey.sequence > latest_snapshot)) {
        compact->BufferKeyValueSlices(key, value);
      } else {
        compact->BufferOtherKeyValueSlices(key, value);
      }
      backup_input->Next();
      if (!backup_input->Valid()) {
        // If this is the single last value, we need to merge it.
        if (compact->key_buf_.size() > 0) {
          CallCompactionFilterV2(compact, compaction_filter_v2);
        }
        compact->MergeKeyValueSliceBuffer(&internal_comparator_);

        status = ProcessKeyValueCompaction(
            visible_at_tip,
            earliest_snapshot,
            latest_snapshot,
            deletion_state,
            bottommost_level,
            imm_micros,
            input.get(),
            compact,
            true,
            log_buffer);

        compact->CleanupBatchBuffer();
        compact->CleanupMergedBuffer();
      }
    }  // done processing all prefix batches
    // finish the last batch
    if (compact->key_buf_.size() > 0) {
      CallCompactionFilterV2(compact, compaction_filter_v2);
    }
    compact->MergeKeyValueSliceBuffer(&internal_comparator_);
    status = ProcessKeyValueCompaction(
        visible_at_tip,
        earliest_snapshot,
        latest_snapshot,
        deletion_state,
        bottommost_level,
        imm_micros,
        input.get(),
        compact,
        true,
        log_buffer);
  }  // checking for compaction filter v2

  // Plain path: no CompactionFilterV2 configured, compact in one pass.
  if (!compaction_filter_v2) {
    status = ProcessKeyValueCompaction(
      visible_at_tip,
      earliest_snapshot,
      latest_snapshot,
      deletion_state,
      bottommost_level,
      imm_micros,
      input.get(),
      compact,
      false,
      log_buffer);
  }

  if (status.ok() && shutting_down_.Acquire_Load()) {
    status = Status::ShutdownInProgress(
        "Database shutdown started during compaction");
  }
  if (status.ok() && compact->builder != nullptr) {
    status = FinishCompactionOutputFile(compact, input.get());
  }
  if (status.ok()) {
    status = input->status();
  }
  input.reset();

  if (!options_.disableDataSync) {
    db_directory_->Fsync();
  }

  // Collect per-compaction statistics for internal stats and the log line
  // below. imm_micros (time spent flushing memtables) is excluded.
  InternalStats::CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
  MeasureTime(options_.statistics.get(), COMPACTION_TIME, stats.micros);
  stats.files_in_leveln = compact->compaction->num_input_files(0);
  stats.files_in_levelnp1 = compact->compaction->num_input_files(1);

  int num_output_files = compact->outputs.size();
  if (compact->builder != nullptr) {
    // An error occurred so ignore the last output.
    assert(num_output_files > 0);
    --num_output_files;
  }
  stats.files_out_levelnp1 = num_output_files;

  for (int i = 0; i < compact->compaction->num_input_files(0); i++) {
    stats.bytes_readn += compact->compaction->input(0, i)->file_size;
    RecordTick(options_.statistics.get(), COMPACT_READ_BYTES,
               compact->compaction->input(0, i)->file_size);
  }

  for (int i = 0; i < compact->compaction->num_input_files(1); i++) {
    stats.bytes_readnp1 += compact->compaction->input(1, i)->file_size;
    RecordTick(options_.statistics.get(), COMPACT_READ_BYTES,
               compact->compaction->input(1, i)->file_size);
  }

  for (int i = 0; i < num_output_files; i++) {
    stats.bytes_written += compact->outputs[i].file_size;
    RecordTick(options_.statistics.get(), COMPACT_WRITE_BYTES,
               compact->outputs[i].file_size);
  }

  LogFlush(options_.info_log);
  mutex_.Lock();
  internal_stats_.AddCompactionStats(compact->compaction->output_level(),
                                     stats);

  // if there were any unused file number (mostly in case of
  // compaction error), free up the entry from pending_outputs
  ReleaseCompactionUnusedFileNumbers(compact);

  if (status.ok()) {
    status = InstallCompactionResults(compact, log_buffer);
    InstallSuperVersion(deletion_state);
  }
  Version::LevelSummaryStorage tmp;
  LogToBuffer(
      log_buffer,
      "compacted to: %s, %.1f MB/sec, level %d, files in(%d, %d) out(%d) "
      "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
      "write-amplify(%.1f) %s\n",
      versions_->current()->LevelSummary(&tmp),
      (stats.bytes_readn + stats.bytes_readnp1 + stats.bytes_written) /
          (double)stats.micros,
      compact->compaction->output_level(), stats.files_in_leveln,
      stats.files_in_levelnp1, stats.files_out_levelnp1,
      stats.bytes_readn / 1048576.0, stats.bytes_readnp1 / 1048576.0,
      stats.bytes_written / 1048576.0,
      (stats.bytes_written + stats.bytes_readnp1 + stats.bytes_readn) /
          (double)stats.bytes_readn,
      stats.bytes_written / (double)stats.bytes_readn,
      status.ToString().c_str());

  return status;
}

3204 3205
namespace {
struct IterState {
3206 3207 3208 3209
  IterState(DBImpl* db, port::Mutex* mu, DBImpl::SuperVersion* super_version)
    : db(db), mu(mu), super_version(super_version) {}

  DBImpl* db;
3210
  port::Mutex* mu;
3211
  DBImpl::SuperVersion* super_version;
3212 3213 3214 3215
};

static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
3216 3217 3218

  bool need_cleanup = state->super_version->Unref();
  if (need_cleanup) {
3219 3220
    DBImpl::DeletionState deletion_state;

3221 3222 3223 3224 3225 3226
    state->mu->Lock();
    state->super_version->Cleanup();
    state->db->FindObsoleteFiles(deletion_state, false, true);
    state->mu->Unlock();

    delete state->super_version;
3227 3228 3229
    if (deletion_state.HaveSomethingToDelete()) {
      state->db->PurgeObsoleteFiles(deletion_state);
    }
3230
  }
T
Tomislav Novak 已提交
3231

3232 3233
  delete state;
}
H
Hans Wennborg 已提交
3234
}  // namespace
3235

J
jorlow@chromium.org 已提交
3236 3237
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot) {
  // Pin the current SuperVersion and record the latest sequence number
  // under the mutex.
  mutex_.Lock();
  *latest_snapshot = versions_->LastSequence();
  SuperVersion* sv = super_version_->Ref();
  mutex_.Unlock();

  // Gather child iterators: the mutable memtable first, then every
  // immutable memtable, then the table files in L0 - Ln.
  std::vector<Iterator*> iterators;
  iterators.push_back(sv->mem->NewIterator(options));
  sv->imm->AddIterators(options, &iterators);
  sv->current->AddIterators(options, storage_options_, &iterators);

  Iterator* merged_iter = NewMergingIterator(
      env_, &internal_comparator_, &iterators[0], iterators.size());

  // The merged iterator holds the SuperVersion reference; the cleanup
  // callback releases it when the iterator is destroyed.
  IterState* cleanup_state = new IterState(this, &mutex_, sv);
  merged_iter->RegisterCleanup(CleanupIteratorState, cleanup_state, nullptr);

  return merged_iter;
}

Iterator* DBImpl::TEST_NewInternalIterator() {
  // Test hook: build an internal iterator over the full DB state.
  // prefix_seek is enabled so the prefix-based paths get exercised;
  // the returned snapshot value is not needed by tests.
  SequenceNumber unused_snapshot;
  ReadOptions read_options;
  read_options.prefix_seek = true;
  return NewInternalIterator(read_options, &unused_snapshot);
}

T
Tomislav Novak 已提交
3268 3269 3270 3271 3272
std::pair<Iterator*, Iterator*> DBImpl::GetTailingIteratorPair(
    const ReadOptions& options,
    uint64_t* superversion_number) {

  mutex_.Lock();
3273
  SuperVersion* super_version = super_version_->Ref();
T
Tomislav Novak 已提交
3274 3275 3276 3277 3278
  if (superversion_number != nullptr) {
    *superversion_number = CurrentVersionNumber();
  }
  mutex_.Unlock();

3279
  Iterator* mutable_iter = super_version->mem->NewIterator(options);
T
Tomislav Novak 已提交
3280 3281 3282 3283 3284
  // create a DBIter that only uses memtable content; see NewIterator()
  mutable_iter = NewDBIterator(&dbname_, env_, options_, user_comparator(),
                               mutable_iter, kMaxSequenceNumber);

  std::vector<Iterator*> list;
3285 3286 3287
  super_version->imm->AddIterators(options, &list);
  super_version->current->AddIterators(options, storage_options_, &list);
  Iterator* immutable_iter =
K
kailiu 已提交
3288
    NewMergingIterator(env_, &internal_comparator_, &list[0], list.size());
T
Tomislav Novak 已提交
3289 3290 3291 3292 3293

  // create a DBIter that only uses memtable content; see NewIterator()
  immutable_iter = NewDBIterator(&dbname_, env_, options_, user_comparator(),
                                 immutable_iter, kMaxSequenceNumber);

3294 3295 3296 3297 3298 3299 3300 3301
  // register cleanups
  mutable_iter->RegisterCleanup(CleanupIteratorState,
    new IterState(this, &mutex_, super_version), nullptr);

  // bump the ref one more time since it will be Unref'ed twice
  immutable_iter->RegisterCleanup(CleanupIteratorState,
    new IterState(this, &mutex_, super_version->Ref()), nullptr);

T
Tomislav Novak 已提交
3302 3303 3304
  return std::make_pair(mutable_iter, immutable_iter);
}

J
jorlow@chromium.org 已提交
3305
int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
3306
  MutexLock l(&mutex_);
3307
  return versions_->current()->MaxNextLevelOverlappingBytes();
3308 3309
}

J
jorlow@chromium.org 已提交
3310 3311 3312
// Look up "key" and store its value in *value. Thin public wrapper that
// delegates to GetImpl() (the shared lookup path also used by
// KeyMayExist() via its value_found parameter).
Status DBImpl::Get(const ReadOptions& options,
                   const Slice& key,
                   std::string* value) {
  return GetImpl(options, key, value);
}

I
Igor Canadi 已提交
3316 3317 3318
// DeletionState gets created and destructed outside of the lock -- we
// use this conveniently to:
// * malloc one SuperVersion() outside of the lock -- new_superversion
// * delete SuperVersion()s outside of the lock -- superversions_to_free
//
// However, if InstallSuperVersion() gets called twice with the same
// deletion_state, we can't reuse the SuperVersion() that got malloced
// because the first call already used it. In that rare case, we take a hit
// and create a new SuperVersion() inside of the mutex.
void DBImpl::InstallSuperVersion(DeletionState& deletion_state) {
  mutex_.AssertHeld();
  // Reuse the SuperVersion preallocated outside of the mutex, unless an
  // earlier call on the same deletion_state already consumed it.
  SuperVersion* fresh_superversion = deletion_state.new_superversion;
  if (fresh_superversion == nullptr) {
    fresh_superversion = new SuperVersion();
  }
  SuperVersion* retired_superversion =
    InstallSuperVersion(fresh_superversion);
  deletion_state.new_superversion = nullptr;
  // The displaced SuperVersion gets deleted by the caller outside the lock.
  deletion_state.superversions_to_free.push_back(retired_superversion);
  // Invalidate SuperVersions cached in thread local storage.
  if (options_.allow_thread_local) {
    ResetThreadLocalSuperVersions(&deletion_state);
  }
}

DBImpl::SuperVersion* DBImpl::InstallSuperVersion(
    SuperVersion* new_superversion) {
  mutex_.AssertHeld();
  // Point the incoming SuperVersion at the current memtables and version,
  // then make it the DB's current SuperVersion.
  new_superversion->db = this;
  new_superversion->Init(mem_, imm_.current(), versions_->current());

  SuperVersion* displaced = super_version_;
  super_version_ = new_superversion;
  ++super_version_number_;
  super_version_->version_number = super_version_number_;

  // If the displaced SuperVersion just lost its last reference, tear it
  // down here and return it so the caller can delete it outside the mutex.
  if (displaced != nullptr && displaced->Unref()) {
    displaced->Cleanup();
    return displaced;
  }
  return nullptr;
}

3357 3358 3359
void DBImpl::ResetThreadLocalSuperVersions(DeletionState* deletion_state) {
  mutex_.AssertHeld();
  // Replace every thread's cached SuperVersion pointer with kSVObsolete
  // and collect whatever was stored there before.
  autovector<void*> scraped;
  local_sv_->Scrape(&scraped, SuperVersion::kSVObsolete);
  for (auto entry : scraped) {
    assert(entry);
    // A slot holding kSVInUse belongs to a thread in the middle of a read;
    // that thread will see the obsolete marker afterwards and release its
    // SuperVersion itself.
    if (entry == SuperVersion::kSVInUse) {
      continue;
    }
    auto sv = static_cast<SuperVersion*>(entry);
    if (sv->Unref()) {
      sv->Cleanup();
      deletion_state->superversions_to_free.push_back(sv);
    }
  }
}

3374 3375 3376
// Core lookup shared by Get() and KeyMayExist(). Searches, in order, the
// mutable memtable, the immutable memtables, then the table files of the
// current version, reading at the caller's snapshot (or the latest
// sequence number). value_found is passed through to Version::Get() so
// KeyMayExist() can learn whether the value was actually fetched.
Status DBImpl::GetImpl(const ReadOptions& options,
                       const Slice& key,
                       std::string* value,
                       bool* value_found) {
  Status s;

  StopWatch sw(env_, options_.statistics.get(), DB_GET, false);
  StopWatchNano snapshot_timer(env_, false);
  StartPerfTimer(&snapshot_timer);
  SequenceNumber snapshot;

  if (options.snapshot != nullptr) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  // Acquire SuperVersion
  SuperVersion* sv = nullptr;
  if (LIKELY(options_.allow_thread_local)) {
    // The SuperVersion is cached in thread local storage to avoid acquiring
    // mutex when SuperVersion does not change since the last use. When a new
    // SuperVersion is installed, the compaction or flush thread cleans up
    // cached SuperVersion in all existing thread local storage. To avoid
    // acquiring mutex for this operation, we use atomic Swap() on the thread
    // local pointer to guarantee exclusive access. If the thread local pointer
    // is being used while a new SuperVersion is installed, the cached
    // SuperVersion can become stale. In that case, the background thread would
    // have swapped in kSVObsolete. We re-check the value at the end of
    // Get, with an atomic compare and swap. The superversion will be released
    // if detected to be stale.
    void* ptr = local_sv_->Swap(SuperVersion::kSVInUse);
    // Invariant:
    // (1) Scrape (always) installs kSVObsolete in ThreadLocal storage
    // (2) the Swap above (always) installs kSVInUse, ThreadLocal storage
    // should only keep kSVInUse during a GetImpl.
    assert(ptr != SuperVersion::kSVInUse);
    sv = static_cast<SuperVersion*>(ptr);
    if (sv == SuperVersion::kSVObsolete ||
        sv->version_number != super_version_number_.load(
          std::memory_order_relaxed)) {
      // Cached SuperVersion is missing or stale: drop it (cleaning up if we
      // held the last reference) and re-acquire the current one under the
      // mutex.
      RecordTick(options_.statistics.get(), NUMBER_SUPERVERSION_ACQUIRES);
      SuperVersion* sv_to_delete = nullptr;

      if (sv && sv->Unref()) {
        RecordTick(options_.statistics.get(), NUMBER_SUPERVERSION_CLEANUPS);
        mutex_.Lock();
        // TODO underlying resources held by superversion (sst files) might
        // not be released until the next background job.
        sv->Cleanup();
        sv_to_delete = sv;
      } else {
        mutex_.Lock();
      }
      sv = super_version_->Ref();
      mutex_.Unlock();

      delete sv_to_delete;
    }
  } else {
    // Thread-local caching disabled: always take a reference under the mutex.
    mutex_.Lock();
    sv = super_version_->Ref();
    mutex_.Unlock();
  }

  bool have_stat_update = false;
  Version::GetStats stats;

  // Prepare to store a list of merge operations if merge occurs.
  MergeContext merge_context;

  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
  // merge_operands will contain the sequence of merges in the latter case.
  LookupKey lkey(key, snapshot);
  BumpPerfTime(&perf_context.get_snapshot_time, &snapshot_timer);
  if (sv->mem->Get(lkey, value, &s, merge_context, options_)) {
    // Done
    RecordTick(options_.statistics.get(), MEMTABLE_HIT);
  } else if (sv->imm->Get(lkey, value, &s, merge_context, options_)) {
    // Done
    RecordTick(options_.statistics.get(), MEMTABLE_HIT);
  } else {
    // Not in any memtable: consult the table files of the current version.
    StopWatchNano from_files_timer(env_, false);
    StartPerfTimer(&from_files_timer);

    sv->current->Get(options, lkey, value, &s, &merge_context, &stats,
                     options_, value_found);
    have_stat_update = true;
    BumpPerfTime(&perf_context.get_from_output_files_time, &from_files_timer);
    RecordTick(options_.statistics.get(), MEMTABLE_MISS);
  }

  StopWatchNano post_process_timer(env_, false);
  StartPerfTimer(&post_process_timer);

  // File-read statistics may trigger a seek-driven compaction.
  if (!options_.disable_seek_compaction && have_stat_update) {
    mutex_.Lock();
    if (sv->current->UpdateStats(stats)) {
      MaybeScheduleFlushOrCompaction();
    }
    mutex_.Unlock();
  }

  bool unref_sv = true;
  if (LIKELY(options_.allow_thread_local)) {
    // Put the SuperVersion back
    void* expected = SuperVersion::kSVInUse;
    if (local_sv_->CompareAndSwap(static_cast<void*>(sv), expected)) {
      // When we see kSVInUse in the ThreadLocal, we are sure ThreadLocal
      // storage has not been altered and no Scrape has happend. The
      // SuperVersion is still current.
      unref_sv = false;
    } else {
      // ThreadLocal scrape happened in the process of this GetImpl call (after
      // thread local Swap() at the beginning and before CompareAndSwap()).
      // This means the SuperVersion it holds is obsolete.
      assert(expected == SuperVersion::kSVObsolete);
    }
  }

  if (unref_sv) {
    // Release SuperVersion
    if (sv->Unref()) {
      mutex_.Lock();
      sv->Cleanup();
      mutex_.Unlock();
      delete sv;
      RecordTick(options_.statistics.get(), NUMBER_SUPERVERSION_CLEANUPS);
    }
    RecordTick(options_.statistics.get(), NUMBER_SUPERVERSION_RELEASES);
  }

  // Note, tickers are atomic now - no lock protection needed any more.
  RecordTick(options_.statistics.get(), NUMBER_KEYS_READ);
  RecordTick(options_.statistics.get(), BYTES_READ, value->size());
  BumpPerfTime(&perf_context.get_post_process_time, &post_process_timer);
  return s;
}

3514 3515 3516
// Batched lookup: returns one Status per key and fills *values (always
// resized to keys.size()). A single SuperVersion reference is taken for the
// whole batch so all keys are read against the same state, at the caller's
// snapshot or the latest sequence number.
std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                     const std::vector<Slice>& keys,
                                     std::vector<std::string>* values) {
  StopWatch sw(env_, options_.statistics.get(), DB_MULTIGET, false);
  StopWatchNano snapshot_timer(env_, false);
  StartPerfTimer(&snapshot_timer);

  SequenceNumber snapshot;

  mutex_.Lock();
  if (options.snapshot != nullptr) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  // Pin the current SuperVersion for the duration of the batch.
  SuperVersion* get_version = super_version_->Ref();
  mutex_.Unlock();

  bool have_stat_update = false;
  Version::GetStats stats;

  // Contain a list of merge operations if merge occurs.
  MergeContext merge_context;

  // Note: this always resizes the values array
  int numKeys = keys.size();
  std::vector<Status> statList(numKeys);
  values->resize(numKeys);

  // Keep track of bytes that we read for statistics-recording later
  uint64_t bytesRead = 0;
  BumpPerfTime(&perf_context.get_snapshot_time, &snapshot_timer);

  // For each of the given keys, apply the entire "get" process as follows:
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
  // merge_operands will contain the sequence of merges in the latter case.
  for (int i=0; i<numKeys; ++i) {
    merge_context.Clear();
    Status& s = statList[i];
    std::string* value = &(*values)[i];

    LookupKey lkey(keys[i], snapshot);
    if (get_version->mem->Get(lkey, value, &s, merge_context, options_)) {
      // Done
    } else if (get_version->imm->Get(lkey, value, &s, merge_context,
                                     options_)) {
      // Done
    } else {
      // Fall through to the table files of the current version.
      get_version->current->Get(options, lkey, value, &s, &merge_context,
                                &stats, options_);
      have_stat_update = true;
    }

    if (s.ok()) {
      bytesRead += value->size();
    }
  }

  // Post processing (decrement reference counts and record statistics)
  StopWatchNano post_process_timer(env_, false);
  StartPerfTimer(&post_process_timer);
  bool delete_get_version = false;
  if (!options_.disable_seek_compaction && have_stat_update) {
    // File-read statistics may trigger a seek-driven compaction; apply
    // them and release the SuperVersion under a single mutex acquisition.
    mutex_.Lock();
    if (get_version->current->UpdateStats(stats)) {
      MaybeScheduleFlushOrCompaction();
    }
    if (get_version->Unref()) {
      get_version->Cleanup();
      delete_get_version = true;
    }
    mutex_.Unlock();
  } else {
    // No stats to apply: only take the mutex if ours was the last reference.
    if (get_version->Unref()) {
      mutex_.Lock();
      get_version->Cleanup();
      mutex_.Unlock();
      delete_get_version = true;
    }
  }
  if (delete_get_version) {
    // Deletion happens outside of the mutex.
    delete get_version;
  }

  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_CALLS);
  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_KEYS_READ, numKeys);
  RecordTick(options_.statistics.get(), NUMBER_MULTIGET_BYTES_READ, bytesRead);
  BumpPerfTime(&perf_context.get_post_process_time, &post_process_timer);

  return statList;
}

3608 3609 3610 3611 3612
bool DBImpl::KeyMayExist(const ReadOptions& options,
                         const Slice& key,
                         std::string* value,
                         bool* value_found) {
  if (value_found != nullptr) {
K
Kai Liu 已提交
3613 3614
    // falsify later if key-may-exist but can't fetch value
    *value_found = true;
3615
  }
3616 3617
  ReadOptions roptions = options;
  roptions.read_tier = kBlockCacheTier; // read from block cache only
K
Kai Liu 已提交
3618 3619 3620 3621 3622 3623
  auto s = GetImpl(roptions, key, value, value_found);

  // If options.block_cache != nullptr and the index block of the table didn't
  // not present in block_cache, the return value will be Status::Incomplete.
  // In this case, key may still exist in the table.
  return s.ok() || s.IsIncomplete();
3624 3625
}

J
jorlow@chromium.org 已提交
3626
Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  Iterator* result = nullptr;

  if (options.tailing) {
    // Tailing iterators follow newly written data instead of pinning a
    // snapshot.
    result = new TailingIterator(this, options, user_comparator());
  } else {
    SequenceNumber latest_snapshot;
    Iterator* internal_iter = NewInternalIterator(options, &latest_snapshot);
    const SequenceNumber sequence =
        (options.snapshot != nullptr)
            ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
            : latest_snapshot;
    result = NewDBIterator(&dbname_, env_, options_, user_comparator(),
                           internal_iter, sequence);
  }

  if (options.prefix) {
    // Wrap with an extra filter so keys that do not begin with the
    // requested prefix are excluded from the results.
    result = new PrefixFilterIterator(result, *options.prefix,
                                      options_.prefix_extractor.get());
  }
  return result;
}

const Snapshot* DBImpl::GetSnapshot() {
  // The snapshot list and the last sequence number are both protected by
  // the DB mutex.
  MutexLock guard(&mutex_);
  return snapshots_.New(versions_->LastSequence());
}

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
3658
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
J
jorlow@chromium.org 已提交
3659 3660 3661 3662 3663 3664 3665
}

// Convenience methods
// Convenience wrapper: forwards to the default DB::Put, which packages the
// key/value pair into a WriteBatch and routes it through Write().
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);
}

3666 3667 3668 3669 3670 3671 3672 3673 3674
Status DBImpl::Merge(const WriteOptions& o, const Slice& key,
                     const Slice& val) {
  // Merge requires a user-supplied merge operator; fail fast without one.
  if (!options_.merge_operator) {
    return Status::NotSupported("Provide a merge_operator when opening DB");
  }
  return DB::Merge(o, key, val);
}

J
jorlow@chromium.org 已提交
3675 3676 3677 3678
// Convenience wrapper: forwards to the default DB::Delete, which packages
// the deletion into a WriteBatch and routes it through Write().
Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
}

3679
// Apply my_batch to the WAL and memtable (a nullptr batch only makes room /
// triggers a flush or compaction).  Implements group commit: writers queue
// on writers_; the writer at the front commits a whole group of batches with
// a single WAL append, then signals the absorbed writers as done.
//
// FIX: the WAL_FILE_SYNC_MICROS stopwatch was previously an *unnamed
// temporary* (`StopWatch(env_, ..., WAL_FILE_SYNC_MICROS);`) destroyed at
// the end of its own statement, so it never timed the Fsync()/Sync() on the
// following line and the histogram always recorded ~0.  It is now a named
// local whose lifetime spans the sync call.
Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
  StopWatchNano pre_post_process_timer(env_, false);
  StartPerfTimer(&pre_post_process_timer);
  Writer w(&mutex_);
  w.batch = my_batch;
  w.sync = options.sync;
  w.disableWAL = options.disableWAL;
  w.done = false;

  StopWatch sw(env_, options_.statistics.get(), DB_WRITE, false);
  mutex_.Lock();
  writers_.push_back(&w);
  // Wait until either another writer commits our batch for us (w.done) or
  // we reach the front of the queue and become the group leader.
  while (!w.done && &w != writers_.front()) {
    w.cv.Wait();
  }

  if (!options.disableWAL) {
    RecordTick(options_.statistics.get(), WRITE_WITH_WAL, 1);
  }

  if (w.done) {
    // A previous group leader already committed this batch.
    mutex_.Unlock();
    RecordTick(options_.statistics.get(), WRITE_DONE_BY_OTHER, 1);
    return w.status;
  } else {
    RecordTick(options_.statistics.get(), WRITE_DONE_BY_SELF, 1);
  }

  // May temporarily unlock and wait.
  SuperVersion* superversion_to_free = nullptr;
  log::Writer* old_log = nullptr;
  Status status = MakeRoomForWrite(my_batch == nullptr,
                                   &superversion_to_free,
                                   &old_log);
  uint64_t last_sequence = versions_->LastSequence();
  Writer* last_writer = &w;
  if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
    autovector<WriteBatch*> write_batch_group;
    BuildBatchGroup(&last_writer, &write_batch_group);

    // Add to log and apply to memtable.  We can release the lock
    // during this phase since &w is currently responsible for logging
    // and protects against concurrent loggers and concurrent writes
    // into mem_.
    {
      mutex_.Unlock();
      // Single-batch groups are committed in place; multi-batch groups are
      // concatenated into the reusable tmp_batch_.
      WriteBatch* updates = nullptr;
      if (write_batch_group.size() == 1) {
        updates = write_batch_group[0];
      } else {
        updates = &tmp_batch_;
        for (size_t i = 0; i < write_batch_group.size(); ++i) {
          WriteBatchInternal::Append(updates, write_batch_group[i]);
        }
      }

      const SequenceNumber current_sequence = last_sequence + 1;
      WriteBatchInternal::SetSequence(updates, current_sequence);
      int my_batch_count = WriteBatchInternal::Count(updates);
      last_sequence += my_batch_count;
      // Record statistics
      RecordTick(options_.statistics.get(),
                 NUMBER_KEYS_WRITTEN, my_batch_count);
      RecordTick(options_.statistics.get(),
                 BYTES_WRITTEN,
                 WriteBatchInternal::ByteSize(updates));
      if (options.disableWAL) {
        // Unlogged data exists only in the memtable; flush it on shutdown.
        flush_on_destroy_ = true;
      }
      BumpPerfTime(&perf_context.write_pre_and_post_process_time,
                   &pre_post_process_timer);

      if (!options.disableWAL) {
        StopWatchNano timer(env_);
        StartPerfTimer(&timer);
        Slice log_entry = WriteBatchInternal::Contents(updates);
        status = log_->AddRecord(log_entry);
        // NOTE(review): WAL_FILE_SYNCED is ticked on every AddRecord, even
        // when options.sync is false — confirm this is the intended meaning.
        RecordTick(options_.statistics.get(), WAL_FILE_SYNCED, 1);
        RecordTick(options_.statistics.get(), WAL_FILE_BYTES, log_entry.size());
        if (status.ok() && options.sync) {
          // Named stopwatch so the histogram actually covers the sync
          // (an unnamed temporary would be destroyed immediately).
          StopWatch sync_sw(env_, options_.statistics.get(),
                            WAL_FILE_SYNC_MICROS);
          if (options_.use_fsync) {
            status = log_->file()->Fsync();
          } else {
            status = log_->file()->Sync();
          }
        }
        BumpPerfTime(&perf_context.write_wal_time, &timer);
      }
      if (status.ok()) {
        StopWatchNano write_memtable_timer(env_, false);
        StartPerfTimer(&write_memtable_timer);
        status = WriteBatchInternal::InsertInto(updates, mem_, &options_, this,
                                                options_.filter_deletes);
        BumpPerfTime(&perf_context.write_memtable_time, &write_memtable_timer);
        if (!status.ok()) {
          // Panic for in-memory corruptions
          // Note that existing logic was not sound. Any partial failure writing
          // into the memtable would result in a state that some write ops might
          // have succeeded in memtable but Status reports error for all writes.
          throw std::runtime_error("In memory WriteBatch corruption!");
        }
        SetTickerCount(options_.statistics.get(), SEQUENCE_NUMBER,
                       last_sequence);
      }
      StartPerfTimer(&pre_post_process_timer);
      if (updates == &tmp_batch_) tmp_batch_.Clear();
      mutex_.Lock();
      if (status.ok()) {
        versions_->SetLastSequence(last_sequence);
      }
    }
  }
  if (options_.paranoid_checks && !status.ok() && bg_error_.ok()) {
    bg_error_ = status; // stop compaction & fail any further writes
  }

  // Mark every absorbed writer done (with this group's status) and wake it.
  while (true) {
    Writer* ready = writers_.front();
    writers_.pop_front();
    if (ready != &w) {
      ready->status = status;
      ready->done = true;
      ready->cv.Signal();
    }
    if (ready == last_writer) break;
  }

  // Notify new head of write queue
  if (!writers_.empty()) {
    writers_.front()->cv.Signal();
  }
  mutex_.Unlock();
  // Deletions deferred by MakeRoomForWrite, done outside the mutex.
  delete old_log;
  delete superversion_to_free;
  BumpPerfTime(&perf_context.write_pre_and_post_process_time,
               &pre_post_process_timer);
  return status;
}

3820
// Collect the batches of writers queued behind the front writer into
// write_batch_group so they can be committed with a single WAL append
// (group commit).  On return, *last_writer points at the last writer whose
// request was absorbed; Write() uses it to know which writers to signal.
// REQUIRES: Writer list must be non-empty
// REQUIRES: First writer must have a non-nullptr batch
void DBImpl::BuildBatchGroup(Writer** last_writer,
                             autovector<WriteBatch*>* write_batch_group) {
  assert(!writers_.empty());
  Writer* first = writers_.front();
  assert(first->batch != nullptr);

  size_t size = WriteBatchInternal::ByteSize(first->batch);
  write_batch_group->push_back(first->batch);

  // Allow the group to grow up to a maximum size, but if the
  // original write is small, limit the growth so we do not slow
  // down the small write too much.
  size_t max_size = 1 << 20;
  if (size <= (128<<10)) {
    max_size = size + (128<<10);
  }

  *last_writer = first;
  std::deque<Writer*>::iterator iter = writers_.begin();
  ++iter;  // Advance past "first"
  for (; iter != writers_.end(); ++iter) {
    Writer* w = *iter;
    if (w->sync && !first->sync) {
      // Do not include a sync write into a batch handled by a non-sync write.
      break;
    }

    if (!w->disableWAL && first->disableWAL) {
      // Do not include a write that needs WAL into a batch that has
      // WAL disabled.
      break;
    }

    if (w->batch != nullptr) {
      size += WriteBatchInternal::ByteSize(w->batch);
      if (size > max_size) {
        // Do not make batch too big
        // (break happens before *last_writer is advanced, so this writer
        // is NOT part of the group).
        break;
      }

      write_batch_group->push_back(w->batch);
    }
    // Writers with a nullptr batch are still absorbed into the group so
    // they get marked done together with it.
    *last_writer = w;
  }
}

3868 3869 3870
// This function computes the amount of time in microseconds by which a write
// should be delayed based on the number of level-0 files according to the
// following formula:
J
Jim Paton 已提交
3871 3872 3873 3874
// if n < bottom, return 0;
// if n >= top, return 1000;
// otherwise, let r = (n - bottom) /
//                    (top - bottom)
3875 3876 3877 3878
//  and return r^2 * 1000.
// The goal of this formula is to gradually increase the rate at which writes
// are slowed. We also tried linear delay (r * 1000), but it seemed to do
// slightly worse. There is no other particular reason for choosing quadratic.
M
Mark Callaghan 已提交
3879
uint64_t DBImpl::SlowdownAmount(int n, double bottom, double top) {
3880
  uint64_t delay;
J
Jim Paton 已提交
3881
  if (n >= top) {
3882 3883
    delay = 1000;
  }
J
Jim Paton 已提交
3884
  else if (n < bottom) {
3885 3886 3887 3888
    delay = 0;
  }
  else {
    // If we are here, we know that:
J
Jim Paton 已提交
3889
    //   level0_start_slowdown <= n < level0_slowdown
3890
    // since the previous two conditions are false.
M
Mark Callaghan 已提交
3891 3892
    double how_much =
      (double) (n - bottom) /
J
Jim Paton 已提交
3893
              (top - bottom);
M
Mark Callaghan 已提交
3894
    delay = std::max(how_much * how_much * 1000, 100.0);
3895 3896 3897 3898 3899
  }
  assert(delay <= 1000);
  return delay;
}

3900
// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
//
// Loops until there is room in the current memtable for the write (or an
// error occurs).  Each iteration takes exactly one action: yield a stored
// background error, apply a slowdown/stall (L0 pressure, memtable flush
// backlog, hard/soft rate limits), or switch to a fresh memtable + WAL.
// On a WAL switch, the retired log is handed back through *old_log and the
// superseded SuperVersion through *superversion_to_free so the caller can
// delete both outside the mutex.
Status DBImpl::MakeRoomForWrite(bool force,
                                SuperVersion** superversion_to_free,
                                log::Writer** old_log) {
  mutex_.AssertHeld();
  assert(!writers_.empty());
  bool allow_delay = !force;
  bool allow_hard_rate_limit_delay = !force;
  bool allow_soft_rate_limit_delay = !force;
  uint64_t rate_limit_delay_millis = 0;
  Status s;
  double score;
  *superversion_to_free = nullptr;

  while (true) {
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
    } else if (allow_delay && versions_->NeedSlowdownForNumLevel0Files()) {
      // We are getting close to hitting a hard limit on the number of
      // L0 files.  Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
      // individual write by 0-1ms to reduce latency variance.  Also,
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
      uint64_t slowdown =
          SlowdownAmount(versions_->current()->NumLevelFiles(0),
                         options_.level0_slowdown_writes_trigger,
                         options_.level0_stop_writes_trigger);
      mutex_.Unlock();
      uint64_t delayed;
      {
        StopWatch sw(env_, options_.statistics.get(), STALL_L0_SLOWDOWN_COUNT);
        env_->SleepForMicroseconds(slowdown);
        delayed = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics.get(), STALL_L0_SLOWDOWN_MICROS, delayed);
      internal_stats_.RecordWriteStall(InternalStats::LEVEL0_SLOWDOWN, delayed);
      allow_delay = false;  // Do not delay a single write more than once
      mutex_.Lock();
      delayed_writes_++;
    } else if (!force && !mem_->ShouldFlush()) {
      // There is room in current memtable
      if (allow_delay) {
        DelayLoggingAndReset();
      }
      break;
    } else if (imm_.size() == options_.max_write_buffer_number - 1) {
      // We have filled up the current memtable, but the previous
      // ones are still being flushed, so we wait.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for memtable flush...\n");
      MaybeScheduleFlushOrCompaction();
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics.get(),
          STALL_MEMTABLE_COMPACTION_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics.get(),
                 STALL_MEMTABLE_COMPACTION_MICROS, stall);
      internal_stats_.RecordWriteStall(InternalStats::MEMTABLE_COMPACTION,
                                       stall);
    } else if (versions_->current()->NumLevelFiles(0) >=
               options_.level0_stop_writes_trigger) {
      // There are too many level-0 files.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for fewer level0 files...\n");
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics.get(),
                     STALL_L0_NUM_FILES_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics.get(), STALL_L0_NUM_FILES_MICROS, stall);
      internal_stats_.RecordWriteStall(InternalStats::LEVEL0_NUM_FILES, stall);
    } else if (allow_hard_rate_limit_delay && options_.hard_rate_limit > 1.0 &&
               (score = versions_->current()->MaxCompactionScore()) >
                   options_.hard_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      int max_level = versions_->current()->MaxCompactionScoreLevel();
      mutex_.Unlock();
      uint64_t delayed;
      {
        StopWatch sw(env_, options_.statistics.get(),
                     HARD_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(1000);
        delayed = sw.ElapsedMicros();
      }
      internal_stats_.RecordLevelNSlowdown(max_level, delayed);
      // Make sure the following value doesn't round to zero.
      uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
      rate_limit_delay_millis += rate_limit;
      RecordTick(options_.statistics.get(),
                 RATE_LIMIT_DELAY_MILLIS, rate_limit);
      if (options_.rate_limit_delay_max_milliseconds > 0 &&
          rate_limit_delay_millis >=
          (unsigned)options_.rate_limit_delay_max_milliseconds) {
        allow_hard_rate_limit_delay = false;
      }
      mutex_.Lock();
    } else if (allow_soft_rate_limit_delay && options_.soft_rate_limit > 0.0 &&
               (score = versions_->current()->MaxCompactionScore()) >
                   options_.soft_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      // TODO: add statistics
      mutex_.Unlock();
      {
        StopWatch sw(env_, options_.statistics.get(),
                     SOFT_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(SlowdownAmount(
          score,
          options_.soft_rate_limit,
          options_.hard_rate_limit)
        );
        // NOTE(review): ElapsedMicros() (microseconds) is accumulated into a
        // counter named ..._millis — confirm the intended units.
        rate_limit_delay_millis += sw.ElapsedMicros();
      }
      allow_soft_rate_limit_delay = false;
      mutex_.Lock();

    } else {
      unique_ptr<WritableFile> lfile;
      log::Writer* new_log = nullptr;
      MemTable* new_mem = nullptr;

      // Attempt to switch to a new memtable and trigger flush of old.
      // Do this without holding the dbmutex lock.
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      SuperVersion* new_superversion = nullptr;
      mutex_.Unlock();
      {
        DelayLoggingAndReset();
        s = env_->NewWritableFile(LogFileName(options_.wal_dir, new_log_number),
                                  &lfile,
                                  env_->OptimizeForLogWrite(storage_options_));
        if (s.ok()) {
          // Our final size should be less than write_buffer_size
          // (compression, etc) but err on the side of caution.
          lfile->SetPreallocationBlockSize(1.1 * options_.write_buffer_size);
          new_log = new log::Writer(std::move(lfile));
          new_mem = new MemTable(internal_comparator_, options_);
          new_superversion = new SuperVersion();
        }
        Log(options_.info_log,
            "New memtable created with log file: #%lu\n",
            (unsigned long)new_log_number);
      }
      mutex_.Lock();
      if (!s.ok()) {
        // Avoid chewing through file number space in a tight loop.
        versions_->ReuseFileNumber(new_log_number);
        assert (!new_mem);
        assert(new_log == nullptr);
        break;
      }
      // Retire the old WAL and memtable; the caller deletes *old_log and
      // *superversion_to_free outside the mutex.
      logfile_number_ = new_log_number;
      assert(new_log != nullptr);
      *old_log = log_.release();
      log_.reset(new_log);
      mem_->SetNextLogNumber(logfile_number_);
      imm_.Add(mem_);
      if (force) {
        imm_.FlushRequested();
      }
      mem_ = new_mem;
      mem_->Ref();
      mem_->SetLogNumber(logfile_number_);
      force = false;   // Do not force another compaction if have room
      MaybeScheduleFlushOrCompaction();
      *superversion_to_free = InstallSuperVersion(new_superversion);
    }
  }
  return s;
}

4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096
Status DBImpl::GetPropertiesOfAllTables(TablePropertiesCollection* props) {
  // Pin the current version under the mutex, then read its table
  // properties without holding the lock.
  Version* current;
  {
    MutexLock guard(&mutex_);
    current = versions_->current();
    current->Ref();
  }

  Status s = current->GetPropertiesOfAllTables(props);

  // Drop our pin under the mutex again.
  {
    MutexLock guard(&mutex_);
    current->Unref();
  }

  return s;
}

I
Igor Canadi 已提交
4097 4098 4099 4100
// Returns the path/name this database instance was opened with.
const std::string& DBImpl::GetName() const {
  return dbname_;
}

4101 4102 4103 4104
// Returns the Env this database instance uses for all file/system calls.
Env* DBImpl::GetEnv() const {
  return env_;
}

I
Igor Canadi 已提交
4105 4106 4107 4108
// Returns the options this database instance was opened with.
const Options& DBImpl::GetOptions() const {
  return options_;
}

4109 4110
bool DBImpl::GetProperty(const Slice& property, std::string* value) {
  value->clear();
4111
  DBPropertyType property_type = GetPropertyType(property);
J
jorlow@chromium.org 已提交
4112
  MutexLock l(&mutex_);
4113
  return internal_stats_.GetProperty(property_type, property, value, this);
J
jorlow@chromium.org 已提交
4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141
}

void DBImpl::GetApproximateSizes(
    const Range* range, int n,
    uint64_t* sizes) {
  // TODO(opt): better implementation
  Version* v;
  {
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();
  }

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
    sizes[i] = (limit >= start ? limit - start : 0);
  }

  {
    MutexLock l(&mutex_);
    v->Unref();
  }
}

4142 4143 4144 4145 4146 4147 4148
inline void DBImpl::DelayLoggingAndReset() {
  // Nothing to report if no writes have been delayed since the last reset.
  if (delayed_writes_ <= 0) {
    return;
  }
  Log(options_.info_log, "delayed %d write...\n", delayed_writes_ );
  delayed_writes_ = 0;
}

4149 4150 4151
// Externally requested deletion of a single file: either an archived WAL
// file or an SST file.  SST deletion is only permitted when the file lives
// in the last non-empty level, so deletion tombstones it shadows cannot be
// resurrected by deeper levels.
Status DBImpl::DeleteFile(std::string name) {
  uint64_t number;
  FileType type;
  WalFileType log_type;
  if (!ParseFileName(name, &number, &type, &log_type) ||
      (type != kTableFile && type != kLogFile)) {
    Log(options_.info_log, "DeleteFile %s failed.\n", name.c_str());
    return Status::InvalidArgument("Invalid file name");
  }

  Status status;
  if (type == kLogFile) {
    // Only allow deleting archived log files
    if (log_type != kArchivedLogFile) {
      Log(options_.info_log, "DeleteFile %s failed - not archived log.\n",
          name.c_str());
      return Status::NotSupported("Delete only supported for archived logs");
    }
    status = env_->DeleteFile(options_.wal_dir + "/" + name.c_str());
    if (!status.ok()) {
      Log(options_.info_log, "DeleteFile %s failed -- %s.\n",
          name.c_str(), status.ToString().c_str());
    }
    return status;
  }

  int level;
  FileMetaData* metadata;
  int maxlevel = NumberLevels();
  VersionEdit edit;
  DeletionState deletion_state(true);
  {
    MutexLock l(&mutex_);
    status = versions_->GetMetadataForFile(number, &level, &metadata);
    if (!status.ok()) {
      Log(options_.info_log, "DeleteFile %s failed. File not found\n",
                             name.c_str());
      return Status::InvalidArgument("File not found");
    }
    // Level-0 files cannot be deleted this way (and level must be valid).
    assert((level > 0) && (level < maxlevel));

    // If the file is being compacted no need to delete.
    if (metadata->being_compacted) {
      Log(options_.info_log,
          "DeleteFile %s Skipped. File about to be compacted\n", name.c_str());
      return Status::OK();
    }

    // Only the files in the last level can be deleted externally.
    // This is to make sure that any deletion tombstones are not
    // lost. Check that the level passed is the last level.
    for (int i = level + 1; i < maxlevel; i++) {
      if (versions_->current()->NumLevelFiles(i) != 0) {
        Log(options_.info_log,
            "DeleteFile %s FAILED. File not in last level\n", name.c_str());
        return Status::InvalidArgument("File not in last level");
      }
    }
    // Record the deletion in the manifest, publish the new version, and
    // gather the now-obsolete files while still holding the mutex.
    edit.DeleteFile(level, number);
    status = versions_->LogAndApply(&edit, &mutex_, db_directory_.get());
    if (status.ok()) {
      InstallSuperVersion(deletion_state);
    }
    FindObsoleteFiles(deletion_state, false);
  } // lock released here
  LogFlush(options_.info_log);
  // remove files outside the db-lock
  if (deletion_state.HaveSomethingToDelete()) {
    PurgeObsoleteFiles(deletion_state);
  }
  {
    MutexLock l(&mutex_);
    // schedule flush if file deletion means we freed the space for flushes to
    // continue
    MaybeScheduleFlushOrCompaction();
  }
  return status;
}

I
Igor Canadi 已提交
4228
// Fills *metadata with information about every live SST file.
// Holds the DB mutex for the duration of the call.
void DBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
  MutexLock l(&mutex_);
  return versions_->GetLiveFilesMetaData(metadata);
}

I
Igor Canadi 已提交
4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259
Status DBImpl::CheckConsistency() {
  mutex_.AssertHeld();
  std::vector<LiveFileMetaData> live_files;
  versions_->GetLiveFilesMetaData(&live_files);

  // Verify every live SST file exists on disk with the size recorded in
  // the manifest; accumulate all discrepancies before reporting.
  std::string corruption_messages;
  for (const auto& file : live_files) {
    const std::string file_path = dbname_ + file.name;
    uint64_t actual_size = 0;
    const Status s = env_->GetFileSize(file_path, &actual_size);
    if (!s.ok()) {
      corruption_messages +=
          "Can't access " + file.name + ": " + s.ToString() + "\n";
    } else if (actual_size != file.size) {
      corruption_messages += "Sst file size mismatch: " + file.name +
                             ". Size recorded in manifest " +
                             std::to_string(file.size) + ", actual size " +
                             std::to_string(actual_size) + "\n";
    }
  }
  return corruption_messages.empty() ? Status::OK()
                                     : Status::Corruption(corruption_messages);
}

4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274
void DBImpl::TEST_GetFilesMetaData(
    std::vector<std::vector<FileMetaData>>* metadata) {
  MutexLock l(&mutex_);
  metadata->resize(NumberLevels());
  for (int level = 0; level < NumberLevels(); level++) {
    const std::vector<FileMetaData*>& files =
      versions_->current()->files_[level];

    (*metadata)[level].clear();
    for (const auto& f : files) {
      (*metadata)[level].push_back(*f);
    }
  }
}

4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301
// Reads the database's unique identity string from the IDENTITY file,
// stripping a single trailing newline if present.
//
// FIX: the read buffer was previously a variable-length array
// (char buffer[file_size]), which is not standard C++ and allocates an
// unbounded amount of stack for a corrupt or oversized IDENTITY file.
// It is now heap-allocated.
Status DBImpl::GetDbIdentity(std::string& identity) {
  std::string idfilename = IdentityFileName(dbname_);
  unique_ptr<SequentialFile> idfile;
  const EnvOptions soptions;
  Status s = env_->NewSequentialFile(idfilename, &idfile, soptions);
  if (!s.ok()) {
    return s;
  }
  uint64_t file_size;
  s = env_->GetFileSize(idfilename, &file_size);
  if (!s.ok()) {
    return s;
  }
  // Heap buffer instead of a VLA; new char[0] is well-defined for an
  // empty IDENTITY file.
  std::unique_ptr<char[]> buffer(new char[file_size]);
  Slice id;
  s = idfile->Read(static_cast<size_t>(file_size), &id, buffer.get());
  if (!s.ok()) {
    return s;
  }
  identity.assign(id.ToString());
  // If last character is '\n' remove it from identity
  if (identity.size() > 0 && identity.back() == '\n') {
    identity.pop_back();
  }
  return s;
}

J
jorlow@chromium.org 已提交
4302 4303 4304
// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
4305 4306 4307 4308
  // Pre-allocate size of write batch conservatively.
  // 8 bytes are taken by header, 4 bytes for count, 1 byte for type,
  // and we allocate 11 extra bytes for key length, as well as value length.
  WriteBatch batch(key.size() + value.size() + 24);
J
jorlow@chromium.org 已提交
4309 4310 4311 4312 4313 4314 4315 4316 4317 4318
  batch.Put(key, value);
  return Write(opt, &batch);
}

// Convenience wrapper: a single Delete is just a one-entry write batch.
Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  WriteBatch batch;
  batch.Delete(key);
  Status result = Write(opt, &batch);
  return result;
}

4319 4320 4321 4322 4323 4324 4325
Status DB::Merge(const WriteOptions& opt, const Slice& key,
                 const Slice& value) {
  WriteBatch batch;
  batch.Merge(key, value);
  return Write(opt, &batch);
}

J
jorlow@chromium.org 已提交
4326 4327
DB::~DB() { }

J
Jim Paton 已提交
4328
Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
4329
  *dbptr = nullptr;
J
jorlow@chromium.org 已提交
4330

4331
  if (options.block_cache != nullptr && options.no_block_cache) {
4332
    return Status::InvalidArgument(
4333
        "no_block_cache is true while block_cache is not nullptr");
4334
  }
4335

J
jorlow@chromium.org 已提交
4336
  DBImpl* impl = new DBImpl(options, dbname);
4337 4338 4339 4340 4341 4342 4343
  Status s = impl->env_->CreateDirIfMissing(impl->options_.wal_dir);
  if (!s.ok()) {
    delete impl;
    return s;
  }

  s = impl->CreateArchivalDirectory();
4344 4345 4346 4347
  if (!s.ok()) {
    delete impl;
    return s;
  }
J
jorlow@chromium.org 已提交
4348
  impl->mutex_.Lock();
I
Igor Canadi 已提交
4349
  s = impl->Recover(); // Handles create_if_missing, error_if_exists
J
jorlow@chromium.org 已提交
4350
  if (s.ok()) {
4351
    uint64_t new_log_number = impl->versions_->NewFileNumber();
4352
    unique_ptr<WritableFile> lfile;
I
Igor Canadi 已提交
4353
    EnvOptions soptions(options);
4354
    s = impl->options_.env->NewWritableFile(
I
Igor Canadi 已提交
4355
        LogFileName(impl->options_.wal_dir, new_log_number), &lfile,
I
Igor Canadi 已提交
4356
        impl->options_.env->OptimizeForLogWrite(soptions));
J
jorlow@chromium.org 已提交
4357
    if (s.ok()) {
4358
      lfile->SetPreallocationBlockSize(1.1 * impl->options_.write_buffer_size);
I
Igor Canadi 已提交
4359
      VersionEdit edit;
4360
      edit.SetLogNumber(new_log_number);
4361
      impl->logfile_number_ = new_log_number;
4362
      impl->log_.reset(new log::Writer(std::move(lfile)));
4363 4364
      s = impl->versions_->LogAndApply(&edit, &impl->mutex_,
                                       impl->db_directory_.get());
J
jorlow@chromium.org 已提交
4365 4366
    }
    if (s.ok()) {
I
Igor Canadi 已提交
4367
      delete impl->InstallSuperVersion(new DBImpl::SuperVersion());
4368
      impl->mem_->SetLogNumber(impl->logfile_number_);
J
jorlow@chromium.org 已提交
4369
      impl->DeleteObsoleteFiles();
4370
      impl->MaybeScheduleFlushOrCompaction();
4371
      impl->MaybeScheduleLogDBDeployStats();
4372
      s = impl->db_directory_->Fsync();
J
jorlow@chromium.org 已提交
4373 4374
    }
  }
4375

4376 4377
  if (s.ok() && impl->options_.compaction_style == kCompactionStyleUniversal) {
    Version* current = impl->versions_->current();
4378
    for (int i = 1; i < impl->NumberLevels(); i++) {
4379
      int num_files = current->NumLevelFiles(i);
4380 4381 4382 4383 4384 4385 4386 4387
      if (num_files > 0) {
        s = Status::InvalidArgument("Not all files are at level 0. Cannot "
          "open with universal compaction style.");
        break;
      }
    }
  }

4388 4389
  impl->mutex_.Unlock();

J
jorlow@chromium.org 已提交
4390
  if (s.ok()) {
4391
    impl->opened_successfully_ = true;
J
jorlow@chromium.org 已提交
4392 4393 4394 4395 4396 4397 4398
    *dbptr = impl;
  } else {
    delete impl;
  }
  return s;
}

4399 4400 4401
Snapshot::~Snapshot() {
}

J
jorlow@chromium.org 已提交
4402
Status DestroyDB(const std::string& dbname, const Options& options) {
4403 4404 4405 4406 4407
  const InternalKeyComparator comparator(options.comparator);
  const InternalFilterPolicy filter_policy(options.filter_policy);
  const Options& soptions(SanitizeOptions(
    dbname, &comparator, &filter_policy, options));
  Env* env = soptions.env;
J
jorlow@chromium.org 已提交
4408
  std::vector<std::string> filenames;
4409 4410
  std::vector<std::string> archiveFiles;

4411
  std::string archivedir = ArchivalDirectory(dbname);
J
jorlow@chromium.org 已提交
4412 4413
  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);
4414 4415 4416 4417 4418 4419 4420

  if (dbname != soptions.wal_dir) {
    std::vector<std::string> logfilenames;
    env->GetChildren(soptions.wal_dir, &logfilenames);
    filenames.insert(filenames.end(), logfilenames.begin(), logfilenames.end());
    archivedir = ArchivalDirectory(soptions.wal_dir);
  }
4421

J
jorlow@chromium.org 已提交
4422 4423 4424 4425 4426
  if (filenames.empty()) {
    return Status::OK();
  }

  FileLock* lock;
4427 4428
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
J
jorlow@chromium.org 已提交
4429 4430 4431
  if (result.ok()) {
    uint64_t number;
    FileType type;
D
dgrogan@chromium.org 已提交
4432
    for (size_t i = 0; i < filenames.size(); i++) {
4433
      if (ParseFileName(filenames[i], &number, &type) &&
4434
          type != kDBLockFile) {  // Lock file will be deleted at end
K
Kosie van der Merwe 已提交
4435 4436 4437
        Status del;
        if (type == kMetaDatabase) {
          del = DestroyDB(dbname + "/" + filenames[i], options);
4438 4439
        } else if (type == kLogFile) {
          del = env->DeleteFile(soptions.wal_dir + "/" + filenames[i]);
K
Kosie van der Merwe 已提交
4440 4441 4442
        } else {
          del = env->DeleteFile(dbname + "/" + filenames[i]);
        }
J
jorlow@chromium.org 已提交
4443 4444 4445 4446 4447
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
4448

4449
    env->GetChildren(archivedir, &archiveFiles);
4450 4451
    // Delete archival files.
    for (size_t i = 0; i < archiveFiles.size(); ++i) {
4452 4453
      if (ParseFileName(archiveFiles[i], &number, &type) &&
          type == kLogFile) {
4454
        Status del = env->DeleteFile(archivedir + "/" + archiveFiles[i]);
4455 4456 4457 4458 4459
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
4460
    // ignore case where no archival directory is present.
4461
    env->DeleteDir(archivedir);
4462

J
jorlow@chromium.org 已提交
4463
    env->UnlockFile(lock);  // Ignore error since state is already gone
4464
    env->DeleteFile(lockname);
J
jorlow@chromium.org 已提交
4465
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
4466
    env->DeleteDir(soptions.wal_dir);
J
jorlow@chromium.org 已提交
4467 4468 4469 4470
  }
  return result;
}

4471 4472
//
// A global method that can dump out the build version
K
kailiu 已提交
4473
void DumpLeveldbBuildVersion(Logger * log) {
4474
  Log(log, "Git sha %s", rocksdb_build_git_sha);
4475
  Log(log, "Compile time %s %s",
4476
      rocksdb_build_compile_time, rocksdb_build_compile_date);
4477 4478
}

}  // namespace rocksdb