// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

#include <algorithm>
#include <climits>
#include <cstdio>
#include <set>
#include <string>
#include <stdint.h>
#include <stdexcept>
#include <vector>
#include <unordered_set>

#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/memtablelist.h"
#include "db/merge_helper.h"
#include "db/prefix_filter_iterator.h"
#include "db/table_cache.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "db/transaction_log_impl.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
#include "rocksdb/table_builder.h"
#include "port/port.h"
#include "table/block.h"
#include "table/merger.h"
#include "table/table.h"
#include "table/two_level_iterator.h"
#include "util/auto_roll_logger.h"
#include "util/build_version.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"

namespace leveldb {

void dumpLeveldbBuildVersion(Logger * log);

// Information kept for every waiting writer
struct DBImpl::Writer {
  Status status;
  WriteBatch* batch;
  bool sync;
  bool disableWAL;
  bool done;
  port::CondVar cv;

  explicit Writer(port::Mutex* mu) : cv(mu) { }
};

struct DBImpl::CompactionState {
  Compaction* const compaction;

  // If there were two snapshots with seq numbers s1 and
  // s2 and s1 < s2, and if we find two instances of a key k1 that lie
  // entirely within s1 and s2, then the earlier version of k1 can be safely
  // deleted because that version is not visible in any snapshot.
  std::vector<SequenceNumber> existing_snapshots;
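  // Illustrative example: with snapshots taken at sequence numbers 5 and 9,
  // and two versions of a key written at sequence numbers 6 and 8, no
  // snapshot can observe the version at 6 (snapshot 5 predates it and
  // snapshot 9 sees the one at 8), so the earlier version may be dropped.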

  // Files produced by compaction
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
    SequenceNumber smallest_seqno, largest_seqno;
  };
  std::vector<Output> outputs;
  std::list<uint64_t> allocated_file_numbers;

  // State kept for output being generated
  unique_ptr<WritableFile> outfile;
  unique_ptr<TableBuilder> builder;

  uint64_t total_bytes;

  Output* current_output() { return &outputs[outputs.size()-1]; }

  explicit CompactionState(Compaction* c)
      : compaction(c),
        total_bytes(0) {
  }
};

struct DBImpl::DeletionState {

  // the list of all live files that cannot be deleted
  std::vector<uint64_t> live;

  // a list of all files that exist in the db directory
  std::vector<std::string> allfiles;

  // the current filenumber, lognumber and prevlognumber
  // that corresponds to the set of files in 'live'.
  uint64_t filenumber, lognumber, prevlognumber;

  // the list of all files to be evicted from the table cache
  std::vector<uint64_t> files_to_evict;
};

// Fix user-supplied options to be reasonable
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}
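// Illustrative usage (as in SanitizeOptions below):
//   ClipToRange(&result.max_open_files, 20, 1000000);
// keeps the option within the inclusive range [20, 1000000].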
Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src) {
  Options result = src;
  result.comparator = icmp;
  result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
  ClipToRange(&result.max_open_files,            20,     1000000);
  ClipToRange(&result.write_buffer_size,         ((size_t)64)<<10,
                                                 ((size_t)64)<<30);
  ClipToRange(&result.block_size,                1<<10,  4<<20);

  // if user sets arena_block_size, we trust user to use this value. Otherwise,
  // calculate a proper value from write_buffer_size.
  if (result.arena_block_size <= 0) {
    result.arena_block_size = result.write_buffer_size / 10;
  }

  result.min_write_buffer_number_to_merge = std::min(
    result.min_write_buffer_number_to_merge, result.max_write_buffer_number-1);
  if (result.info_log == nullptr) {
    Status s = CreateLoggerFromOptions(dbname, result.db_log_dir, src.env,
                                       result, &result.info_log);
    if (!s.ok()) {
      // No place suitable for logging
      result.info_log = nullptr;
    }
  }
  if (result.block_cache == nullptr && !result.no_block_cache) {
    result.block_cache = NewLRUCache(8 << 20);
  }
  result.compression_per_level = src.compression_per_level;
  if (result.block_size_deviation < 0 || result.block_size_deviation > 100) {
    result.block_size_deviation = 0;
  }
  if (result.max_mem_compaction_level >= result.num_levels) {
    result.max_mem_compaction_level = result.num_levels - 1;
  }
  if (result.soft_rate_limit > result.hard_rate_limit) {
    result.soft_rate_limit = result.hard_rate_limit;
  }
  if (result.compaction_filter &&
      result.compaction_filter_factory->CreateCompactionFilter().get()) {
    Log(result.info_log, "Both filter and factory specified. Using filter");
  }
  if (result.prefix_extractor) {
    // If a prefix extractor has been supplied and a PrefixHashRepFactory is
    // being used, make sure that the latter uses the former as its transform
    // function.
    auto factory = dynamic_cast<PrefixHashRepFactory*>(
      result.memtable_factory.get());
    if (factory &&
        factory->GetTransform() != result.prefix_extractor) {
      Log(result.info_log, "A prefix hash representation factory was supplied "
          "whose prefix extractor does not match options.prefix_extractor. "
          "Falling back to skip list representation factory");
      result.memtable_factory = std::make_shared<SkipListFactory>();
    } else if (factory) {
      Log(result.info_log, "Prefix hash memtable rep is in use.");
    }
  }
  return result;
}

DBImpl::DBImpl(const Options& options, const std::string& dbname)
    : env_(options.env),
      dbname_(dbname),
      internal_comparator_(options.comparator),
      options_(SanitizeOptions(
          dbname, &internal_comparator_, &internal_filter_policy_, options)),
      internal_filter_policy_(options.filter_policy),
      owns_info_log_(options_.info_log != options.info_log),
      db_lock_(nullptr),
      mutex_(options.use_adaptive_mutex),
      shutting_down_(nullptr),
      bg_cv_(&mutex_),
      mem_rep_factory_(options_.memtable_factory),
      mem_(new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_)),
      logfile_number_(0),
      tmp_batch_(),
      bg_compaction_scheduled_(0),
      bg_flush_scheduled_(0),
      bg_logstats_scheduled_(false),
      manual_compaction_(nullptr),
      logger_(nullptr),
      disable_delete_obsolete_files_(false),
      delete_obsolete_files_last_run_(0),
      purge_wal_files_last_run_(0),
      last_stats_dump_time_microsec_(0),
      stall_level0_slowdown_(0),
      stall_memtable_compaction_(0),
      stall_level0_num_files_(0),
      stall_level0_slowdown_count_(0),
      stall_memtable_compaction_count_(0),
      stall_level0_num_files_count_(0),
      started_at_(options.env->NowMicros()),
      flush_on_destroy_(false),
      stats_(options.num_levels),
      delayed_writes_(0),
      last_flushed_sequence_(0),
      storage_options_(options),
      bg_work_gate_closed_(false),
      refitting_level_(false) {

  mem_->Ref();

  env_->GetAbsolutePath(dbname, &db_absolute_path_);

  stall_leveln_slowdown_.resize(options.num_levels);
  stall_leveln_slowdown_count_.resize(options.num_levels);
  for (int i = 0; i < options.num_levels; ++i) {
    stall_leveln_slowdown_[i] = 0;
    stall_leveln_slowdown_count_[i] = 0;
  }

  // Reserve ten files or so for other uses and give the rest to TableCache.
  const int table_cache_size = options_.max_open_files - 10;
  table_cache_.reset(new TableCache(dbname_, &options_,
                                    storage_options_, table_cache_size));

  versions_.reset(new VersionSet(dbname_, &options_, storage_options_,
                                 table_cache_.get(), &internal_comparator_));

  dumpLeveldbBuildVersion(options_.info_log.get());
  options_.Dump(options_.info_log.get());

#ifdef USE_SCRIBE
  logger_.reset(new ScribeLogger("localhost", 1456));
#endif

  char name[100];
  Status st = env_->GetHostName(name, 100L);
  if (st.ok()) {
    host_name_ = name;
  } else {
    Log(options_.info_log, "Can't get hostname, use localhost as host name.");
    host_name_ = "localhost";
  }
  last_log_ts = 0;

}

DBImpl::~DBImpl() {
  // Wait for background work to finish
  if (flush_on_destroy_ && mem_->GetFirstSequenceNumber() != 0) {
    FlushMemTable(FlushOptions());
  }
  mutex_.Lock();
  shutting_down_.Release_Store(this);  // Any non-nullptr value is ok
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  if (mem_ != nullptr) mem_->Unref();
  imm_.UnrefAll();
}

// Do not flush and close the database elegantly. Simulate a crash.
void DBImpl::TEST_Destroy_DBImpl() {
  // ensure that no new memtable flushes can occur
  flush_on_destroy_ = false;

  // wait till all background compactions are done.
  mutex_.Lock();
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }

  // Prevent new compactions from occurring.
  bg_work_gate_closed_ = true;
  const int LargeNumber = 10000000;
  bg_compaction_scheduled_ += LargeNumber;

  mutex_.Unlock();

  // force release the lock file.
  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  log_.reset();
  versions_.reset();
  table_cache_.reset();
}

uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
  return versions_->ManifestFileNumber();
}

Status DBImpl::NewDB() {
  VersionEdit new_db(NumberLevels());
  new_db.SetComparatorName(user_comparator()->Name());
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  unique_ptr<WritableFile> file;
  Status s = env_->NewWritableFile(manifest, &file, storage_options_);
  if (!s.ok()) {
    return s;
  }
  file->SetPreallocationBlockSize(options_.manifest_preallocation_size);
  {
    log::Writer log(std::move(file));
    std::string record;
    new_db.EncodeTo(&record);
    s = log.AddRecord(record);
  }
  if (s.ok()) {
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
  } else {
    env_->DeleteFile(manifest);
  }
  return s;
}

void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || options_.paranoid_checks) {
    // No change needed
  } else {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
    *s = Status::OK();
  }
}

const Status DBImpl::CreateArchivalDirectory() {
  if (options_.WAL_ttl_seconds > 0) {
    std::string archivalPath = ArchivalDirectory(dbname_);
    return env_->CreateDirIfMissing(archivalPath);
  }
  return Status::OK();
}

void DBImpl::PrintStatistics() {
  auto dbstats = options_.statistics;
  if (dbstats) {
    Log(options_.info_log,
        "STATISTICS:\n %s",
        dbstats->ToString().c_str());
  }
}

void DBImpl::MaybeDumpStats() {
  if (options_.stats_dump_period_sec == 0) return;

  const uint64_t now_micros = env_->NowMicros();

  if (last_stats_dump_time_microsec_ +
      options_.stats_dump_period_sec * 1000000
      <= now_micros) {
    // Multiple threads could race in here simultaneously.
    // However, the last one will update last_stats_dump_time_microsec_
    // atomically. We could see more than one dump during one dump
    // period in rare cases.
    last_stats_dump_time_microsec_ = now_micros;
    std::string stats;
    GetProperty("leveldb.stats", &stats);
    Log(options_.info_log, "%s", stats.c_str());
    PrintStatistics();
  }
}

// Returns the list of live files in 'live' and the list
// of all files in the filesystem in 'allfiles'.
void DBImpl::FindObsoleteFiles(DeletionState& deletion_state) {
  mutex_.AssertHeld();

  // if deletion is disabled, do nothing
  if (disable_delete_obsolete_files_) {
    return;
  }

  // This method is costly when the number of files is large.
  // Do not allow it to trigger more often than once in
  // delete_obsolete_files_period_micros.
  if (options_.delete_obsolete_files_period_micros != 0) {
    const uint64_t now_micros = env_->NowMicros();
    if (delete_obsolete_files_last_run_ +
        options_.delete_obsolete_files_period_micros > now_micros) {
      return;
    }
    delete_obsolete_files_last_run_ = now_micros;
  }

  // Make a list of all of the live files; set is slow, should not
  // be used.
  deletion_state.live.assign(pending_outputs_.begin(),
                             pending_outputs_.end());
  versions_->AddLiveFiles(&deletion_state.live);

  // set of all files in the directory
  env_->GetChildren(dbname_, &deletion_state.allfiles); // Ignore errors

  // store the current filenum, lognum, etc
  deletion_state.filenumber = versions_->ManifestFileNumber();
  deletion_state.lognumber = versions_->LogNumber();
  deletion_state.prevlognumber = versions_->PrevLogNumber();
}

Status DBImpl::DeleteLogFile(uint64_t number) {
  Status s;
  auto filename = LogFileName(dbname_, number);
  if (options_.WAL_ttl_seconds > 0) {
    s = env_->RenameFile(filename,
                         ArchivedLogFileName(dbname_, number));

    if (!s.ok()) {
      Log(options_.info_log, "RenameFile logfile #%lu FAILED", number);
    }
  } else {
    s = env_->DeleteFile(filename);
    if (!s.ok()) {
      Log(options_.info_log, "Delete logfile #%lu FAILED", number);
    }
  }

  return s;
}

// Diffs the files listed in filenames against the set of live files;
// files that do not belong to the live set are possibly removed. If a
// removed file is an sst file, its file number is returned in files_to_evict.
// It is not necessary to hold the mutex when invoking this method.
void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
  uint64_t number;
  FileType type;
  std::vector<std::string> old_log_files;

  // Now, convert live list to an unordered set, WITHOUT mutex held;
  // set is slow.
  std::unordered_set<uint64_t> live_set(state.live.begin(),
                                        state.live.end());

  for (size_t i = 0; i < state.allfiles.size(); i++) {
    if (ParseFileName(state.allfiles[i], &number, &type)) {
      bool keep = true;
      switch (type) {
        case kLogFile:
          keep = ((number >= state.lognumber) ||
                  (number == state.prevlognumber));
          break;
        case kDescriptorFile:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= state.filenumber);
          break;
        case kTableFile:
          keep = (live_set.find(number) != live_set.end());
          break;
        case kTempFile:
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = (live_set.find(number) != live_set.end());
          break;
        case kInfoLogFile:
          keep = true;
          if (number != 0) {
            old_log_files.push_back(state.allfiles[i]);
          }
          break;
        case kCurrentFile:
        case kDBLockFile:
        case kMetaDatabase:
          keep = true;
          break;
      }

      if (!keep) {
        if (type == kTableFile) {
          // record the files to be evicted from the cache
          state.files_to_evict.push_back(number);
        }
        Log(options_.info_log, "Delete type=%d #%lu", int(type), number);

        if (type == kLogFile) {
          DeleteLogFile(number);
        } else {
          Status st = env_->DeleteFile(dbname_ + "/" + state.allfiles[i]);
          if (!st.ok()) {
            Log(options_.info_log, "Delete type=%d #%lld FAILED\n",
                int(type),
                static_cast<unsigned long long>(number));
          }
        }
      }
    }
  }

  // Delete old info log files.
  size_t old_log_file_count = old_log_files.size();
  // NOTE: Currently we only support log purge when options_.db_log_dir is
  // located in `dbname` directory.
  if (old_log_file_count >= options_.keep_log_file_num &&
      options_.db_log_dir.empty()) {
    std::sort(old_log_files.begin(), old_log_files.end());
    size_t end = old_log_file_count - options_.keep_log_file_num;
    for (unsigned int i = 0; i <= end; i++) {
      std::string& to_delete = old_log_files.at(i);
      // Log(options_.info_log, "Delete type=%d %s\n",
      //     int(kInfoLogFile), to_delete.c_str());
      env_->DeleteFile(dbname_ + "/" + to_delete);
    }
  }
  PurgeObsoleteWALFiles();
}

void DBImpl::EvictObsoleteFiles(DeletionState& state) {
  for (unsigned int i = 0; i < state.files_to_evict.size(); i++) {
    table_cache_->Evict(state.files_to_evict[i]);
  }
}

void DBImpl::DeleteObsoleteFiles() {
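  // Cleanup pipeline (summary): FindObsoleteFiles() gathers candidates while
  // the mutex is held, PurgeObsoleteFiles() removes the files from disk, and
  // EvictObsoleteFiles() drops their entries from the table cache.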
  mutex_.AssertHeld();
  DeletionState deletion_state;
  FindObsoleteFiles(deletion_state);
  PurgeObsoleteFiles(deletion_state);
  EvictObsoleteFiles(deletion_state);
}

void DBImpl::PurgeObsoleteWALFiles() {
  int64_t current_time;
  Status s = env_->GetCurrentTime(&current_time);
  uint64_t now_seconds = static_cast<uint64_t>(current_time);
  assert(s.ok());

  if (options_.WAL_ttl_seconds != ULONG_MAX && options_.WAL_ttl_seconds > 0) {
    if (purge_wal_files_last_run_ + options_.WAL_ttl_seconds > now_seconds) {
      return;
    }
    std::vector<std::string> wal_files;
    std::string archival_dir = ArchivalDirectory(dbname_);
    env_->GetChildren(archival_dir, &wal_files);
    for (const auto& f : wal_files) {
      uint64_t file_m_time;
      const std::string file_path = archival_dir + "/" + f;
      const Status s = env_->GetFileModificationTime(file_path, &file_m_time);
      if (s.ok() && (now_seconds - file_m_time > options_.WAL_ttl_seconds)) {
        Status status = env_->DeleteFile(file_path);
        if (!status.ok()) {
          Log(options_.info_log,
              "Failed Deleting a WAL file Error : %s",
              status.ToString().c_str());
        }
      } // Ignore errors.
    }
  }
  purge_wal_files_last_run_ = now_seconds;
}

// If externalTable is set, then apply recovered transactions
// to that table. This is used for readonly mode.
Status DBImpl::Recover(VersionEdit* edit, MemTable* external_table,
    bool error_if_log_file_exist) {
  mutex_.AssertHeld();

  assert(db_lock_ == nullptr);
  if (!external_table) {
    // We call CreateDirIfMissing() as the directory may already exist (if we
    // are reopening a DB), when this happens we don't want creating the
    // directory to cause an error. However, we need to check if creating the
    // directory fails or else we may get an obscure message about the lock
    // file not existing. One real-world example of this occurring is if
    // env->CreateDirIfMissing() doesn't create intermediate directories, e.g.
    // when dbname_ is "dir/db" but when "dir" doesn't exist.
    Status s = env_->CreateDirIfMissing(dbname_);
    if (!s.ok()) {
      return s;
    }

    s = env_->LockFile(LockFileName(dbname_), &db_lock_);
    if (!s.ok()) {
      return s;
    }

    if (!env_->FileExists(CurrentFileName(dbname_))) {
      if (options_.create_if_missing) {
        // TODO: add merge_operator name check
        s = NewDB();
        if (!s.ok()) {
          return s;
        }
      } else {
        return Status::InvalidArgument(
            dbname_, "does not exist (create_if_missing is false)");
      }
    } else {
      if (options_.error_if_exists) {
        return Status::InvalidArgument(
            dbname_, "exists (error_if_exists is true)");
      }
    }
  }

  Status s = versions_->Recover();
  if (s.ok()) {
    SequenceNumber max_sequence(0);

    // Recover from all newer log files than the ones named in the
    // descriptor (new log files may have been added by the previous
    // incarnation without registering them in the descriptor).
    //
    // Note that PrevLogNumber() is no longer used, but we pay
    // attention to it in case we are recovering a database
    // produced by an older version of leveldb.
    const uint64_t min_log = versions_->LogNumber();
    const uint64_t prev_log = versions_->PrevLogNumber();
    std::vector<std::string> filenames;
    s = env_->GetChildren(dbname_, &filenames);
    if (!s.ok()) {
      return s;
    }
    uint64_t number;
    FileType type;
    std::vector<uint64_t> logs;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type)
          && type == kLogFile
          && ((number >= min_log) || (number == prev_log))) {
        logs.push_back(number);
      }
    }

    if (logs.size() > 0 && error_if_log_file_exist) {
      return Status::Corruption(""
          "The db was opened in readonly mode with error_if_log_file_exist"
          "flag but a log file already exists");
    }

    // Recover in the order in which the logs were generated
    std::sort(logs.begin(), logs.end());
    for (size_t i = 0; i < logs.size(); i++) {
      s = RecoverLogFile(logs[i], edit, &max_sequence, external_table);
      // The previous incarnation may not have written any MANIFEST
      // records after allocating this log number.  So we manually
      // update the file number allocation counter in VersionSet.
      versions_->MarkFileNumberUsed(logs[i]);
    }

    if (s.ok()) {
      if (versions_->LastSequence() < max_sequence) {
        versions_->SetLastSequence(max_sequence);
        last_flushed_sequence_ = max_sequence;
      } else {
        last_flushed_sequence_ = versions_->LastSequence();
      }
      SetTickerCount(options_.statistics, SEQUENCE_NUMBER,
                     versions_->LastSequence());
    }
  }

  return s;
}

Status DBImpl::RecoverLogFile(uint64_t log_number,
                              VersionEdit* edit,
                              SequenceNumber* max_sequence,
                              MemTable* external_table) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false or
                     //            options_.skip_log_error_on_recovery==true
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  mutex_.AssertHeld();

  // Open the log file
  std::string fname = LogFileName(dbname_, log_number);
  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);
  if (!status.ok()) {
    MaybeIgnoreError(&status);
    return status;
  }

  // Create the log reader.
  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks &&
                     !options_.skip_log_error_on_recovery ? &status : nullptr);
  // We intentionally make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%llu",
      (unsigned long long) log_number);

  // Read all the records and add to a memtable
  std::string scratch;
  Slice record;
  WriteBatch batch;
  MemTable* mem = nullptr;
  if (external_table) {
    mem = external_table;
  }
  while (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      continue;
    }
    WriteBatchInternal::SetContents(&batch, record);

    if (mem == nullptr) {
      mem = new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_);
      mem->Ref();
    }
    status = WriteBatchInternal::InsertInto(&batch, mem, &options_);
    MaybeIgnoreError(&status);
    if (!status.ok()) {
      break;
    }
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;
    }

    if (!external_table &&
        mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
      status = WriteLevel0TableForRecovery(mem, edit);
      if (!status.ok()) {
        // Reflect errors immediately so that conditions like full
        // file-systems cause the DB::Open() to fail.
        break;
      }
      mem->Unref();
      mem = nullptr;
    }
  }

  if (status.ok() && mem != nullptr && !external_table) {
    status = WriteLevel0TableForRecovery(mem, edit);
    // Reflect errors immediately so that conditions like full
    // file-systems cause the DB::Open() to fail.
  }

  if (mem != nullptr && !external_table) mem->Unref();
  return status;
}

Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mem->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 table #%llu: started",
      (unsigned long long) meta.number);

  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable);
    mutex_.Lock();
  }

  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats.files_out_levelnp1 = 1;
  stats_[level].Add(stats);
  return s;
}

Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
                                uint64_t* filenumber) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  *filenumber = meta.number;
  pending_outputs_.insert(meta.number);

  std::vector<Iterator*> list;
  for (MemTable* m : mems) {
    list.push_back(m->NewIterator());
  }
  Iterator* iter = NewMergingIterator(&internal_comparator_, &list[0],
                                      list.size());
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mems[0]->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 flush table #%llu: started",
      (unsigned long long) meta.number);

  Version* base = versions_->current();
  base->Ref();          // it is likely that we do not need this reference
  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable);
    mutex_.Lock();
  }
  base->Unref();

  Log(options_.info_log, "Level-0 flush table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  // re-acquire the most current version
  base = versions_->current();

  // There could be multiple threads, each writing to its own level-0 file.
  // The pending_outputs cannot be cleared here, otherwise this newly
  // created file might not be considered as a live-file by another
  // compaction thread that is concurrently deleting obsolete files.
  // The pending_outputs can be cleared only after the new version is
  // committed so that other threads can recognize this file as a
  // valid one.
  // pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
    // if we have more than 1 background thread, then we cannot
    // insert files directly into higher levels because some other
    // threads could be concurrently producing compacted files for
    // that key range.
    if (base != nullptr && options_.max_background_compactions <= 1 &&
        options_.compaction_style == kCompactionStyleLevel) {
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    }
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats_[level].Add(stats);
  return s;
}

Status DBImpl::CompactMemTable(bool* madeProgress) {
  mutex_.AssertHeld();
  assert(imm_.size() != 0);

  if (!imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log, "Memcompaction already in progress");
    Status s = Status::IOError("Memcompaction already in progress");
    return s;
  }

  // Save the contents of the earliest memtable as a new Table
  uint64_t file_number;
  std::vector<MemTable*> mems;
  imm_.PickMemtablesToFlush(&mems);
  if (mems.empty()) {
    Log(options_.info_log, "Nothing in memstore to flush");
    Status s = Status::IOError("Nothing in memstore to flush");
    return s;
  }

  // record the logfile_number_ before we release the mutex
  MemTable* m = mems[0];
  VersionEdit* edit = m->GetEdits();
  edit->SetPrevLogNumber(0);
  edit->SetLogNumber(m->GetNextLogNumber());  // Earlier logs no longer needed
  auto to_delete = m->GetLogNumber();

  // This will release and re-acquire the mutex.
  Status s = WriteLevel0Table(mems, edit, &file_number);

  if (s.ok() && shutting_down_.Acquire_Load()) {
    s = Status::IOError(
      "Database shutdown started during memtable compaction"
    );
  }

  // Replace immutable memtable with the generated Table
  s = imm_.InstallMemtableFlushResults(
    mems, versions_.get(), s, &mutex_, options_.info_log.get(),
    file_number, pending_outputs_);

  if (s.ok()) {
    if (madeProgress) {
      *madeProgress = 1;
    }

    MaybeScheduleLogDBDeployStats();
    // TODO: if log deletion failed for any reason, we probably
    // should store the file number in the shared state, and retry
    // However, for now, PurgeObsoleteFiles will take care of that
    // anyways.
    if (options_.purge_log_after_memtable_flush &&
        !disable_delete_obsolete_files_ &&
        to_delete > 0) {
      mutex_.Unlock();
      DeleteLogFile(to_delete);
      mutex_.Lock();
    }
  }
  return s;
}

void DBImpl::CompactRange(const Slice* begin, const Slice* end,
                          bool reduce_level, int target_level) {
  int max_level_with_files = 1;
  {
    MutexLock l(&mutex_);
    Version* base = versions_->current();
    for (int level = 1; level < NumberLevels(); level++) {
      if (base->OverlapInLevel(level, begin, end)) {
        max_level_with_files = level;
      }
    }
  }
  TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
  for (int level = 0; level < max_level_with_files; level++) {
    TEST_CompactRange(level, begin, end);
  }

  if (reduce_level) {
    ReFitLevel(max_level_with_files, target_level);
  }
}

// return the same level if it cannot be moved
int DBImpl::FindMinimumEmptyLevelFitting(int level) {
  mutex_.AssertHeld();
  int minimum_level = level;
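  // Illustrative example: if 'level' is 4 and levels 2 and 3 are both empty,
  // but only level 3 has enough target capacity to hold level 4's bytes, the
  // loop below stops at level 2 and the function returns 3.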
  for (int i = level - 1; i > 0; --i) {
    // stop if level i is not empty
    if (versions_->NumLevelFiles(i) > 0) break;

    // stop if level i is too small (cannot fit the level files)
    if (versions_->MaxBytesForLevel(i) < versions_->NumLevelBytes(level)) break;

    minimum_level = i;
  }
  return minimum_level;
}

void DBImpl::ReFitLevel(int level, int target_level) {
  assert(level < NumberLevels());

  MutexLock l(&mutex_);

  // only allow one thread refitting
  if (refitting_level_) {
    Log(options_.info_log, "ReFitLevel: another thread is refitting");
    return;
  }
  refitting_level_ = true;

  // wait for all background threads to stop
  bg_work_gate_closed_ = true;
  while (bg_compaction_scheduled_ > 0 || bg_flush_scheduled_) {
    Log(options_.info_log,
        "RefitLevel: waiting for background threads to stop: %d %d",
        bg_compaction_scheduled_, bg_flush_scheduled_);
    bg_cv_.Wait();
  }

  // move to a smaller level
  int to_level = target_level;
  if (target_level < 0) {
    to_level = FindMinimumEmptyLevelFitting(level);
  }

  assert(to_level <= level);

  if (to_level < level) {
    Log(options_.info_log, "Before refitting:\n%s",
        versions_->current()->DebugString().data());

    VersionEdit edit(NumberLevels());
    for (const auto& f : versions_->current()->files_[level]) {
      edit.DeleteFile(level, f->number);
      edit.AddFile(to_level, f->number, f->file_size, f->smallest, f->largest,
                   f->smallest_seqno, f->largest_seqno);
    }
    Log(options_.info_log, "Apply version edit:\n%s",
        edit.DebugString().data());

    auto status = versions_->LogAndApply(&edit, &mutex_);

    Log(options_.info_log, "LogAndApply: %s\n", status.ToString().data());

    if (status.ok()) {
      Log(options_.info_log, "After refitting:\n%s",
          versions_->current()->DebugString().data());
    }
  }

  refitting_level_ = false;
  bg_work_gate_closed_ = false;
}

int DBImpl::NumberLevels() {
  return options_.num_levels;
}

int DBImpl::MaxMemCompactionLevel() {
  return options_.max_mem_compaction_level;
}

int DBImpl::Level0StopWriteTrigger() {
  return options_.level0_stop_writes_trigger;
}

Status DBImpl::Flush(const FlushOptions& options) {
  Status status = FlushMemTable(options);
  return status;
}

SequenceNumber DBImpl::GetLatestSequenceNumber() {
  return versions_->LastSequence();
}

Status DBImpl::GetUpdatesSince(SequenceNumber seq,
                               unique_ptr<TransactionLogIterator>* iter) {

  if (seq > last_flushed_sequence_) {
    return Status::IOError("Requested sequence not yet written in the db");
  }
  //  Get all sorted Wal Files.
  //  Do binary search and open files and find the seq number.

  std::unique_ptr<VectorLogPtr> wal_files(new VectorLogPtr);
  Status s = GetSortedWalFiles(*wal_files);
  if (!s.ok()) {
    return s;
  }

  if (wal_files->empty()) {
    return Status::IOError(" NO WAL Files present in the db");
  }
  //  std::shared_ptr would have been useful here.

  s = RetainProbableWalFiles(*wal_files, seq);
  if (!s.ok()) {
    return s;
  }
  iter->reset(
    new TransactionLogIteratorImpl(dbname_,
                                   &options_,
                                   storage_options_,
                                   seq,
                                   std::move(wal_files),
                                   &last_flushed_sequence_));
  iter->get()->Next();
  return iter->get()->status();
}

Status DBImpl::RetainProbableWalFiles(VectorLogPtr& all_logs,
                                      const SequenceNumber target) {
  long start = 0; // signed to avoid overflow when target is < first file.
  long end = static_cast<long>(all_logs.size()) - 1;
  // Binary search to avoid opening all files.
  while (end >= start) {
    long mid = start + (end - start) / 2;  // Avoid overflow.
    SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence();
    if (current_seq_num == target) {
      end = mid;
      break;
    } else if (current_seq_num < target) {
      start = mid + 1;
    } else {
      end = mid - 1;
    }
  }
  size_t start_index = std::max(0l, end); // end could be negative.
  // The last wal file is always included
  all_logs.erase(all_logs.begin(), all_logs.begin() + start_index);
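  // Illustrative example: for files whose start sequences are {10, 20, 30}
  // and a target of 25, the search above ends at the file starting at 20,
  // which may still contain sequence 25, so only the file starting at 10 is
  // erased.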
  return Status::OK();
}

bool DBImpl::CheckWalFileExistsAndEmpty(const WalFileType type,
                                        const uint64_t number) {
  const std::string fname = (type == kAliveLogFile) ?
    LogFileName(dbname_, number) : ArchivedLogFileName(dbname_, number);
  uint64_t file_size;
  Status s = env_->GetFileSize(fname, &file_size);
  return (s.ok() && (file_size == 0));
}

Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number,
                               WriteBatch* const result) {

  if (type == kAliveLogFile) {
    std::string fname = LogFileName(dbname_, number);
    Status status = ReadFirstLine(fname, result);
    if (!status.ok()) {
      //  check if the file got moved to archive.
      std::string archived_file = ArchivedLogFileName(dbname_, number);
      Status s = ReadFirstLine(archived_file, result);
      if (!s.ok()) {
        return Status::IOError("Log File has been deleted");
      }
    }
    return Status::OK();
  } else if (type == kArchivedLogFile) {
    std::string fname = ArchivedLogFileName(dbname_, number);
    Status status = ReadFirstLine(fname, result);
    return status;
  }
  return Status::NotSupported("File Type Not Known");
}

Status DBImpl::ReadFirstLine(const std::string& fname,
                             WriteBatch* const batch) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);

  if (!status.ok()) {
    return status;
  }


  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks ? &status : nullptr);
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  std::string scratch;
  Slice record;

  if (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      return Status::IOError("Corruption noted");
      //  TODO read record's till the first no corrupt entry?
    }
    WriteBatchInternal::SetContents(batch, record);
    return Status::OK();
  }
  return Status::IOError("Error reading from file " + fname);
}

struct CompareLogByPointer {
  bool operator() (const unique_ptr<LogFile>& a,
                   const unique_ptr<LogFile>& b) {
    LogFileImpl* a_impl = dynamic_cast<LogFileImpl*>(a.get());
    LogFileImpl* b_impl = dynamic_cast<LogFileImpl*>(b.get());
    return *a_impl < *b_impl;
  }
};

Status DBImpl::AppendSortedWalsOfType(const std::string& path,
    VectorLogPtr& log_files, WalFileType log_type) {
  std::vector<std::string> all_files;
  const Status status = env_->GetChildren(path, &all_files);
  if (!status.ok()) {
    return status;
  }
  log_files.reserve(log_files.size() + all_files.size());
  VectorLogPtr::iterator pos_start;
  if (!log_files.empty()) {
    pos_start = log_files.end() - 1;
  } else {
    pos_start = log_files.begin();
  }
  for (const auto& f : all_files) {
    uint64_t number;
    FileType type;
    if (ParseFileName(f, &number, &type) && type == kLogFile) {

      WriteBatch batch;
      Status s = ReadFirstRecord(log_type, number, &batch);
      if (!s.ok()) {
        if (CheckWalFileExistsAndEmpty(log_type, number)) {
          continue;
        }
        return s;
      }

      uint64_t size_bytes;
      s = env_->GetFileSize(LogFileName(path, number), &size_bytes);
      if (!s.ok()) {
        return s;
      }

      log_files.push_back(std::move(unique_ptr<LogFile>(new LogFileImpl(
        number, log_type, WriteBatchInternal::Sequence(&batch), size_bytes))));
    }
  }
  CompareLogByPointer compare_log_files;
  std::sort(pos_start, log_files.end(), compare_log_files);
  return status;
}

void DBImpl::TEST_CompactRange(int level, const Slice* begin, const Slice* end) {
  assert(level >= 0);

  InternalKey begin_storage, end_storage;

  ManualCompaction manual;
  manual.level = level;
  manual.done = false;
  manual.in_progress = false;
  // For universal compaction, we enforce every manual compaction to compact
  // all files.
  if (begin == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
    manual.begin = nullptr;
  } else {
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
  }
  if (end == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
    manual.end = nullptr;
  } else {
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;
  }

  MutexLock l(&mutex_);

  // When a manual compaction arrives, temporarily throttle down
  // the number of background compaction threads to 1. This is
  // needed to ensure that this manual compaction can compact
  // any range of keys/files. We artificially increase
  // bg_compaction_scheduled_ by a large number, this causes
  // the system to have a single background thread. Now,
  // this manual compaction can progress without stomping
  // on any other concurrent compactions.
  const int LargeNumber = 10000000;
  const int newvalue = options_.max_background_compactions-1;
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber) {
    Log(options_.info_log, "Manual compaction request waiting for background threads to fall below 1");
    bg_cv_.Wait();
  }
  Log(options_.info_log, "Manual compaction starting");

  while (!manual.done) {
    while (manual_compaction_ != nullptr) {
      bg_cv_.Wait();
    }
    manual_compaction_ = &manual;
    if (bg_compaction_scheduled_ == LargeNumber) {
      bg_compaction_scheduled_ = newvalue;
    }
    MaybeScheduleCompaction();
    while (manual_compaction_ == &manual) {
      bg_cv_.Wait();
    }
  }
  assert(!manual.in_progress);

  // wait till there are no background threads scheduled
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber + newvalue) {
    Log(options_.info_log, "Manual compaction resetting background threads");
    bg_cv_.Wait();
  }
  bg_compaction_scheduled_ = 0;
}

Status DBImpl::FlushMemTable(const FlushOptions& options) {
  // nullptr batch means just wait for earlier writes to be done
  Status s = Write(WriteOptions(), nullptr);
  if (s.ok() && options.wait) {
    // Wait until the compaction completes
    s = WaitForCompactMemTable();
  }
  return s;
}

Status DBImpl::WaitForCompactMemTable() {
  Status s;
  // Wait until the compaction completes
  MutexLock l(&mutex_);
  while (imm_.size() > 0 && bg_error_.ok()) {
    bg_cv_.Wait();
  }
  if (imm_.size() != 0) {
    s = bg_error_;
  }
  return s;
}

Status DBImpl::TEST_CompactMemTable() {
  return FlushMemTable(FlushOptions());
}

Status DBImpl::TEST_WaitForCompactMemTable() {
  return WaitForCompactMemTable();
}

Status DBImpl::TEST_WaitForCompact() {
  // Wait until the compaction completes
  MutexLock l(&mutex_);
  while ((bg_compaction_scheduled_ || bg_flush_scheduled_) &&
         bg_error_.ok()) {
    bg_cv_.Wait();
  }
  return bg_error_;
}

void DBImpl::MaybeScheduleCompaction() {
  mutex_.AssertHeld();
  if (bg_work_gate_closed_) {
    // gate closed for background work
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else {
    bool is_flush_pending =
      imm_.IsFlushPending(options_.min_write_buffer_number_to_merge);
    if (is_flush_pending &&
        (bg_flush_scheduled_ < options_.max_background_flushes)) {
      // memtable flush needed
      bg_flush_scheduled_++;
      env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::HIGH);
    }

    if ((manual_compaction_ ||
         versions_->NeedsCompaction() ||
         (is_flush_pending && (options_.max_background_flushes <= 0))) &&
        bg_compaction_scheduled_ < options_.max_background_compactions) {
      // compaction needed, or memtable flush needed but HIGH pool not enabled.
      bg_compaction_scheduled_++;
      env_->Schedule(&DBImpl::BGWorkCompaction, this, Env::Priority::LOW);
    }
  }
}
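// Note (summary of the scheduling logic above): when max_background_flushes
// is greater than zero, pending memtable flushes run on the HIGH priority
// pool via BGWorkFlush; otherwise the LOW priority compaction pool picks
// them up inside BackgroundCompaction().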

void DBImpl::BGWorkFlush(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCallFlush();
}

void DBImpl::BGWorkCompaction(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCallCompaction();
}

Status DBImpl::BackgroundFlush() {
  Status stat;
  while (stat.ok() &&
         imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log,
        "BackgroundCallFlush doing CompactMemTable, flush slots available %d",
        options_.max_background_flushes - bg_flush_scheduled_);
    stat = CompactMemTable();
  }
  return stat;
}

void DBImpl::BackgroundCallFlush() {
  assert(bg_flush_scheduled_);
  MutexLock l(&mutex_);

  if (!shutting_down_.Acquire_Load()) {
    Status s = BackgroundFlush();
    if (!s.ok()) {
      // Wait a little bit before retrying background compaction in
      // case this is an environmental problem and we do not want to
      // chew up resources for failed compactions for the duration of
      // the problem.
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      Log(options_.info_log, "Waiting after background flush error: %s",
          s.ToString().c_str());
      mutex_.Unlock();
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
    }
  }

  bg_flush_scheduled_--;

  bg_cv_.SignalAll();
}

void DBImpl::TEST_PurgeObsoleteteWAL() {
  PurgeObsoleteWALFiles();
}

void DBImpl::BackgroundCallCompaction() {
  bool madeProgress = false;
  DeletionState deletion_state;

  MaybeDumpStats();

  MutexLock l(&mutex_);
  // Log(options_.info_log, "XXX BG Thread %llx process new work item", pthread_self());
  assert(bg_compaction_scheduled_);
  if (!shutting_down_.Acquire_Load()) {
    Status s = BackgroundCompaction(&madeProgress, deletion_state);
    if (!s.ok()) {
      // Wait a little bit before retrying background compaction in
      // case this is an environmental problem and we do not want to
      // chew up resources for failed compactions for the duration of
      // the problem.
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      Log(options_.info_log, "Waiting after background compaction error: %s",
          s.ToString().c_str());
      mutex_.Unlock();
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
    }
  }

  // delete unnecessary files if any, this is done outside the mutex
  if (!deletion_state.live.empty()) {
    mutex_.Unlock();
    PurgeObsoleteFiles(deletion_state);
    EvictObsoleteFiles(deletion_state);
    mutex_.Lock();
  }

  bg_compaction_scheduled_--;

  MaybeScheduleLogDBDeployStats();

  // Previous compaction may have produced too many files in a level,
  // so reschedule another compaction if we made progress in the
  // last compaction.
  if (madeProgress) {
    MaybeScheduleCompaction();
  }
  bg_cv_.SignalAll();

}

Status DBImpl::BackgroundCompaction(bool* madeProgress,
  DeletionState& deletion_state) {
  *madeProgress = false;
  mutex_.AssertHeld();

  // TODO: remove memtable flush from formal compaction
  while (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log,
        "BackgroundCompaction doing CompactMemTable, compaction slots available %d",
        options_.max_background_compactions - bg_compaction_scheduled_);
    Status stat = CompactMemTable(madeProgress);
    if (!stat.ok()) {
      return stat;
    }
  }

  unique_ptr<Compaction> c;
  bool is_manual = (manual_compaction_ != nullptr) &&
                   (manual_compaction_->in_progress == false);
  InternalKey manual_end;
  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    assert(!m->in_progress);
    m->in_progress = true; // another thread cannot pick up the same work
    c.reset(versions_->CompactRange(m->level, m->begin, m->end));
    if (c) {
      manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
    } else {
      m->done = true;
    }
    Log(options_.info_log,
        "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
        m->level,
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        (m->done ? "(end)" : manual_end.DebugString().c_str()));
  } else if (!options_.disable_auto_compactions) {
    c.reset(versions_->PickCompaction());
  }

  Status status;
  if (!c) {
    // Nothing to do
    Log(options_.info_log, "Compaction nothing to do");
  } else if (!is_manual && c->IsTrivialMove()) {
    // Move file to next level
    assert(c->num_input_files(0) == 1);
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
                       f->smallest, f->largest,
                       f->smallest_seqno, f->largest_seqno);
    status = versions_->LogAndApply(c->edit(), &mutex_);
    VersionSet::LevelSummaryStorage tmp;
    Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
        static_cast<unsigned long long>(f->number),
        c->level() + 1,
        static_cast<unsigned long long>(f->file_size),
        status.ToString().c_str(),
        versions_->LevelSummary(&tmp));
    versions_->ReleaseCompactionFiles(c.get(), status);
    *madeProgress = true;
  } else {
    MaybeScheduleCompaction(); // do more compaction work in parallel.
    CompactionState* compact = new CompactionState(c.get());
    status = DoCompactionWork(compact);
    CleanupCompaction(compact);
    versions_->ReleaseCompactionFiles(c.get(), status);
    c->ReleaseInputs();
    FindObsoleteFiles(deletion_state);
    *madeProgress = true;
  }
  c.reset();

  if (status.ok()) {
    // Done
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutting down
  } else {
    Log(options_.info_log,
        "Compaction error: %s", status.ToString().c_str());
    if (options_.paranoid_checks && bg_error_.ok()) {
      bg_error_ = status;
    }
  }

  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    if (!status.ok()) {
      m->done = true;
    }
    // For universal compaction:
    //   Because universal compaction always happens at level 0, one
    //   compaction will pick up all overlapped files. No files will be
    //   filtered out due to size limit and left for a successive compaction.
    //   So we can safely conclude the current compaction.
    //
    //   Also note that, if we don't stop here, then the current compaction
    //   writes a new file back to level 0, which will be used in successive
    //   compaction. Hence the manual compaction will never finish.
    if (options_.compaction_style == kCompactionStyleUniversal) {
      m->done = true;
    }
    if (!m->done) {
      // We only compacted part of the requested range.  Update *m
      // to the range that is left to be compacted.
      m->tmp_storage = manual_end;
      m->begin = &m->tmp_storage;
    }
    m->in_progress = false; // not being processed anymore
    manual_compaction_ = nullptr;
  }
  return status;
}

void DBImpl::CleanupCompaction(CompactionState* compact) {
  mutex_.AssertHeld();
  if (compact->builder != nullptr) {
    // May happen if we get a shutdown call in the middle of compaction
    compact->builder->Abandon();
    compact->builder.reset();
  } else {
    assert(compact->outfile == nullptr);
  }
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    pending_outputs_.erase(out.number);
  }
  delete compact;
}

// Allocate the file numbers for the output files. We allocate as
// many output file numbers as there are files in level+1.
// Insert them into pending_outputs so that they do not get deleted.
void DBImpl::AllocateCompactionOutputFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
  int filesNeeded = compact->compaction->num_input_files(1);
  for (int i = 0; i < filesNeeded; i++) {
    uint64_t file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    compact->allocated_file_numbers.push_back(file_number);
  }
}

// Frees up unused file numbers.
void DBImpl::ReleaseCompactionUnusedFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
  for (const auto file_number : compact->allocated_file_numbers) {
    pending_outputs_.erase(file_number);
    // Log(options_.info_log, "XXX releasing unused file num %d", file_number);
  }
}

Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
1671 1672
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
J
jorlow@chromium.org 已提交
1673
  uint64_t file_number;
1674 1675 1676 1677 1678 1679 1680
  // If we have not yet exhausted the pre-allocated file numbers,
  // then use the one from the front. Otherwise, we have to acquire
  // the heavyweight lock and allocate a new file number.
  if (!compact->allocated_file_numbers.empty()) {
    file_number = compact->allocated_file_numbers.front();
    compact->allocated_file_numbers.pop_front();
  } else {
J
jorlow@chromium.org 已提交
1681 1682 1683 1684 1685
    mutex_.Lock();
    file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    mutex_.Unlock();
  }
1686 1687 1688 1689
  CompactionState::Output out;
  out.number = file_number;
  out.smallest.Clear();
  out.largest.Clear();
1690
  out.smallest_seqno = out.largest_seqno = 0;
1691
  compact->outputs.push_back(out);
J
jorlow@chromium.org 已提交
1692 1693 1694

  // Make the output file
  std::string fname = TableFileName(dbname_, file_number);
1695
  Status s = env_->NewWritableFile(fname, &compact->outfile, storage_options_);
1696

J
jorlow@chromium.org 已提交
1697
  if (s.ok()) {
1698 1699 1700
    // Over-estimate slightly so we don't end up just barely crossing
    // the threshold.
    compact->outfile->SetPreallocationBlockSize(
1701
      1.1 * versions_->MaxFileSizeForLevel(compact->compaction->output_level()));
1702

1703
    compact->builder.reset(new TableBuilder(options_, compact->outfile.get(),
1704
                                            compact->compaction->output_level()));
J
jorlow@chromium.org 已提交
1705 1706 1707 1708 1709 1710
  }
  return s;
}

Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                          Iterator* input) {
1711
  assert(compact != nullptr);
1712
  assert(compact->outfile);
1713
  assert(compact->builder != nullptr);
J
jorlow@chromium.org 已提交
1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  }
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
1729
  compact->builder.reset();
J
jorlow@chromium.org 已提交
1730 1731

  // Finish and check for file errors
1732
  if (s.ok() && !options_.disableDataSync) {
1733
    if (options_.use_fsync) {
1734
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
1735 1736
      s = compact->outfile->Fsync();
    } else {
1737
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
1738 1739
      s = compact->outfile->Sync();
    }
J
jorlow@chromium.org 已提交
1740 1741 1742 1743
  }
  if (s.ok()) {
    s = compact->outfile->Close();
  }
1744
  compact->outfile.reset();
J
jorlow@chromium.org 已提交
1745 1746 1747

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
J
jorlow@chromium.org 已提交
1748
    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
1749
                                               storage_options_,
J
jorlow@chromium.org 已提交
1750 1751
                                               output_number,
                                               current_bytes);
J
jorlow@chromium.org 已提交
1752 1753 1754
    s = iter->status();
    delete iter;
    if (s.ok()) {
1755
      Log(options_.info_log,
J
jorlow@chromium.org 已提交
1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767
          "Generated table #%llu: %lld keys, %lld bytes",
          (unsigned long long) output_number,
          (unsigned long long) current_entries,
          (unsigned long long) current_bytes);
    }
  }
  return s;
}


Status DBImpl::InstallCompactionResults(CompactionState* compact) {
  mutex_.AssertHeld();
1768 1769 1770 1771 1772

  // paranoia: verify that the files that we started with
  // still exist in the current version and in the same original level.
  // This ensures that a concurrent compaction did not erroneously
  // pick the same files to compact.
1773
  if (!versions_->VerifyCompactionFileConsistency(compact->compaction)) {
1774 1775 1776 1777 1778 1779 1780 1781
    Log(options_.info_log,  "Compaction %d@%d + %d@%d files aborted",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1);
    return Status::IOError("Compaction input files inconsistent");
  }

1782
  Log(options_.info_log,  "Compacted %d@%d + %d@%d files => %lld bytes",
J
jorlow@chromium.org 已提交
1783 1784 1785 1786 1787 1788 1789 1790 1791
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Add compaction outputs
  compact->compaction->AddInputDeletions(compact->compaction->edit());
  const int level = compact->compaction->level();
D
dgrogan@chromium.org 已提交
1792
  for (size_t i = 0; i < compact->outputs.size(); i++) {
J
jorlow@chromium.org 已提交
1793 1794
    const CompactionState::Output& out = compact->outputs[i];
    compact->compaction->edit()->AddFile(
1795 1796
        (options_.compaction_style == kCompactionStyleUniversal) ?
          level : level + 1,
1797 1798
        out.number, out.file_size, out.smallest, out.largest,
        out.smallest_seqno, out.largest_seqno);
J
jorlow@chromium.org 已提交
1799
  }
1800
  return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
J
jorlow@chromium.org 已提交
1801 1802
}

//
// Given a sequence number, return the sequence number of the
// earliest snapshot that this sequence number is visible in.
// The snapshots themselves are arranged in ascending order of
// sequence numbers.
// Employ a sequential search because the total number of
// snapshots is typically small.
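// Example (illustrative): with snapshots {5, 12, 20} and in == 9, the
// function returns 12 and sets *prev_snapshot to 5.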
inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
  SequenceNumber in, std::vector<SequenceNumber>& snapshots,
  SequenceNumber* prev_snapshot) {
  SequenceNumber prev __attribute__((unused)) = 0;
  for (const auto cur : snapshots) {
    assert(prev <= cur);
    if (cur >= in) {
      *prev_snapshot = prev;
      return cur;
    }
    prev = cur; // assignment
    assert(prev);
  }
  Log(options_.info_log,
      "Looking for seqid %ld but maxseqid is %ld", in,
      snapshots[snapshots.size()-1]);
  assert(0);
  return 0;
}

Status DBImpl::DoCompactionWork(CompactionState* compact) {
  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
  Log(options_.info_log,
      "Compacting %d@%d + %d@%d files, score %.2f slots available %d",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      compact->compaction->score(),
      options_.max_background_compactions - bg_compaction_scheduled_);
  char scratch[256];
  compact->compaction->Summary(scratch, sizeof(scratch));
  Log(options_.info_log, "Compaction start summary: %s\n", scratch);

  assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
  assert(compact->builder == nullptr);
  assert(!compact->outfile);

  SequenceNumber visible_at_tip = 0;
  SequenceNumber earliest_snapshot;
  SequenceNumber latest_snapshot = 0;
  snapshots_.getAll(compact->existing_snapshots);
  if (compact->existing_snapshots.size() == 0) {
    // optimize for fast path if there are no snapshots
    visible_at_tip = versions_->LastSequence();
    earliest_snapshot = visible_at_tip;
  } else {
    latest_snapshot = compact->existing_snapshots.back();
    // Add the current seqno as the 'latest' virtual
    // snapshot to the end of this list.
    compact->existing_snapshots.push_back(versions_->LastSequence());
    earliest_snapshot = compact->existing_snapshots[0];
  }

  // Is this compaction producing files at the bottommost level?
  bool bottommost_level = compact->compaction->BottomMostLevel();

  // Allocate the output file numbers before we release the lock
  AllocateCompactionOutputFileNumbers(compact);

  // Release mutex while we're actually doing the compaction work
  mutex_.Unlock();

  const uint64_t start_micros = env_->NowMicros();
  unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
  input->SeekToFirst();
  Status status;
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
  SequenceNumber last_sequence_for_key __attribute__((unused)) =
    kMaxSequenceNumber;
  SequenceNumber visible_in_snapshot = kMaxSequenceNumber;
  std::string compaction_filter_value;
  std::vector<char> delete_key; // for compaction filter
  MergeHelper merge(user_comparator(), options_.merge_operator.get(),
                    options_.info_log.get(),
                    false /* internal key corruption is expected */);
  auto compaction_filter = options_.compaction_filter;
  std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr;
  if (!compaction_filter) {
    compaction_filter_from_factory = std::move(
        options_.compaction_filter_factory->CreateCompactionFilter());
    compaction_filter = compaction_filter_from_factory.get();
  }
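  // Note: the filter chosen above is applied further below only to the first
  // occurrence of each user key, for kTypeValue entries that are newer than
  // every external snapshot.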
  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
1896
    // Prioritize immutable compaction work
1897
    // TODO: remove memtable flush from normal compaction work
1898
    if (imm_.imm_flush_needed.NoBarrier_Load() != nullptr) {
1899 1900
      const uint64_t imm_start = env_->NowMicros();
      mutex_.Lock();
1901
      if (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
1902
        CompactMemTable();
H
hans@chromium.org 已提交
1903
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
1904 1905 1906 1907 1908
      }
      mutex_.Unlock();
      imm_micros += (env_->NowMicros() - imm_start);
    }

J
jorlow@chromium.org 已提交
1909
    Slice key = input->key();
1910
    Slice value = input->value();
H
Haobo Xu 已提交
1911

1912
    if (compact->compaction->ShouldStopBefore(key) &&
1913
        compact->builder != nullptr) {
1914
      status = FinishCompactionOutputFile(compact, input.get());
1915 1916 1917 1918 1919 1920
      if (!status.ok()) {
        break;
      }
    }

    // Handle key/value, add to state, etc.
J
jorlow@chromium.org 已提交
1921
    bool drop = false;
1922
    bool current_entry_is_merging = false;
J
jorlow@chromium.org 已提交
1923 1924
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
1925 1926
      // TODO: error key stays in db forever? Figure out the intention/rationale
      // v10 error v8 : we cannot hide v8 even though it's pretty obvious.
J
jorlow@chromium.org 已提交
1927 1928 1929
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
1930
      visible_in_snapshot = kMaxSequenceNumber;
J
jorlow@chromium.org 已提交
1931 1932 1933 1934 1935 1936 1937 1938
    } else {
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
                                     Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
1939
        visible_in_snapshot = kMaxSequenceNumber;
H
Haobo Xu 已提交
1940 1941

        // apply the compaction filter to the first occurrence of the user key
1942
        if (compaction_filter &&
H
Haobo Xu 已提交
1943 1944 1945 1946 1947 1948 1949 1950 1951 1952
            ikey.type == kTypeValue &&
            (visible_at_tip || ikey.sequence > latest_snapshot)) {
          // If the user has specified a compaction filter and the sequence
          // number is greater than any external snapshot, then invoke the
          // filter.
          // If the return value of the compaction filter is true, replace
          // the entry with a delete marker.
          bool value_changed = false;
          compaction_filter_value.clear();
          bool to_delete =
1953
            compaction_filter->Filter(compact->compaction->level(),
H
Haobo Xu 已提交
1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974
                                               ikey.user_key, value,
                                               &compaction_filter_value,
                                               &value_changed);
          if (to_delete) {
            // make a copy of the original key
            delete_key.assign(key.data(), key.data() + key.size());
            // convert it to a delete
            UpdateInternalKey(&delete_key[0], delete_key.size(),
                              ikey.sequence, kTypeDeletion);
            // anchor the key again
            key = Slice(&delete_key[0], delete_key.size());
            // needed because ikey is backed by key
            ParseInternalKey(key, &ikey);
            // no value associated with delete
            value.clear();
            RecordTick(options_.statistics, COMPACTION_KEY_DROP_USER);
          } else if (value_changed) {
            value = compaction_filter_value;
          }
        }

J
jorlow@chromium.org 已提交
1975 1976
      }

      // If there are no snapshots, then this kv affects visibility at tip.
      // Otherwise, search through all existing snapshots to find
      // the earliest snapshot that is affected by this kv.
      SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot
      SequenceNumber visible = visible_at_tip ?
        visible_at_tip :
        findEarliestVisibleSnapshot(ikey.sequence,
                                    compact->existing_snapshots,
                                    &prev_snapshot);

      if (visible_in_snapshot == visible) {
        // If the earliest snapshot in which this key is visible is the
        // same as the visibility of a previous instance of the same key,
        // then this kv is not visible in any snapshot.
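        // Example (illustrative): with snapshots at sequences 5 and 12, two
        // versions of a key at sequences 9 and 7 both map to earliest visible
        // snapshot 12, so the older version (sequence 7) can be dropped here.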
        // Hidden by a newer entry for same user key
        // TODO: why not > ?
        assert(last_sequence_for_key >= ikey.sequence);
        drop = true;    // (A)
1995
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_NEWER_ENTRY);
J
jorlow@chromium.org 已提交
1996
      } else if (ikey.type == kTypeDeletion &&
1997
                 ikey.sequence <= earliest_snapshot &&
J
jorlow@chromium.org 已提交
1998 1999 2000 2001 2002 2003 2004 2005 2006
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
2007
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_OBSOLETE);
2008 2009 2010 2011 2012 2013 2014
      } else if (ikey.type == kTypeMerge) {
        // We know the merge type entry is not hidden, otherwise we would
        // have hit (A)
        // We encapsulate the merge related state machine in a different
        // object to minimize change to the existing flow. Turn out this
        // logic could also be nicely re-used for memtable flush purge
        // optimization in BuildTable.
M
Mayank Agarwal 已提交
2015 2016
        merge.MergeUntil(input.get(), prev_snapshot, bottommost_level,
                         options_.statistics);
2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034
        current_entry_is_merging = true;
        if (merge.IsSuccess()) {
          // Successfully found Put/Delete/(end-of-key-range) while merging
          // Get the merge result
          key = merge.key();
          ParseInternalKey(key, &ikey);
          value = merge.value();
        } else {
          // Did not find a Put/Delete/(end-of-key-range) while merging
          // We now have some stack of merge operands to write out.
          // NOTE: key,value, and ikey are now referring to old entries.
          //       These will be correctly set below.
          assert(!merge.keys().empty());
          assert(merge.keys().size() == merge.values().size());

          // Hack to make sure last_sequence_for_key is correct
          ParseInternalKey(merge.keys().front(), &ikey);
        }
J
jorlow@chromium.org 已提交
2035 2036 2037
      }

      last_sequence_for_key = ikey.sequence;
2038
      visible_in_snapshot = visible;
J
jorlow@chromium.org 已提交
2039 2040
    }
#if 0
2041
    Log(options_.info_log,
J
jorlow@chromium.org 已提交
2042
        "  Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
2043
        "%d smallest_snapshot: %d level: %d bottommost %d",
J
jorlow@chromium.org 已提交
2044
        ikey.user_key.ToString().c_str(),
D
dgrogan@chromium.org 已提交
2045
        (int)ikey.sequence, ikey.type, kTypeValue, drop,
J
jorlow@chromium.org 已提交
2046
        compact->compaction->IsBaseLevelForKey(ikey.user_key),
2047 2048
        (int)last_sequence_for_key, (int)earliest_snapshot,
        compact->compaction->level(), bottommost_level);
J
jorlow@chromium.org 已提交
2049 2050 2051
#endif

    if (!drop) {
2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067
      // We may write a single key (e.g.: for Put/Delete or successful merge).
      // Or we may instead have to write a sequence/list of keys.
      // We have to write a sequence iff we have an unsuccessful merge
      bool has_merge_list = current_entry_is_merging && !merge.IsSuccess();
      const std::deque<std::string>* keys = nullptr;
      const std::deque<std::string>* values = nullptr;
      std::deque<std::string>::const_reverse_iterator key_iter;
      std::deque<std::string>::const_reverse_iterator value_iter;
      if (has_merge_list) {
        keys = &merge.keys();
        values = &merge.values();
        key_iter = keys->rbegin();    // The back (*rbegin()) is the first key
        value_iter = values->rbegin();

        key = Slice(*key_iter);
        value = Slice(*value_iter);
2068
      }
2069

2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080
      // If we have a list of keys to write, traverse the list.
      // If we have a single key to write, simply write that key.
      while (true) {
        // Invariant: key,value,ikey will always be the next entry to write
        char* kptr = (char*)key.data();
        std::string kstr;

        // Zeroing out the sequence number leads to better compression.
        // If this is the bottommost level (no files in lower levels)
        // and the earliest snapshot is larger than this seqno
        // then we can squash the seqno to zero.
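        // Example (illustrative): under level-style compaction at the
        // bottommost level with earliest_snapshot == 100, a kTypeValue entry
        // at sequence 42 is rewritten with sequence 0; kTypeMerge entries are
        // never squashed this way.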
2081 2082
        if (options_.compaction_style == kCompactionStyleLevel &&
            bottommost_level && ikey.sequence < earliest_snapshot &&
2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093
            ikey.type != kTypeMerge) {
          assert(ikey.type != kTypeDeletion);
          // make a copy because updating in place would cause problems
          // with the priority queue that is managing the input key iterator
          kstr.assign(key.data(), key.size());
          kptr = (char *)kstr.c_str();
          UpdateInternalKey(kptr, key.size(), (uint64_t)0, ikey.type);
        }

        Slice newkey(kptr, key.size());
        assert((key.clear(), 1)); // we do not need 'key' anymore
2094

2095 2096 2097 2098 2099 2100 2101
        // Open output file if necessary
        if (compact->builder == nullptr) {
          status = OpenCompactionOutputFile(compact);
          if (!status.ok()) {
            break;
          }
        }
2102 2103

        SequenceNumber seqno = GetInternalKeySeqno(newkey);
2104 2105
        if (compact->builder->NumEntries() == 0) {
          compact->current_output()->smallest.DecodeFrom(newkey);
2106 2107 2108 2109
          compact->current_output()->smallest_seqno = seqno;
        } else {
          compact->current_output()->smallest_seqno =
            std::min(compact->current_output()->smallest_seqno, seqno);
2110 2111 2112
        }
        compact->current_output()->largest.DecodeFrom(newkey);
        compact->builder->Add(newkey, value);
2113 2114
        compact->current_output()->largest_seqno =
          std::max(compact->current_output()->largest_seqno, seqno);
2115 2116 2117 2118 2119 2120 2121 2122

        // Close output file if it is big enough
        if (compact->builder->FileSize() >=
            compact->compaction->MaxOutputFileSize()) {
          status = FinishCompactionOutputFile(compact, input.get());
          if (!status.ok()) {
            break;
          }
J
jorlow@chromium.org 已提交
2123 2124
        }

2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144
        // If we have a list of entries, move to next element
        // If we only had one entry, then break the loop.
        if (has_merge_list) {
          ++key_iter;
          ++value_iter;

          // If at end of list
          if (key_iter == keys->rend() || value_iter == values->rend()) {
            // Sanity Check: if one ends, then both end
            assert(key_iter == keys->rend() && value_iter == values->rend());
            break;
          }

          // Otherwise not at end of list. Update key, value, and ikey.
          key = Slice(*key_iter);
          value = Slice(*value_iter);
          ParseInternalKey(key, &ikey);

        } else {
          // Only had one item to begin with (Put/Delete)
J
jorlow@chromium.org 已提交
2145 2146 2147 2148 2149
          break;
        }
      }
    }

2150
    // MergeUntil has moved input to the next entry
2151
    if (!current_entry_is_merging) {
2152 2153
      input->Next();
    }
J
jorlow@chromium.org 已提交
2154 2155 2156
  }

  if (status.ok() && shutting_down_.Acquire_Load()) {
2157
    status = Status::IOError("Database shutdown started during compaction");
J
jorlow@chromium.org 已提交
2158
  }
2159
  if (status.ok() && compact->builder != nullptr) {
2160
    status = FinishCompactionOutputFile(compact, input.get());
J
jorlow@chromium.org 已提交
2161 2162 2163 2164
  }
  if (status.ok()) {
    status = input->status();
  }
2165
  input.reset();
J
jorlow@chromium.org 已提交
2166

2167 2168
  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
A
Abhishek Kona 已提交
2169 2170 2171
  if (options_.statistics) {
    options_.statistics->measureTime(COMPACTION_TIME, stats.micros);
  }
M
Mark Callaghan 已提交
2172 2173
  stats.files_in_leveln = compact->compaction->num_input_files(0);
  stats.files_in_levelnp1 = compact->compaction->num_input_files(1);
2174 2175

  int num_output_files = compact->outputs.size();
2176
  if (compact->builder != nullptr) {
    // An error occurred so ignore the last output.
    assert(num_output_files > 0);
    --num_output_files;
  }
  stats.files_out_levelnp1 = num_output_files;
M
Mark Callaghan 已提交
2182 2183 2184 2185 2186 2187 2188

  for (int i = 0; i < compact->compaction->num_input_files(0); i++)
    stats.bytes_readn += compact->compaction->input(0, i)->file_size;

  for (int i = 0; i < compact->compaction->num_input_files(1); i++)
    stats.bytes_readnp1 += compact->compaction->input(1, i)->file_size;

2189
  for (int i = 0; i < num_output_files; i++) {
2190 2191 2192
    stats.bytes_written += compact->outputs[i].file_size;
  }

J
jorlow@chromium.org 已提交
2193
  mutex_.Lock();
2194
  stats_[compact->compaction->output_level()].Add(stats);
J
jorlow@chromium.org 已提交
2195

  // if there were any unused file numbers (mostly in case of
  // compaction error), free up the entries from pending_outputs_
  ReleaseCompactionUnusedFileNumbers(compact);

J
jorlow@chromium.org 已提交
2200 2201 2202
  if (status.ok()) {
    status = InstallCompactionResults(compact);
  }
2203
  VersionSet::LevelSummaryStorage tmp;
2204
  Log(options_.info_log,
M
Mark Callaghan 已提交
2205
      "compacted to: %s, %.1f MB/sec, level %d, files in(%d, %d) out(%d) "
2206 2207
      "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
      "write-amplify(%.1f) %s\n",
M
Mark Callaghan 已提交
2208 2209 2210
      versions_->LevelSummary(&tmp),
      (stats.bytes_readn + stats.bytes_readnp1 + stats.bytes_written) /
          (double) stats.micros,
2211
      compact->compaction->output_level(),
M
Mark Callaghan 已提交
2212 2213 2214 2215
      stats.files_in_leveln, stats.files_in_levelnp1, stats.files_out_levelnp1,
      stats.bytes_readn / 1048576.0,
      stats.bytes_readnp1 / 1048576.0,
      stats.bytes_written / 1048576.0,
2216
      (stats.bytes_written + stats.bytes_readnp1 + stats.bytes_readn) /
2217
          (double) stats.bytes_readn,
2218
      stats.bytes_written / (double) stats.bytes_readn,
2219
      status.ToString().c_str());
M
Mark Callaghan 已提交
2220

J
jorlow@chromium.org 已提交
2221 2222 2223
  return status;
}

namespace {
struct IterState {
  port::Mutex* mu;
  Version* version;
  std::vector<MemTable*> mem; // includes both mem_ and imm_
};

static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  state->mu->Lock();
  for (unsigned int i = 0; i < state->mem.size(); i++) {
    state->mem[i]->Unref();
  }
  state->version->Unref();
  state->mu->Unlock();
  delete state;
}
}  // namespace

J
jorlow@chromium.org 已提交
2243 2244
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot) {
2245
  IterState* cleanup = new IterState;
J
jorlow@chromium.org 已提交
2246
  mutex_.Lock();
2247
  *latest_snapshot = versions_->LastSequence();
J
jorlow@chromium.org 已提交
2248

2249
  // Collect together all needed child iterators for mem
J
jorlow@chromium.org 已提交
2250
  std::vector<Iterator*> list;
2251
  mem_->Ref();
J
Jim Paton 已提交
2252 2253
  list.push_back(mem_->NewIterator(options.prefix));

2254 2255 2256 2257 2258 2259 2260 2261
  cleanup->mem.push_back(mem_);

  // Collect together all needed child iterators for imm_
  std::vector<MemTable*> immutables;
  imm_.GetMemTables(&immutables);
  for (unsigned int i = 0; i < immutables.size(); i++) {
    MemTable* m = immutables[i];
    m->Ref();
J
Jim Paton 已提交
2262
    list.push_back(m->NewIterator(options.prefix));
2263
    cleanup->mem.push_back(m);
2264
  }
2265 2266

  // Collect iterators for files in L0 - Ln
2267
  versions_->current()->AddIterators(options, storage_options_, &list);
J
jorlow@chromium.org 已提交
2268 2269 2270
  Iterator* internal_iter =
      NewMergingIterator(&internal_comparator_, &list[0], list.size());
  versions_->current()->Ref();
2271 2272 2273

  cleanup->mu = &mutex_;
  cleanup->version = versions_->current();
2274
  internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);
J
jorlow@chromium.org 已提交
2275 2276 2277 2278 2279 2280 2281 2282 2283 2284

  mutex_.Unlock();
  return internal_iter;
}

Iterator* DBImpl::TEST_NewInternalIterator() {
  SequenceNumber ignored;
  return NewInternalIterator(ReadOptions(), &ignored);
}

J
jorlow@chromium.org 已提交
2285
int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
2286 2287 2288 2289
  MutexLock l(&mutex_);
  return versions_->MaxNextLevelOverlappingBytes();
}

J
jorlow@chromium.org 已提交
2290 2291 2292
Status DBImpl::Get(const ReadOptions& options,
                   const Slice& key,
                   std::string* value) {
2293 2294 2295 2296 2297 2298
  return GetImpl(options, key, value);
}

Status DBImpl::GetImpl(const ReadOptions& options,
                       const Slice& key,
                       std::string* value,
2299
                       bool* value_found) {
2300
  Status s;
2301

2302
  StopWatch sw(env_, options_.statistics, DB_GET);
2303
  SequenceNumber snapshot;
2304
  mutex_.Lock();
2305
  if (options.snapshot != nullptr) {
2306 2307 2308
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
J
jorlow@chromium.org 已提交
2309
  }
2310

2311
  MemTable* mem = mem_;
2312
  MemTableList imm = imm_;
2313
  Version* current = versions_->current();
2314
  mem->Ref();
2315
  imm.RefAll();
2316
  current->Ref();
2317

2318 2319
  // Unlock while reading from files and memtables
  mutex_.Unlock();
2320
  bool have_stat_update = false;
2321
  Version::GetStats stats;
2322

2323 2324 2325 2326

  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

2327
  // First look in the memtable, then in the immutable memtable (if any).
2328
  // s is both in/out. When in, s could either be OK or MergeInProgress.
2329
  // merge_operands will contain the sequence of merges in the latter case.
2330
  LookupKey lkey(key, snapshot);
2331
  if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
2332
    // Done
2333
  } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
2334 2335
    // Done
  } else {
2336
    current->Get(options, lkey, value, &s, &merge_operands, &stats,
2337
                 options_, value_found);
2338
    have_stat_update = true;
2339
  }
2340
  mutex_.Lock();
2341

2342 2343
  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
2344 2345
    MaybeScheduleCompaction();
  }
2346
  mem->Unref();
2347
  imm.UnrefAll();
2348
  current->Unref();
2349 2350 2351
  mutex_.Unlock();

  // Note, tickers are atomic now - no lock protection needed any more.
2352
  RecordTick(options_.statistics, NUMBER_KEYS_READ);
2353
  RecordTick(options_.statistics, BYTES_READ, value->size());
2354
  return s;
J
jorlow@chromium.org 已提交
2355 2356
}

2357 2358 2359 2360 2361 2362
std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                     const std::vector<Slice>& keys,
                                     std::vector<std::string>* values) {

  StopWatch sw(env_, options_.statistics, DB_MULTIGET);
  SequenceNumber snapshot;
2363
  mutex_.Lock();
2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382
  if (options.snapshot != nullptr) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTableList imm = imm_;
  Version* current = versions_->current();
  mem->Ref();
  imm.RefAll();
  current->Ref();

  // Unlock while reading from files and memtables

  mutex_.Unlock();
  bool have_stat_update = false;
  Version::GetStats stats;

2383 2384 2385
  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396
  // Note: this always resizes the values array
  int numKeys = keys.size();
  std::vector<Status> statList(numKeys);
  values->resize(numKeys);

  // Keep track of bytes that we read for statistics-recording later
  uint64_t bytesRead = 0;

  // For each of the given keys, apply the entire "get" process as follows:
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
2397 2398 2399
  // merge_operands will contain the sequence of merges in the latter case.
  for (int i=0; i<numKeys; ++i) {
    merge_operands.clear();
2400 2401 2402 2403
    Status& s = statList[i];
    std::string* value = &(*values)[i];

    LookupKey lkey(keys[i], snapshot);
2404
    if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
2405
      // Done
2406
    } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
2407 2408
      // Done
    } else {
2409
      current->Get(options, lkey, value, &s, &merge_operands, &stats, options_);
2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426
      have_stat_update = true;
    }

    if (s.ok()) {
      bytesRead += value->size();
    }
  }

  // Post processing (decrement reference counts and record statistics)
  mutex_.Lock();
  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
    MaybeScheduleCompaction();
  }
  mem->Unref();
  imm.UnrefAll();
  current->Unref();
2427 2428
  mutex_.Unlock();

2429 2430 2431 2432 2433 2434 2435
  RecordTick(options_.statistics, NUMBER_MULTIGET_CALLS);
  RecordTick(options_.statistics, NUMBER_MULTIGET_KEYS_READ, numKeys);
  RecordTick(options_.statistics, NUMBER_MULTIGET_BYTES_READ, bytesRead);

  return statList;
}

bool DBImpl::KeyMayExist(const ReadOptions& options,
                         const Slice& key,
                         std::string* value,
                         bool* value_found) {
  if (value_found != nullptr) {
    *value_found = true; // falsify later if key-may-exist but can't fetch value
  }
  ReadOptions roptions = options;
  roptions.read_tier = kBlockCacheTier; // read from block cache only
  return GetImpl(roptions, key, value, value_found).ok();
}

J
jorlow@chromium.org 已提交
2448 2449
Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  SequenceNumber latest_snapshot;
T
Tyler Harter 已提交
2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462
  Iterator* iter = NewInternalIterator(options, &latest_snapshot);
  iter = NewDBIterator(
             &dbname_, env_, options_, user_comparator(), iter,
             (options.snapshot != nullptr
              ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
              : latest_snapshot));
  if (options.prefix) {
    // use extra wrapper to exclude any keys from the results which
    // don't begin with the prefix
    iter = new PrefixFilterIterator(iter, *options.prefix,
                                    options_.prefix_extractor);
  }
  return iter;
J
jorlow@chromium.org 已提交
2463 2464 2465 2466
}

const Snapshot* DBImpl::GetSnapshot() {
  MutexLock l(&mutex_);
2467
  return snapshots_.New(versions_->LastSequence());
J
jorlow@chromium.org 已提交
2468 2469 2470 2471
}

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
2472
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
J
jorlow@chromium.org 已提交
2473 2474 2475 2476 2477 2478 2479
}

// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);
}

2480 2481 2482 2483 2484 2485 2486 2487 2488
Status DBImpl::Merge(const WriteOptions& o, const Slice& key,
                     const Slice& val) {
  if (!options_.merge_operator) {
    return Status::NotSupported("Provide a merge_operator when opening DB");
  } else {
    return DB::Merge(o, key, val);
  }
}

Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
}

Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
  Writer w(&mutex_);
  w.batch = my_batch;
  w.sync = options.sync;
  w.disableWAL = options.disableWAL;
  w.done = false;

  StopWatch sw(env_, options_.statistics, DB_WRITE);
  MutexLock l(&mutex_);
  writers_.push_back(&w);
  while (!w.done && &w != writers_.front()) {
    w.cv.Wait();
  }
  if (w.done) {
    return w.status;
2508 2509 2510
  }

  // May temporarily unlock and wait.
2511
  Status status = MakeRoomForWrite(my_batch == nullptr);
D
dgrogan@chromium.org 已提交
2512
  uint64_t last_sequence = versions_->LastSequence();
2513
  Writer* last_writer = &w;
2514
  if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
2515 2516 2517 2518 2519
    // TODO: BuildBatchGroup physically concatenate/copy all write batches into
    // a new one. Mem copy is done with the lock held. Ideally, we only need
    // the lock to obtain the last_writer and the references to all batches.
    // Creation (copy) of the merged batch could have been done outside of the
    // lock protected region.
2520
    WriteBatch* updates = BuildBatchGroup(&last_writer);
2521

2522 2523 2524 2525
    // Add to log and apply to memtable.  We can release the lock
    // during this phase since &w is currently responsible for logging
    // and protects against concurrent loggers and concurrent writes
    // into mem_.
2526
    {
2527
      mutex_.Unlock();
2528 2529 2530 2531 2532 2533 2534 2535 2536
      const SequenceNumber current_sequence = last_sequence + 1;
      WriteBatchInternal::SetSequence(updates, current_sequence);
      int my_batch_count = WriteBatchInternal::Count(updates);
      last_sequence += my_batch_count;
      // Record statistics
      RecordTick(options_.statistics, NUMBER_KEYS_WRITTEN, my_batch_count);
      RecordTick(options_.statistics,
                 BYTES_WRITTEN,
                 WriteBatchInternal::ByteSize(updates));
2537 2538
      if (options.disableWAL) {
        flush_on_destroy_ = true;
2539 2540 2541
      }

      if (!options.disableWAL) {
H
heyongqiang 已提交
2542 2543
        status = log_->AddRecord(WriteBatchInternal::Contents(updates));
        if (status.ok() && options.sync) {
          if (options_.use_fsync) {
            // The stopwatch must be a named local so that it times the
            // Fsync/Sync call below instead of being destroyed immediately
            // as an unnamed temporary.
            StopWatch sync_sw(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
            status = log_->file()->Fsync();
          } else {
            StopWatch sync_sw(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
            status = log_->file()->Sync();
          }
H
heyongqiang 已提交
2551
        }
2552 2553
      }
      if (status.ok()) {
2554 2555
        status = WriteBatchInternal::InsertInto(updates, mem_, &options_, this,
                                                options_.filter_deletes);
2556 2557 2558 2559 2560 2561 2562
        if (!status.ok()) {
          // Panic for in-memory corruptions
          // Note that existing logic was not sound. Any partial failure writing
          // into the memtable would result in a state that some write ops might
          // have succeeded in memtable but Status reports error for all writes.
          throw std::runtime_error("In memory WriteBatch corruption!");
        }
2563
        SetTickerCount(options_.statistics, SEQUENCE_NUMBER, last_sequence);
2564 2565 2566
      }
      mutex_.Lock();
      if (status.ok()) {
2567 2568
        versions_->SetLastSequence(last_sequence);
        last_flushed_sequence_ = current_sequence;
2569
      }
J
jorlow@chromium.org 已提交
2570
    }
2571
    if (updates == &tmp_batch_) tmp_batch_.Clear();
J
jorlow@chromium.org 已提交
2572
  }
2573

2574 2575 2576 2577 2578 2579 2580
  while (true) {
    Writer* ready = writers_.front();
    writers_.pop_front();
    if (ready != &w) {
      ready->status = status;
      ready->done = true;
      ready->cv.Signal();
2581
    }
2582 2583
    if (ready == last_writer) break;
  }
2584

2585 2586 2587
  // Notify new head of write queue
  if (!writers_.empty()) {
    writers_.front()->cv.Signal();
2588
  }
J
jorlow@chromium.org 已提交
2589 2590 2591
  return status;
}

// REQUIRES: Writer list must be non-empty
// REQUIRES: First writer must have a non-nullptr batch
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
  assert(!writers_.empty());
  Writer* first = writers_.front();
  WriteBatch* result = first->batch;
  assert(result != nullptr);

  size_t size = WriteBatchInternal::ByteSize(first->batch);

  // Allow the group to grow up to a maximum size, but if the
  // original write is small, limit the growth so we do not slow
  // down the small write too much.
  size_t max_size = 1 << 20;
  if (size <= (128<<10)) {
    max_size = size + (128<<10);
  }
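  // Example (illustrative): a 4KB leading batch caps the group at
  // 4KB + 128KB, while a 512KB leading batch may grow to the full 1MB limit.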

  *last_writer = first;
  std::deque<Writer*>::iterator iter = writers_.begin();
  ++iter;  // Advance past "first"
  for (; iter != writers_.end(); ++iter) {
    Writer* w = *iter;
    if (w->sync && !first->sync) {
      // Do not include a sync write into a batch handled by a non-sync write.
      break;
    }

    if (!w->disableWAL && first->disableWAL) {
      // Do not include a write that needs WAL into a batch that has
      // WAL disabled.
      break;
    }

    if (w->batch != nullptr) {
      size += WriteBatchInternal::ByteSize(w->batch);
      if (size > max_size) {
        // Do not make batch too big
        break;
      }

      // Append to *result
      if (result == first->batch) {
        // Switch to temporary batch instead of disturbing caller's batch
        result = &tmp_batch_;
        assert(WriteBatchInternal::Count(result) == 0);
        WriteBatchInternal::Append(result, first->batch);
      }
      WriteBatchInternal::Append(result, w->batch);
    }
    *last_writer = w;
  }
  return result;
}

// This function computes the amount of time in microseconds by which a write
// should be delayed based on the number of level-0 files according to the
// following formula:
// if n < bottom, return 0;
// if n >= top, return 1000;
// otherwise, let r = (n - bottom) /
//                    (top - bottom)
//  and return r^2 * 1000.
// The goal of this formula is to gradually increase the rate at which writes
// are slowed. We also tried linear delay (r * 1000), but it seemed to do
// slightly worse. There is no other particular reason for choosing quadratic.
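// Worked example (illustrative): with bottom = 8, top = 12 and n = 10,
// r = (10 - 8) / (12 - 8) = 0.5, so the returned delay is 0.25 * 1000 = 250
// microseconds.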
uint64_t DBImpl::SlowdownAmount(int n, int top, int bottom) {
  uint64_t delay;
  if (n >= top) {
    delay = 1000;
  }
  else if (n < bottom) {
    delay = 0;
  }
  else {
    // If we are here, we know that:
    //   level0_start_slowdown <= n < level0_slowdown
    // since the previous two conditions are false.
    float how_much =
      (float) (n - bottom) /
              (top - bottom);
    delay = how_much * how_much * 1000;
  }
  assert(delay <= 1000);
  return delay;
}

// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
Status DBImpl::MakeRoomForWrite(bool force) {
  mutex_.AssertHeld();
  assert(!writers_.empty());
  bool allow_delay = !force;
  bool allow_hard_rate_limit_delay = !force;
  bool allow_soft_rate_limit_delay = !force;
  uint64_t rate_limit_delay_millis = 0;
  Status s;
  double score;

2691 2692 2693 2694 2695
  while (true) {
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
2696 2697
    } else if (
        allow_delay &&
2698
        versions_->NumLevelFiles(0) >=
2699
          options_.level0_slowdown_writes_trigger) {
2700 2701 2702
      // We are getting close to hitting a hard limit on the number of
      // L0 files.  Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
2703
      // individual write by 0-1ms to reduce latency variance.  Also,
2704 2705 2706
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
      mutex_.Unlock();
2707
      uint64_t delayed;
J
Jim Paton 已提交
2708 2709
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_SLOWDOWN_COUNT);
J
Jim Paton 已提交
2710 2711 2712 2713 2714
        env_->SleepForMicroseconds(
          SlowdownAmount(versions_->NumLevelFiles(0),
                         options_.level0_slowdown_writes_trigger,
                         options_.level0_stop_writes_trigger)
        );
2715
        delayed = sw.ElapsedMicros();
J
Jim Paton 已提交
2716
      }
2717
      RecordTick(options_.statistics, STALL_L0_SLOWDOWN_MICROS, delayed);
2718
      stall_level0_slowdown_ += delayed;
J
Jim Paton 已提交
2719
      stall_level0_slowdown_count_++;
2720
      allow_delay = false;  // Do not delay a single write more than once
2721 2722
      //Log(options_.info_log,
      //    "delaying write %llu usecs for level0_slowdown_writes_trigger\n",
2723
      //     (long long unsigned int)delayed);
2724
      mutex_.Lock();
2725
      delayed_writes_++;
    } else if (!force &&
               (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
      // There is room in current memtable
      if (allow_delay) {
        DelayLoggingAndReset();
      }
      break;
    } else if (imm_.size() == options_.max_write_buffer_number - 1) {
      // We have filled up the current memtable, but the previous
      // ones are still being compacted, so we wait.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for memtable compaction...\n");
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics,
          STALL_MEMTABLE_COMPACTION_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_MEMTABLE_COMPACTION_MICROS, stall);
      stall_memtable_compaction_ += stall;
      stall_memtable_compaction_count_++;
    } else if (versions_->NumLevelFiles(0) >=
               options_.level0_stop_writes_trigger) {
      // There are too many level-0 files.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for fewer level0 files...\n");
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_NUM_FILES_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_L0_NUM_FILES_MICROS, stall);
      stall_level0_num_files_ += stall;
      stall_level0_num_files_count_++;
    } else if (
        allow_hard_rate_limit_delay &&
        options_.hard_rate_limit > 1.0 &&
        (score = versions_->MaxCompactionScore()) > options_.hard_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      int max_level = versions_->MaxCompactionScoreLevel();
      mutex_.Unlock();
      uint64_t delayed;
      {
        StopWatch sw(env_, options_.statistics, HARD_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(1000);
        delayed = sw.ElapsedMicros();
      }
      stall_leveln_slowdown_[max_level] += delayed;
      stall_leveln_slowdown_count_[max_level]++;
      // Make sure the following value doesn't round to zero.
      uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
      rate_limit_delay_millis += rate_limit;
      RecordTick(options_.statistics, RATE_LIMIT_DELAY_MILLIS, rate_limit);
      if (options_.rate_limit_delay_max_milliseconds > 0 &&
          rate_limit_delay_millis >=
          (unsigned)options_.rate_limit_delay_max_milliseconds) {
        allow_hard_rate_limit_delay = false;
      }
      // Log(options_.info_log,
      //    "delaying write %llu usecs for rate limits with max score %.2f\n",
      //    (long long unsigned int)delayed, score);
      mutex_.Lock();
    } else if (
        allow_soft_rate_limit_delay &&
        options_.soft_rate_limit > 0.0 &&
        (score = versions_->MaxCompactionScore()) > options_.soft_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      // TODO: add statistics
      mutex_.Unlock();
      {
        StopWatch sw(env_, options_.statistics, SOFT_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(SlowdownAmount(
          score,
          options_.soft_rate_limit,
          options_.hard_rate_limit)
        );
        rate_limit_delay_millis += sw.ElapsedMicros();
      }
      allow_soft_rate_limit_delay = false;
      mutex_.Lock();
    } else {
      // Attempt to switch to a new memtable and trigger compaction of old
      DelayLoggingAndReset();
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      unique_ptr<WritableFile> lfile;
      EnvOptions soptions(storage_options_);
      soptions.use_mmap_writes = false;
      s = env_->NewWritableFile(
            LogFileName(dbname_, new_log_number),
            &lfile,
            soptions
          );
      if (!s.ok()) {
        // Avoid chewing through file number space in a tight loop.
        versions_->ReuseFileNumber(new_log_number);
        break;
      }
      // Our final size should be less than write_buffer_size
      // (compression, etc) but err on the side of caution.
      lfile->SetPreallocationBlockSize(1.1 * options_.write_buffer_size);
      logfile_number_ = new_log_number;
      log_.reset(new log::Writer(std::move(lfile)));
      mem_->SetNextLogNumber(logfile_number_);
      imm_.Add(mem_);
      if (force) {
        imm_.FlushRequested();
      }
      mem_ = new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_);
      mem_->Ref();
      mem_->SetLogNumber(logfile_number_);
      force = false;   // Do not force another compaction if have room
      MaybeScheduleCompaction();
    }
  }
  return s;
}
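
// Usage sketch (illustrative, not part of the original file): callers read
// these properties through the public DB interface, for example
//   std::string stats;
//   db->GetProperty("leveldb.stats", &stats);
//   db->GetProperty("leveldb.num-files-at-level0", &stats);
// Unknown property names simply return false.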

bool DBImpl::GetProperty(const Slice& property, std::string* value) {
  value->clear();

  MutexLock l(&mutex_);
  Slice in = property;
  Slice prefix("leveldb.");
  if (!in.starts_with(prefix)) return false;
  in.remove_prefix(prefix.size());

  if (in.starts_with("num-files-at-level")) {
    in.remove_prefix(strlen("num-files-at-level"));
    uint64_t level;
    bool ok = ConsumeDecimalNumber(&in, &level) && in.empty();
    if (!ok || (int)level >= NumberLevels()) {
      return false;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "%d",
               versions_->NumLevelFiles(static_cast<int>(level)));
      *value = buf;
      return true;
    }
  } else if (in == "levelstats") {
    char buf[1000];
    snprintf(buf, sizeof(buf),
             "Level Files Size(MB)\n"
             "--------------------\n");
    value->append(buf);

    for (int level = 0; level < NumberLevels(); level++) {
      snprintf(buf, sizeof(buf),
               "%3d %8d %8.0f\n",
               level,
               versions_->NumLevelFiles(level),
               versions_->NumLevelBytes(level) / 1048576.0);
      value->append(buf);
    }
    return true;

  } else if (in == "stats") {
    char buf[1000];
    uint64_t total_bytes_written = 0;
    uint64_t total_bytes_read = 0;
    uint64_t micros_up = env_->NowMicros() - started_at_;
    // Add "+1" to make sure seconds_up is > 0 and avoid NaN later
    double seconds_up = (micros_up + 1) / 1000000.0;
    uint64_t total_slowdown = 0;
    uint64_t total_slowdown_count = 0;
    uint64_t interval_bytes_written = 0;
    uint64_t interval_bytes_read = 0;
    uint64_t interval_bytes_new = 0;
    double   interval_seconds_up = 0;

    // Pardon the long line but I think it is easier to read this way.
    snprintf(buf, sizeof(buf),
             "                               Compactions\n"
             "Level  Files Size(MB) Score Time(sec)  Read(MB) Write(MB)    Rn(MB)  Rnp1(MB)  Wnew(MB) RW-Amplify Read(MB/s) Write(MB/s)      Rn     Rnp1     Wnp1     NewW    Count  Ln-stall Stall-cnt\n"
             "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"
             );
    value->append(buf);
    for (int level = 0; level < NumberLevels(); level++) {
      int files = versions_->NumLevelFiles(level);
      if (stats_[level].micros > 0 || files > 0) {
        int64_t bytes_read = stats_[level].bytes_readn +
                             stats_[level].bytes_readnp1;
        int64_t bytes_new = stats_[level].bytes_written -
                            stats_[level].bytes_readnp1;
        double amplify = (stats_[level].bytes_readn == 0)
            ? 0.0
            : (stats_[level].bytes_written +
               stats_[level].bytes_readnp1 +
               stats_[level].bytes_readn) /
                (double) stats_[level].bytes_readn;

        total_bytes_read += bytes_read;
        total_bytes_written += stats_[level].bytes_written;

        snprintf(
            buf, sizeof(buf),
            "%3d %8d %8.0f %5.1f %9.0f %9.0f %9.0f %9.0f %9.0f %9.0f %10.1f %9.1f %11.1f %8d %8d %8d %8d %8d %9.1f %9lu\n",
            level,
            files,
            versions_->NumLevelBytes(level) / 1048576.0,
            versions_->NumLevelBytes(level) /
                versions_->MaxBytesForLevel(level),
            stats_[level].micros / 1e6,
            bytes_read / 1048576.0,
            stats_[level].bytes_written / 1048576.0,
            stats_[level].bytes_readn / 1048576.0,
            stats_[level].bytes_readnp1 / 1048576.0,
            bytes_new / 1048576.0,
            amplify,
            // +1 to avoid division by 0
            (bytes_read / 1048576.0) / ((stats_[level].micros+1) / 1000000.0),
            (stats_[level].bytes_written / 1048576.0) /
                ((stats_[level].micros+1) / 1000000.0),
            stats_[level].files_in_leveln,
            stats_[level].files_in_levelnp1,
            stats_[level].files_out_levelnp1,
            stats_[level].files_out_levelnp1 - stats_[level].files_in_levelnp1,
            stats_[level].count,
            stall_leveln_slowdown_[level] / 1000000.0,
            (unsigned long) stall_leveln_slowdown_count_[level]);
        total_slowdown += stall_leveln_slowdown_[level];
        total_slowdown_count += stall_leveln_slowdown_count_[level];
        value->append(buf);
      }
    }

    interval_bytes_new = stats_[0].bytes_written - last_stats_.bytes_new_;
    interval_bytes_read = total_bytes_read - last_stats_.bytes_read_;
    interval_bytes_written = total_bytes_written - last_stats_.bytes_written_;
    interval_seconds_up = seconds_up - last_stats_.seconds_up_;

    snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
             seconds_up, interval_seconds_up);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (GB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             stats_[0].bytes_written / (1048576.0 * 1024),
             total_bytes_read / (1048576.0 * 1024),
             total_bytes_written / (1048576.0 * 1024),
             (total_bytes_read + total_bytes_written) / (1048576.0 * 1024));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             stats_[0].bytes_written / 1048576.0 / seconds_up,
             total_bytes_read / 1048576.0 / seconds_up,
             total_bytes_written / 1048576.0 / seconds_up,
             (total_bytes_read + total_bytes_written) / 1048576.0 / seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification cumulative: %.1f write, %.1f compaction\n",
             (double) total_bytes_written / (stats_[0].bytes_written+1),
             (double) (total_bytes_written + total_bytes_read)
                  / (stats_[0].bytes_written+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             interval_bytes_new / 1048576.0,
             interval_bytes_read / 1048576.0,
             interval_bytes_written / 1048576.0,
             (interval_bytes_read + interval_bytes_written) / 1048576.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             interval_bytes_new / 1048576.0 / interval_seconds_up,
             interval_bytes_read / 1048576.0 / interval_seconds_up,
             interval_bytes_written / 1048576.0 / interval_seconds_up,
             (interval_bytes_read + interval_bytes_written)
                 / 1048576.0 / interval_seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification interval: %.1f write, %.1f compaction\n",
             (double) interval_bytes_written / (interval_bytes_new+1),
             (double) (interval_bytes_written + interval_bytes_read) /
                  (interval_bytes_new+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(secs): %.3f level0_slowdown, %.3f level0_numfiles, "
            "%.3f memtable_compaction, %.3f leveln_slowdown\n",
            stall_level0_slowdown_ / 1000000.0,
            stall_level0_num_files_ / 1000000.0,
            stall_memtable_compaction_ / 1000000.0,
            total_slowdown / 1000000.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(count): %lu level0_slowdown, %lu level0_numfiles, "
            "%lu memtable_compaction, %lu leveln_slowdown\n",
            (unsigned long) stall_level0_slowdown_count_,
            (unsigned long) stall_level0_num_files_count_,
            (unsigned long) stall_memtable_compaction_count_,
            (unsigned long) total_slowdown_count);
    value->append(buf);

    last_stats_.bytes_read_ = total_bytes_read;
    last_stats_.bytes_written_ = total_bytes_written;
    last_stats_.bytes_new_ = stats_[0].bytes_written;
    last_stats_.seconds_up_ = seconds_up;

    return true;
  } else if (in == "sstables") {
    *value = versions_->current()->DebugString();
    return true;
  }

  return false;
}
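
// Usage sketch (illustrative, not part of the original file):
//   leveldb::Range ranges[2];
//   ranges[0] = leveldb::Range("a", "c");
//   ranges[1] = leveldb::Range("x", "z");
//   uint64_t sizes[2];
//   db->GetApproximateSizes(ranges, 2, sizes);
// The results are file-offset based estimates of on-disk usage, not exact
// byte counts of user data.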

void DBImpl::GetApproximateSizes(
    const Range* range, int n,
    uint64_t* sizes) {
  // TODO(opt): better implementation
  Version* v;
  {
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();
  }

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
    sizes[i] = (limit >= start ? limit - start : 0);
  }

  {
    MutexLock l(&mutex_);
    v->Unref();
  }
}

inline void DBImpl::DelayLoggingAndReset() {
  if (delayed_writes_ > 0) {
    Log(options_.info_log, "delayed %d write...\n", delayed_writes_ );
    delayed_writes_ = 0;
  }
}
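
// Note (added comment, based on the checks in the function body below):
// DeleteFile() accepts only a bare table file name, e.g. "000123.sst", and
// only deletes files sitting in the last non-empty level, so that deletion
// tombstones in lower levels are never exposed.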

Status DBImpl::DeleteFile(std::string name) {
  uint64_t number;
  FileType type;
  if (!ParseFileName(name, &number, &type) ||
      (type != kTableFile)) {
    Log(options_.info_log, "DeleteFile #%lld FAILED. Invalid file name\n",
        static_cast<unsigned long long>(number));
    return Status::InvalidArgument("Invalid file name");
  }

  int level;
  FileMetaData metadata;
  int maxlevel = NumberLevels();
  VersionEdit edit(maxlevel);
  DeletionState deletion_state;
  Status status;
  {
    MutexLock l(&mutex_);
    status = versions_->GetMetadataForFile(number, &level, &metadata);
    if (!status.ok()) {
      Log(options_.info_log, "DeleteFile #%lld FAILED. File not found\n",
          static_cast<unsigned long long>(number));
      return Status::InvalidArgument("File not found");
    }
    assert((level > 0) && (level < maxlevel));

    // If the file is being compacted no need to delete.
    if (metadata.being_compacted) {
      Log(options_.info_log,
          "DeleteFile #%lld Skipped. File about to be compacted\n",
          static_cast<unsigned long long>(number));
      return Status::OK();
    }

    // Only the files in the last level can be deleted externally.
    // This is to make sure that any deletion tombstones are not
    // lost. Check that the level passed is the last level.
    for (int i = level + 1; i < maxlevel; i++) {
      if (versions_->NumLevelFiles(i) != 0) {
        Log(options_.info_log,
            "DeleteFile #%lld FAILED. File not in last level\n",
            static_cast<unsigned long long>(number));
        return Status::InvalidArgument("File not in last level");
      }
    }
    edit.DeleteFile(level, number);
    status = versions_->LogAndApply(&edit, &mutex_);
    if (status.ok()) {
      FindObsoleteFiles(deletion_state);
    }
  } // lock released here

  if (status.ok()) {
    // remove files outside the db-lock
    PurgeObsoleteFiles(deletion_state);
    EvictObsoleteFiles(deletion_state);
  }
  return status;
}

void DBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata) {
  MutexLock l(&mutex_);
  return versions_->GetLiveFilesMetaData(metadata);
}

// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
  WriteBatch batch;
  batch.Put(key, value);
  return Write(opt, &batch);
}

Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  WriteBatch batch;
  batch.Delete(key);
  return Write(opt, &batch);
}

Status DB::Merge(const WriteOptions& opt, const Slice& key,
                 const Slice& value) {
  WriteBatch batch;
  batch.Merge(key, value);
  return Write(opt, &batch);
}

DB::~DB() { }
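
// Typical open sequence (illustrative, not part of the original file):
//   leveldb::Options options;
//   options.create_if_missing = true;
//   leveldb::DB* db = nullptr;
//   leveldb::Status s = leveldb::DB::Open(options, "/tmp/testdb", &db);
//   if (s.ok()) { /* use db */ delete db; }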

Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
  *dbptr = nullptr;
  EnvOptions soptions;

  if (options.block_cache != nullptr && options.no_block_cache) {
    return Status::InvalidArgument(
        "no_block_cache is true while block_cache is not nullptr");
  }
  DBImpl* impl = new DBImpl(options, dbname);
  Status s = impl->CreateArchivalDirectory();
  if (!s.ok()) {
    delete impl;
    return s;
  }
  impl->mutex_.Lock();
  VersionEdit edit(impl->NumberLevels());
  s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
  if (s.ok()) {
    uint64_t new_log_number = impl->versions_->NewFileNumber();
    unique_ptr<WritableFile> lfile;
    soptions.use_mmap_writes = false;
    s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
                                     &lfile, soptions);
    if (s.ok()) {
      lfile->SetPreallocationBlockSize(1.1 * options.write_buffer_size);
      edit.SetLogNumber(new_log_number);
      impl->logfile_number_ = new_log_number;
      impl->log_.reset(new log::Writer(std::move(lfile)));
      s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
    }
    if (s.ok()) {
      impl->mem_->SetLogNumber(impl->logfile_number_);
      impl->DeleteObsoleteFiles();
      impl->MaybeScheduleCompaction();
      impl->MaybeScheduleLogDBDeployStats();
    }
  }
  impl->mutex_.Unlock();

  if (options.compaction_style == kCompactionStyleUniversal) {
    int num_files;
    for (int i = 1; i < impl->NumberLevels(); i++) {
      num_files = impl->versions_->NumLevelFiles(i);
      if (num_files > 0) {
        s = Status::InvalidArgument("Not all files are at level 0. Cannot "
          "open with universal compaction style.");
        break;
      }
    }
  }

  if (s.ok()) {
    *dbptr = impl;
  } else {
    delete impl;
  }
  return s;
}

Snapshot::~Snapshot() {
}
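
// Note (added comment): DestroyDB() removes every file that belongs to the
// database, including archived WAL files in the archive directory; the
// caller must make sure the database is not open anywhere else.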

Status DestroyDB(const std::string& dbname, const Options& options) {
  Env* env = options.env;
  std::vector<std::string> filenames;
  std::vector<std::string> archiveFiles;

  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);
  env->GetChildren(ArchivalDirectory(dbname), &archiveFiles);

  if (filenames.empty()) {
    return Status::OK();
  }

  FileLock* lock;
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
  if (result.ok()) {
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          type != kDBLockFile) {  // Lock file will be deleted at end
        Status del;
        if (type == kMetaDatabase) {
          del = DestroyDB(dbname + "/" + filenames[i], options);
        } else {
          del = env->DeleteFile(dbname + "/" + filenames[i]);
        }
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }

    // Delete archival files.
    for (size_t i = 0; i < archiveFiles.size(); ++i) {
      ParseFileName(archiveFiles[i], &number, &type);
      if (type == kLogFile) {
        Status del = env->DeleteFile(ArchivalDirectory(dbname) + "/" +
                                     archiveFiles[i]);
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
    // ignore case where no archival directory is present.
    env->DeleteDir(ArchivalDirectory(dbname));

    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
  }
  return result;
}

//
// A global method that can dump out the build version
void dumpLeveldbBuildVersion(Logger * log) {
  Log(log, "Git sha %s", leveldb_build_git_sha);
  Log(log, "Compile time %s %s",
      leveldb_build_compile_time, leveldb_build_compile_date);
}

}  // namespace leveldb