// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

#include <algorithm>
#include <climits>
#include <cstdio>
#include <set>
#include <string>
#include <stdint.h>
#include <stdexcept>
#include <vector>
#include <unordered_set>

#include "db/builder.h"
#include "db/db_iter.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/memtablelist.h"
#include "db/merge_helper.h"
#include "db/prefix_filter_iterator.h"
#include "db/table_cache.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "db/transaction_log_impl.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
#include "rocksdb/table_builder.h"
#include "port/port.h"
#include "table/block.h"
#include "table/merger.h"
#include "table/table.h"
#include "table/two_level_iterator.h"
#include "util/auto_roll_logger.h"
#include "util/build_version.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"

namespace leveldb {

void dumpLeveldbBuildVersion(Logger * log);

// Information kept for every waiting writer
struct DBImpl::Writer {
  Status status;
  WriteBatch* batch;
  bool sync;
  bool disableWAL;
  bool done;
  port::CondVar cv;

  explicit Writer(port::Mutex* mu) : cv(mu) { }
};

struct DBImpl::CompactionState {
  Compaction* const compaction;

  // If there were two snapshots with seq numbers s1 and
  // s2 and s1 < s2, and if we find two instances of a key k1 that lie
  // entirely within s1 and s2, then the earlier version of k1 can be safely
  // deleted because that version is not visible in any snapshot.
  std::vector<SequenceNumber> existing_snapshots;

  // Files produced by compaction
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
    SequenceNumber smallest_seqno, largest_seqno;
  };
  std::vector<Output> outputs;
  std::list<uint64_t> allocated_file_numbers;

  // State kept for output being generated
  unique_ptr<WritableFile> outfile;
  unique_ptr<TableBuilder> builder;

  uint64_t total_bytes;

  Output* current_output() { return &outputs[outputs.size()-1]; }

  explicit CompactionState(Compaction* c)
      : compaction(c),
        total_bytes(0) {
  }
};

struct DBImpl::DeletionState {

  // the list of all live files that cannot be deleted
  std::vector<uint64_t> live;

  // a list of all files that exist in the db directory
  std::vector<std::string> allfiles;

  // the current filenumber, lognumber and prevlognumber
  // that corresponds to the set of files in 'live'.
  uint64_t filenumber, lognumber, prevlognumber;

  // the list of all files to be evicted from the table cache
  std::vector<uint64_t> files_to_evict;
};

// Fix user-supplied options to be reasonable
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}
Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src) {
  Options result = src;
  result.comparator = icmp;
  result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
  ClipToRange(&result.max_open_files,            20,     1000000);
  ClipToRange(&result.write_buffer_size,         ((size_t)64)<<10,
                                                 ((size_t)64)<<30);
  ClipToRange(&result.block_size,                1<<10,  4<<20);

  // if user sets arena_block_size, we trust the user-supplied value. Otherwise,
  // calculate a proper value from write_buffer_size.
  if (result.arena_block_size <= 0) {
    result.arena_block_size = result.write_buffer_size / 10;
  }

  result.min_write_buffer_number_to_merge = std::min(
    result.min_write_buffer_number_to_merge, result.max_write_buffer_number-1);
  if (result.info_log == nullptr) {
    Status s = CreateLoggerFromOptions(dbname, result.db_log_dir, src.env,
                                       result, &result.info_log);
    if (!s.ok()) {
      // No place suitable for logging
      result.info_log = nullptr;
    }
  }
  if (result.block_cache == nullptr && !result.no_block_cache) {
    result.block_cache = NewLRUCache(8 << 20);
  }
  result.compression_per_level = src.compression_per_level;
  if (result.block_size_deviation < 0 || result.block_size_deviation > 100) {
    result.block_size_deviation = 0;
  }
  if (result.max_mem_compaction_level >= result.num_levels) {
    result.max_mem_compaction_level = result.num_levels - 1;
  }
  if (result.soft_rate_limit > result.hard_rate_limit) {
    result.soft_rate_limit = result.hard_rate_limit;
  }
  if (result.compaction_filter &&
      result.compaction_filter_factory->CreateCompactionFilter().get()) {
    Log(result.info_log, "Both filter and factory specified. Using filter");
  }
  if (result.prefix_extractor) {
    // If a prefix extractor has been supplied and a PrefixHashRepFactory is
    // being used, make sure that the latter uses the former as its transform
    // function.
    auto factory = dynamic_cast<PrefixHashRepFactory*>(
      result.memtable_factory.get());
    if (factory != nullptr && factory->transform_ != result.prefix_extractor) {
      Log(result.info_log, "A prefix hash representation factory was supplied "
          "whose prefix extractor does not match options.prefix_extractor. "
          "Falling back to skip list representation factory");
      result.memtable_factory = std::make_shared<SkipListFactory>();
    }
  }
  return result;
}

DBImpl::DBImpl(const Options& options, const std::string& dbname)
    : env_(options.env),
      dbname_(dbname),
      internal_comparator_(options.comparator),
      options_(SanitizeOptions(
          dbname, &internal_comparator_, &internal_filter_policy_, options)),
      internal_filter_policy_(options.filter_policy),
      owns_info_log_(options_.info_log != options.info_log),
      db_lock_(nullptr),
      mutex_(options.use_adaptive_mutex),
      shutting_down_(nullptr),
      bg_cv_(&mutex_),
      mem_rep_factory_(options_.memtable_factory),
      mem_(new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_)),
      logfile_number_(0),
      tmp_batch_(),
      bg_compaction_scheduled_(0),
      bg_logstats_scheduled_(false),
      manual_compaction_(nullptr),
      logger_(nullptr),
      disable_delete_obsolete_files_(false),
      delete_obsolete_files_last_run_(0),
      purge_wal_files_last_run_(0),
      last_stats_dump_time_microsec_(0),
      stall_level0_slowdown_(0),
      stall_memtable_compaction_(0),
      stall_level0_num_files_(0),
      stall_level0_slowdown_count_(0),
      stall_memtable_compaction_count_(0),
      stall_level0_num_files_count_(0),
      started_at_(options.env->NowMicros()),
      flush_on_destroy_(false),
      stats_(options.num_levels),
      delayed_writes_(0),
      last_flushed_sequence_(0),
      storage_options_(options),
      bg_work_gate_closed_(false),
      refitting_level_(false) {

  mem_->Ref();

  env_->GetAbsolutePath(dbname, &db_absolute_path_);

  stall_leveln_slowdown_.resize(options.num_levels);
  stall_leveln_slowdown_count_.resize(options.num_levels);
  for (int i = 0; i < options.num_levels; ++i) {
    stall_leveln_slowdown_[i] = 0;
    stall_leveln_slowdown_count_[i] = 0;
  }

  // Reserve ten files or so for other uses and give the rest to TableCache.
  const int table_cache_size = options_.max_open_files - 10;
  table_cache_.reset(new TableCache(dbname_, &options_,
                                    storage_options_, table_cache_size));

  versions_.reset(new VersionSet(dbname_, &options_, storage_options_,
                                 table_cache_.get(), &internal_comparator_));

  dumpLeveldbBuildVersion(options_.info_log.get());
  options_.Dump(options_.info_log.get());

#ifdef USE_SCRIBE
  logger_.reset(new ScribeLogger("localhost", 1456));
#endif

  char name[100];
  Status st = env_->GetHostName(name, 100L);
  if (st.ok()) {
    host_name_ = name;
  } else {
    Log(options_.info_log, "Can't get hostname, use localhost as host name.");
    host_name_ = "localhost";
  }
  last_log_ts = 0;

}

DBImpl::~DBImpl() {
  // Wait for background work to finish
  if (flush_on_destroy_ && mem_->GetFirstSequenceNumber() != 0) {
    FlushMemTable(FlushOptions());
  }
  mutex_.Lock();
  shutting_down_.Release_Store(this);  // Any non-nullptr value is ok
  while (bg_compaction_scheduled_ || bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  if (mem_ != nullptr) mem_->Unref();
  imm_.UnrefAll();
}

// Do not flush or close the database cleanly; simulate a crash.
void DBImpl::TEST_Destroy_DBImpl() {
  // ensure that no new memtable flushes can occur
  flush_on_destroy_ = false;

  // wait till all background compactions are done.
  mutex_.Lock();
  while (bg_compaction_scheduled_ || bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }

  // Prevent new compactions from occurring.
  const int LargeNumber = 10000000;
  bg_compaction_scheduled_ += LargeNumber;
  mutex_.Unlock();

  // force release the lock file.
  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  log_.reset();
  versions_.reset();
  table_cache_.reset();
}

uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
  return versions_->ManifestFileNumber();
}

Status DBImpl::NewDB() {
  VersionEdit new_db(NumberLevels());
  new_db.SetComparatorName(user_comparator()->Name());
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  unique_ptr<WritableFile> file;
  Status s = env_->NewWritableFile(manifest, &file, storage_options_);
  if (!s.ok()) {
    return s;
  }
  file->SetPreallocationBlockSize(options_.manifest_preallocation_size);
  {
    log::Writer log(std::move(file));
    std::string record;
    new_db.EncodeTo(&record);
    s = log.AddRecord(record);
  }
  if (s.ok()) {
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
  } else {
    env_->DeleteFile(manifest);
  }
  return s;
}

void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || options_.paranoid_checks) {
    // No change needed
  } else {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
    *s = Status::OK();
  }
}

const Status DBImpl::CreateArchivalDirectory() {
  if (options_.WAL_ttl_seconds > 0) {
    std::string archivalPath = ArchivalDirectory(dbname_);
    return env_->CreateDirIfMissing(archivalPath);
  }
  return Status::OK();
}

void DBImpl::PrintStatistics() {
  auto dbstats = options_.statistics;
  if (dbstats) {
    Log(options_.info_log,
        "STATISTICS:\n %s",
        dbstats->ToString().c_str());
  }
}

void DBImpl::MaybeDumpStats() {
  if (options_.stats_dump_period_sec == 0) return;

  const uint64_t now_micros = env_->NowMicros();

  if (last_stats_dump_time_microsec_ +
      options_.stats_dump_period_sec * 1000000
      <= now_micros) {
    // Multiple threads could race in here simultaneously.
    // However, the last one will update last_stats_dump_time_microsec_
    // atomically. We could see more than one dump during one dump
    // period in rare cases.
    last_stats_dump_time_microsec_ = now_micros;
    std::string stats;
    GetProperty("leveldb.stats", &stats);
    Log(options_.info_log, "%s", stats.c_str());
    PrintStatistics();
  }
}

// Returns the list of live files in 'live' and the list
// of all files in the filesystem in 'allfiles'.
void DBImpl::FindObsoleteFiles(DeletionState& deletion_state) {
  mutex_.AssertHeld();

  // if deletion is disabled, do nothing
  if (disable_delete_obsolete_files_) {
    return;
  }

  // This method is costly when the number of files is large.
  // Do not allow it to trigger more often than once in
  // delete_obsolete_files_period_micros.
  if (options_.delete_obsolete_files_period_micros != 0) {
    const uint64_t now_micros = env_->NowMicros();
    if (delete_obsolete_files_last_run_ +
        options_.delete_obsolete_files_period_micros > now_micros) {
      return;
    }
    delete_obsolete_files_last_run_ = now_micros;
  }

  // Make a list of all of the live files; set is slow, should not
  // be used.
  deletion_state.live.assign(pending_outputs_.begin(),
                             pending_outputs_.end());
  versions_->AddLiveFiles(&deletion_state.live);

  // set of all files in the directory
  env_->GetChildren(dbname_, &deletion_state.allfiles); // Ignore errors

  // store the current filenum, lognum, etc
  deletion_state.filenumber = versions_->ManifestFileNumber();
  deletion_state.lognumber = versions_->LogNumber();
  deletion_state.prevlognumber = versions_->PrevLogNumber();
}

// Diffs the files listed in 'allfiles' against the live set; files that do
// not belong to the live set are possibly removed. If a removed file is an
// sst file, its file number is returned in files_to_evict.
// It is not necessary to hold the mutex when invoking this method.
void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
  uint64_t number;
  FileType type;
  std::vector<std::string> old_log_files;

  // Now, convert live list to an unordered set, WITHOUT mutex held;
  // set is slow.
  std::unordered_set<uint64_t> live_set(state.live.begin(),
                                        state.live.end());

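  // Examine every file in the db directory and decide, based on its type
  // and the numbers captured in 'state', whether it is still needed.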
  for (size_t i = 0; i < state.allfiles.size(); i++) {
    if (ParseFileName(state.allfiles[i], &number, &type)) {
      bool keep = true;
      switch (type) {
        case kLogFile:
          keep = ((number >= state.lognumber) ||
                  (number == state.prevlognumber));
          break;
        case kDescriptorFile:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= state.filenumber);
          break;
        case kTableFile:
          keep = (live_set.find(number) != live_set.end());
          break;
        case kTempFile:
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = (live_set.find(number) != live_set.end());
          break;
        case kInfoLogFile:
          keep = true;
          if (number != 0) {
            old_log_files.push_back(state.allfiles[i]);
          }
          break;
        case kCurrentFile:
        case kDBLockFile:
        case kMetaDatabase:
          keep = true;
          break;
      }

      if (!keep) {
        if (type == kTableFile) {
          // record the files to be evicted from the cache
          state.files_to_evict.push_back(number);
        }
        Log(options_.info_log, "Delete type=%d #%lu", int(type), number);
        if (type == kLogFile && options_.WAL_ttl_seconds > 0) {
          Status st = env_->RenameFile(
            LogFileName(dbname_, number),
            ArchivedLogFileName(dbname_, number)
          );

          if (!st.ok()) {
            Log(
              options_.info_log, "RenameFile type=%d #%lu FAILED",
              int(type),
              number
            );
          }
        } else {
          Status st = env_->DeleteFile(dbname_ + "/" + state.allfiles[i]);
          if (!st.ok()) {
            Log(options_.info_log, "Delete type=%d #%lld FAILED\n",
                int(type),
                static_cast<unsigned long long>(number));
          }
        }
      }
    }
  }

  // Delete old log files.
  size_t old_log_file_count = old_log_files.size();
  // NOTE: Currently we only support log purge when options_.db_log_dir is
  // located in `dbname` directory.
  if (old_log_file_count >= options_.keep_log_file_num &&
      options_.db_log_dir.empty()) {
    std::sort(old_log_files.begin(), old_log_files.end());
    size_t end = old_log_file_count - options_.keep_log_file_num;
    for (unsigned int i = 0; i <= end; i++) {
      std::string& to_delete = old_log_files.at(i);
      // Log(options_.info_log, "Delete type=%d %s\n",
      //     int(kInfoLogFile), to_delete.c_str());
      env_->DeleteFile(dbname_ + "/" + to_delete);
    }
  }
  PurgeObsoleteWALFiles();
}

void DBImpl::EvictObsoleteFiles(DeletionState& state) {
  for (unsigned int i = 0; i < state.files_to_evict.size(); i++) {
    table_cache_->Evict(state.files_to_evict[i]);
  }
}

void DBImpl::DeleteObsoleteFiles() {
  mutex_.AssertHeld();
  DeletionState deletion_state;
  FindObsoleteFiles(deletion_state);
  PurgeObsoleteFiles(deletion_state);
  EvictObsoleteFiles(deletion_state);
}

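// Delete archived WAL files whose modification time is older than
// options_.WAL_ttl_seconds. Runs at most once per TTL period, tracked
// by purge_wal_files_last_run_.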
void DBImpl::PurgeObsoleteWALFiles() {
  int64_t current_time;
  Status s = env_->GetCurrentTime(&current_time);
  uint64_t now_seconds = static_cast<uint64_t>(current_time);
  assert(s.ok());

  if (options_.WAL_ttl_seconds != ULONG_MAX && options_.WAL_ttl_seconds > 0) {
    if (purge_wal_files_last_run_ + options_.WAL_ttl_seconds > now_seconds) {
      return;
    }
    std::vector<std::string> wal_files;
    std::string archival_dir = ArchivalDirectory(dbname_);
    env_->GetChildren(archival_dir, &wal_files);
    for (const auto& f : wal_files) {
      uint64_t file_m_time;
      const std::string file_path = archival_dir + "/" + f;
      const Status s = env_->GetFileModificationTime(file_path, &file_m_time);
      if (s.ok() && (now_seconds - file_m_time > options_.WAL_ttl_seconds)) {
        Status status = env_->DeleteFile(file_path);
        if (!status.ok()) {
          Log(options_.info_log,
              "Failed deleting a WAL file. Error: %s",
              status.ToString().c_str());
        }
      } // Ignore errors.
    }
  }
  purge_wal_files_last_run_ = now_seconds;
}

// If externalTable is set, then apply recovered transactions
// to that table. This is used for readonly mode.
Status DBImpl::Recover(VersionEdit* edit, MemTable* external_table,
    bool error_if_log_file_exist) {
  mutex_.AssertHeld();

  assert(db_lock_ == nullptr);
  if (!external_table) {
    // We call CreateDirIfMissing() as the directory may already exist (if we
    // are reopening a DB), when this happens we don't want creating the
    // directory to cause an error. However, we need to check if creating the
    // directory fails or else we may get an obscure message about the lock
    // file not existing. One real-world example of this occurring is if
    // env->CreateDirIfMissing() doesn't create intermediate directories, e.g.
    // when dbname_ is "dir/db" but when "dir" doesn't exist.
    Status s = env_->CreateDirIfMissing(dbname_);
    if (!s.ok()) {
      return s;
    }

    s = env_->LockFile(LockFileName(dbname_), &db_lock_);
    if (!s.ok()) {
      return s;
    }

    if (!env_->FileExists(CurrentFileName(dbname_))) {
      if (options_.create_if_missing) {
        // TODO: add merge_operator name check
        s = NewDB();
        if (!s.ok()) {
          return s;
        }
      } else {
        return Status::InvalidArgument(
            dbname_, "does not exist (create_if_missing is false)");
      }
    } else {
      if (options_.error_if_exists) {
        return Status::InvalidArgument(
            dbname_, "exists (error_if_exists is true)");
      }
    }
  }

  Status s = versions_->Recover();
  if (s.ok()) {
    SequenceNumber max_sequence(0);

    // Recover from all newer log files than the ones named in the
    // descriptor (new log files may have been added by the previous
    // incarnation without registering them in the descriptor).
    //
    // Note that PrevLogNumber() is no longer used, but we pay
    // attention to it in case we are recovering a database
    // produced by an older version of leveldb.
    const uint64_t min_log = versions_->LogNumber();
    const uint64_t prev_log = versions_->PrevLogNumber();
    std::vector<std::string> filenames;
    s = env_->GetChildren(dbname_, &filenames);
    if (!s.ok()) {
      return s;
    }
    uint64_t number;
    FileType type;
    std::vector<uint64_t> logs;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type)
          && type == kLogFile
          && ((number >= min_log) || (number == prev_log))) {
        logs.push_back(number);
      }
    }

    if (logs.size() > 0 && error_if_log_file_exist) {
      return Status::Corruption(
          "The db was opened in readonly mode with the error_if_log_file_exist "
          "flag, but a log file already exists");
    }

    // Recover in the order in which the logs were generated
    std::sort(logs.begin(), logs.end());
    for (size_t i = 0; i < logs.size(); i++) {
      s = RecoverLogFile(logs[i], edit, &max_sequence, external_table);
      // The previous incarnation may not have written any MANIFEST
      // records after allocating this log number.  So we manually
      // update the file number allocation counter in VersionSet.
      versions_->MarkFileNumberUsed(logs[i]);
    }

    if (s.ok()) {
      if (versions_->LastSequence() < max_sequence) {
        versions_->SetLastSequence(max_sequence);
        last_flushed_sequence_ = max_sequence;
      } else {
        last_flushed_sequence_ = versions_->LastSequence();
      }
      SetTickerCount(options_.statistics, SEQUENCE_NUMBER,
                     versions_->LastSequence());
    }
  }

  return s;
}

Status DBImpl::RecoverLogFile(uint64_t log_number,
                              VersionEdit* edit,
                              SequenceNumber* max_sequence,
                              MemTable* external_table) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false or
                     //            options_.skip_log_error_on_recovery==true
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  mutex_.AssertHeld();

  // Open the log file
  std::string fname = LogFileName(dbname_, log_number);
  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);
  if (!status.ok()) {
    MaybeIgnoreError(&status);
    return status;
  }

  // Create the log reader.
  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks &&
                     !options_.skip_log_error_on_recovery ? &status : nullptr);
  // We intentionally make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%llu",
      (unsigned long long) log_number);

  // Read all the records and add to a memtable
  std::string scratch;
  Slice record;
  WriteBatch batch;
  MemTable* mem = nullptr;
  if (external_table) {
    mem = external_table;
  }
  while (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      continue;
    }
    WriteBatchInternal::SetContents(&batch, record);

    if (mem == nullptr) {
      mem = new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_);
      mem->Ref();
    }
    status = WriteBatchInternal::InsertInto(&batch, mem, &options_);
    MaybeIgnoreError(&status);
    if (!status.ok()) {
      break;
    }
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;
    }

    if (!external_table &&
        mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
      status = WriteLevel0TableForRecovery(mem, edit);
      if (!status.ok()) {
        // Reflect errors immediately so that conditions like full
        // file-systems cause the DB::Open() to fail.
        break;
      }
      mem->Unref();
      mem = nullptr;
    }
  }

  if (status.ok() && mem != nullptr && !external_table) {
    status = WriteLevel0TableForRecovery(mem, edit);
    // Reflect errors immediately so that conditions like full
    // file-systems cause the DB::Open() to fail.
  }

  if (mem != nullptr && !external_table) mem->Unref();
  return status;
}

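// Write the contents of a memtable recovered from the WAL out as a
// level-0 table and record the new file in 'edit'.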
Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mem->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 table #%llu: started",
      (unsigned long long) meta.number);

  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable);
    mutex_.Lock();
  }

  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats.files_out_levelnp1 = 1;
  stats_[level].Add(stats);
  return s;
}


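// Flush the contents of 'mems' (one or more immutable memtables) into a
// single sst file. When only one background compaction thread is configured
// and level-style compaction is used, the file may be placed above level 0
// if it does not overlap existing files.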
Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
                                uint64_t* filenumber) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  *filenumber = meta.number;
  pending_outputs_.insert(meta.number);

  std::vector<Iterator*> list;
  for (MemTable* m : mems) {
    list.push_back(m->NewIterator());
  }
  Iterator* iter = NewMergingIterator(&internal_comparator_, &list[0],
                                      list.size());
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mems[0]->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 flush table #%llu: started",
      (unsigned long long) meta.number);

  Version* base = versions_->current();
  base->Ref();          // it is likely that we do not need this reference
  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable);
    mutex_.Lock();
  }
  base->Unref();

  Log(options_.info_log, "Level-0 flush table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  // re-acquire the most current version
  base = versions_->current();

  // There could be multiple threads writing to its own level-0 file.
  // The pending_outputs cannot be cleared here, otherwise this newly
  // created file might not be considered as a live-file by another
  // compaction thread that is concurrently deleting obsolete files.
  // The pending_outputs can be cleared only after the new version is
A
866 867 868 869 870 871 872 873 874 875 876 877 878
  // valid one.
  // pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
    // if we have more than 1 background thread, then we cannot
    // insert files directly into higher levels because some other
    // threads could be concurrently producing compacted files for
    // that key range.
879
    if (base != nullptr && options_.max_background_compactions <= 1 &&
880
        options_.compaction_style == kCompactionStyleLevel) {
881 882 883
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    }
    edit->AddFile(level, meta.number, meta.file_size,
884 885
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
886 887 888 889 890 891 892 893 894 895 896 897 898
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats_[level].Add(stats);
  return s;
}

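// Pick the immutable memtables that are ready to flush, write them out as
// a single level-0 table, and install the result in the current version.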
Status DBImpl::CompactMemTable(bool* madeProgress) {
  mutex_.AssertHeld();
  assert(imm_.size() != 0);

899
  if (!imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
900 901 902 903 904 905 906 907
    Log(options_.info_log, "Memcompaction already in progress");
    Status s = Status::IOError("Memcompaction already in progress");
    return s;
  }

  // Save the contents of the earliest memtable as a new Table
  // This will release and re-acquire the mutex.
  uint64_t file_number;
908 909 910
  std::vector<MemTable*> mems;
  imm_.PickMemtablesToFlush(&mems);
  if (mems.empty()) {
911 912 913 914 915 916
    Log(options_.info_log, "Nothing in memstore to flush");
    Status s = Status::IOError("Nothing in memstore to flush");
    return s;
  }

  // record the logfile_number_ before we release the mutex
917
  MemTable* m = mems[0];
918 919
  VersionEdit* edit = m->GetEdits();
  edit->SetPrevLogNumber(0);
920
  edit->SetLogNumber(m->GetLogNumber());  // Earlier logs no longer needed
921

922
  Status s = WriteLevel0Table(mems, edit, &file_number);
923

924
  if (s.ok() && shutting_down_.Acquire_Load()) {
925 926 927
    s = Status::IOError(
      "Database shutdown started during memtable compaction"
    );
928
  }
J
jorlow@chromium.org 已提交
929

930
  // Replace immutable memtable with the generated Table
931
  s = imm_.InstallMemtableFlushResults(
932
    mems, versions_.get(), s, &mutex_, options_.info_log.get(),
933
    file_number, pending_outputs_);
J
jorlow@chromium.org 已提交
934 935

  if (s.ok()) {
936 937 938
    if (madeProgress) {
      *madeProgress = 1;
    }
939
    MaybeScheduleLogDBDeployStats();
D
    // absolutely necessary because it could be also done as part
    // of other background compaction
J
jorlow@chromium.org 已提交
943 944 945 946
  }
  return s;
}

947 948
void DBImpl::CompactRange(const Slice* begin, const Slice* end,
                          bool reduce_level) {
G
  {
    MutexLock l(&mutex_);
    Version* base = versions_->current();
953
    for (int level = 1; level < NumberLevels(); level++) {
G
        max_level_with_files = level;
      }
    }
  }
  TEST_CompactMemTable(); // TODO(sanjay): Skip if memtable does not overlap
  for (int level = 0; level < max_level_with_files; level++) {
    TEST_CompactRange(level, begin, end);
  }
963 964 965 966 967 968 969 970 971 972

  if (reduce_level) {
    ReFitLevel(max_level_with_files);
  }
}

// return the same level if it cannot be moved
int DBImpl::FindMinimumEmptyLevelFitting(int level) {
  mutex_.AssertHeld();
  int minimum_level = level;
973
  for (int i = level - 1; i > 0; --i) {
974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017
    // stop if level i is not empty
    if (versions_->NumLevelFiles(i) > 0) break;

    // stop if level i is too small (cannot fit the level files)
    if (versions_->MaxBytesForLevel(i) < versions_->NumLevelBytes(level)) break;

    minimum_level = i;
  }
  return minimum_level;
}

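// Move all files of 'level' into the smallest-numbered empty level that can
// hold them. Background work is paused while the version edit is applied.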
void DBImpl::ReFitLevel(int level) {
  assert(level < NumberLevels());

  MutexLock l(&mutex_);

  // only allow one thread refitting
  if (refitting_level_) {
    Log(options_.info_log, "ReFitLevel: another thread is refitting");
    return;
  }
  refitting_level_ = true;

  // wait for all background threads to stop
  bg_work_gate_closed_ = true;
  while (bg_compaction_scheduled_ > 0) {
    Log(options_.info_log,
        "RefitLevel: waiting for background threads to stop: %d",
        bg_compaction_scheduled_);
    bg_cv_.Wait();
  }

  // move to a smaller level
  int to_level = FindMinimumEmptyLevelFitting(level);

  assert(to_level <= level);

  if (to_level < level) {
    Log(options_.info_log, "Before refitting:\n%s",
        versions_->current()->DebugString().data());

    VersionEdit edit(NumberLevels());
    for (const auto& f : versions_->current()->files_[level]) {
      edit.DeleteFile(level, f->number);
1018 1019
      edit.AddFile(to_level, f->number, f->file_size, f->smallest, f->largest,
                   f->smallest_seqno, f->largest_seqno);
1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035
    }
    Log(options_.info_log, "Apply version edit:\n%s",
        edit.DebugString().data());

    auto status = versions_->LogAndApply(&edit, &mutex_);

    Log(options_.info_log, "LogAndApply: %s\n", status.ToString().data());

    if (status.ok()) {
      Log(options_.info_log, "After refitting:\n%s",
          versions_->current()->DebugString().data());
    }
  }

  refitting_level_ = false;
  bg_work_gate_closed_ = false;
G

1038
int DBImpl::NumberLevels() {
1039
  return options_.num_levels;
1040 1041 1042
}

int DBImpl::MaxMemCompactionLevel() {
1043
  return options_.max_mem_compaction_level;
1044 1045 1046
}

int DBImpl::Level0StopWriteTrigger() {
1047
  return options_.level0_stop_writes_trigger;
1048 1049
}

H
heyongqiang 已提交
1050 1051 1052 1053 1054
Status DBImpl::Flush(const FlushOptions& options) {
  Status status = FlushMemTable(options);
  return status;
}

1055 1056 1057 1058
SequenceNumber DBImpl::GetLatestSequenceNumber() {
  return versions_->LastSequence();
}

1059
Status DBImpl::GetUpdatesSince(SequenceNumber seq,
1060
                               unique_ptr<TransactionLogIterator>* iter) {
1061

1062 1063 1064 1065
  if (seq > last_flushed_sequence_) {
    return Status::IOError("Requested sequence not yet written in the db");
  }
  //  Get all sorted Wal Files.
1066 1067
  //  Do binary search and open files and find the seq number.

1068 1069
  std::unique_ptr<VectorLogPtr> wal_files(new VectorLogPtr);
  Status s = GetSortedWalFiles(*wal_files);
1070 1071 1072 1073
  if (!s.ok()) {
    return s;
  }

1074
  if (wal_files->empty()) {
1075 1076 1077 1078
    return Status::IOError(" NO WAL Files present in the db");
  }
  //  std::shared_ptr would have been useful here.

1079
  s = RetainProbableWalFiles(*wal_files, seq);
1080 1081
  if (!s.ok()) {
    return s;
1082
  }
1083
  iter->reset(
1084 1085
    new TransactionLogIteratorImpl(dbname_,
                                   &options_,
1086
                                   storage_options_,
1087
                                   seq,
1088
                                   std::move(wal_files),
1089 1090
                                   &last_flushed_sequence_));
  iter->get()->Next();
1091
  return iter->get()->status();
1092 1093
}

1094 1095
Status DBImpl::RetainProbableWalFiles(VectorLogPtr& all_logs,
                                      const SequenceNumber target) {
1096
  long start = 0; // signed to avoid overflow when target is < first file.
1097
  long end = static_cast<long>(all_logs.size()) - 1;
1098
  // Binary Search. avoid opening all files.
1099 1100
  while (end >= start) {
    long mid = start + (end - start) / 2;  // Avoid overflow.
1101 1102
    SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence();
    if (current_seq_num == target) {
1103
      end = mid;
1104
      break;
1105
    } else if (current_seq_num < target) {
1106
      start = mid + 1;
1107
    } else {
1108
      end = mid - 1;
1109 1110
    }
  }
1111 1112 1113
  size_t start_index = std::max(0l, end); // end could be -ve.
  // The last wal file is always included
  all_logs.erase(all_logs.begin(), all_logs.begin() + start_index);
1114 1115 1116
  return Status::OK();
}

1117 1118 1119 1120
bool DBImpl::CheckWalFileExistsAndEmpty(const WalFileType type,
                                        const uint64_t number) {
  const std::string fname = (type == kAliveLogFile) ?
    LogFileName(dbname_, number) : ArchivedLogFileName(dbname_, number);
1121 1122
  uint64_t file_size;
  Status s = env_->GetFileSize(fname, &file_size);
1123
  return (s.ok() && (file_size == 0));
1124 1125
}

1126 1127
Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number,
                               WriteBatch* const result) {
1128

1129 1130
  if (type == kAliveLogFile) {
    std::string fname = LogFileName(dbname_, number);
1131 1132 1133
    Status status = ReadFirstLine(fname, result);
    if (!status.ok()) {
      //  check if the file got moved to archive.
1134 1135
      std::string archived_file = ArchivedLogFileName(dbname_, number);
      Status s = ReadFirstLine(archived_file, result);
1136
      if (!s.ok()) {
1137
        return Status::IOError("Log File has been deleted");
1138 1139 1140
      }
    }
    return Status::OK();
1141 1142
  } else if (type == kArchivedLogFile) {
    std::string fname = ArchivedLogFileName(dbname_, number);
1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154
    Status status = ReadFirstLine(fname, result);
    return status;
  }
  return Status::NotSupported("File Type Not Known");
}

Status DBImpl::ReadFirstLine(const std::string& fname,
                             WriteBatch* const batch) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
1155
    Status* status;  // nullptr if options_.paranoid_checks==false
1156 1157
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
1158
          (this->status == nullptr ? "(ignoring error) " : ""),
1159
          fname, static_cast<int>(bytes), s.ToString().c_str());
1160
      if (this->status != nullptr && this->status->ok()) *this->status = s;
1161 1162 1163
    }
  };

1164
  unique_ptr<SequentialFile> file;
1165
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);
1166 1167 1168 1169 1170 1171 1172 1173

  if (!status.ok()) {
    return status;
  }


  LogReporter reporter;
  reporter.env = env_;
1174
  reporter.info_log = options_.info_log.get();
1175
  reporter.fname = fname.c_str();
1176
  reporter.status = (options_.paranoid_checks ? &status : nullptr);
1177
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
1178 1179 1180
                     0/*initial_offset*/);
  std::string scratch;
  Slice record;
1181

1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194
  if (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      return Status::IOError("Corruption noted");
      //  TODO read record's till the first no corrupt entry?
    }
    WriteBatchInternal::SetContents(batch, record);
    return Status::OK();
  }
  return Status::IOError("Error reading from file " + fname);
}

struct CompareLogByPointer {
  bool operator() (const unique_ptr<LogFile>& a,
                   const unique_ptr<LogFile>& b) {
    LogFileImpl* a_impl = dynamic_cast<LogFileImpl*>(a.get());
    LogFileImpl* b_impl = dynamic_cast<LogFileImpl*>(b.get());
    return *a_impl < *b_impl;
  }
};

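// Scan 'path' for log files of the requested type, record each file's
// starting sequence number and size, and append them to 'log_files' in
// sorted order.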
Status DBImpl::AppendSortedWalsOfType(const std::string& path,
    VectorLogPtr& log_files, WalFileType log_type) {
  std::vector<std::string> all_files;
  const Status status = env_->GetChildren(path, &all_files);
1208 1209 1210
  if (!status.ok()) {
    return status;
  }
1211 1212
  log_files.reserve(log_files.size() + all_files.size());
  for (const auto& f : all_files) {
1213 1214
    uint64_t number;
    FileType type;
1215
    if (ParseFileName(f, &number, &type) && type == kLogFile){
1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233

      WriteBatch batch;
      Status s = ReadFirstRecord(log_type, number, &batch);
      if (!s.ok()) {
        if (CheckWalFileExistsAndEmpty(log_type, number)) {
          continue;
        }
        return s;
      }

      uint64_t size_bytes;
      s = env_->GetFileSize(LogFileName(path, number), &size_bytes);
      if (!s.ok()) {
        return s;
      }

      log_files.push_back(std::move(unique_ptr<LogFile>(new LogFileImpl(
        number, log_type, WriteBatchInternal::Sequence(&batch), size_bytes))));
1234 1235
    }
  }
1236 1237
  CompareLogByPointer compare_log_files;
  std::sort(log_files.begin(), log_files.end(), compare_log_files);
1238 1239 1240
  return status;
}

G
void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
1242 1243
  assert(level >= 0);

G

H
hans@chromium.org 已提交
1246 1247
  ManualCompaction manual;
  manual.level = level;
G
1249
  manual.in_progress = false;
1250 1251 1252 1253
  // For universal compaction, we enforce every manual compaction to compact
  // all files.
  if (begin == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
1254
    manual.begin = nullptr;
G
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
  }
1259 1260
  if (end == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
1261
    manual.end = nullptr;
G
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;
  }

  MutexLock l(&mutex_);
1268

A
  // the number of background compaction threads to 1. This is
  // needed to ensure that this manual compaction can compact
  // any range of keys/files. We artificialy increase
1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285
  // bg_compaction_scheduled_ by a large number, this causes
  // the system to have a single background thread. Now,
  // this manual compaction can progress without stomping
  // on any other concurrent compactions.
  const int LargeNumber = 10000000;
  const int newvalue = options_.max_background_compactions-1;
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber) {
    Log(options_.info_log, "Manual compaction request waiting for background threads to fall below 1");
    bg_cv_.Wait();
  }
  Log(options_.info_log, "Manual compaction starting");

G
1287
    while (manual_compaction_ != nullptr) {
G
    }
    manual_compaction_ = &manual;
1291 1292 1293
    if (bg_compaction_scheduled_ == LargeNumber) {
      bg_compaction_scheduled_ = newvalue;
    }
G
    while (manual_compaction_ == &manual) {
      bg_cv_.Wait();
    }
H
hans@chromium.org 已提交
1298
  }
1299 1300 1301 1302 1303 1304 1305 1306 1307
  assert(!manual.in_progress);

  // wait till there are no background threads scheduled
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber + newvalue) {
    Log(options_.info_log, "Manual compaction resetting background threads");
    bg_cv_.Wait();
  }
  bg_compaction_scheduled_ = 0;
J
jorlow@chromium.org 已提交
1308 1309
}

H
heyongqiang 已提交
1310
Status DBImpl::FlushMemTable(const FlushOptions& options) {
1311 1312
  // nullptr batch means just wait for earlier writes to be done
  Status s = Write(WriteOptions(), nullptr);
H
heyongqiang 已提交
1313
  if (s.ok() && options.wait) {
1314
    // Wait until the compaction completes
H
heyongqiang 已提交
1315
    s = WaitForCompactMemTable();
1316 1317
  }
  return s;
J
jorlow@chromium.org 已提交
1318 1319
}

H
heyongqiang 已提交
1320
Status DBImpl::WaitForCompactMemTable() {
1321 1322 1323
  Status s;
  // Wait until the compaction completes
  MutexLock l(&mutex_);
1324
  while (imm_.size() > 0 && bg_error_.ok()) {
1325 1326
    bg_cv_.Wait();
  }
1327
  if (imm_.size() != 0) {
1328 1329 1330
    s = bg_error_;
  }
  return s;
H
heyongqiang 已提交
1331 1332 1333 1334 1335 1336
}

Status DBImpl::TEST_CompactMemTable() {
  return FlushMemTable(FlushOptions());
}

Status DBImpl::TEST_WaitForCompactMemTable() {
  return WaitForCompactMemTable();
}

Status DBImpl::TEST_WaitForCompact() {
  // Wait until the compaction completes
  MutexLock l(&mutex_);
  while (bg_compaction_scheduled_ && bg_error_.ok()) {
    bg_cv_.Wait();
  }
  return bg_error_;
}

void DBImpl::MaybeScheduleCompaction() {
  mutex_.AssertHeld();
  if (bg_work_gate_closed_) {
    // gate closed for background work
  } else if (bg_compaction_scheduled_ >= options_.max_background_compactions) {
    // Already scheduled
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else if (!imm_.IsFlushPending(options_.min_write_buffer_number_to_merge) &&
             manual_compaction_ == nullptr &&
             !versions_->NeedsCompaction()) {
    // No work to be done
  } else {
    bg_compaction_scheduled_++;
    env_->Schedule(&DBImpl::BGWork, this);
  }
}

void DBImpl::BGWork(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCall();
}

void DBImpl::TEST_PurgeObsoleteteWAL() {
  PurgeObsoleteWALFiles();
}

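// Entry point for a scheduled background work item: run one unit of
// compaction work, purge and evict any files it made obsolete, and
// reschedule if more work remains.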
void DBImpl::BackgroundCall() {
  bool madeProgress = false;
  DeletionState deletion_state;

  MaybeDumpStats();

  MutexLock l(&mutex_);
  // Log(options_.info_log, "XXX BG Thread %llx process new work item", pthread_self());
  assert(bg_compaction_scheduled_);
  if (!shutting_down_.Acquire_Load()) {
    Status s = BackgroundCompaction(&madeProgress, deletion_state);
    if (!s.ok()) {
      // Wait a little bit before retrying background compaction in
      // case this is an environmental problem and we do not want to
      // chew up resources for failed compactions for the duration of
      // the problem.
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      Log(options_.info_log, "Waiting after background compaction error: %s",
          s.ToString().c_str());
      mutex_.Unlock();
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
    }
  }

  // delete unnecessary files if any, this is done outside the mutex
  if (!deletion_state.live.empty()) {
    mutex_.Unlock();
    PurgeObsoleteFiles(deletion_state);
    EvictObsoleteFiles(deletion_state);
    mutex_.Lock();

  }

  bg_compaction_scheduled_--;

  MaybeScheduleLogDBDeployStats();

  // Previous compaction may have produced too many files in a level,
  // so reschedule another compaction if we made progress in the
  // last compaction.
  if (madeProgress) {
    MaybeScheduleCompaction();
  }
  bg_cv_.SignalAll();

}

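// Perform one unit of background work: flush any immutable memtables that
// are pending, then service a manual compaction if one is waiting, or pick
// an automatic compaction from the current version.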
Status DBImpl::BackgroundCompaction(bool* madeProgress,
  DeletionState& deletion_state) {
  *madeProgress = false;
  mutex_.AssertHeld();

  while (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log,
        "BackgroundCompaction doing CompactMemTable, compaction slots available %d",
        options_.max_background_compactions - bg_compaction_scheduled_);
    Status stat = CompactMemTable(madeProgress);
    if (!stat.ok()) {
      return stat;
    }
1437 1438
  }

1439
  unique_ptr<Compaction> c;
1440
  bool is_manual = (manual_compaction_ != nullptr) &&
1441
                   (manual_compaction_->in_progress == false);
G
Gabor Cselle 已提交
1442
  InternalKey manual_end;
H
hans@chromium.org 已提交
1443
  if (is_manual) {
G
Gabor Cselle 已提交
1444
    ManualCompaction* m = manual_compaction_;
1445 1446
    assert(!m->in_progress);
    m->in_progress = true; // another thread cannot pick up the same work
1447 1448
    c.reset(versions_->CompactRange(m->level, m->begin, m->end));
    if (c) {
G
Gabor Cselle 已提交
1449
      manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
1450 1451
    } else {
      m->done = true;
G
Gabor Cselle 已提交
1452 1453 1454
    }
    Log(options_.info_log,
        "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
H
hans@chromium.org 已提交
1455
        m->level,
G
Gabor Cselle 已提交
1456 1457 1458
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        (m->done ? "(end)" : manual_end.DebugString().c_str()));
1459
  } else if (!options_.disable_auto_compactions) {
1460
    c.reset(versions_->PickCompaction());
J
jorlow@chromium.org 已提交
1461 1462 1463
  }

  Status status;
1464
  if (!c) {
H
hans@chromium.org 已提交
1465
    // Nothing to do
1466
    Log(options_.info_log, "Compaction nothing to do");
H
hans@chromium.org 已提交
1467
  } else if (!is_manual && c->IsTrivialMove()) {
J
jorlow@chromium.org 已提交
1468
    // Move file to next level
1469
    assert(c->num_input_files(0) == 1);
J
jorlow@chromium.org 已提交
1470 1471 1472
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
1473 1474
                       f->smallest, f->largest,
                       f->smallest_seqno, f->largest_seqno);
1475
    status = versions_->LogAndApply(c->edit(), &mutex_);
H
hans@chromium.org 已提交
1476
    VersionSet::LevelSummaryStorage tmp;
1477
    Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
J
jorlow@chromium.org 已提交
1478 1479 1480
        static_cast<unsigned long long>(f->number),
        c->level() + 1,
        static_cast<unsigned long long>(f->file_size),
H
hans@chromium.org 已提交
1481 1482
        status.ToString().c_str(),
        versions_->LevelSummary(&tmp));
1483
    versions_->ReleaseCompactionFiles(c.get(), status);
1484
    *madeProgress = true;
J
jorlow@chromium.org 已提交
1485
  } else {
1486
    MaybeScheduleCompaction(); // do more compaction work in parallel.
1487
    CompactionState* compact = new CompactionState(c.get());
J
jorlow@chromium.org 已提交
1488 1489
    status = DoCompactionWork(compact);
    CleanupCompaction(compact);
1490
    versions_->ReleaseCompactionFiles(c.get(), status);
1491
    c->ReleaseInputs();
D
Dhruba Borthakur 已提交
1492
    FindObsoleteFiles(deletion_state);
1493
    *madeProgress = true;
J
jorlow@chromium.org 已提交
1494
  }
1495
  c.reset();
J
jorlow@chromium.org 已提交
1496 1497 1498 1499 1500 1501

  if (status.ok()) {
    // Done
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutting down
  } else {
1502
    Log(options_.info_log,
J
jorlow@chromium.org 已提交
1503 1504 1505 1506 1507
        "Compaction error: %s", status.ToString().c_str());
    if (options_.paranoid_checks && bg_error_.ok()) {
      bg_error_ = status;
    }
  }
H
hans@chromium.org 已提交
1508 1509

  if (is_manual) {
G
Gabor Cselle 已提交
1510
    ManualCompaction* m = manual_compaction_;
1511 1512 1513
    if (!status.ok()) {
      m->done = true;
    }
1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525
    // For universal compaction:
    //   Because universal compaction always happens at level 0, so one
    //   compaction will pick up all overlapped files. No files will be
    //   filtered out due to size limit and left for a successive compaction.
    //   So we can safely conclude the current compaction.
    //
    //   Also note that, if we don't stop here, then the current compaction
    //   writes a new file back to level 0, which will be used in successive
    //   compaction. Hence the manual compaction will never finish.
    if (options_.compaction_style == kCompactionStyleUniversal) {
      m->done = true;
    }
G
Gabor Cselle 已提交
1526 1527 1528 1529 1530 1531
    if (!m->done) {
      // We only compacted part of the requested range.  Update *m
      // to the range that is left to be compacted.
      m->tmp_storage = manual_end;
      m->begin = &m->tmp_storage;
    }
1532
    m->in_progress = false; // not being processed anymore
1533
    manual_compaction_ = nullptr;
H
hans@chromium.org 已提交
1534
  }
1535
  return status;
J
jorlow@chromium.org 已提交
1536 1537 1538 1539
}

void DBImpl::CleanupCompaction(CompactionState* compact) {
  mutex_.AssertHeld();
1540
  if (compact->builder != nullptr) {
J
jorlow@chromium.org 已提交
1541 1542
    // May happen if we get a shutdown call in the middle of compaction
    compact->builder->Abandon();
1543
    compact->builder.reset();
J
jorlow@chromium.org 已提交
1544
  } else {
1545
    assert(compact->outfile == nullptr);
J
jorlow@chromium.org 已提交
1546
  }
D
dgrogan@chromium.org 已提交
1547
  for (size_t i = 0; i < compact->outputs.size(); i++) {
J
jorlow@chromium.org 已提交
1548 1549 1550 1551 1552 1553
    const CompactionState::Output& out = compact->outputs[i];
    pending_outputs_.erase(out.number);
  }
  delete compact;
}

1554 1555 1556 1557 1558
// Allocate the file numbers for the output file. We allocate as
// many output file numbers as there are files in level+1.
// Insert them into pending_outputs so that they do not get deleted.
void DBImpl::AllocateCompactionOutputFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
1559 1560
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
1561
  int filesNeeded = compact->compaction->num_input_files(1);
1562
  for (int i = 0; i < filesNeeded; i++) {
1563 1564 1565 1566 1567 1568 1569 1570 1571
    uint64_t file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    compact->allocated_file_numbers.push_back(file_number);
  }
}

// Frees up unused file number.
void DBImpl::ReleaseCompactionUnusedFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
1572
  for (const auto file_number : compact->allocated_file_numbers) {
1573 1574 1575 1576 1577
    pending_outputs_.erase(file_number);
    // Log(options_.info_log, "XXX releasing unused file num %d", file_number);
  }
}

J
jorlow@chromium.org 已提交
1578
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
1579 1580
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
J
jorlow@chromium.org 已提交
1581
  uint64_t file_number;
1582 1583 1584 1585 1586 1587 1588
  // If we have not yet exhausted the pre-allocated file numbers,
  // then use the one from the front. Otherwise, we have to acquire
  // the heavyweight lock and allocate a new file number.
  if (!compact->allocated_file_numbers.empty()) {
    file_number = compact->allocated_file_numbers.front();
    compact->allocated_file_numbers.pop_front();
  } else {
J
jorlow@chromium.org 已提交
1589 1590 1591 1592 1593
    mutex_.Lock();
    file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    mutex_.Unlock();
  }
1594 1595 1596 1597
  CompactionState::Output out;
  out.number = file_number;
  out.smallest.Clear();
  out.largest.Clear();
1598
  out.smallest_seqno = out.largest_seqno = 0;
1599
  compact->outputs.push_back(out);
J
jorlow@chromium.org 已提交
1600 1601 1602

  // Make the output file
  std::string fname = TableFileName(dbname_, file_number);
1603
  Status s = env_->NewWritableFile(fname, &compact->outfile, storage_options_);
1604

J
jorlow@chromium.org 已提交
1605
  if (s.ok()) {
1606 1607 1608
    // Over-estimate slightly so we don't end up just barely crossing
    // the threshold.
    compact->outfile->SetPreallocationBlockSize(
1609
      1.1 * versions_->MaxFileSizeForLevel(compact->compaction->output_level()));
1610

1611
    compact->builder.reset(new TableBuilder(options_, compact->outfile.get(),
1612
                                            compact->compaction->output_level()));
J
jorlow@chromium.org 已提交
1613 1614 1615 1616 1617 1618
  }
  return s;
}

Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                          Iterator* input) {
1619
  assert(compact != nullptr);
1620
  assert(compact->outfile);
1621
  assert(compact->builder != nullptr);
J
jorlow@chromium.org 已提交
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  }
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
1637
  compact->builder.reset();
J
jorlow@chromium.org 已提交
1638 1639

  // Finish and check for file errors
1640
  if (s.ok() && !options_.disableDataSync) {
1641
    if (options_.use_fsync) {
1642
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
1643 1644
      s = compact->outfile->Fsync();
    } else {
1645
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
1646 1647
      s = compact->outfile->Sync();
    }
J
jorlow@chromium.org 已提交
1648 1649 1650 1651
  }
  if (s.ok()) {
    s = compact->outfile->Close();
  }
1652
  compact->outfile.reset();
J
jorlow@chromium.org 已提交
1653 1654 1655

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
J
jorlow@chromium.org 已提交
1656
    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
1657
                                               storage_options_,
J
jorlow@chromium.org 已提交
1658 1659
                                               output_number,
                                               current_bytes);
J
jorlow@chromium.org 已提交
1660 1661 1662
    s = iter->status();
    delete iter;
    if (s.ok()) {
1663
      Log(options_.info_log,
J
jorlow@chromium.org 已提交
1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675
          "Generated table #%llu: %lld keys, %lld bytes",
          (unsigned long long) output_number,
          (unsigned long long) current_entries,
          (unsigned long long) current_bytes);
    }
  }
  return s;
}


Status DBImpl::InstallCompactionResults(CompactionState* compact) {
  mutex_.AssertHeld();
1676 1677 1678 1679 1680

  // paranoia: verify that the files that we started with
  // still exist in the current version and in the same original level.
  // This ensures that a concurrent compaction did not erroneously
  // pick the same files to compact.
1681
  if (!versions_->VerifyCompactionFileConsistency(compact->compaction)) {
1682 1683 1684 1685 1686 1687 1688 1689
    Log(options_.info_log,  "Compaction %d@%d + %d@%d files aborted",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1);
    return Status::IOError("Compaction input files inconsistent");
  }

1690
  Log(options_.info_log,  "Compacted %d@%d + %d@%d files => %lld bytes",
J
jorlow@chromium.org 已提交
1691 1692 1693 1694 1695 1696 1697 1698 1699
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Add compaction outputs
  compact->compaction->AddInputDeletions(compact->compaction->edit());
  const int level = compact->compaction->level();
D
dgrogan@chromium.org 已提交
1700
  for (size_t i = 0; i < compact->outputs.size(); i++) {
J
jorlow@chromium.org 已提交
1701 1702
    const CompactionState::Output& out = compact->outputs[i];
    compact->compaction->edit()->AddFile(
1703 1704
        (options_.compaction_style == kCompactionStyleUniversal) ?
          level : level + 1,
1705 1706
        out.number, out.file_size, out.smallest, out.largest,
        out.smallest_seqno, out.largest_seqno);
J
jorlow@chromium.org 已提交
1707
  }
1708
  return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
J
jorlow@chromium.org 已提交
1709 1710
}

1711 1712 1713 1714 1715 1716 1717 1718
//
// Given a sequence number, return the sequence number of the
// earliest snapshot that this sequence number is visible in.
// The snapshots themselves are arranged in ascending order of
// sequence numbers.
// Employ a sequential search because the total number of
// snapshots are typically small.
inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
1719 1720
  SequenceNumber in, std::vector<SequenceNumber>& snapshots,
  SequenceNumber* prev_snapshot) {
1721
  SequenceNumber prev __attribute__((unused)) = 0;
1722 1723 1724
  for (const auto cur : snapshots) {
    assert(prev <= cur);
    if (cur >= in) {
1725
      *prev_snapshot = prev;
1726
      return cur;
1727
    }
1728 1729
    prev = cur; // assignment
    assert(prev);
1730 1731 1732 1733 1734 1735 1736 1737
  }
  Log(options_.info_log,
      "Looking for seqid %ld but maxseqid is %ld", in,
      snapshots[snapshots.size()-1]);
  assert(0);
  return 0;
}

J
jorlow@chromium.org 已提交
1738
Status DBImpl::DoCompactionWork(CompactionState* compact) {
1739
  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
A
Abhishek Kona 已提交
1740
  Log(options_.info_log,
1741
      "Compacting %d@%d + %d@%d files, score %.2f slots available %d",
J
jorlow@chromium.org 已提交
1742 1743 1744
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
1745
      compact->compaction->level() + 1,
1746
      compact->compaction->score(),
1747
      options_.max_background_compactions - bg_compaction_scheduled_);
1748 1749
  char scratch[256];
  compact->compaction->Summary(scratch, sizeof(scratch));
H
heyongqiang 已提交
1750
  Log(options_.info_log, "Compaction start summary: %s\n", scratch);
J
jorlow@chromium.org 已提交
1751 1752

  assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
1753
  assert(compact->builder == nullptr);
1754
  assert(!compact->outfile);
1755 1756 1757

  SequenceNumber visible_at_tip = 0;
  SequenceNumber earliest_snapshot;
H
Haobo Xu 已提交
1758
  SequenceNumber latest_snapshot = 0;
1759 1760 1761 1762 1763
  snapshots_.getAll(compact->existing_snapshots);
  if (compact->existing_snapshots.size() == 0) {
    // optimize for fast path if there are no snapshots
    visible_at_tip = versions_->LastSequence();
    earliest_snapshot = visible_at_tip;
J
jorlow@chromium.org 已提交
1764
  } else {
H
Haobo Xu 已提交
1765
    latest_snapshot = compact->existing_snapshots.back();
1766 1767 1768 1769
    // Add the current seqno as the 'latest' virtual
    // snapshot to the end of this list.
    compact->existing_snapshots.push_back(versions_->LastSequence());
    earliest_snapshot = compact->existing_snapshots[0];
J
jorlow@chromium.org 已提交
1770 1771
  }

1772
  // Is this compaction producing files at the bottommost level?
1773
  bool bottommost_level = compact->compaction->BottomMostLevel();
1774

1775 1776 1777
  // Allocate the output file numbers before we release the lock
  AllocateCompactionOutputFileNumbers(compact);

J
jorlow@chromium.org 已提交
1778 1779 1780
  // Release mutex while we're actually doing the compaction work
  mutex_.Unlock();

1781
  const uint64_t start_micros = env_->NowMicros();
1782
  unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
J
jorlow@chromium.org 已提交
1783 1784 1785 1786 1787
  input->SeekToFirst();
  Status status;
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
1788 1789
  SequenceNumber last_sequence_for_key __attribute__((unused)) =
    kMaxSequenceNumber;
1790
  SequenceNumber visible_in_snapshot = kMaxSequenceNumber;
H
Haobo Xu 已提交
1791
  std::string compaction_filter_value;
H
Haobo Xu 已提交
1792
  std::vector<char> delete_key; // for compaction filter
1793
  MergeHelper merge(user_comparator(), options_.merge_operator.get(),
1794 1795
                    options_.info_log.get(),
                    false /* internal key corruption is expected */);
1796 1797 1798 1799 1800 1801 1802
  auto compaction_filter = options_.compaction_filter;
  std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr;
  if (!compaction_filter) {
    compaction_filter_from_factory = std::move(
        options_.compaction_filter_factory->CreateCompactionFilter());
    compaction_filter = compaction_filter_from_factory.get();
  }
J
jorlow@chromium.org 已提交
1803
  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
1804
    // Prioritize immutable compaction work
1805
    if (imm_.imm_flush_needed.NoBarrier_Load() != nullptr) {
1806 1807
      const uint64_t imm_start = env_->NowMicros();
      mutex_.Lock();
1808
      if (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
1809
        CompactMemTable();
H
hans@chromium.org 已提交
1810
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
1811 1812 1813 1814 1815
      }
      mutex_.Unlock();
      imm_micros += (env_->NowMicros() - imm_start);
    }

J
jorlow@chromium.org 已提交
1816
    Slice key = input->key();
1817
    Slice value = input->value();
H
Haobo Xu 已提交
1818

1819
    if (compact->compaction->ShouldStopBefore(key) &&
1820
        compact->builder != nullptr) {
1821
      status = FinishCompactionOutputFile(compact, input.get());
1822 1823 1824 1825 1826 1827
      if (!status.ok()) {
        break;
      }
    }

    // Handle key/value, add to state, etc.
J
jorlow@chromium.org 已提交
1828
    bool drop = false;
1829
    bool current_entry_is_merging = false;
J
jorlow@chromium.org 已提交
1830 1831
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
1832 1833
      // TODO: error key stays in db forever? Figure out the intention/rationale
      // v10 error v8 : we cannot hide v8 even though it's pretty obvious.
J
jorlow@chromium.org 已提交
1834 1835 1836
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
1837
      visible_in_snapshot = kMaxSequenceNumber;
J
jorlow@chromium.org 已提交
1838 1839 1840 1841 1842 1843 1844 1845
    } else {
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
                                     Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
1846
        visible_in_snapshot = kMaxSequenceNumber;
H
Haobo Xu 已提交
1847 1848

        // apply the compaction filter to the first occurrence of the user key
1849
        if (compaction_filter &&
H
Haobo Xu 已提交
1850 1851 1852 1853 1854 1855 1856 1857 1858 1859
            ikey.type == kTypeValue &&
            (visible_at_tip || ikey.sequence > latest_snapshot)) {
          // If the user has specified a compaction filter and the sequence
          // number is greater than any external snapshot, then invoke the
          // filter.
          // If the return value of the compaction filter is true, replace
          // the entry with a delete marker.
          bool value_changed = false;
          compaction_filter_value.clear();
          bool to_delete =
1860
            compaction_filter->Filter(compact->compaction->level(),
H
Haobo Xu 已提交
1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881
                                               ikey.user_key, value,
                                               &compaction_filter_value,
                                               &value_changed);
          if (to_delete) {
            // make a copy of the original key
            delete_key.assign(key.data(), key.data() + key.size());
            // convert it to a delete
            UpdateInternalKey(&delete_key[0], delete_key.size(),
                              ikey.sequence, kTypeDeletion);
            // anchor the key again
            key = Slice(&delete_key[0], delete_key.size());
            // needed because ikey is backed by key
            ParseInternalKey(key, &ikey);
            // no value associated with delete
            value.clear();
            RecordTick(options_.statistics, COMPACTION_KEY_DROP_USER);
          } else if (value_changed) {
            value = compaction_filter_value;
          }
        }

J
jorlow@chromium.org 已提交
1882 1883
      }

1884 1885 1886
      // If there are no snapshots, then this kv affect visibility at tip.
      // Otherwise, search though all existing snapshots to find
      // the earlist snapshot that is affected by this kv.
1887 1888 1889 1890 1891 1892
      SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot
      SequenceNumber visible = visible_at_tip ?
        visible_at_tip :
        findEarliestVisibleSnapshot(ikey.sequence,
                                    compact->existing_snapshots,
                                    &prev_snapshot);
1893 1894 1895 1896 1897

      if (visible_in_snapshot == visible) {
        // If the earliest snapshot is which this key is visible in
        // is the same as the visibily of a previous instance of the
        // same key, then this kv is not visible in any snapshot.
J
jorlow@chromium.org 已提交
1898
        // Hidden by an newer entry for same user key
1899
        // TODO: why not > ?
1900
        assert(last_sequence_for_key >= ikey.sequence);
J
jorlow@chromium.org 已提交
1901
        drop = true;    // (A)
1902
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_NEWER_ENTRY);
J
jorlow@chromium.org 已提交
1903
      } else if (ikey.type == kTypeDeletion &&
1904
                 ikey.sequence <= earliest_snapshot &&
J
jorlow@chromium.org 已提交
1905 1906 1907 1908 1909 1910 1911 1912 1913
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
1914
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_OBSOLETE);
1915 1916 1917 1918 1919 1920 1921
      } else if (ikey.type == kTypeMerge) {
        // We know the merge type entry is not hidden, otherwise we would
        // have hit (A)
        // We encapsulate the merge related state machine in a different
        // object to minimize change to the existing flow. Turn out this
        // logic could also be nicely re-used for memtable flush purge
        // optimization in BuildTable.
M
Mayank Agarwal 已提交
1922 1923
        merge.MergeUntil(input.get(), prev_snapshot, bottommost_level,
                         options_.statistics);
1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941
        current_entry_is_merging = true;
        if (merge.IsSuccess()) {
          // Successfully found Put/Delete/(end-of-key-range) while merging
          // Get the merge result
          key = merge.key();
          ParseInternalKey(key, &ikey);
          value = merge.value();
        } else {
          // Did not find a Put/Delete/(end-of-key-range) while merging
          // We now have some stack of merge operands to write out.
          // NOTE: key,value, and ikey are now referring to old entries.
          //       These will be correctly set below.
          assert(!merge.keys().empty());
          assert(merge.keys().size() == merge.values().size());

          // Hack to make sure last_sequence_for_key is correct
          ParseInternalKey(merge.keys().front(), &ikey);
        }
J
jorlow@chromium.org 已提交
1942 1943 1944
      }

      last_sequence_for_key = ikey.sequence;
1945
      visible_in_snapshot = visible;
J
jorlow@chromium.org 已提交
1946 1947
    }
#if 0
1948
    Log(options_.info_log,
J
jorlow@chromium.org 已提交
1949
        "  Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
1950
        "%d smallest_snapshot: %d level: %d bottommost %d",
J
jorlow@chromium.org 已提交
1951
        ikey.user_key.ToString().c_str(),
D
dgrogan@chromium.org 已提交
1952
        (int)ikey.sequence, ikey.type, kTypeValue, drop,
J
jorlow@chromium.org 已提交
1953
        compact->compaction->IsBaseLevelForKey(ikey.user_key),
1954 1955
        (int)last_sequence_for_key, (int)earliest_snapshot,
        compact->compaction->level(), bottommost_level);
J
jorlow@chromium.org 已提交
1956 1957 1958
#endif

    if (!drop) {
1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974
      // We may write a single key (e.g.: for Put/Delete or successful merge).
      // Or we may instead have to write a sequence/list of keys.
      // We have to write a sequence iff we have an unsuccessful merge
      bool has_merge_list = current_entry_is_merging && !merge.IsSuccess();
      const std::deque<std::string>* keys = nullptr;
      const std::deque<std::string>* values = nullptr;
      std::deque<std::string>::const_reverse_iterator key_iter;
      std::deque<std::string>::const_reverse_iterator value_iter;
      if (has_merge_list) {
        keys = &merge.keys();
        values = &merge.values();
        key_iter = keys->rbegin();    // The back (*rbegin()) is the first key
        value_iter = values->rbegin();

        key = Slice(*key_iter);
        value = Slice(*value_iter);
1975
      }
1976

1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987
      // If we have a list of keys to write, traverse the list.
      // If we have a single key to write, simply write that key.
      while (true) {
        // Invariant: key,value,ikey will always be the next entry to write
        char* kptr = (char*)key.data();
        std::string kstr;

        // Zeroing out the sequence number leads to better compression.
        // If this is the bottommost level (no files in lower levels)
        // and the earliest snapshot is larger than this seqno
        // then we can squash the seqno to zero.
1988 1989
        if (options_.compaction_style == kCompactionStyleLevel &&
            bottommost_level && ikey.sequence < earliest_snapshot &&
1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000
            ikey.type != kTypeMerge) {
          assert(ikey.type != kTypeDeletion);
          // make a copy because updating in place would cause problems
          // with the priority queue that is managing the input key iterator
          kstr.assign(key.data(), key.size());
          kptr = (char *)kstr.c_str();
          UpdateInternalKey(kptr, key.size(), (uint64_t)0, ikey.type);
        }

        Slice newkey(kptr, key.size());
        assert((key.clear(), 1)); // we do not need 'key' anymore
2001

2002 2003 2004 2005 2006 2007 2008
        // Open output file if necessary
        if (compact->builder == nullptr) {
          status = OpenCompactionOutputFile(compact);
          if (!status.ok()) {
            break;
          }
        }
2009 2010

        SequenceNumber seqno = GetInternalKeySeqno(newkey);
2011 2012
        if (compact->builder->NumEntries() == 0) {
          compact->current_output()->smallest.DecodeFrom(newkey);
2013 2014 2015 2016
          compact->current_output()->smallest_seqno = seqno;
        } else {
          compact->current_output()->smallest_seqno =
            std::min(compact->current_output()->smallest_seqno, seqno);
2017 2018 2019
        }
        compact->current_output()->largest.DecodeFrom(newkey);
        compact->builder->Add(newkey, value);
2020 2021
        compact->current_output()->largest_seqno =
          std::max(compact->current_output()->largest_seqno, seqno);
2022 2023 2024 2025 2026 2027 2028 2029

        // Close output file if it is big enough
        if (compact->builder->FileSize() >=
            compact->compaction->MaxOutputFileSize()) {
          status = FinishCompactionOutputFile(compact, input.get());
          if (!status.ok()) {
            break;
          }
J
jorlow@chromium.org 已提交
2030 2031
        }

2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051
        // If we have a list of entries, move to next element
        // If we only had one entry, then break the loop.
        if (has_merge_list) {
          ++key_iter;
          ++value_iter;

          // If at end of list
          if (key_iter == keys->rend() || value_iter == values->rend()) {
            // Sanity Check: if one ends, then both end
            assert(key_iter == keys->rend() && value_iter == values->rend());
            break;
          }

          // Otherwise not at end of list. Update key, value, and ikey.
          key = Slice(*key_iter);
          value = Slice(*value_iter);
          ParseInternalKey(key, &ikey);

        } else{
          // Only had one item to begin with (Put/Delete)
J
jorlow@chromium.org 已提交
2052 2053 2054 2055 2056
          break;
        }
      }
    }

2057
    // MergeUntil has moved input to the next entry
2058
    if (!current_entry_is_merging) {
2059 2060
      input->Next();
    }
J
jorlow@chromium.org 已提交
2061 2062 2063
  }

  if (status.ok() && shutting_down_.Acquire_Load()) {
2064
    status = Status::IOError("Database shutdown started during compaction");
J
jorlow@chromium.org 已提交
2065
  }
2066
  if (status.ok() && compact->builder != nullptr) {
2067
    status = FinishCompactionOutputFile(compact, input.get());
J
jorlow@chromium.org 已提交
2068 2069 2070 2071
  }
  if (status.ok()) {
    status = input->status();
  }
2072
  input.reset();
J
jorlow@chromium.org 已提交
2073

2074 2075
  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
A
Abhishek Kona 已提交
2076 2077 2078
  if (options_.statistics) {
    options_.statistics->measureTime(COMPACTION_TIME, stats.micros);
  }
M
Mark Callaghan 已提交
2079 2080
  stats.files_in_leveln = compact->compaction->num_input_files(0);
  stats.files_in_levelnp1 = compact->compaction->num_input_files(1);
2081 2082

  int num_output_files = compact->outputs.size();
2083
  if (compact->builder != nullptr) {
2084 2085 2086 2087 2088
    // An error occured so ignore the last output.
    assert(num_output_files > 0);
    --num_output_files;
  }
  stats.files_out_levelnp1 = num_output_files;
M
Mark Callaghan 已提交
2089 2090 2091 2092 2093 2094 2095

  for (int i = 0; i < compact->compaction->num_input_files(0); i++)
    stats.bytes_readn += compact->compaction->input(0, i)->file_size;

  for (int i = 0; i < compact->compaction->num_input_files(1); i++)
    stats.bytes_readnp1 += compact->compaction->input(1, i)->file_size;

2096
  for (int i = 0; i < num_output_files; i++) {
2097 2098 2099
    stats.bytes_written += compact->outputs[i].file_size;
  }

J
jorlow@chromium.org 已提交
2100
  mutex_.Lock();
2101
  stats_[compact->compaction->level() + 1].Add(stats);
J
jorlow@chromium.org 已提交
2102

2103 2104 2105 2106
  // if there were any unused file number (mostly in case of
  // compaction error), free up the entry from pending_putputs
  ReleaseCompactionUnusedFileNumbers(compact);

J
jorlow@chromium.org 已提交
2107 2108 2109
  if (status.ok()) {
    status = InstallCompactionResults(compact);
  }
2110
  VersionSet::LevelSummaryStorage tmp;
2111
  Log(options_.info_log,
M
Mark Callaghan 已提交
2112
      "compacted to: %s, %.1f MB/sec, level %d, files in(%d, %d) out(%d) "
2113 2114
      "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
      "write-amplify(%.1f) %s\n",
M
Mark Callaghan 已提交
2115 2116 2117
      versions_->LevelSummary(&tmp),
      (stats.bytes_readn + stats.bytes_readnp1 + stats.bytes_written) /
          (double) stats.micros,
2118
      compact->compaction->output_level(),
M
Mark Callaghan 已提交
2119 2120 2121 2122
      stats.files_in_leveln, stats.files_in_levelnp1, stats.files_out_levelnp1,
      stats.bytes_readn / 1048576.0,
      stats.bytes_readnp1 / 1048576.0,
      stats.bytes_written / 1048576.0,
2123
      (stats.bytes_written + stats.bytes_readnp1 + stats.bytes_readn) /
2124
          (double) stats.bytes_readn,
2125
      stats.bytes_written / (double) stats.bytes_readn,
2126
      status.ToString().c_str());
M
Mark Callaghan 已提交
2127

J
jorlow@chromium.org 已提交
2128 2129 2130
  return status;
}

2131 2132 2133 2134
namespace {
struct IterState {
  port::Mutex* mu;
  Version* version;
2135
  std::vector<MemTable*> mem; // includes both mem_ and imm_
2136 2137 2138 2139 2140
};

static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  state->mu->Lock();
2141 2142 2143
  for (unsigned int i = 0; i < state->mem.size(); i++) {
    state->mem[i]->Unref();
  }
2144 2145 2146 2147
  state->version->Unref();
  state->mu->Unlock();
  delete state;
}
H
Hans Wennborg 已提交
2148
}  // namespace
2149

J
jorlow@chromium.org 已提交
2150 2151
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot) {
2152
  IterState* cleanup = new IterState;
J
jorlow@chromium.org 已提交
2153
  mutex_.Lock();
2154
  *latest_snapshot = versions_->LastSequence();
J
jorlow@chromium.org 已提交
2155

2156
  // Collect together all needed child iterators for mem
J
jorlow@chromium.org 已提交
2157
  std::vector<Iterator*> list;
2158
  mem_->Ref();
J
Jim Paton 已提交
2159 2160
  list.push_back(mem_->NewIterator(options.prefix));

2161 2162 2163 2164 2165 2166 2167 2168
  cleanup->mem.push_back(mem_);

  // Collect together all needed child iterators for imm_
  std::vector<MemTable*> immutables;
  imm_.GetMemTables(&immutables);
  for (unsigned int i = 0; i < immutables.size(); i++) {
    MemTable* m = immutables[i];
    m->Ref();
J
Jim Paton 已提交
2169
    list.push_back(m->NewIterator(options.prefix));
2170
    cleanup->mem.push_back(m);
2171
  }
2172 2173

  // Collect iterators for files in L0 - Ln
2174
  versions_->current()->AddIterators(options, storage_options_, &list);
J
jorlow@chromium.org 已提交
2175 2176 2177
  Iterator* internal_iter =
      NewMergingIterator(&internal_comparator_, &list[0], list.size());
  versions_->current()->Ref();
2178 2179 2180

  cleanup->mu = &mutex_;
  cleanup->version = versions_->current();
2181
  internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);
J
jorlow@chromium.org 已提交
2182 2183 2184 2185 2186 2187 2188 2189 2190 2191

  mutex_.Unlock();
  return internal_iter;
}

Iterator* DBImpl::TEST_NewInternalIterator() {
  SequenceNumber ignored;
  return NewInternalIterator(ReadOptions(), &ignored);
}

J
jorlow@chromium.org 已提交
2192
int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
2193 2194 2195 2196
  MutexLock l(&mutex_);
  return versions_->MaxNextLevelOverlappingBytes();
}

J
jorlow@chromium.org 已提交
2197 2198 2199
Status DBImpl::Get(const ReadOptions& options,
                   const Slice& key,
                   std::string* value) {
2200 2201 2202 2203 2204 2205
  return GetImpl(options, key, value);
}

Status DBImpl::GetImpl(const ReadOptions& options,
                       const Slice& key,
                       std::string* value,
2206 2207
                       const bool no_io,
                       bool* value_found) {
2208
  Status s;
2209

2210
  StopWatch sw(env_, options_.statistics, DB_GET);
2211
  SequenceNumber snapshot;
2212
  MutexLock l(&mutex_);
2213
  if (options.snapshot != nullptr) {
2214 2215 2216
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
J
jorlow@chromium.org 已提交
2217
  }
2218

2219
  MemTable* mem = mem_;
2220
  MemTableList imm = imm_;
2221
  Version* current = versions_->current();
2222
  mem->Ref();
2223
  imm.RefAll();
2224
  current->Ref();
2225

2226 2227
  // Unlock while reading from files and memtables
  mutex_.Unlock();
2228
  bool have_stat_update = false;
2229
  Version::GetStats stats;
2230

2231 2232 2233 2234

  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

2235
  // First look in the memtable, then in the immutable memtable (if any).
2236
  // s is both in/out. When in, s could either be OK or MergeInProgress.
2237
  // merge_operands will contain the sequence of merges in the latter case.
2238
  LookupKey lkey(key, snapshot);
2239
  if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
2240
    // Done
2241
  } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
2242 2243
    // Done
  } else {
2244
    current->Get(options, lkey, value, &s, &merge_operands, &stats,
D
Deon Nicholas 已提交
2245
                 options_, no_io, value_found);
2246
    have_stat_update = true;
2247
  }
2248
  mutex_.Lock();
2249

2250 2251
  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
2252 2253
    MaybeScheduleCompaction();
  }
2254
  mem->Unref();
2255
  imm.UnrefAll();
2256
  current->Unref();
2257
  RecordTick(options_.statistics, NUMBER_KEYS_READ);
2258
  RecordTick(options_.statistics, BYTES_READ, value->size());
2259
  return s;
J
jorlow@chromium.org 已提交
2260 2261
}

2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287
std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                     const std::vector<Slice>& keys,
                                     std::vector<std::string>* values) {

  StopWatch sw(env_, options_.statistics, DB_MULTIGET);
  SequenceNumber snapshot;
  MutexLock l(&mutex_);
  if (options.snapshot != nullptr) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTableList imm = imm_;
  Version* current = versions_->current();
  mem->Ref();
  imm.RefAll();
  current->Ref();

  // Unlock while reading from files and memtables

  mutex_.Unlock();
  bool have_stat_update = false;
  Version::GetStats stats;

2288 2289 2290
  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301
  // Note: this always resizes the values array
  int numKeys = keys.size();
  std::vector<Status> statList(numKeys);
  values->resize(numKeys);

  // Keep track of bytes that we read for statistics-recording later
  uint64_t bytesRead = 0;

  // For each of the given keys, apply the entire "get" process as follows:
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
2302 2303 2304
  // merge_operands will contain the sequence of merges in the latter case.
  for (int i=0; i<numKeys; ++i) {
    merge_operands.clear();
2305 2306 2307 2308
    Status& s = statList[i];
    std::string* value = &(*values)[i];

    LookupKey lkey(keys[i], snapshot);
2309
    if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
2310
      // Done
2311
    } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
2312 2313
      // Done
    } else {
2314
      current->Get(options, lkey, value, &s, &merge_operands, &stats, options_);
2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338
      have_stat_update = true;
    }

    if (s.ok()) {
      bytesRead += value->size();
    }
  }

  // Post processing (decrement reference counts and record statistics)
  mutex_.Lock();
  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
    MaybeScheduleCompaction();
  }
  mem->Unref();
  imm.UnrefAll();
  current->Unref();
  RecordTick(options_.statistics, NUMBER_MULTIGET_CALLS);
  RecordTick(options_.statistics, NUMBER_MULTIGET_KEYS_READ, numKeys);
  RecordTick(options_.statistics, NUMBER_MULTIGET_BYTES_READ, bytesRead);

  return statList;
}

2339 2340 2341 2342 2343 2344 2345 2346
bool DBImpl::KeyMayExist(const ReadOptions& options,
                         const Slice& key,
                         std::string* value,
                         bool* value_found) {
  if (value_found != nullptr) {
    *value_found = true; // falsify later if key-may-exist but can't fetch value
  }
  return GetImpl(options, key, value, true, value_found).ok();
2347 2348
}

J
jorlow@chromium.org 已提交
2349 2350
Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  SequenceNumber latest_snapshot;
T
Tyler Harter 已提交
2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363
  Iterator* iter = NewInternalIterator(options, &latest_snapshot);
  iter = NewDBIterator(
             &dbname_, env_, options_, user_comparator(), iter,
             (options.snapshot != nullptr
              ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
              : latest_snapshot));
  if (options.prefix) {
    // use extra wrapper to exclude any keys from the results which
    // don't begin with the prefix
    iter = new PrefixFilterIterator(iter, *options.prefix,
                                    options_.prefix_extractor);
  }
  return iter;
J
jorlow@chromium.org 已提交
2364 2365 2366 2367
}

const Snapshot* DBImpl::GetSnapshot() {
  MutexLock l(&mutex_);
2368
  return snapshots_.New(versions_->LastSequence());
J
jorlow@chromium.org 已提交
2369 2370 2371 2372
}

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
2373
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
J
jorlow@chromium.org 已提交
2374 2375 2376 2377 2378 2379 2380
}

// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);
}

2381 2382 2383 2384 2385 2386 2387 2388 2389
Status DBImpl::Merge(const WriteOptions& o, const Slice& key,
                     const Slice& val) {
  if (!options_.merge_operator) {
    return Status::NotSupported("Provide a merge_operator when opening DB");
  } else {
    return DB::Merge(o, key, val);
  }
}

J
jorlow@chromium.org 已提交
2390 2391 2392 2393
Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
}

2394 2395 2396 2397
Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
  Writer w(&mutex_);
  w.batch = my_batch;
  w.sync = options.sync;
H
heyongqiang 已提交
2398
  w.disableWAL = options.disableWAL;
2399
  w.done = false;
2400

2401
  StopWatch sw(env_, options_.statistics, DB_WRITE);
2402
  MutexLock l(&mutex_);
2403 2404 2405 2406 2407 2408
  writers_.push_back(&w);
  while (!w.done && &w != writers_.front()) {
    w.cv.Wait();
  }
  if (w.done) {
    return w.status;
2409 2410 2411
  }

  // May temporarily unlock and wait.
2412
  Status status = MakeRoomForWrite(my_batch == nullptr);
D
dgrogan@chromium.org 已提交
2413
  uint64_t last_sequence = versions_->LastSequence();
2414
  Writer* last_writer = &w;
2415
  if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
2416
    WriteBatch* updates = BuildBatchGroup(&last_writer);
2417 2418
    const SequenceNumber current_sequence = last_sequence + 1;
    WriteBatchInternal::SetSequence(updates, current_sequence);
2419 2420 2421 2422
    int my_batch_count = WriteBatchInternal::Count(updates);
    last_sequence += my_batch_count;
    // Record statistics
    RecordTick(options_.statistics, NUMBER_KEYS_WRITTEN, my_batch_count);
2423 2424 2425
    RecordTick(options_.statistics,
               BYTES_WRITTEN,
               WriteBatchInternal::ByteSize(updates));
2426 2427 2428 2429
    // Add to log and apply to memtable.  We can release the lock
    // during this phase since &w is currently responsible for logging
    // and protects against concurrent loggers and concurrent writes
    // into mem_.
2430
    {
2431
      mutex_.Unlock();
2432 2433
      if (options.disableWAL) {
        flush_on_destroy_ = true;
2434 2435 2436
      }

      if (!options.disableWAL) {
H
heyongqiang 已提交
2437 2438
        status = log_->AddRecord(WriteBatchInternal::Contents(updates));
        if (status.ok() && options.sync) {
2439
          if (options_.use_fsync) {
2440
            StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
2441
            status = log_->file()->Fsync();
2442
          } else {
2443
            StopWatch(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
2444
            status = log_->file()->Sync();
2445
          }
H
heyongqiang 已提交
2446
        }
2447 2448
      }
      if (status.ok()) {
2449 2450
        status = WriteBatchInternal::InsertInto(updates, mem_, &options_, this,
                                                options_.filter_deletes);
2451 2452 2453 2454 2455 2456 2457
        if (!status.ok()) {
          // Panic for in-memory corruptions
          // Note that existing logic was not sound. Any partial failure writing
          // into the memtable would result in a state that some write ops might
          // have succeeded in memtable but Status reports error for all writes.
          throw std::runtime_error("In memory WriteBatch corruption!");
        }
2458
        RecordTick(options_.statistics, SEQUENCE_NUMBER, my_batch_count);
2459 2460
        versions_->SetLastSequence(last_sequence);
        last_flushed_sequence_ = current_sequence;
2461
      }
2462
      mutex_.Lock();
J
jorlow@chromium.org 已提交
2463
    }
2464
    if (updates == &tmp_batch_) tmp_batch_.Clear();
J
jorlow@chromium.org 已提交
2465
  }
2466

2467 2468 2469 2470 2471 2472 2473
  while (true) {
    Writer* ready = writers_.front();
    writers_.pop_front();
    if (ready != &w) {
      ready->status = status;
      ready->done = true;
      ready->cv.Signal();
2474
    }
2475 2476
    if (ready == last_writer) break;
  }
2477

2478 2479 2480
  // Notify new head of write queue
  if (!writers_.empty()) {
    writers_.front()->cv.Signal();
2481
  }
J
jorlow@chromium.org 已提交
2482 2483 2484
  return status;
}

2485
// REQUIRES: Writer list must be non-empty
2486
// REQUIRES: First writer must have a non-nullptr batch
2487 2488 2489 2490
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
  assert(!writers_.empty());
  Writer* first = writers_.front();
  WriteBatch* result = first->batch;
2491
  assert(result != nullptr);
2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512

  size_t size = WriteBatchInternal::ByteSize(first->batch);

  // Allow the group to grow up to a maximum size, but if the
  // original write is small, limit the growth so we do not slow
  // down the small write too much.
  size_t max_size = 1 << 20;
  if (size <= (128<<10)) {
    max_size = size + (128<<10);
  }

  *last_writer = first;
  std::deque<Writer*>::iterator iter = writers_.begin();
  ++iter;  // Advance past "first"
  for (; iter != writers_.end(); ++iter) {
    Writer* w = *iter;
    if (w->sync && !first->sync) {
      // Do not include a sync write into a batch handled by a non-sync write.
      break;
    }

H
heyongqiang 已提交
2513 2514 2515 2516 2517 2518
    if (!w->disableWAL && first->disableWAL) {
      // Do not include a write that needs WAL into a batch that has
      // WAL disabled.
      break;
    }

2519
    if (w->batch != nullptr) {
2520 2521 2522 2523 2524 2525 2526 2527 2528
      size += WriteBatchInternal::ByteSize(w->batch);
      if (size > max_size) {
        // Do not make batch too big
        break;
      }

      // Append to *reuslt
      if (result == first->batch) {
        // Switch to temporary batch instead of disturbing caller's batch
2529
        result = &tmp_batch_;
2530 2531 2532 2533 2534 2535 2536 2537 2538 2539
        assert(WriteBatchInternal::Count(result) == 0);
        WriteBatchInternal::Append(result, first->batch);
      }
      WriteBatchInternal::Append(result, w->batch);
    }
    *last_writer = w;
  }
  return result;
}

2540 2541 2542
// This function computes the amount of time in microseconds by which a write
// should be delayed based on the number of level-0 files according to the
// following formula:
J
Jim Paton 已提交
2543 2544 2545 2546
// if n < bottom, return 0;
// if n >= top, return 1000;
// otherwise, let r = (n - bottom) /
//                    (top - bottom)
2547 2548 2549 2550
//  and return r^2 * 1000.
// The goal of this formula is to gradually increase the rate at which writes
// are slowed. We also tried linear delay (r * 1000), but it seemed to do
// slightly worse. There is no other particular reason for choosing quadratic.
J
Jim Paton 已提交
2551
uint64_t DBImpl::SlowdownAmount(int n, int top, int bottom) {
2552
  uint64_t delay;
J
Jim Paton 已提交
2553
  if (n >= top) {
2554 2555
    delay = 1000;
  }
J
Jim Paton 已提交
2556
  else if (n < bottom) {
2557 2558 2559 2560
    delay = 0;
  }
  else {
    // If we are here, we know that:
J
Jim Paton 已提交
2561
    //   level0_start_slowdown <= n < level0_slowdown
2562 2563
    // since the previous two conditions are false.
    float how_much =
J
Jim Paton 已提交
2564 2565
      (float) (n - bottom) /
              (top - bottom);
2566 2567 2568 2569 2570 2571
    delay = how_much * how_much * 1000;
  }
  assert(delay <= 1000);
  return delay;
}

2572
// REQUIRES: mutex_ is held
2573
// REQUIRES: this thread is currently at the front of the writer queue
2574 2575
Status DBImpl::MakeRoomForWrite(bool force) {
  mutex_.AssertHeld();
2576
  assert(!writers_.empty());
2577
  bool allow_delay = !force;
J
Jim Paton 已提交
2578 2579
  bool allow_hard_rate_limit_delay = !force;
  bool allow_soft_rate_limit_delay = !force;
2580
  uint64_t rate_limit_delay_millis = 0;
2581
  Status s;
2582
  double score;
2583

2584 2585 2586 2587 2588
  while (true) {
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
2589 2590
    } else if (
        allow_delay &&
2591
        versions_->NumLevelFiles(0) >=
2592
          options_.level0_slowdown_writes_trigger) {
2593 2594 2595
      // We are getting close to hitting a hard limit on the number of
      // L0 files.  Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
2596
      // individual write by 0-1ms to reduce latency variance.  Also,
2597 2598 2599
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
      mutex_.Unlock();
2600
      uint64_t delayed;
J
Jim Paton 已提交
2601 2602
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_SLOWDOWN_COUNT);
J
Jim Paton 已提交
2603 2604 2605 2606 2607
        env_->SleepForMicroseconds(
          SlowdownAmount(versions_->NumLevelFiles(0),
                         options_.level0_slowdown_writes_trigger,
                         options_.level0_stop_writes_trigger)
        );
2608
        delayed = sw.ElapsedMicros();
J
Jim Paton 已提交
2609
      }
2610
      RecordTick(options_.statistics, STALL_L0_SLOWDOWN_MICROS, delayed);
2611
      stall_level0_slowdown_ += delayed;
J
Jim Paton 已提交
2612
      stall_level0_slowdown_count_++;
2613
      allow_delay = false;  // Do not delay a single write more than once
2614 2615
      //Log(options_.info_log,
      //    "delaying write %llu usecs for level0_slowdown_writes_trigger\n",
2616
      //     (long long unsigned int)delayed);
2617
      mutex_.Lock();
2618
      delayed_writes_++;
2619 2620 2621
    } else if (!force &&
               (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
      // There is room in current memtable
2622 2623 2624
      if (allow_delay) {
        DelayLoggingAndReset();
      }
2625
      break;
2626
    } else if (imm_.size() == options_.max_write_buffer_number - 1) {
2627
      // We have filled up the current memtable, but the previous
2628 2629
      // ones are still being compacted, so we wait.
      DelayLoggingAndReset();
2630
      Log(options_.info_log, "wait for memtable compaction...\n");
2631
      uint64_t stall;
J
Jim Paton 已提交
2632 2633 2634 2635
      {
        StopWatch sw(env_, options_.statistics,
          STALL_MEMTABLE_COMPACTION_COUNT);
        bg_cv_.Wait();
2636
        stall = sw.ElapsedMicros();
J
Jim Paton 已提交
2637
      }
2638 2639
      RecordTick(options_.statistics, STALL_MEMTABLE_COMPACTION_MICROS, stall);
      stall_memtable_compaction_ += stall;
J
Jim Paton 已提交
2640
      stall_memtable_compaction_count_++;
2641
    } else if (versions_->NumLevelFiles(0) >=
2642
               options_.level0_stop_writes_trigger) {
2643
      // There are too many level-0 files.
2644 2645
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for fewer level0 files...\n");
2646
      uint64_t stall;
J
Jim Paton 已提交
2647 2648 2649
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_NUM_FILES_COUNT);
        bg_cv_.Wait();
2650
        stall = sw.ElapsedMicros();
J
Jim Paton 已提交
2651
      }
2652 2653
      RecordTick(options_.statistics, STALL_L0_NUM_FILES_MICROS, stall);
      stall_level0_num_files_ += stall;
J
Jim Paton 已提交
2654
      stall_level0_num_files_count_++;
2655
    } else if (
J
Jim Paton 已提交
2656 2657 2658
        allow_hard_rate_limit_delay &&
        options_.hard_rate_limit > 1.0 &&
        (score = versions_->MaxCompactionScore()) > options_.hard_rate_limit) {
2659
      // Delay a write when the compaction score for any level is too large.
2660
      int max_level = versions_->MaxCompactionScoreLevel();
2661
      mutex_.Unlock();
2662
      uint64_t delayed;
J
Jim Paton 已提交
2663
      {
J
Jim Paton 已提交
2664
        StopWatch sw(env_, options_.statistics, HARD_RATE_LIMIT_DELAY_COUNT);
J
Jim Paton 已提交
2665
        env_->SleepForMicroseconds(1000);
2666
        delayed = sw.ElapsedMicros();
J
Jim Paton 已提交
2667
      }
2668
      stall_leveln_slowdown_[max_level] += delayed;
J
Jim Paton 已提交
2669
      stall_leveln_slowdown_count_[max_level]++;
2670
      // Make sure the following value doesn't round to zero.
2671 2672 2673
      uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
      rate_limit_delay_millis += rate_limit;
      RecordTick(options_.statistics, RATE_LIMIT_DELAY_MILLIS, rate_limit);
J
Jim Paton 已提交
2674 2675 2676 2677
      if (options_.rate_limit_delay_max_milliseconds > 0 &&
          rate_limit_delay_millis >=
          (unsigned)options_.rate_limit_delay_max_milliseconds) {
        allow_hard_rate_limit_delay = false;
2678 2679 2680 2681
      }
      // Log(options_.info_log,
      //    "delaying write %llu usecs for rate limits with max score %.2f\n",
      //    (long long unsigned int)delayed, score);
2682
      mutex_.Lock();
J
Jim Paton 已提交
2683 2684 2685 2686 2687 2688 2689
    } else if (
        allow_soft_rate_limit_delay &&
        options_.soft_rate_limit > 0.0 &&
        (score = versions_->MaxCompactionScore()) > options_.soft_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      // TODO: add statistics
      mutex_.Unlock();
J
Jim Paton 已提交
2690 2691 2692 2693 2694 2695 2696 2697 2698
      {
        StopWatch sw(env_, options_.statistics, SOFT_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(SlowdownAmount(
          score,
          options_.soft_rate_limit,
          options_.hard_rate_limit)
        );
        rate_limit_delay_millis += sw.ElapsedMicros();
      }
J
Jim Paton 已提交
2699 2700
      allow_soft_rate_limit_delay = false;
      mutex_.Lock();
2701 2702
    } else {
      // Attempt to switch to a new memtable and trigger compaction of old
      DelayLoggingAndReset();
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      unique_ptr<WritableFile> lfile;
      EnvOptions soptions(storage_options_);
      soptions.use_mmap_writes = false;
      s = env_->NewWritableFile(
            LogFileName(dbname_, new_log_number),
            &lfile,
            soptions
          );
      if (!s.ok()) {
        // Avoid chewing through file number space in a tight loop.
        versions_->ReuseFileNumber(new_log_number);
        break;
      }
      // Our final size should be less than write_buffer_size
      // (compression, etc) but err on the side of caution.
      lfile->SetPreallocationBlockSize(1.1 * options_.write_buffer_size);
      logfile_number_ = new_log_number;
      log_.reset(new log::Writer(std::move(lfile)));
      mem_->SetLogNumber(logfile_number_);
      imm_.Add(mem_);
      mem_ = new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_);
      mem_->Ref();
      force = false;   // Do not force another compaction if have room
      MaybeScheduleCompaction();
    }
  }
  return s;
}

bool DBImpl::GetProperty(const Slice& property, std::string* value) {
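  // Supported properties: "leveldb.num-files-at-level<N>",
  // "leveldb.levelstats", "leveldb.stats" and "leveldb.sstables".
  // Illustrative caller usage (variable names hypothetical):
  //   std::string stats;
  //   if (db->GetProperty("leveldb.stats", &stats)) { /* log or print it */ }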
  value->clear();

  MutexLock l(&mutex_);
  Slice in = property;
  Slice prefix("leveldb.");
  if (!in.starts_with(prefix)) return false;
  in.remove_prefix(prefix.size());

  if (in.starts_with("num-files-at-level")) {
    in.remove_prefix(strlen("num-files-at-level"));
    uint64_t level;
    bool ok = ConsumeDecimalNumber(&in, &level) && in.empty();
    if (!ok || (int)level >= NumberLevels()) {
      return false;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "%d",
               versions_->NumLevelFiles(static_cast<int>(level)));
      *value = buf;
      return true;
    }
  } else if (in == "levelstats") {
    char buf[1000];
    snprintf(buf, sizeof(buf),
             "Level Files Size(MB)\n"
             "--------------------\n");
    value->append(buf);

    for (int level = 0; level < NumberLevels(); level++) {
      snprintf(buf, sizeof(buf),
               "%3d %8d %8.0f\n",
               level,
               versions_->NumLevelFiles(level),
               versions_->NumLevelBytes(level) / 1048576.0);
      value->append(buf);
    }
    return true;

  } else if (in == "stats") {
    char buf[1000];
    uint64_t total_bytes_written = 0;
    uint64_t total_bytes_read = 0;
    uint64_t micros_up = env_->NowMicros() - started_at_;
    // Add "+1" to make sure seconds_up is > 0 and avoid NaN later
    double seconds_up = (micros_up + 1) / 1000000.0;
    uint64_t total_slowdown = 0;
    uint64_t total_slowdown_count = 0;
    uint64_t interval_bytes_written = 0;
    uint64_t interval_bytes_read = 0;
    uint64_t interval_bytes_new = 0;
    double   interval_seconds_up = 0;

    // Pardon the long line but I think it is easier to read this way.
    snprintf(buf, sizeof(buf),
             "                               Compactions\n"
             "Level  Files Size(MB) Score Time(sec)  Read(MB) Write(MB)    Rn(MB)  Rnp1(MB)  Wnew(MB) RW-Amplify Read(MB/s) Write(MB/s)      Rn     Rnp1     Wnp1     NewW    Count  Ln-stall Stall-cnt\n"
             "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"
             );
    value->append(buf);
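    // Column key, as computed below: Rn/Rnp1 are bytes read from level n and
    // level n+1 during compaction, Wnew is bytes written minus bytes read
    // from level n+1, and RW-Amplify is (bytes written + bytes read) divided
    // by the bytes read from level n.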
    for (int level = 0; level < NumberLevels(); level++) {
      int files = versions_->NumLevelFiles(level);
      if (stats_[level].micros > 0 || files > 0) {
        int64_t bytes_read = stats_[level].bytes_readn +
                             stats_[level].bytes_readnp1;
        int64_t bytes_new = stats_[level].bytes_written -
                            stats_[level].bytes_readnp1;
        double amplify = (stats_[level].bytes_readn == 0)
            ? 0.0
            : (stats_[level].bytes_written +
               stats_[level].bytes_readnp1 +
               stats_[level].bytes_readn) /
                (double) stats_[level].bytes_readn;

        total_bytes_read += bytes_read;
        total_bytes_written += stats_[level].bytes_written;

        snprintf(
            buf, sizeof(buf),
            "%3d %8d %8.0f %5.1f %9.0f %9.0f %9.0f %9.0f %9.0f %9.0f %10.1f %9.1f %11.1f %8d %8d %8d %8d %8d %9.1f %9lu\n",
            level,
            files,
            versions_->NumLevelBytes(level) / 1048576.0,
            versions_->NumLevelBytes(level) /
                versions_->MaxBytesForLevel(level),
            stats_[level].micros / 1e6,
            bytes_read / 1048576.0,
            stats_[level].bytes_written / 1048576.0,
            stats_[level].bytes_readn / 1048576.0,
            stats_[level].bytes_readnp1 / 1048576.0,
            bytes_new / 1048576.0,
            amplify,
            // +1 to avoid division by 0
            (bytes_read / 1048576.0) / ((stats_[level].micros+1) / 1000000.0),
            (stats_[level].bytes_written / 1048576.0) /
                ((stats_[level].micros+1) / 1000000.0),
            stats_[level].files_in_leveln,
            stats_[level].files_in_levelnp1,
            stats_[level].files_out_levelnp1,
            stats_[level].files_out_levelnp1 - stats_[level].files_in_levelnp1,
            stats_[level].count,
            stall_leveln_slowdown_[level] / 1000000.0,
            (unsigned long) stall_leveln_slowdown_count_[level]);
        total_slowdown += stall_leveln_slowdown_[level];
        total_slowdown_count += stall_leveln_slowdown_count_[level];
        value->append(buf);
      }
    }

    interval_bytes_new = stats_[0].bytes_written - last_stats_.bytes_new_;
    interval_bytes_read = total_bytes_read - last_stats_.bytes_read_;
    interval_bytes_written = total_bytes_written - last_stats_.bytes_written_;
    interval_seconds_up = seconds_up - last_stats_.seconds_up_;
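    // The interval figures below are deltas since the previous
    // "leveldb.stats" request; last_stats_ is refreshed at the end of
    // this branch.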

    snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
             seconds_up, interval_seconds_up);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (GB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             stats_[0].bytes_written / (1048576.0 * 1024),
             total_bytes_read / (1048576.0 * 1024),
             total_bytes_written / (1048576.0 * 1024),
             (total_bytes_read + total_bytes_written) / (1048576.0 * 1024));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             stats_[0].bytes_written / 1048576.0 / seconds_up,
             total_bytes_read / 1048576.0 / seconds_up,
             total_bytes_written / 1048576.0 / seconds_up,
             (total_bytes_read + total_bytes_written) / 1048576.0 / seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification cumulative: %.1f write, %.1f compaction\n",
             (double) total_bytes_written / (stats_[0].bytes_written+1),
             (double) (total_bytes_written + total_bytes_read)
                  / (stats_[0].bytes_written+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             interval_bytes_new / 1048576.0,
             interval_bytes_read/ 1048576.0,
             interval_bytes_written / 1048576.0,
             (interval_bytes_read + interval_bytes_written) / 1048576.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             interval_bytes_new / 1048576.0 / interval_seconds_up,
             interval_bytes_read / 1048576.0 / interval_seconds_up,
             interval_bytes_written / 1048576.0 / interval_seconds_up,
             (interval_bytes_read + interval_bytes_written)
                 / 1048576.0 / interval_seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification interval: %.1f write, %.1f compaction\n",
             (double) interval_bytes_written / (interval_bytes_new+1),
             (double) (interval_bytes_written + interval_bytes_read) /
                  (interval_bytes_new+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(secs): %.3f level0_slowdown, %.3f level0_numfiles, "
            "%.3f memtable_compaction, %.3f leveln_slowdown\n",
            stall_level0_slowdown_ / 1000000.0,
            stall_level0_num_files_ / 1000000.0,
            stall_memtable_compaction_ / 1000000.0,
            total_slowdown / 1000000.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(count): %lu level0_slowdown, %lu level0_numfiles, "
            "%lu memtable_compaction, %lu leveln_slowdown\n",
            (unsigned long) stall_level0_slowdown_count_,
            (unsigned long) stall_level0_num_files_count_,
            (unsigned long) stall_memtable_compaction_count_,
            (unsigned long) total_slowdown_count);
    value->append(buf);

    last_stats_.bytes_read_ = total_bytes_read;
    last_stats_.bytes_written_ = total_bytes_written;
    last_stats_.bytes_new_ = stats_[0].bytes_written;
    last_stats_.seconds_up_ = seconds_up;

    return true;
  } else if (in == "sstables") {
    *value = versions_->current()->DebugString();
    return true;
  }

  return false;
}

void DBImpl::GetApproximateSizes(
    const Range* range, int n,
    uint64_t* sizes) {
  // TODO(opt): better implementation
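  // Sizes are estimated from the approximate offsets of the two internal
  // keys within the current version's table files, so recently written data
  // that is still only in the memtable may not be reflected.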
  Version* v;
  {
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();
  }

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
    sizes[i] = (limit >= start ? limit - start : 0);
  }

  {
    MutexLock l(&mutex_);
    v->Unref();
  }
}

inline void DBImpl::DelayLoggingAndReset() {
  if (delayed_writes_ > 0) {
    Log(options_.info_log, "delayed %d write...\n", delayed_writes_ );
    delayed_writes_ = 0;
  }
}

Status DBImpl::DeleteFile(std::string name) {
  uint64_t number;
  FileType type;
  if (!ParseFileName(name, &number, &type) ||
      (type != kTableFile)) {
    Log(options_.info_log, "DeleteFile #%lld FAILED. Invalid file name\n",
        static_cast<unsigned long long>(number));
    return Status::InvalidArgument("Invalid file name");
  }

  int level;
  FileMetaData metadata;
  int maxlevel = NumberLevels();
  VersionEdit edit(maxlevel);
  MutexLock l(&mutex_);
  Status status =
    versions_->GetMetadataForFile(number, &level, &metadata);
  if (!status.ok()) {
    Log(options_.info_log, "DeleteFile #%lld FAILED. File not found\n",
        static_cast<unsigned long long>(number));
    return Status::InvalidArgument("File not found");
  }
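  // The assert below documents the precondition that the file is not in
  // level 0 and lies within the valid level range; the loop further down
  // additionally requires it to be in the last non-empty level so that
  // deletion tombstones are not lost.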
  assert((level > 0) && (level < maxlevel));

  // If the file is being compacted no need to delete.
  if (metadata.being_compacted) {
    Log(options_.info_log,
        "DeleteFile #%lld Skipped. File about to be compacted\n",
        static_cast<unsigned long long>(number));
    return Status::OK();
  }

  // Only the files in the last level can be deleted externally.
  // This is to make sure that any deletion tombstones are not
  // lost. Check that the level passed is the last level.
  for (int i = level + 1; i < maxlevel; i++) {
    if (versions_->NumLevelFiles(i) != 0) {
      Log(options_.info_log,
          "DeleteFile #%lld FAILED. File not in last level\n",
          static_cast<unsigned long long>(number));
      return Status::InvalidArgument("File not in last level");
    }
  }

  edit.DeleteFile(level, number);
  status = versions_->LogAndApply(&edit, &mutex_);
  if (status.ok()) {
    DeleteObsoleteFiles();
  }
  return status;
}

void DBImpl::GetLiveFilesMetaData(
  std::vector<LiveFileMetaData> *metadata) {
  MutexLock l(&mutex_);
  return versions_->GetLiveFilesMetaData(metadata);
}

// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
  WriteBatch batch;
  batch.Put(key, value);
  return Write(opt, &batch);
}

Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  WriteBatch batch;
  batch.Delete(key);
  return Write(opt, &batch);
}

Status DB::Merge(const WriteOptions& opt, const Slice& key,
                 const Slice& value) {
  WriteBatch batch;
  batch.Merge(key, value);
  return Write(opt, &batch);
}

DB::~DB() { }

Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
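  // Recover any existing state from the MANIFEST and log files via
  // DBImpl::Recover(), install a fresh write-ahead log, persist its number
  // through LogAndApply(), drop obsolete files and kick off background
  // compactions before handing the DB back to the caller.
  //
  // Illustrative caller usage (path hypothetical):
  //   DB* db = nullptr;
  //   Options opts;
  //   opts.create_if_missing = true;
  //   Status s = DB::Open(opts, "/tmp/testdb", &db);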
  *dbptr = nullptr;
  EnvOptions soptions;

  if (options.block_cache != nullptr && options.no_block_cache) {
    return Status::InvalidArgument(
        "no_block_cache is true while block_cache is not nullptr");
  }
  DBImpl* impl = new DBImpl(options, dbname);
  Status s = impl->CreateArchivalDirectory();
  if (!s.ok()) {
    delete impl;
    return s;
  }
  impl->mutex_.Lock();
  VersionEdit edit(impl->NumberLevels());
  s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
  if (s.ok()) {
    uint64_t new_log_number = impl->versions_->NewFileNumber();
    unique_ptr<WritableFile> lfile;
    soptions.use_mmap_writes = false;
    s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
                                     &lfile, soptions);
    if (s.ok()) {
      lfile->SetPreallocationBlockSize(1.1 * options.write_buffer_size);
      edit.SetLogNumber(new_log_number);
      impl->logfile_number_ = new_log_number;
      impl->log_.reset(new log::Writer(std::move(lfile)));
      s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
    }
    if (s.ok()) {
      impl->DeleteObsoleteFiles();
      impl->MaybeScheduleCompaction();
      impl->MaybeScheduleLogDBDeployStats();
    }
  }
  impl->mutex_.Unlock();
  if (s.ok()) {
    *dbptr = impl;
  } else {
    delete impl;
  }
  return s;
}

Snapshot::~Snapshot() {
}

Status DestroyDB(const std::string& dbname, const Options& options) {
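  // Removes every file that belongs to the database, including archived
  // log files, recursing into any metadatabases; the lock file and the
  // directory itself are removed last.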
  Env* env = options.env;
  std::vector<std::string> filenames;
  std::vector<std::string> archiveFiles;

  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);
  env->GetChildren(ArchivalDirectory(dbname), &archiveFiles);

  if (filenames.empty()) {
    return Status::OK();
  }

  FileLock* lock;
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
  if (result.ok()) {
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          type != kDBLockFile) {  // Lock file will be deleted at end
        Status del;
        if (type == kMetaDatabase) {
          del = DestroyDB(dbname + "/" + filenames[i], options);
        } else {
          del = env->DeleteFile(dbname + "/" + filenames[i]);
        }
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }

    // Delete archival files.
    for (size_t i = 0; i < archiveFiles.size(); ++i) {
      ParseFileName(archiveFiles[i], &number, &type);
      if (type == kLogFile) {
        Status del = env->DeleteFile(ArchivalDirectory(dbname) + "/" +
                                     archiveFiles[i]);
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
    // ignore case where no archival directory is present.
    env->DeleteDir(ArchivalDirectory(dbname));

    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
  }
  return result;
}

//
// A global method that can dump out the build version
void dumpLeveldbBuildVersion(Logger * log) {
  Log(log, "Git sha %s", leveldb_build_git_sha);
  Log(log, "Compile time %s %s",
      leveldb_build_compile_time, leveldb_build_compile_date);
}

}  // namespace leveldb