//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

#include <algorithm>
#include <climits>
#include <cstdio>
#include <set>
#include <stdexcept>
#include <stdint.h>
#include <string>
#include <unordered_set>
#include <vector>

#include "db/builder.h"
#include "db/dbformat.h"
#include "db/db_iter.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/memtablelist.h"
#include "db/merge_helper.h"
#include "db/prefix_filter_iterator.h"
#include "db/table_cache.h"
#include "db/table_stats_collector.h"
#include "db/transaction_log_impl.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "port/port.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
#include "rocksdb/table.h"
#include "port/port.h"
#include "table/block.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/auto_roll_logger.h"
#include "util/build_version.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/perf_context_imp.h"
#include "util/stop_watch.h"

namespace rocksdb {

void dumpLeveldbBuildVersion(Logger * log);

// Information kept for every waiting writer
struct DBImpl::Writer {
  Status status;
  WriteBatch* batch;
  bool sync;
  bool disableWAL;
  bool done;
  port::CondVar cv;

  explicit Writer(port::Mutex* mu) : cv(mu) { }
};

struct DBImpl::CompactionState {
  Compaction* const compaction;

  // If there were two snapshots with seq numbers s1 and
  // s2, and s1 < s2, and if we find two instances of a key k1 that lie
  // entirely within s1 and s2, then the earlier version of k1 can be safely
  // deleted because that version is not visible in any snapshot.
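  // For example, with snapshots at sequence numbers 10 and 20, a key that
  // has versions at sequence 12 and 15 only needs the version at 15: the
  // snapshot at 20 sees 15, the snapshot at 10 sees neither, so the version
  // at 12 can be dropped during compaction.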
  std::vector<SequenceNumber> existing_snapshots;

  // Files produced by compaction
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
    SequenceNumber smallest_seqno, largest_seqno;
  };
  std::vector<Output> outputs;
  std::list<uint64_t> allocated_file_numbers;

  // State kept for output being generated
  unique_ptr<WritableFile> outfile;
  unique_ptr<TableBuilder> builder;

  uint64_t total_bytes;

  Output* current_output() { return &outputs[outputs.size()-1]; }

  explicit CompactionState(Compaction* c)
      : compaction(c),
        total_bytes(0) {
  }

  // Create a client visible context of this compaction
  CompactionFilter::Context GetFilterContext() {
    CompactionFilter::Context context;
    context.is_full_compaction = compaction->IsFullCompaction();
    return context;
  }
};

// Fix user-supplied options to be reasonable
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}
Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src) {
  Options result = src;
  result.comparator = icmp;
  result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
  ClipToRange(&result.max_open_files,            20,     1000000);
  ClipToRange(&result.write_buffer_size,         ((size_t)64)<<10,
                                                 ((size_t)64)<<30);
  ClipToRange(&result.block_size,                1<<10,  4<<20);

  // if user sets arena_block_size, we trust user to use this value. Otherwise,
  // calculate a proper value from write_buffer_size;
  if (result.arena_block_size <= 0) {
    result.arena_block_size = result.write_buffer_size / 10;
  }

  result.min_write_buffer_number_to_merge = std::min(
    result.min_write_buffer_number_to_merge, result.max_write_buffer_number-1);
  if (result.info_log == nullptr) {
    Status s = CreateLoggerFromOptions(dbname, result.db_log_dir, src.env,
                                       result, &result.info_log);
    if (!s.ok()) {
      // No place suitable for logging
      result.info_log = nullptr;
    }
  }
  if (result.block_cache == nullptr && !result.no_block_cache) {
    result.block_cache = NewLRUCache(8 << 20);
  }
  result.compression_per_level = src.compression_per_level;
  if (result.block_size_deviation < 0 || result.block_size_deviation > 100) {
    result.block_size_deviation = 0;
  }
  if (result.max_mem_compaction_level >= result.num_levels) {
    result.max_mem_compaction_level = result.num_levels - 1;
  }
  if (result.soft_rate_limit > result.hard_rate_limit) {
    result.soft_rate_limit = result.hard_rate_limit;
  }
  if (result.compaction_filter) {
    Log(result.info_log, "Compaction filter specified, ignore factory");
  }
  if (result.prefix_extractor) {
    // If a prefix extractor has been supplied and a PrefixHashRepFactory is
    // being used, make sure that the latter uses the former as its transform
    // function.
    auto factory = dynamic_cast<PrefixHashRepFactory*>(
      result.memtable_factory.get());
    if (factory &&
        factory->GetTransform() != result.prefix_extractor) {
      Log(result.info_log, "A prefix hash representation factory was supplied "
          "whose prefix extractor does not match options.prefix_extractor. "
          "Falling back to skip list representation factory");
      result.memtable_factory = std::make_shared<SkipListFactory>();
    } else if (factory) {
      Log(result.info_log, "Prefix hash memtable rep is in use.");
    }
  }

  if (result.wal_dir.empty()) {
    // Use dbname as default
    result.wal_dir = dbname;
  }

  // -- Sanitize the table stats collector
  // All user defined stats collectors will be wrapped by
  // UserKeyTableStatsCollector, since they only have knowledge of
  // the user keys; internal keys are invisible to them.
  auto& collectors = result.table_stats_collectors;
  for (size_t i = 0; i < result.table_stats_collectors.size(); ++i) {
    assert(collectors[i]);
    collectors[i] =
      std::make_shared<UserKeyTableStatsCollector>(collectors[i]);
  }

  // Add collector to collect internal key statistics
  collectors.push_back(
      std::make_shared<InternalKeyStatsCollector>()
  );

  if (!result.flush_block_policy_factory) {
    result.SetUpDefaultFlushBlockPolicyFactory();
  }

  return result;
}

CompressionType GetCompressionType(const Options& options, int level,
                                   const bool enable_compression) {
  if (!enable_compression) {
    // disable compression
    return kNoCompression;
  }
  // If the user has specified a different compression level for each level,
  // then pick the compression for that level.
  if (!options.compression_per_level.empty()) {
    const int n = options.compression_per_level.size() - 1;
    // It is possible for level_ to be -1; in that case, we use level
    // 0's compression.  This occurs mostly in backwards compatibility
    // situations when the builder doesn't know what level the file
    // belongs to.  Likewise, if level_ is beyond the end of the
    // specified compression levels, use the last value.
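    // For example, if compression_per_level has two entries (n == 1),
    // levels -1 and 0 map to entry 0 and every level >= 1 maps to entry 1.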
    return options.compression_per_level[std::max(0, std::min(level, n))];
  } else {
    return options.compression;
  }
}

DBImpl::DBImpl(const Options& options, const std::string& dbname)
    : env_(options.env),
      dbname_(dbname),
      internal_comparator_(options.comparator),
      options_(SanitizeOptions(
          dbname, &internal_comparator_, &internal_filter_policy_, options)),
      internal_filter_policy_(options.filter_policy),
      owns_info_log_(options_.info_log != options.info_log),
      db_lock_(nullptr),
      mutex_(options.use_adaptive_mutex),
      shutting_down_(nullptr),
      bg_cv_(&mutex_),
      mem_rep_factory_(options_.memtable_factory),
      mem_(new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_)),
      logfile_number_(0),
      tmp_batch_(),
      bg_compaction_scheduled_(0),
      bg_flush_scheduled_(0),
      bg_logstats_scheduled_(false),
      manual_compaction_(nullptr),
      logger_(nullptr),
      disable_delete_obsolete_files_(false),
      delete_obsolete_files_last_run_(0),
      purge_wal_files_last_run_(0),
      last_stats_dump_time_microsec_(0),
      default_interval_to_delete_obsolete_WAL_(600),
      stall_level0_slowdown_(0),
      stall_memtable_compaction_(0),
      stall_level0_num_files_(0),
      stall_level0_slowdown_count_(0),
      stall_memtable_compaction_count_(0),
      stall_level0_num_files_count_(0),
      started_at_(options.env->NowMicros()),
      flush_on_destroy_(false),
      stats_(options.num_levels),
      delayed_writes_(0),
      storage_options_(options),
      bg_work_gate_closed_(false),
      refitting_level_(false) {

  mem_->Ref();

  env_->GetAbsolutePath(dbname, &db_absolute_path_);

  stall_leveln_slowdown_.resize(options.num_levels);
  stall_leveln_slowdown_count_.resize(options.num_levels);
  for (int i = 0; i < options.num_levels; ++i) {
    stall_leveln_slowdown_[i] = 0;
    stall_leveln_slowdown_count_[i] = 0;
  }

  // Reserve ten files or so for other uses and give the rest to TableCache.
  const int table_cache_size = options_.max_open_files - 10;
  table_cache_.reset(new TableCache(dbname_, &options_,
                                    storage_options_, table_cache_size));

  versions_.reset(new VersionSet(dbname_, &options_, storage_options_,
                                 table_cache_.get(), &internal_comparator_));

  dumpLeveldbBuildVersion(options_.info_log.get());
  options_.Dump(options_.info_log.get());

  char name[100];
  Status st = env_->GetHostName(name, 100L);
  if (st.ok()) {
    host_name_ = name;
  } else {
    Log(options_.info_log, "Can't get hostname, use localhost as host name.");
    host_name_ = "localhost";
  }
  last_log_ts = 0;

  LogFlush(options_.info_log);
}

DBImpl::~DBImpl() {
  // Wait for background work to finish
  if (flush_on_destroy_ && mem_->GetFirstSequenceNumber() != 0) {
    FlushMemTable(FlushOptions());
  }
  mutex_.Lock();
  shutting_down_.Release_Store(this);  // Any non-nullptr value is ok
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  if (mem_ != nullptr) mem_->Unref();
  imm_.UnrefAll();
  LogFlush(options_.info_log);
}

// Do not flush and close database elegantly. Simulate a crash.
void DBImpl::TEST_Destroy_DBImpl() {
  // ensure that no new memtable flushes can occur
  flush_on_destroy_ = false;

  // wait till all background compactions are done.
  mutex_.Lock();
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }

  // Prevent new compactions from occurring.
  bg_work_gate_closed_ = true;
  const int LargeNumber = 10000000;
  bg_compaction_scheduled_ += LargeNumber;

  mutex_.Unlock();
  LogFlush(options_.info_log);

  // force release the lock file.
  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  log_.reset();
  versions_.reset();
  table_cache_.reset();
}

uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
  return versions_->ManifestFileNumber();
}

Status DBImpl::NewDB() {
  VersionEdit new_db(NumberLevels());
  new_db.SetComparatorName(user_comparator()->Name());
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  unique_ptr<WritableFile> file;
  Status s = env_->NewWritableFile(manifest, &file, storage_options_);
  if (!s.ok()) {
    return s;
  }
  file->SetPreallocationBlockSize(options_.manifest_preallocation_size);
  {
    log::Writer log(std::move(file));
    std::string record;
    new_db.EncodeTo(&record);
    s = log.AddRecord(record);
  }
  if (s.ok()) {
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
  } else {
    env_->DeleteFile(manifest);
  }
  return s;
}

void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || options_.paranoid_checks) {
    // No change needed
  } else {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
    *s = Status::OK();
  }
}

const Status DBImpl::CreateArchivalDirectory() {
  if (options_.WAL_ttl_seconds > 0 || options_.WAL_size_limit_MB > 0) {
    std::string archivalPath = ArchivalDirectory(options_.wal_dir);
    return env_->CreateDirIfMissing(archivalPath);
  }
  return Status::OK();
}

void DBImpl::PrintStatistics() {
  auto dbstats = options_.statistics;
  if (dbstats) {
    Log(options_.info_log,
        "STATISTICS:\n %s",
        dbstats->ToString().c_str());
  }
}

void DBImpl::MaybeDumpStats() {
  if (options_.stats_dump_period_sec == 0) return;

  const uint64_t now_micros = env_->NowMicros();

  if (last_stats_dump_time_microsec_ +
      options_.stats_dump_period_sec * 1000000
      <= now_micros) {
    // Multiple threads could race in here simultaneously.
    // However, the last one will update last_stats_dump_time_microsec_
    // atomically. We could see more than one dump during one dump
    // period in rare cases.
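    // For example, with stats_dump_period_sec == 600 the stats string is
    // written to the info log roughly once every ten minutes.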
    last_stats_dump_time_microsec_ = now_micros;
    std::string stats;
    GetProperty("rocksdb.stats", &stats);
    Log(options_.info_log, "%s", stats.c_str());
    PrintStatistics();
  }
}

// Returns the list of live files in 'sst_live' and the list
// of all files in the filesystem in 'all_files'.
void DBImpl::FindObsoleteFiles(DeletionState& deletion_state, bool force) {
  mutex_.AssertHeld();

  // if deletion is disabled, do nothing
  if (disable_delete_obsolete_files_) {
    return;
  }

  // store the current filenum, lognum, etc
  deletion_state.manifest_file_number = versions_->ManifestFileNumber();
  deletion_state.log_number = versions_->LogNumber();
  deletion_state.prev_log_number = versions_->PrevLogNumber();

  // This method is costly when the number of files is large.
  // Do not allow it to trigger more often than once in
  // delete_obsolete_files_period_micros.
  if (!force && options_.delete_obsolete_files_period_micros != 0) {
    const uint64_t now_micros = env_->NowMicros();
    if (delete_obsolete_files_last_run_ +
        options_.delete_obsolete_files_period_micros > now_micros) {
      return;
    }
    delete_obsolete_files_last_run_ = now_micros;
  }

  // Make a list of all of the live files; set is slow, should not
  // be used.
  deletion_state.sst_live.assign(pending_outputs_.begin(),
                                 pending_outputs_.end());
  versions_->AddLiveFiles(&deletion_state.sst_live);

  // set of all files in the directory
  env_->GetChildren(dbname_, &deletion_state.all_files); // Ignore errors

  // Add log files in wal_dir
  if (options_.wal_dir != dbname_) {
    std::vector<std::string> log_files;
    env_->GetChildren(options_.wal_dir, &log_files); // Ignore errors
    deletion_state.all_files.insert(
      deletion_state.all_files.end(),
      log_files.begin(),
      log_files.end()
    );
  }
}

// Diffs the files listed in filenames and those that do not
// belong to live files are possibly removed. Also, removes all the
// files in sst_delete_files and log_delete_files.
// It is not necessary to hold the mutex when invoking this method.
void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
  // if deletion is disabled, do nothing
  if (disable_delete_obsolete_files_) {
    return;
  }

  uint64_t number;
  FileType type;
  std::vector<std::string> old_log_files;

  // Now, convert live list to an unordered set, WITHOUT mutex held;
  // set is slow.
  std::unordered_set<uint64_t> live_set(state.sst_live.begin(),
                                        state.sst_live.end());

  state.all_files.reserve(state.all_files.size() +
      state.sst_delete_files.size());
  for (auto file : state.sst_delete_files) {
    state.all_files.push_back(TableFileName("", file->number).substr(1));
    delete file;
  }

  state.all_files.reserve(state.all_files.size() +
      state.log_delete_files.size());
  for (auto filenum : state.log_delete_files) {
    if (filenum > 0) {
      state.all_files.push_back(LogFileName("", filenum).substr(1));
    }
  }

  // dedup state.all_files so we don't try to delete the same
  // file twice
  sort(state.all_files.begin(), state.all_files.end());
  auto unique_end = unique(state.all_files.begin(), state.all_files.end());

  for (size_t i = 0; state.all_files.begin() + i < unique_end; i++) {
    if (ParseFileName(state.all_files[i], &number, &type)) {
      bool keep = true;
      switch (type) {
        case kLogFile:
          keep = ((number >= state.log_number) ||
                  (number == state.prev_log_number));
          break;
        case kDescriptorFile:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= state.manifest_file_number);
          break;
        case kTableFile:
          keep = (live_set.find(number) != live_set.end());
          break;
        case kTempFile:
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = (live_set.find(number) != live_set.end());
          break;
        case kInfoLogFile:
          keep = true;
          if (number != 0) {
            old_log_files.push_back(state.all_files[i]);
          }
          break;
        case kCurrentFile:
        case kDBLockFile:
        case kIdentityFile:
        case kMetaDatabase:
          keep = true;
          break;
      }

      if (!keep) {
        if (type == kTableFile) {
          // evict from cache
          table_cache_->Evict(number);
        }
        std::string fname = dbname_ + "/" + state.all_files[i];

        Log(options_.info_log, "Delete type=%d #%lu -- %s",
            int(type), number, fname.c_str());

        Status st;
        if (type == kLogFile && (options_.WAL_ttl_seconds > 0 ||
              options_.WAL_size_limit_MB > 0)) {
            st = env_->RenameFile(fname,
                ArchivedLogFileName(options_.wal_dir, number));
            if (!st.ok()) {
              Log(options_.info_log, "RenameFile logfile #%lu FAILED -- %s\n",
                  number, st.ToString().c_str());
            }
        } else {
          st = env_->DeleteFile(fname);
          if (!st.ok()) {
            Log(options_.info_log, "Delete type=%d #%lu FAILED -- %s\n",
                int(type), number, st.ToString().c_str());
          }
        }
      }
    }
  }

  // Delete old info log files.
  size_t old_log_file_count = old_log_files.size();
  // NOTE: Currently we only support log purge when options_.db_log_dir is
  // located in `dbname` directory.
  if (old_log_file_count >= options_.keep_log_file_num &&
      options_.db_log_dir.empty()) {
    std::sort(old_log_files.begin(), old_log_files.end());
    size_t end = old_log_file_count - options_.keep_log_file_num;
    for (unsigned int i = 0; i <= end; i++) {
      std::string& to_delete = old_log_files.at(i);
      // Log(options_.info_log, "Delete type=%d %s\n",
      //     int(kInfoLogFile), to_delete.c_str());
      env_->DeleteFile(dbname_ + "/" + to_delete);
    }
  }
  PurgeObsoleteWALFiles();
  LogFlush(options_.info_log);
}

void DBImpl::DeleteObsoleteFiles() {
  mutex_.AssertHeld();
  DeletionState deletion_state;
  FindObsoleteFiles(deletion_state, true);
  PurgeObsoleteFiles(deletion_state);
}

// 1. Go through all archived files and
//    a. if ttl is enabled, delete outdated files
//    b. if archive size limit is enabled, delete empty files,
//        compute file number and size.
// 2. If size limit is enabled:
//    a. compute how many files should be deleted
//    b. get sorted non-empty archived logs
//    c. delete what should be deleted
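// For example (illustrative numbers only): with WAL_size_limit_MB == 1 and
// ten archived logs of roughly 512 KB each, files_keep_num evaluates to 2,
// so the eight oldest archived logs are deleted.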
void DBImpl::PurgeObsoleteWALFiles() {
  bool const ttl_enabled = options_.WAL_ttl_seconds > 0;
  bool const size_limit_enabled =  options_.WAL_size_limit_MB > 0;
  if (!ttl_enabled && !size_limit_enabled) {
    return;
  }

  int64_t current_time;
  Status s = env_->GetCurrentTime(&current_time);
  if (!s.ok()) {
    Log(options_.info_log, "Can't get current time: %s", s.ToString().c_str());
    assert(false);
    return;
  }
  uint64_t const now_seconds = static_cast<uint64_t>(current_time);
  uint64_t const time_to_check = (ttl_enabled && !size_limit_enabled) ?
    options_.WAL_ttl_seconds / 2 : default_interval_to_delete_obsolete_WAL_;

  if (purge_wal_files_last_run_ + time_to_check > now_seconds) {
    return;
  }

  purge_wal_files_last_run_ = now_seconds;

  std::string archival_dir = ArchivalDirectory(options_.wal_dir);
  std::vector<std::string> files;
  s = env_->GetChildren(archival_dir, &files);
  if (!s.ok()) {
    Log(options_.info_log, "Can't get archive files: %s", s.ToString().c_str());
    assert(false);
    return;
  }

  size_t log_files_num = 0;
  uint64_t log_file_size = 0;

  for (auto& f : files) {
    uint64_t number;
    FileType type;
    if (ParseFileName(f, &number, &type) && type == kLogFile) {
      std::string const file_path = archival_dir + "/" + f;
      if (ttl_enabled) {
        uint64_t file_m_time;
        Status const s = env_->GetFileModificationTime(file_path,
          &file_m_time);
        if (!s.ok()) {
          Log(options_.info_log, "Can't get file mod time: %s: %s",
              file_path.c_str(), s.ToString().c_str());
          continue;
        }
        if (now_seconds - file_m_time > options_.WAL_ttl_seconds) {
          Status const s = env_->DeleteFile(file_path);
          if (!s.ok()) {
            Log(options_.info_log, "Can't delete file: %s: %s",
                file_path.c_str(), s.ToString().c_str());
            continue;
          }
          continue;
        }
      }

      if (size_limit_enabled) {
        uint64_t file_size;
        Status const s = env_->GetFileSize(file_path, &file_size);
        if (!s.ok()) {
          Log(options_.info_log, "Can't get file size: %s: %s",
              file_path.c_str(), s.ToString().c_str());
          return;
        } else {
          if (file_size > 0) {
            log_file_size = std::max(log_file_size, file_size);
            ++log_files_num;
          } else {
            Status s = env_->DeleteFile(file_path);
            if (!s.ok()) {
              Log(options_.info_log, "Can't delete file: %s: %s",
                  file_path.c_str(), s.ToString().c_str());
              continue;
            }
          }
        }
      }
    }
  }

  if (0 == log_files_num || !size_limit_enabled) {
    return;
  }

  size_t const files_keep_num = options_.WAL_size_limit_MB *
    1024 * 1024 / log_file_size;
  if (log_files_num <= files_keep_num) {
    return;
  }

  size_t files_del_num = log_files_num - files_keep_num;
  VectorLogPtr archived_logs;
  AppendSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);

  if (files_del_num > archived_logs.size()) {
    Log(options_.info_log, "Trying to delete more archived log files than "
        "exist. Deleting all");
    files_del_num = archived_logs.size();
  }

  for (size_t i = 0; i < files_del_num; ++i) {
    std::string const file_path = archived_logs[i]->PathName();
    Status const s = DeleteFile(file_path);
    if (!s.ok()) {
      Log(options_.info_log, "Can't delete file: %s: %s",
          file_path.c_str(), s.ToString().c_str());
      continue;
736 737
    }
  }
D
738 739
}

// If externalTable is set, then apply recovered transactions
// to that table. This is used for readonly mode.
Status DBImpl::Recover(VersionEdit* edit, MemTable* external_table,
    bool error_if_log_file_exist) {
  mutex_.AssertHeld();

  assert(db_lock_ == nullptr);
  if (!external_table) {
    // We call CreateDirIfMissing() as the directory may already exist (if we
    // are reopening a DB), when this happens we don't want creating the
    // directory to cause an error. However, we need to check if creating the
    // directory fails or else we may get an obscure message about the lock
    // file not existing. One real-world example of this occurring is if
    // env->CreateDirIfMissing() doesn't create intermediate directories, e.g.
    // when dbname_ is "dir/db" but when "dir" doesn't exist.
    Status s = env_->CreateDirIfMissing(dbname_);
    if (!s.ok()) {
      return s;
    }

    s = env_->LockFile(LockFileName(dbname_), &db_lock_);
    if (!s.ok()) {
      return s;
    }

    if (!env_->FileExists(CurrentFileName(dbname_))) {
      if (options_.create_if_missing) {
        // TODO: add merge_operator name check
        s = NewDB();
        if (!s.ok()) {
          return s;
        }
      } else {
        return Status::InvalidArgument(
            dbname_, "does not exist (create_if_missing is false)");
      }
    } else {
      if (options_.error_if_exists) {
        return Status::InvalidArgument(
            dbname_, "exists (error_if_exists is true)");
      }
    }
    // Check for the IDENTITY file and create it if not there
    if (!env_->FileExists(IdentityFileName(dbname_))) {
      s = SetIdentityFile(env_, dbname_);
      if (!s.ok()) {
        return s;
      }
    }
  }

  Status s = versions_->Recover();
  if (s.ok()) {
    SequenceNumber max_sequence(0);

    // Recover from all newer log files than the ones named in the
    // descriptor (new log files may have been added by the previous
    // incarnation without registering them in the descriptor).
    //
    // Note that PrevLogNumber() is no longer used, but we pay
    // attention to it in case we are recovering a database
    // produced by an older version of rocksdb.
    const uint64_t min_log = versions_->LogNumber();
    const uint64_t prev_log = versions_->PrevLogNumber();
    std::vector<std::string> filenames;
    s = env_->GetChildren(options_.wal_dir, &filenames);
    if (!s.ok()) {
      return s;
    }
    uint64_t number;
    FileType type;
    std::vector<uint64_t> logs;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type)
          && type == kLogFile
          && ((number >= min_log) || (number == prev_log))) {
        logs.push_back(number);
      }
    }

    if (logs.size() > 0 && error_if_log_file_exist) {
      return Status::Corruption(
          "The db was opened in readonly mode with error_if_log_file_exist "
          "flag but a log file already exists");
    }

    // Recover in the order in which the logs were generated
    std::sort(logs.begin(), logs.end());
    for (size_t i = 0; i < logs.size(); i++) {
      s = RecoverLogFile(logs[i], edit, &max_sequence, external_table);
      // The previous incarnation may not have written any MANIFEST
      // records after allocating this log number.  So we manually
      // update the file number allocation counter in VersionSet.
      versions_->MarkFileNumberUsed(logs[i]);
    }

    if (s.ok()) {
      if (versions_->LastSequence() < max_sequence) {
        versions_->SetLastSequence(max_sequence);
      }
      SetTickerCount(options_.statistics, SEQUENCE_NUMBER,
                     versions_->LastSequence());
    }
  }

  return s;
}

Status DBImpl::RecoverLogFile(uint64_t log_number,
                              VersionEdit* edit,
                              SequenceNumber* max_sequence,
                              MemTable* external_table) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false or
                     //            options_.skip_log_error_on_recovery==true
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  mutex_.AssertHeld();

  // Open the log file
  std::string fname = LogFileName(options_.wal_dir, log_number);
  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);
  if (!status.ok()) {
    MaybeIgnoreError(&status);
    return status;
  }

  // Create the log reader.
  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks &&
                     !options_.skip_log_error_on_recovery ? &status : nullptr);
  // We intentionally make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%llu",
      (unsigned long long) log_number);

  // Read all the records and add to a memtable
  std::string scratch;
  Slice record;
  WriteBatch batch;
  MemTable* mem = nullptr;
  if (external_table) {
    mem = external_table;
  }
  while (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      continue;
    }
    WriteBatchInternal::SetContents(&batch, record);

    if (mem == nullptr) {
      mem = new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_);
      mem->Ref();
    }
    status = WriteBatchInternal::InsertInto(&batch, mem, &options_);
    MaybeIgnoreError(&status);
    if (!status.ok()) {
      break;
    }
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;
    }

    if (!external_table &&
        mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
      status = WriteLevel0TableForRecovery(mem, edit);
      if (!status.ok()) {
        // Reflect errors immediately so that conditions like full
        // file-systems cause the DB::Open() to fail.
        break;
      }
      mem->Unref();
      mem = nullptr;
    }
  }

  if (status.ok() && mem != nullptr && !external_table) {
    status = WriteLevel0TableForRecovery(mem, edit);
    // Reflect errors immediately so that conditions like full
    // file-systems cause the DB::Open() to fail.
  }

  if (mem != nullptr && !external_table) mem->Unref();
  return status;
}

Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mem->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 table #%llu: started",
      (unsigned long long) meta.number);

  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable, true);
    LogFlush(options_.info_log);
    mutex_.Lock();
  }

  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats.files_out_levelnp1 = 1;
  stats_[level].Add(stats);
  return s;
}


Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
                                uint64_t* filenumber) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  *filenumber = meta.number;
  pending_outputs_.insert(meta.number);

  std::vector<Iterator*> list;
  for (MemTable* m : mems) {
    Log(options_.info_log,
        "Flushing memtable with log file: %llu\n",
        m->GetLogNumber());
    list.push_back(m->NewIterator());
  }
  Iterator* iter = NewMergingIterator(&internal_comparator_, &list[0],
                                      list.size());
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mems[0]->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 flush table #%llu: started", meta.number);

  Version* base = versions_->current();
  base->Ref();          // it is likely that we do not need this reference
  Status s;
  {
    mutex_.Unlock();
    // We skip compression if universal compression is used and the size
    // threshold is set for compression.
    bool enable_compression = (options_.compaction_style
        != kCompactionStyleUniversal ||
        options_.compaction_options_universal.compression_size_percent < 0);
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable, enable_compression);
    LogFlush(options_.info_log);
    mutex_.Lock();
  }
  base->Unref();

  Log(options_.info_log, "Level-0 flush table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  // re-acquire the most current version
  base = versions_->current();

  // There could be multiple threads, each writing to its own level-0 file.
  // The pending_outputs cannot be cleared here, otherwise this newly
  // created file might not be considered as a live-file by another
  // compaction thread that is concurrently deleting obsolete files.
  // The pending_outputs can be cleared only after the new version is
A
1055
  // committed so that other threads can recognize this file as a
1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068
  // valid one.
  // pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
    // if we have more than 1 background thread, then we cannot
    // insert files directly into higher levels because some other
    // threads could be concurrently producing compacted files for
    // that key range.
1069
    if (base != nullptr && options_.max_background_compactions <= 1 &&
1070
        options_.compaction_style == kCompactionStyleLevel) {
1071 1072 1073
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    }
    edit->AddFile(level, meta.number, meta.file_size,
1074 1075
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
1076 1077 1078 1079 1080 1081 1082 1083 1084
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats_[level].Add(stats);
  return s;
}

Status DBImpl::FlushMemTableToOutputFile(bool* madeProgress,
                                         DeletionState& deletion_state) {
  mutex_.AssertHeld();
  assert(imm_.size() != 0);

  if (!imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log, "FlushMemTableToOutputFile already in progress");
    Status s = Status::IOError("FlushMemTableToOutputFile already in progress");
    return s;
  }

  // Save the contents of the earliest memtable as a new Table
  uint64_t file_number;
  std::vector<MemTable*> mems;
  imm_.PickMemtablesToFlush(&mems);
  if (mems.empty()) {
    Log(options_.info_log, "Nothing in memstore to flush");
    Status s = Status::IOError("Nothing in memstore to flush");
    return s;
  }

  // record the logfile_number_ before we release the mutex
  // entries mems are (implicitly) sorted in ascending order by their created
  // time. We will use the first memtable's `edit` to keep the meta info for
  // this flush.
  MemTable* m = mems[0];
  VersionEdit* edit = m->GetEdits();
  edit->SetPrevLogNumber(0);
  // SetLogNumber(log_num) indicates logs with number smaller than log_num
  // will no longer be picked up for recovery.
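  // For example, if the memtables being flushed were backed by logs 7 and 8
  // and the newest of them reports log 9 as its next log, recovery can
  // safely start from log 9 once this flush result is installed.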
  edit->SetLogNumber(
      mems.back()->GetNextLogNumber()
  );

  std::vector<uint64_t> logs_to_delete;
  for (auto mem : mems) {
    logs_to_delete.push_back(mem->GetLogNumber());
  }

  // This will release and re-acquire the mutex.
  Status s = WriteLevel0Table(mems, edit, &file_number);

  if (s.ok() && shutting_down_.Acquire_Load()) {
    s = Status::IOError(
      "Database shutdown started during memtable compaction"
    );
  }

  // Replace immutable memtable with the generated Table
  s = imm_.InstallMemtableFlushResults(
    mems, versions_.get(), s, &mutex_, options_.info_log.get(),
    file_number, pending_outputs_);

  if (s.ok()) {
    if (madeProgress) {
      *madeProgress = 1;
    }

    MaybeScheduleLogDBDeployStats();

    if (!disable_delete_obsolete_files_) {
      // add to deletion state
      deletion_state.log_delete_files.insert(
          deletion_state.log_delete_files.end(),
          logs_to_delete.begin(),
          logs_to_delete.end());
    }
  }
  return s;
}

void DBImpl::CompactRange(const Slice* begin, const Slice* end,
                          bool reduce_level, int target_level) {
  int max_level_with_files = 1;
  {
    MutexLock l(&mutex_);
    Version* base = versions_->current();
    for (int level = 1; level < NumberLevels(); level++) {
      if (base->OverlapInLevel(level, begin, end)) {
        max_level_with_files = level;
      }
    }
  }
  TEST_FlushMemTable(); // TODO(sanjay): Skip if memtable does not overlap
  for (int level = 0; level < max_level_with_files; level++) {
    TEST_CompactRange(level, begin, end);
  }

  if (reduce_level) {
    ReFitLevel(max_level_with_files, target_level);
  }
  LogFlush(options_.info_log);
}

// return the same level if it cannot be moved
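// For example, if level 4 holds 100 MB, levels 2 and 3 are empty and large
// enough to hold it, but level 1 is not, the files can be refit to level 2.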
int DBImpl::FindMinimumEmptyLevelFitting(int level) {
  mutex_.AssertHeld();
  int minimum_level = level;
  for (int i = level - 1; i > 0; --i) {
    // stop if level i is not empty
    if (versions_->NumLevelFiles(i) > 0) break;

    // stop if level i is too small (cannot fit the level files)
    if (versions_->MaxBytesForLevel(i) < versions_->NumLevelBytes(level)) break;

    minimum_level = i;
  }
  return minimum_level;
}

void DBImpl::ReFitLevel(int level, int target_level) {
  assert(level < NumberLevels());

  MutexLock l(&mutex_);

  // only allow one thread refitting
  if (refitting_level_) {
    Log(options_.info_log, "ReFitLevel: another thread is refitting");
    return;
  }
  refitting_level_ = true;

  // wait for all background threads to stop
  bg_work_gate_closed_ = true;
  while (bg_compaction_scheduled_ > 0 || bg_flush_scheduled_) {
    Log(options_.info_log,
        "RefitLevel: waiting for background threads to stop: %d %d",
        bg_compaction_scheduled_, bg_flush_scheduled_);
    bg_cv_.Wait();
  }

  // move to a smaller level
  int to_level = target_level;
  if (target_level < 0) {
    to_level = FindMinimumEmptyLevelFitting(level);
  }

  assert(to_level <= level);

  if (to_level < level) {
    Log(options_.info_log, "Before refitting:\n%s",
        versions_->current()->DebugString().data());

    VersionEdit edit(NumberLevels());
    for (const auto& f : versions_->current()->files_[level]) {
      edit.DeleteFile(level, f->number);
      edit.AddFile(to_level, f->number, f->file_size, f->smallest, f->largest,
                   f->smallest_seqno, f->largest_seqno);
    }
    Log(options_.info_log, "Apply version edit:\n%s",
        edit.DebugString().data());

    auto status = versions_->LogAndApply(&edit, &mutex_);

    Log(options_.info_log, "LogAndApply: %s\n", status.ToString().data());

    if (status.ok()) {
      Log(options_.info_log, "After refitting:\n%s",
          versions_->current()->DebugString().data());
    }
  }

  refitting_level_ = false;
  bg_work_gate_closed_ = false;
}

int DBImpl::NumberLevels() {
  return options_.num_levels;
}

int DBImpl::MaxMemCompactionLevel() {
  return options_.max_mem_compaction_level;
}

int DBImpl::Level0StopWriteTrigger() {
  return options_.level0_stop_writes_trigger;
}

Status DBImpl::Flush(const FlushOptions& options) {
  Status status = FlushMemTable(options);
  return status;
}

SequenceNumber DBImpl::GetLatestSequenceNumber() const {
  return versions_->LastSequence();
}

Status DBImpl::GetUpdatesSince(SequenceNumber seq,
                               unique_ptr<TransactionLogIterator>* iter) {

  RecordTick(options_.statistics, GET_UPDATES_SINCE_CALLS);
  if (seq > versions_->LastSequence()) {
    return Status::IOError("Requested sequence not yet written in the db");
  }
  //  Get all sorted Wal Files.
  //  Do binary search and open files and find the seq number.

  std::unique_ptr<VectorLogPtr> wal_files(new VectorLogPtr);
  Status s = GetSortedWalFiles(*wal_files);
  if (!s.ok()) {
    return s;
  }

  s = RetainProbableWalFiles(*wal_files, seq);
  if (!s.ok()) {
    return s;
  }
  iter->reset(
    new TransactionLogIteratorImpl(options_.wal_dir,
                                   &options_,
                                   storage_options_,
                                   seq,
                                   std::move(wal_files),
                                   this));
  return (*iter)->status();
}

Status DBImpl::RetainProbableWalFiles(VectorLogPtr& all_logs,
                                      const SequenceNumber target) {
  long start = 0; // signed to avoid overflow when target is < first file.
  long end = static_cast<long>(all_logs.size()) - 1;
  // Binary Search. avoid opening all files.
  while (end >= start) {
    long mid = start + (end - start) / 2;  // Avoid overflow.
    SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence();
    if (current_seq_num == target) {
      end = mid;
      break;
    } else if (current_seq_num < target) {
      start = mid + 1;
    } else {
      end = mid - 1;
    }
  }
  size_t start_index = std::max(0l, end); // end could be negative.
  // The last wal file is always included
  all_logs.erase(all_logs.begin(), all_logs.begin() + start_index);
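  // For example, if the files start at sequences {5, 20, 40} and the target
  // is 25, the search leaves end == 1, so only the files starting at 20 and
  // 40 are retained (the file starting at 20 may still contain sequence 25).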
  return Status::OK();
}

bool DBImpl::CheckWalFileExistsAndEmpty(const WalFileType type,
                                        const uint64_t number) {
  const std::string fname = (type == kAliveLogFile) ?
    LogFileName(options_.wal_dir, number) :
    ArchivedLogFileName(options_.wal_dir, number);
  uint64_t file_size;
  Status s = env_->GetFileSize(fname, &file_size);
  return (s.ok() && (file_size == 0));
}

Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number,
                               WriteBatch* const result) {

  if (type == kAliveLogFile) {
    std::string fname = LogFileName(options_.wal_dir, number);
    Status status = ReadFirstLine(fname, result);
    if (!status.ok()) {
      //  check if the file got moved to archive.
      std::string archived_file =
        ArchivedLogFileName(options_.wal_dir, number);
      Status s = ReadFirstLine(archived_file, result);
      if (!s.ok()) {
        return Status::IOError("Log File has been deleted: " + archived_file);
      }
    }
    return Status::OK();
  } else if (type == kArchivedLogFile) {
    std::string fname = ArchivedLogFileName(options_.wal_dir, number);
    Status status = ReadFirstLine(fname, result);
    return status;
  }
  return Status::NotSupported("File Type Not Known: " + std::to_string(type));
}

Status DBImpl::ReadFirstLine(const std::string& fname,
                             WriteBatch* const batch) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);

  if (!status.ok()) {
    return status;
  }


  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks ? &status : nullptr);
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  std::string scratch;
  Slice record;

  if (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      return Status::IOError("Corruption noted");
      //  TODO read record's till the first no corrupt entry?
    }
    WriteBatchInternal::SetContents(batch, record);
    return Status::OK();
  }
  return Status::IOError("Error reading from file " + fname);
}

struct CompareLogByPointer {
  bool operator() (const unique_ptr<LogFile>& a,
                   const unique_ptr<LogFile>& b) {
    LogFileImpl* a_impl = dynamic_cast<LogFileImpl*>(a.get());
    LogFileImpl* b_impl = dynamic_cast<LogFileImpl*>(b.get());
    return *a_impl < *b_impl;
  }
};

Status DBImpl::AppendSortedWalsOfType(const std::string& path,
    VectorLogPtr& log_files, WalFileType log_type) {
  std::vector<std::string> all_files;
  const Status status = env_->GetChildren(path, &all_files);
1418 1419 1420
  if (!status.ok()) {
    return status;
  }
1421
  log_files.reserve(log_files.size() + all_files.size());
1422 1423 1424 1425 1426 1427
  VectorLogPtr::iterator pos_start;
  if (!log_files.empty()) {
    pos_start = log_files.end() - 1;
  } else {
    pos_start = log_files.begin();
  }
1428
  for (const auto& f : all_files) {
1429 1430
    uint64_t number;
    FileType type;
1431
    if (ParseFileName(f, &number, &type) && type == kLogFile){
1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449

      WriteBatch batch;
      Status s = ReadFirstRecord(log_type, number, &batch);
      if (!s.ok()) {
        if (CheckWalFileExistsAndEmpty(log_type, number)) {
          continue;
        }
        return s;
      }

      uint64_t size_bytes;
      s = env_->GetFileSize(LogFileName(path, number), &size_bytes);
      if (!s.ok()) {
        return s;
      }

      log_files.push_back(std::move(unique_ptr<LogFile>(new LogFileImpl(
        number, log_type, WriteBatchInternal::Sequence(&batch), size_bytes))));
1450 1451
    }
  }
1452
  CompareLogByPointer compare_log_files;
1453
  std::sort(pos_start, log_files.end(), compare_log_files);
1454 1455 1456
  return status;
}

G
Gabor Cselle 已提交
1457
void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
1458 1459
  assert(level >= 0);

G
Gabor Cselle 已提交
1460 1461
  InternalKey begin_storage, end_storage;

H
hans@chromium.org 已提交
1462 1463
  ManualCompaction manual;
  manual.level = level;
G
Gabor Cselle 已提交
1464
  manual.done = false;
1465
  manual.in_progress = false;
1466 1467 1468 1469
  // For universal compaction, we enforce every manual compaction to compact
  // all files.
  if (begin == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
1470
    manual.begin = nullptr;
G
Gabor Cselle 已提交
1471 1472 1473 1474
  } else {
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
  }
1475 1476
  if (end == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
1477
    manual.end = nullptr;
G
Gabor Cselle 已提交
1478 1479 1480 1481 1482 1483
  } else {
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;
  }

  MutexLock l(&mutex_);
1484

A
Abhishek Kona 已提交
1485 1486 1487 1488
  // When a manual compaction arrives, temporarily throttle down
  // the number of background compaction threads to 1. This is
  // needed to ensure that this manual compaction can compact
  // any range of keys/files. We artificialy increase
1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
  // bg_compaction_scheduled_ by a large number, this causes
  // the system to have a single background thread. Now,
  // this manual compaction can progress without stomping
  // on any other concurrent compactions.
  const int LargeNumber = 10000000;
  const int newvalue = options_.max_background_compactions-1;
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber) {
    Log(options_.info_log, "Manual compaction request waiting for background threads to fall below 1");
    bg_cv_.Wait();
  }
  Log(options_.info_log, "Manual compaction starting");

G
Gabor Cselle 已提交
1502
  while (!manual.done) {
1503
    while (manual_compaction_ != nullptr) {
G
Gabor Cselle 已提交
1504 1505 1506
      bg_cv_.Wait();
    }
    manual_compaction_ = &manual;
1507 1508 1509
    if (bg_compaction_scheduled_ == LargeNumber) {
      bg_compaction_scheduled_ = newvalue;
    }
1510
    MaybeScheduleFlushOrCompaction();
G
Gabor Cselle 已提交
1511 1512 1513
    while (manual_compaction_ == &manual) {
      bg_cv_.Wait();
    }
H
hans@chromium.org 已提交
1514
  }
1515 1516 1517 1518 1519 1520 1521 1522 1523
  assert(!manual.in_progress);

  // wait till there are no background threads scheduled
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber + newvalue) {
    Log(options_.info_log, "Manual compaction resetting background threads");
    bg_cv_.Wait();
  }
  bg_compaction_scheduled_ = 0;
J
jorlow@chromium.org 已提交
1524 1525
}

H
heyongqiang 已提交
1526
Status DBImpl::FlushMemTable(const FlushOptions& options) {
1527 1528
  // nullptr batch means just wait for earlier writes to be done
  Status s = Write(WriteOptions(), nullptr);
H
heyongqiang 已提交
1529
  if (s.ok() && options.wait) {
1530
    // Wait until the compaction completes
1531
    s = WaitForFlushMemTable();
1532 1533
  }
  return s;
J
jorlow@chromium.org 已提交
1534 1535
}

1536
Status DBImpl::WaitForFlushMemTable() {
1537 1538 1539
  Status s;
  // Wait until the compaction completes
  MutexLock l(&mutex_);
1540
  while (imm_.size() > 0 && bg_error_.ok()) {
1541 1542
    bg_cv_.Wait();
  }
1543
  if (imm_.size() != 0) {
1544 1545 1546
    s = bg_error_;
  }
  return s;
H
heyongqiang 已提交
1547 1548
}

1549
Status DBImpl::TEST_FlushMemTable() {
H
heyongqiang 已提交
1550 1551 1552
  return FlushMemTable(FlushOptions());
}

1553 1554
Status DBImpl::TEST_WaitForFlushMemTable() {
  return WaitForFlushMemTable();
1555 1556 1557
}

Status DBImpl::TEST_WaitForCompact() {
1558
  // Wait until the compaction completes
1559 1560 1561 1562 1563

  // TODO: a bug here. This function actually does not necessarily
  // wait for compact. It actually waits for scheduled compaction
  // OR flush to finish.

1564
  MutexLock l(&mutex_);
1565 1566
  while ((bg_compaction_scheduled_ || bg_flush_scheduled_) &&
         bg_error_.ok()) {
1567 1568 1569
    bg_cv_.Wait();
  }
  return bg_error_;
1570 1571
}

1572
void DBImpl::MaybeScheduleFlushOrCompaction() {
J
jorlow@chromium.org 已提交
1573
  mutex_.AssertHeld();
1574 1575
  if (bg_work_gate_closed_) {
    // gate closed for backgrond work
J
jorlow@chromium.org 已提交
1576 1577 1578
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else {
1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606
    bool is_flush_pending =
      imm_.IsFlushPending(options_.min_write_buffer_number_to_merge);
    if (is_flush_pending &&
        (bg_flush_scheduled_ < options_.max_background_flushes)) {
      // memtable flush needed
      bg_flush_scheduled_++;
      env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::HIGH);
    }

    if ((manual_compaction_ ||
         versions_->NeedsCompaction() ||
         (is_flush_pending && (options_.max_background_flushes <= 0))) &&
        bg_compaction_scheduled_ < options_.max_background_compactions) {
      // compaction needed, or memtable flush needed but HIGH pool not enabled.
      bg_compaction_scheduled_++;
      env_->Schedule(&DBImpl::BGWorkCompaction, this, Env::Priority::LOW);
    }
  }
}

void DBImpl::BGWorkFlush(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCallFlush();
}

void DBImpl::BGWorkCompaction(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCallCompaction();
}

I
Igor Canadi 已提交
1607 1608
Status DBImpl::BackgroundFlush(bool* madeProgress,
                               DeletionState& deletion_state) {
1609 1610 1611 1612
  Status stat;
  while (stat.ok() &&
         imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log,
1613
        "BackgroundCallFlush doing FlushMemTableToOutputFile, flush slots available %d",
1614
        options_.max_background_flushes - bg_flush_scheduled_);
I
Igor Canadi 已提交
1615
    stat = FlushMemTableToOutputFile(madeProgress, deletion_state);
J
jorlow@chromium.org 已提交
1616
  }
1617
  return stat;
J
jorlow@chromium.org 已提交
1618 1619
}

1620
void DBImpl::BackgroundCallFlush() {
1621
  bool madeProgress = false;
I
Igor Canadi 已提交
1622
  DeletionState deletion_state;
1623 1624 1625
  assert(bg_flush_scheduled_);
  MutexLock l(&mutex_);

I
Igor Canadi 已提交
1626
  Status s;
1627
  if (!shutting_down_.Acquire_Load()) {
I
Igor Canadi 已提交
1628
    s = BackgroundFlush(&madeProgress, deletion_state);
1629 1630 1631 1632 1633 1634 1635 1636 1637
    if (!s.ok()) {
      // Wait a little bit before retrying background compaction in
      // case this is an environmental problem and we do not want to
      // chew up resources for failed compactions for the duration of
      // the problem.
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      Log(options_.info_log, "Waiting after background flush error: %s",
          s.ToString().c_str());
      mutex_.Unlock();
I
Igor Canadi 已提交
1638
      LogFlush(options_.info_log);
1639 1640 1641 1642 1643
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
    }
  }

I
Igor Canadi 已提交
1644 1645 1646
  // If !s.ok(), this means that Flush failed. In that case, we want
  // to delete all obsolete files and we force FindObsoleteFiles()
  FindObsoleteFiles(deletion_state, !s.ok());
I
Igor Canadi 已提交
1647
  // delete unnecessary files if any, this is done outside the mutex
I
Igor Canadi 已提交
1648 1649 1650 1651 1652 1653
  if (deletion_state.HaveSomethingToDelete()) {
    mutex_.Unlock();
    PurgeObsoleteFiles(deletion_state);
    mutex_.Lock();
  }

1654
  bg_flush_scheduled_--;
1655 1656 1657
  if (madeProgress) {
    MaybeScheduleFlushOrCompaction();
  }
1658
  bg_cv_.SignalAll();
J
jorlow@chromium.org 已提交
1659 1660
}

1661

1662 1663 1664 1665
void DBImpl::TEST_PurgeObsoleteteWAL() {
  PurgeObsoleteWALFiles();
}

1666
void DBImpl::BackgroundCallCompaction() {
1667
  bool madeProgress = false;
D
Dhruba Borthakur 已提交
1668
  DeletionState deletion_state;
H
Haobo Xu 已提交
1669 1670 1671

  MaybeDumpStats();

J
jorlow@chromium.org 已提交
1672
  MutexLock l(&mutex_);
1673
  // Log(options_.info_log, "XXX BG Thread %llx process new work item", pthread_self());
J
jorlow@chromium.org 已提交
1674
  assert(bg_compaction_scheduled_);
I
Igor Canadi 已提交
1675
  Status s;
H
hans@chromium.org 已提交
1676
  if (!shutting_down_.Acquire_Load()) {
I
Igor Canadi 已提交
1677
    s = BackgroundCompaction(&madeProgress, deletion_state);
1678 1679 1680 1681 1682 1683 1684 1685 1686
    if (!s.ok()) {
      // Wait a little bit before retrying background compaction in
      // case this is an environmental problem and we do not want to
      // chew up resources for failed compactions for the duration of
      // the problem.
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      Log(options_.info_log, "Waiting after background compaction error: %s",
          s.ToString().c_str());
      mutex_.Unlock();
I
Igor Canadi 已提交
1687
      LogFlush(options_.info_log);
1688 1689 1690
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
    }
J
jorlow@chromium.org 已提交
1691
  }
1692

I
Igor Canadi 已提交
1693 1694 1695 1696 1697
  // If !s.ok(), this means that Compaction failed. In that case, we want
  // to delete all obsolete files we might have created and we force
  // FindObsoleteFiles(). This is because deletion_state does not catch
  // all created files if compaction failed.
  FindObsoleteFiles(deletion_state, !s.ok());
I
Igor Canadi 已提交
1698
  // delete unnecessary files if any, this is done outside the mutex
I
Igor Canadi 已提交
1699
  if (deletion_state.HaveSomethingToDelete()) {
D
Dhruba Borthakur 已提交
1700 1701
    mutex_.Unlock();
    PurgeObsoleteFiles(deletion_state);
1702
    mutex_.Lock();
D
Dhruba Borthakur 已提交
1703 1704
  }

1705
  bg_compaction_scheduled_--;
J
jorlow@chromium.org 已提交
1706

1707 1708
  MaybeScheduleLogDBDeployStats();

J
jorlow@chromium.org 已提交
1709
  // Previous compaction may have produced too many files in a level,
A
Abhishek Kona 已提交
1710
  // So reschedule another compaction if we made progress in the
1711 1712
  // last compaction.
  if (madeProgress) {
1713
    MaybeScheduleFlushOrCompaction();
1714
  }
H
hans@chromium.org 已提交
1715
  bg_cv_.SignalAll();
1716

J
jorlow@chromium.org 已提交
1717 1718
}

A
Abhishek Kona 已提交
1719
Status DBImpl::BackgroundCompaction(bool* madeProgress,
1720
  DeletionState& deletion_state) {
1721
  *madeProgress = false;
J
jorlow@chromium.org 已提交
1722
  mutex_.AssertHeld();
1723

1724
  // TODO: remove memtable flush from formal compaction
1725
  while (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
A
Abhishek Kona 已提交
1726
    Log(options_.info_log,
1727 1728
        "BackgroundCompaction doing FlushMemTableToOutputFile, compaction slots "
        "available %d",
1729
        options_.max_background_compactions - bg_compaction_scheduled_);
I
Igor Canadi 已提交
1730
    Status stat = FlushMemTableToOutputFile(madeProgress, deletion_state);
1731 1732 1733
    if (!stat.ok()) {
      return stat;
    }
1734 1735
  }

1736
  unique_ptr<Compaction> c;
1737
  bool is_manual = (manual_compaction_ != nullptr) &&
1738
                   (manual_compaction_->in_progress == false);
G
Gabor Cselle 已提交
1739
  InternalKey manual_end;
H
hans@chromium.org 已提交
1740
  if (is_manual) {
G
Gabor Cselle 已提交
1741
    ManualCompaction* m = manual_compaction_;
1742 1743
    assert(!m->in_progress);
    m->in_progress = true; // another thread cannot pick up the same work
1744 1745
    c.reset(versions_->CompactRange(m->level, m->begin, m->end));
    if (c) {
G
Gabor Cselle 已提交
1746
      manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
1747 1748
    } else {
      m->done = true;
G
Gabor Cselle 已提交
1749 1750 1751
    }
    Log(options_.info_log,
        "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
H
hans@chromium.org 已提交
1752
        m->level,
G
Gabor Cselle 已提交
1753 1754 1755
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        (m->done ? "(end)" : manual_end.DebugString().c_str()));
1756
  } else if (!options_.disable_auto_compactions) {
1757
    c.reset(versions_->PickCompaction());
J
jorlow@chromium.org 已提交
1758 1759 1760
  }

  Status status;
1761
  if (!c) {
H
hans@chromium.org 已提交
1762
    // Nothing to do
1763
    Log(options_.info_log, "Compaction nothing to do");
H
hans@chromium.org 已提交
1764
  } else if (!is_manual && c->IsTrivialMove()) {
J
jorlow@chromium.org 已提交
1765
    // Move file to next level
1766
    assert(c->num_input_files(0) == 1);
J
jorlow@chromium.org 已提交
1767 1768 1769
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
1770 1771
                       f->smallest, f->largest,
                       f->smallest_seqno, f->largest_seqno);
1772
    status = versions_->LogAndApply(c->edit(), &mutex_);
H
hans@chromium.org 已提交
1773
    VersionSet::LevelSummaryStorage tmp;
1774
    Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
J
jorlow@chromium.org 已提交
1775 1776 1777
        static_cast<unsigned long long>(f->number),
        c->level() + 1,
        static_cast<unsigned long long>(f->file_size),
H
hans@chromium.org 已提交
1778 1779
        status.ToString().c_str(),
        versions_->LevelSummary(&tmp));
1780
    versions_->ReleaseCompactionFiles(c.get(), status);
1781
    *madeProgress = true;
J
jorlow@chromium.org 已提交
1782
  } else {
1783
    MaybeScheduleFlushOrCompaction(); // do more compaction work in parallel.
1784
    CompactionState* compact = new CompactionState(c.get());
I
Igor Canadi 已提交
1785
    status = DoCompactionWork(compact, deletion_state);
1786
    CleanupCompaction(compact, status);
1787
    versions_->ReleaseCompactionFiles(c.get(), status);
1788
    c->ReleaseInputs();
1789
    versions_->GetObsoleteFiles(&deletion_state.sst_delete_files);
1790
    *madeProgress = true;
J
jorlow@chromium.org 已提交
1791
  }
1792
  c.reset();
J
jorlow@chromium.org 已提交
1793 1794 1795 1796 1797 1798

  if (status.ok()) {
    // Done
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutting down
  } else {
1799
    Log(options_.info_log,
J
jorlow@chromium.org 已提交
1800 1801 1802 1803 1804
        "Compaction error: %s", status.ToString().c_str());
    if (options_.paranoid_checks && bg_error_.ok()) {
      bg_error_ = status;
    }
  }
H
hans@chromium.org 已提交
1805 1806

  if (is_manual) {
G
Gabor Cselle 已提交
1807
    ManualCompaction* m = manual_compaction_;
1808 1809 1810
    if (!status.ok()) {
      m->done = true;
    }
1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822
    // For universal compaction:
    //   Because universal compaction always happens at level 0, so one
    //   compaction will pick up all overlapped files. No files will be
    //   filtered out due to size limit and left for a successive compaction.
    //   So we can safely conclude the current compaction.
    //
    //   Also note that, if we don't stop here, then the current compaction
    //   writes a new file back to level 0, which will be used in successive
    //   compaction. Hence the manual compaction will never finish.
    if (options_.compaction_style == kCompactionStyleUniversal) {
      m->done = true;
    }
G
Gabor Cselle 已提交
1823 1824 1825 1826 1827 1828
    if (!m->done) {
      // We only compacted part of the requested range.  Update *m
      // to the range that is left to be compacted.
      m->tmp_storage = manual_end;
      m->begin = &m->tmp_storage;
    }
1829
    m->in_progress = false; // not being processed anymore
1830
    manual_compaction_ = nullptr;
H
hans@chromium.org 已提交
1831
  }
1832
  return status;
J
jorlow@chromium.org 已提交
1833 1834
}

1835
void DBImpl::CleanupCompaction(CompactionState* compact, Status status) {
J
jorlow@chromium.org 已提交
1836
  mutex_.AssertHeld();
1837
  if (compact->builder != nullptr) {
J
jorlow@chromium.org 已提交
1838 1839
    // May happen if we get a shutdown call in the middle of compaction
    compact->builder->Abandon();
1840
    compact->builder.reset();
J
jorlow@chromium.org 已提交
1841
  } else {
1842
    assert(compact->outfile == nullptr);
J
jorlow@chromium.org 已提交
1843
  }
D
dgrogan@chromium.org 已提交
1844
  for (size_t i = 0; i < compact->outputs.size(); i++) {
J
jorlow@chromium.org 已提交
1845 1846
    const CompactionState::Output& out = compact->outputs[i];
    pending_outputs_.erase(out.number);
1847 1848 1849 1850 1851 1852

    // If this file was inserted into the table cache then remove
    // them here because this compaction was not committed.
    if (!status.ok()) {
      table_cache_->Evict(out.number);
    }
J
jorlow@chromium.org 已提交
1853 1854 1855 1856
  }
  delete compact;
}

1857 1858 1859 1860 1861
// Allocate the file numbers for the output file. We allocate as
// many output file numbers as there are files in level+1.
// Insert them into pending_outputs so that they do not get deleted.
void DBImpl::AllocateCompactionOutputFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
1862 1863
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
1864
  int filesNeeded = compact->compaction->num_input_files(1);
1865
  for (int i = 0; i < filesNeeded; i++) {
1866 1867 1868 1869 1870 1871 1872 1873 1874
    uint64_t file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    compact->allocated_file_numbers.push_back(file_number);
  }
}

// Frees up unused file number.
void DBImpl::ReleaseCompactionUnusedFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
1875
  for (const auto file_number : compact->allocated_file_numbers) {
1876 1877 1878 1879 1880
    pending_outputs_.erase(file_number);
    // Log(options_.info_log, "XXX releasing unused file num %d", file_number);
  }
}

J
jorlow@chromium.org 已提交
1881
Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
1882 1883
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
J
jorlow@chromium.org 已提交
1884
  uint64_t file_number;
1885 1886 1887 1888 1889 1890 1891
  // If we have not yet exhausted the pre-allocated file numbers,
  // then use the one from the front. Otherwise, we have to acquire
  // the heavyweight lock and allocate a new file number.
  if (!compact->allocated_file_numbers.empty()) {
    file_number = compact->allocated_file_numbers.front();
    compact->allocated_file_numbers.pop_front();
  } else {
J
jorlow@chromium.org 已提交
1892 1893 1894 1895 1896
    mutex_.Lock();
    file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    mutex_.Unlock();
  }
1897 1898 1899 1900
  CompactionState::Output out;
  out.number = file_number;
  out.smallest.Clear();
  out.largest.Clear();
1901
  out.smallest_seqno = out.largest_seqno = 0;
1902
  compact->outputs.push_back(out);
J
jorlow@chromium.org 已提交
1903 1904 1905

  // Make the output file
  std::string fname = TableFileName(dbname_, file_number);
1906
  Status s = env_->NewWritableFile(fname, &compact->outfile, storage_options_);
1907

J
jorlow@chromium.org 已提交
1908
  if (s.ok()) {
1909 1910 1911
    // Over-estimate slightly so we don't end up just barely crossing
    // the threshold.
    compact->outfile->SetPreallocationBlockSize(
1912
      1.1 * versions_->MaxFileSizeForLevel(compact->compaction->output_level()));
1913

S
Siying Dong 已提交
1914 1915 1916 1917
    CompressionType compression_type = GetCompressionType(
        options_, compact->compaction->output_level(),
        compact->compaction->enable_compression());

S
Siying Dong 已提交
1918
    compact->builder.reset(
S
Siying Dong 已提交
1919
        GetTableBuilder(options_, compact->outfile.get(), compression_type));
J
jorlow@chromium.org 已提交
1920
  }
I
Igor Canadi 已提交
1921
  LogFlush(options_.info_log);
J
jorlow@chromium.org 已提交
1922 1923 1924 1925 1926
  return s;
}

Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                          Iterator* input) {
1927
  assert(compact != nullptr);
1928
  assert(compact->outfile);
1929
  assert(compact->builder != nullptr);
J
jorlow@chromium.org 已提交
1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  }
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
1945
  compact->builder.reset();
J
jorlow@chromium.org 已提交
1946 1947

  // Finish and check for file errors
1948
  if (s.ok() && !options_.disableDataSync) {
1949
    if (options_.use_fsync) {
1950
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
1951 1952
      s = compact->outfile->Fsync();
    } else {
1953
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
1954 1955
      s = compact->outfile->Sync();
    }
J
jorlow@chromium.org 已提交
1956 1957 1958 1959
  }
  if (s.ok()) {
    s = compact->outfile->Close();
  }
1960
  compact->outfile.reset();
J
jorlow@chromium.org 已提交
1961 1962 1963

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
J
jorlow@chromium.org 已提交
1964
    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
1965
                                               storage_options_,
J
jorlow@chromium.org 已提交
1966 1967
                                               output_number,
                                               current_bytes);
J
jorlow@chromium.org 已提交
1968 1969 1970
    s = iter->status();
    delete iter;
    if (s.ok()) {
1971
      Log(options_.info_log,
J
jorlow@chromium.org 已提交
1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983
          "Generated table #%llu: %lld keys, %lld bytes",
          (unsigned long long) output_number,
          (unsigned long long) current_entries,
          (unsigned long long) current_bytes);
    }
  }
  return s;
}


Status DBImpl::InstallCompactionResults(CompactionState* compact) {
  mutex_.AssertHeld();
1984 1985 1986 1987 1988

  // paranoia: verify that the files that we started with
  // still exist in the current version and in the same original level.
  // This ensures that a concurrent compaction did not erroneously
  // pick the same files to compact.
1989
  if (!versions_->VerifyCompactionFileConsistency(compact->compaction)) {
1990 1991 1992 1993 1994 1995 1996 1997
    Log(options_.info_log,  "Compaction %d@%d + %d@%d files aborted",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1);
    return Status::IOError("Compaction input files inconsistent");
  }

1998
  Log(options_.info_log,  "Compacted %d@%d + %d@%d files => %lld bytes",
J
jorlow@chromium.org 已提交
1999 2000 2001 2002 2003 2004 2005 2006 2007
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Add compaction outputs
  compact->compaction->AddInputDeletions(compact->compaction->edit());
  const int level = compact->compaction->level();
D
dgrogan@chromium.org 已提交
2008
  for (size_t i = 0; i < compact->outputs.size(); i++) {
J
jorlow@chromium.org 已提交
2009 2010
    const CompactionState::Output& out = compact->outputs[i];
    compact->compaction->edit()->AddFile(
2011 2012
        (options_.compaction_style == kCompactionStyleUniversal) ?
          level : level + 1,
2013 2014
        out.number, out.file_size, out.smallest, out.largest,
        out.smallest_seqno, out.largest_seqno);
J
jorlow@chromium.org 已提交
2015
  }
2016
  return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
J
jorlow@chromium.org 已提交
2017 2018
}

2019 2020 2021 2022 2023 2024 2025 2026
//
// Given a sequence number, return the sequence number of the
// earliest snapshot that this sequence number is visible in.
// The snapshots themselves are arranged in ascending order of
// sequence numbers.
// Employ a sequential search because the total number of
// snapshots are typically small.
inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
2027 2028
  SequenceNumber in, std::vector<SequenceNumber>& snapshots,
  SequenceNumber* prev_snapshot) {
2029
  SequenceNumber prev __attribute__((unused)) = 0;
2030 2031 2032
  for (const auto cur : snapshots) {
    assert(prev <= cur);
    if (cur >= in) {
2033
      *prev_snapshot = prev;
2034
      return cur;
2035
    }
2036 2037
    prev = cur; // assignment
    assert(prev);
2038 2039
  }
  Log(options_.info_log,
2040
      "Looking for seqid %llu but maxseqid is %llu", in,
2041 2042 2043 2044 2045
      snapshots[snapshots.size()-1]);
  assert(0);
  return 0;
}

I
Igor Canadi 已提交
2046 2047
Status DBImpl::DoCompactionWork(CompactionState* compact,
                                DeletionState& deletion_state) {
2048
  assert(compact);
2049
  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
A
Abhishek Kona 已提交
2050
  Log(options_.info_log,
2051
      "Compacting %d@%d + %d@%d files, score %.2f slots available %d",
J
jorlow@chromium.org 已提交
2052 2053 2054
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
2055
      compact->compaction->level() + 1,
2056
      compact->compaction->score(),
2057
      options_.max_background_compactions - bg_compaction_scheduled_);
2058 2059
  char scratch[256];
  compact->compaction->Summary(scratch, sizeof(scratch));
H
heyongqiang 已提交
2060
  Log(options_.info_log, "Compaction start summary: %s\n", scratch);
J
jorlow@chromium.org 已提交
2061 2062

  assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
2063
  assert(compact->builder == nullptr);
2064
  assert(!compact->outfile);
2065 2066 2067

  SequenceNumber visible_at_tip = 0;
  SequenceNumber earliest_snapshot;
H
Haobo Xu 已提交
2068
  SequenceNumber latest_snapshot = 0;
2069 2070 2071 2072 2073
  snapshots_.getAll(compact->existing_snapshots);
  if (compact->existing_snapshots.size() == 0) {
    // optimize for fast path if there are no snapshots
    visible_at_tip = versions_->LastSequence();
    earliest_snapshot = visible_at_tip;
J
jorlow@chromium.org 已提交
2074
  } else {
H
Haobo Xu 已提交
2075
    latest_snapshot = compact->existing_snapshots.back();
2076 2077 2078 2079
    // Add the current seqno as the 'latest' virtual
    // snapshot to the end of this list.
    compact->existing_snapshots.push_back(versions_->LastSequence());
    earliest_snapshot = compact->existing_snapshots[0];
J
jorlow@chromium.org 已提交
2080 2081
  }

2082
  // Is this compaction producing files at the bottommost level?
2083
  bool bottommost_level = compact->compaction->BottomMostLevel();
2084

2085 2086 2087
  // Allocate the output file numbers before we release the lock
  AllocateCompactionOutputFileNumbers(compact);

J
jorlow@chromium.org 已提交
2088 2089 2090
  // Release mutex while we're actually doing the compaction work
  mutex_.Unlock();

2091
  const uint64_t start_micros = env_->NowMicros();
2092
  unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
J
jorlow@chromium.org 已提交
2093 2094 2095 2096 2097
  input->SeekToFirst();
  Status status;
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
2098 2099
  SequenceNumber last_sequence_for_key __attribute__((unused)) =
    kMaxSequenceNumber;
2100
  SequenceNumber visible_in_snapshot = kMaxSequenceNumber;
H
Haobo Xu 已提交
2101
  std::string compaction_filter_value;
H
Haobo Xu 已提交
2102
  std::vector<char> delete_key; // for compaction filter
2103
  MergeHelper merge(user_comparator(), options_.merge_operator.get(),
2104 2105
                    options_.info_log.get(),
                    false /* internal key corruption is expected */);
2106 2107 2108
  auto compaction_filter = options_.compaction_filter;
  std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr;
  if (!compaction_filter) {
2109 2110 2111
    auto context = compact->GetFilterContext();
    compaction_filter_from_factory =
      options_.compaction_filter_factory->CreateCompactionFilter(context);
2112 2113
    compaction_filter = compaction_filter_from_factory.get();
  }
2114

J
jorlow@chromium.org 已提交
2115
  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
2116
    // Prioritize immutable compaction work
2117
    // TODO: remove memtable flush from normal compaction work
2118
    if (imm_.imm_flush_needed.NoBarrier_Load() != nullptr) {
2119
      const uint64_t imm_start = env_->NowMicros();
I
Igor Canadi 已提交
2120
      LogFlush(options_.info_log);
2121
      mutex_.Lock();
2122
      if (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
I
Igor Canadi 已提交
2123
        FlushMemTableToOutputFile(nullptr, deletion_state);
H
hans@chromium.org 已提交
2124
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
2125 2126 2127 2128 2129
      }
      mutex_.Unlock();
      imm_micros += (env_->NowMicros() - imm_start);
    }

J
jorlow@chromium.org 已提交
2130
    Slice key = input->key();
2131
    Slice value = input->value();
H
Haobo Xu 已提交
2132

2133
    if (compact->compaction->ShouldStopBefore(key) &&
2134
        compact->builder != nullptr) {
2135
      status = FinishCompactionOutputFile(compact, input.get());
2136 2137 2138 2139 2140 2141
      if (!status.ok()) {
        break;
      }
    }

    // Handle key/value, add to state, etc.
J
jorlow@chromium.org 已提交
2142
    bool drop = false;
2143
    bool current_entry_is_merging = false;
J
jorlow@chromium.org 已提交
2144 2145
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
2146 2147
      // TODO: error key stays in db forever? Figure out the intention/rationale
      // v10 error v8 : we cannot hide v8 even though it's pretty obvious.
J
jorlow@chromium.org 已提交
2148 2149 2150
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
2151
      visible_in_snapshot = kMaxSequenceNumber;
J
jorlow@chromium.org 已提交
2152 2153 2154 2155 2156 2157 2158 2159
    } else {
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
                                     Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
2160
        visible_in_snapshot = kMaxSequenceNumber;
H
Haobo Xu 已提交
2161 2162

        // apply the compaction filter to the first occurrence of the user key
2163
        if (compaction_filter &&
H
Haobo Xu 已提交
2164 2165 2166 2167 2168 2169 2170 2171 2172 2173
            ikey.type == kTypeValue &&
            (visible_at_tip || ikey.sequence > latest_snapshot)) {
          // If the user has specified a compaction filter and the sequence
          // number is greater than any external snapshot, then invoke the
          // filter.
          // If the return value of the compaction filter is true, replace
          // the entry with a delete marker.
          bool value_changed = false;
          compaction_filter_value.clear();
          bool to_delete =
2174
            compaction_filter->Filter(compact->compaction->level(),
S
Siying Dong 已提交
2175 2176 2177
                                               ikey.user_key, value,
                                               &compaction_filter_value,
                                               &value_changed);
H
Haobo Xu 已提交
2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195
          if (to_delete) {
            // make a copy of the original key
            delete_key.assign(key.data(), key.data() + key.size());
            // convert it to a delete
            UpdateInternalKey(&delete_key[0], delete_key.size(),
                              ikey.sequence, kTypeDeletion);
            // anchor the key again
            key = Slice(&delete_key[0], delete_key.size());
            // needed because ikey is backed by key
            ParseInternalKey(key, &ikey);
            // no value associated with delete
            value.clear();
            RecordTick(options_.statistics, COMPACTION_KEY_DROP_USER);
          } else if (value_changed) {
            value = compaction_filter_value;
          }
        }

J
jorlow@chromium.org 已提交
2196 2197
      }

2198 2199 2200
      // If there are no snapshots, then this kv affect visibility at tip.
      // Otherwise, search though all existing snapshots to find
      // the earlist snapshot that is affected by this kv.
2201 2202 2203 2204 2205 2206
      SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot
      SequenceNumber visible = visible_at_tip ?
        visible_at_tip :
        findEarliestVisibleSnapshot(ikey.sequence,
                                    compact->existing_snapshots,
                                    &prev_snapshot);
2207 2208 2209 2210 2211

      if (visible_in_snapshot == visible) {
        // If the earliest snapshot is which this key is visible in
        // is the same as the visibily of a previous instance of the
        // same key, then this kv is not visible in any snapshot.
J
jorlow@chromium.org 已提交
2212
        // Hidden by an newer entry for same user key
2213
        // TODO: why not > ?
2214
        assert(last_sequence_for_key >= ikey.sequence);
J
jorlow@chromium.org 已提交
2215
        drop = true;    // (A)
2216
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_NEWER_ENTRY);
J
jorlow@chromium.org 已提交
2217
      } else if (ikey.type == kTypeDeletion &&
2218
                 ikey.sequence <= earliest_snapshot &&
J
jorlow@chromium.org 已提交
2219 2220 2221 2222 2223 2224 2225 2226 2227
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
2228
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_OBSOLETE);
2229 2230 2231 2232 2233 2234 2235
      } else if (ikey.type == kTypeMerge) {
        // We know the merge type entry is not hidden, otherwise we would
        // have hit (A)
        // We encapsulate the merge related state machine in a different
        // object to minimize change to the existing flow. Turn out this
        // logic could also be nicely re-used for memtable flush purge
        // optimization in BuildTable.
M
Mayank Agarwal 已提交
2236 2237
        merge.MergeUntil(input.get(), prev_snapshot, bottommost_level,
                         options_.statistics);
2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255
        current_entry_is_merging = true;
        if (merge.IsSuccess()) {
          // Successfully found Put/Delete/(end-of-key-range) while merging
          // Get the merge result
          key = merge.key();
          ParseInternalKey(key, &ikey);
          value = merge.value();
        } else {
          // Did not find a Put/Delete/(end-of-key-range) while merging
          // We now have some stack of merge operands to write out.
          // NOTE: key,value, and ikey are now referring to old entries.
          //       These will be correctly set below.
          assert(!merge.keys().empty());
          assert(merge.keys().size() == merge.values().size());

          // Hack to make sure last_sequence_for_key is correct
          ParseInternalKey(merge.keys().front(), &ikey);
        }
J
jorlow@chromium.org 已提交
2256 2257 2258
      }

      last_sequence_for_key = ikey.sequence;
2259
      visible_in_snapshot = visible;
J
jorlow@chromium.org 已提交
2260 2261
    }
#if 0
2262
    Log(options_.info_log,
J
jorlow@chromium.org 已提交
2263
        "  Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
2264
        "%d smallest_snapshot: %d level: %d bottommost %d",
J
jorlow@chromium.org 已提交
2265
        ikey.user_key.ToString().c_str(),
D
dgrogan@chromium.org 已提交
2266
        (int)ikey.sequence, ikey.type, kTypeValue, drop,
J
jorlow@chromium.org 已提交
2267
        compact->compaction->IsBaseLevelForKey(ikey.user_key),
2268 2269
        (int)last_sequence_for_key, (int)earliest_snapshot,
        compact->compaction->level(), bottommost_level);
J
jorlow@chromium.org 已提交
2270 2271 2272
#endif

    if (!drop) {
2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288
      // We may write a single key (e.g.: for Put/Delete or successful merge).
      // Or we may instead have to write a sequence/list of keys.
      // We have to write a sequence iff we have an unsuccessful merge
      bool has_merge_list = current_entry_is_merging && !merge.IsSuccess();
      const std::deque<std::string>* keys = nullptr;
      const std::deque<std::string>* values = nullptr;
      std::deque<std::string>::const_reverse_iterator key_iter;
      std::deque<std::string>::const_reverse_iterator value_iter;
      if (has_merge_list) {
        keys = &merge.keys();
        values = &merge.values();
        key_iter = keys->rbegin();    // The back (*rbegin()) is the first key
        value_iter = values->rbegin();

        key = Slice(*key_iter);
        value = Slice(*value_iter);
2289
      }
2290

2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301
      // If we have a list of keys to write, traverse the list.
      // If we have a single key to write, simply write that key.
      while (true) {
        // Invariant: key,value,ikey will always be the next entry to write
        char* kptr = (char*)key.data();
        std::string kstr;

        // Zeroing out the sequence number leads to better compression.
        // If this is the bottommost level (no files in lower levels)
        // and the earliest snapshot is larger than this seqno
        // then we can squash the seqno to zero.
2302 2303
        if (options_.compaction_style == kCompactionStyleLevel &&
            bottommost_level && ikey.sequence < earliest_snapshot &&
2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314
            ikey.type != kTypeMerge) {
          assert(ikey.type != kTypeDeletion);
          // make a copy because updating in place would cause problems
          // with the priority queue that is managing the input key iterator
          kstr.assign(key.data(), key.size());
          kptr = (char *)kstr.c_str();
          UpdateInternalKey(kptr, key.size(), (uint64_t)0, ikey.type);
        }

        Slice newkey(kptr, key.size());
        assert((key.clear(), 1)); // we do not need 'key' anymore
2315

2316 2317 2318 2319 2320 2321 2322
        // Open output file if necessary
        if (compact->builder == nullptr) {
          status = OpenCompactionOutputFile(compact);
          if (!status.ok()) {
            break;
          }
        }
2323 2324

        SequenceNumber seqno = GetInternalKeySeqno(newkey);
2325 2326
        if (compact->builder->NumEntries() == 0) {
          compact->current_output()->smallest.DecodeFrom(newkey);
2327 2328 2329 2330
          compact->current_output()->smallest_seqno = seqno;
        } else {
          compact->current_output()->smallest_seqno =
            std::min(compact->current_output()->smallest_seqno, seqno);
2331 2332 2333
        }
        compact->current_output()->largest.DecodeFrom(newkey);
        compact->builder->Add(newkey, value);
2334 2335
        compact->current_output()->largest_seqno =
          std::max(compact->current_output()->largest_seqno, seqno);
2336 2337 2338 2339 2340 2341 2342 2343

        // Close output file if it is big enough
        if (compact->builder->FileSize() >=
            compact->compaction->MaxOutputFileSize()) {
          status = FinishCompactionOutputFile(compact, input.get());
          if (!status.ok()) {
            break;
          }
J
jorlow@chromium.org 已提交
2344 2345
        }

2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365
        // If we have a list of entries, move to next element
        // If we only had one entry, then break the loop.
        if (has_merge_list) {
          ++key_iter;
          ++value_iter;

          // If at end of list
          if (key_iter == keys->rend() || value_iter == values->rend()) {
            // Sanity Check: if one ends, then both end
            assert(key_iter == keys->rend() && value_iter == values->rend());
            break;
          }

          // Otherwise not at end of list. Update key, value, and ikey.
          key = Slice(*key_iter);
          value = Slice(*value_iter);
          ParseInternalKey(key, &ikey);

        } else{
          // Only had one item to begin with (Put/Delete)
J
jorlow@chromium.org 已提交
2366 2367 2368 2369 2370
          break;
        }
      }
    }

2371
    // MergeUntil has moved input to the next entry
2372
    if (!current_entry_is_merging) {
2373 2374
      input->Next();
    }
J
jorlow@chromium.org 已提交
2375 2376 2377
  }

  if (status.ok() && shutting_down_.Acquire_Load()) {
2378
    status = Status::IOError("Database shutdown started during compaction");
J
jorlow@chromium.org 已提交
2379
  }
2380
  if (status.ok() && compact->builder != nullptr) {
2381
    status = FinishCompactionOutputFile(compact, input.get());
J
jorlow@chromium.org 已提交
2382 2383 2384 2385
  }
  if (status.ok()) {
    status = input->status();
  }
2386
  input.reset();
J
jorlow@chromium.org 已提交
2387

2388 2389
  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
A
Abhishek Kona 已提交
2390 2391 2392
  if (options_.statistics) {
    options_.statistics->measureTime(COMPACTION_TIME, stats.micros);
  }
M
Mark Callaghan 已提交
2393 2394
  stats.files_in_leveln = compact->compaction->num_input_files(0);
  stats.files_in_levelnp1 = compact->compaction->num_input_files(1);
2395 2396

  int num_output_files = compact->outputs.size();
2397
  if (compact->builder != nullptr) {
2398 2399 2400 2401 2402
    // An error occured so ignore the last output.
    assert(num_output_files > 0);
    --num_output_files;
  }
  stats.files_out_levelnp1 = num_output_files;
M
Mark Callaghan 已提交
2403 2404 2405 2406 2407 2408 2409

  for (int i = 0; i < compact->compaction->num_input_files(0); i++)
    stats.bytes_readn += compact->compaction->input(0, i)->file_size;

  for (int i = 0; i < compact->compaction->num_input_files(1); i++)
    stats.bytes_readnp1 += compact->compaction->input(1, i)->file_size;

2410
  for (int i = 0; i < num_output_files; i++) {
2411 2412 2413
    stats.bytes_written += compact->outputs[i].file_size;
  }

I
Igor Canadi 已提交
2414
  LogFlush(options_.info_log);
J
jorlow@chromium.org 已提交
2415
  mutex_.Lock();
2416
  stats_[compact->compaction->output_level()].Add(stats);
J
jorlow@chromium.org 已提交
2417

2418 2419 2420 2421
  // if there were any unused file number (mostly in case of
  // compaction error), free up the entry from pending_putputs
  ReleaseCompactionUnusedFileNumbers(compact);

J
jorlow@chromium.org 已提交
2422 2423 2424
  if (status.ok()) {
    status = InstallCompactionResults(compact);
  }
2425
  VersionSet::LevelSummaryStorage tmp;
2426
  Log(options_.info_log,
M
Mark Callaghan 已提交
2427
      "compacted to: %s, %.1f MB/sec, level %d, files in(%d, %d) out(%d) "
2428 2429
      "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
      "write-amplify(%.1f) %s\n",
M
Mark Callaghan 已提交
2430 2431 2432
      versions_->LevelSummary(&tmp),
      (stats.bytes_readn + stats.bytes_readnp1 + stats.bytes_written) /
          (double) stats.micros,
2433
      compact->compaction->output_level(),
M
Mark Callaghan 已提交
2434 2435 2436 2437
      stats.files_in_leveln, stats.files_in_levelnp1, stats.files_out_levelnp1,
      stats.bytes_readn / 1048576.0,
      stats.bytes_readnp1 / 1048576.0,
      stats.bytes_written / 1048576.0,
2438
      (stats.bytes_written + stats.bytes_readnp1 + stats.bytes_readn) /
2439
          (double) stats.bytes_readn,
2440
      stats.bytes_written / (double) stats.bytes_readn,
2441
      status.ToString().c_str());
M
Mark Callaghan 已提交
2442

J
jorlow@chromium.org 已提交
2443 2444 2445
  return status;
}

2446 2447 2448 2449
namespace {
struct IterState {
  port::Mutex* mu;
  Version* version;
2450
  std::vector<MemTable*> mem; // includes both mem_ and imm_
2451 2452 2453 2454 2455
};

static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  state->mu->Lock();
2456 2457 2458
  for (unsigned int i = 0; i < state->mem.size(); i++) {
    state->mem[i]->Unref();
  }
2459 2460 2461 2462
  state->version->Unref();
  state->mu->Unlock();
  delete state;
}
H
Hans Wennborg 已提交
2463
}  // namespace
2464

J
jorlow@chromium.org 已提交
2465 2466
Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot) {
2467
  IterState* cleanup = new IterState;
J
jorlow@chromium.org 已提交
2468
  mutex_.Lock();
2469
  *latest_snapshot = versions_->LastSequence();
J
jorlow@chromium.org 已提交
2470

2471
  // Collect together all needed child iterators for mem
J
jorlow@chromium.org 已提交
2472
  std::vector<Iterator*> list;
2473
  mem_->Ref();
2474
  list.push_back(mem_->NewIterator(options));
J
Jim Paton 已提交
2475

2476 2477 2478 2479 2480 2481 2482 2483
  cleanup->mem.push_back(mem_);

  // Collect together all needed child iterators for imm_
  std::vector<MemTable*> immutables;
  imm_.GetMemTables(&immutables);
  for (unsigned int i = 0; i < immutables.size(); i++) {
    MemTable* m = immutables[i];
    m->Ref();
2484
    list.push_back(m->NewIterator(options));
2485
    cleanup->mem.push_back(m);
2486
  }
2487 2488

  // Collect iterators for files in L0 - Ln
2489
  versions_->current()->AddIterators(options, storage_options_, &list);
J
jorlow@chromium.org 已提交
2490 2491 2492
  Iterator* internal_iter =
      NewMergingIterator(&internal_comparator_, &list[0], list.size());
  versions_->current()->Ref();
2493 2494 2495

  cleanup->mu = &mutex_;
  cleanup->version = versions_->current();
2496
  internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);
J
jorlow@chromium.org 已提交
2497 2498

  mutex_.Unlock();
I
Igor Canadi 已提交
2499
  LogFlush(options_.info_log);
J
jorlow@chromium.org 已提交
2500 2501 2502 2503 2504 2505 2506 2507
  return internal_iter;
}

Iterator* DBImpl::TEST_NewInternalIterator() {
  SequenceNumber ignored;
  return NewInternalIterator(ReadOptions(), &ignored);
}

J
jorlow@chromium.org 已提交
2508
int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
2509 2510 2511 2512
  MutexLock l(&mutex_);
  return versions_->MaxNextLevelOverlappingBytes();
}

J
jorlow@chromium.org 已提交
2513 2514 2515
Status DBImpl::Get(const ReadOptions& options,
                   const Slice& key,
                   std::string* value) {
2516 2517 2518 2519 2520 2521
  return GetImpl(options, key, value);
}

Status DBImpl::GetImpl(const ReadOptions& options,
                       const Slice& key,
                       std::string* value,
2522
                       bool* value_found) {
2523
  Status s;
2524

2525
  StopWatch sw(env_, options_.statistics, DB_GET);
2526
  SequenceNumber snapshot;
2527
  mutex_.Lock();
2528
  if (options.snapshot != nullptr) {
2529 2530 2531
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
J
jorlow@chromium.org 已提交
2532
  }
2533

2534
  MemTable* mem = mem_;
2535
  MemTableList imm = imm_;
2536
  Version* current = versions_->current();
2537
  mem->Ref();
2538
  imm.RefAll();
2539
  current->Ref();
2540

2541 2542
  // Unlock while reading from files and memtables
  mutex_.Unlock();
2543
  bool have_stat_update = false;
2544
  Version::GetStats stats;
2545

2546 2547 2548 2549

  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

2550
  // First look in the memtable, then in the immutable memtable (if any).
2551
  // s is both in/out. When in, s could either be OK or MergeInProgress.
2552
  // merge_operands will contain the sequence of merges in the latter case.
2553
  LookupKey lkey(key, snapshot);
2554
  if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
2555
    // Done
2556
  } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
2557 2558
    // Done
  } else {
2559
    current->Get(options, lkey, value, &s, &merge_operands, &stats,
2560
                 options_, value_found);
2561
    have_stat_update = true;
2562
  }
2563
  mutex_.Lock();
2564

2565 2566
  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
2567
    MaybeScheduleFlushOrCompaction();
2568
  }
2569
  mem->Unref();
2570
  imm.UnrefAll();
2571
  current->Unref();
2572 2573
  mutex_.Unlock();

I
Igor Canadi 已提交
2574
  LogFlush(options_.info_log);
2575
  // Note, tickers are atomic now - no lock protection needed any more.
2576
  RecordTick(options_.statistics, NUMBER_KEYS_READ);
2577
  RecordTick(options_.statistics, BYTES_READ, value->size());
2578
  return s;
J
jorlow@chromium.org 已提交
2579 2580
}

2581 2582 2583 2584 2585 2586
std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                     const std::vector<Slice>& keys,
                                     std::vector<std::string>* values) {

  StopWatch sw(env_, options_.statistics, DB_MULTIGET);
  SequenceNumber snapshot;
2587
  mutex_.Lock();
2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606
  if (options.snapshot != nullptr) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTableList imm = imm_;
  Version* current = versions_->current();
  mem->Ref();
  imm.RefAll();
  current->Ref();

  // Unlock while reading from files and memtables

  mutex_.Unlock();
  bool have_stat_update = false;
  Version::GetStats stats;

2607 2608 2609
  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620
  // Note: this always resizes the values array
  int numKeys = keys.size();
  std::vector<Status> statList(numKeys);
  values->resize(numKeys);

  // Keep track of bytes that we read for statistics-recording later
  uint64_t bytesRead = 0;

  // For each of the given keys, apply the entire "get" process as follows:
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
2621 2622 2623
  // merge_operands will contain the sequence of merges in the latter case.
  for (int i=0; i<numKeys; ++i) {
    merge_operands.clear();
2624 2625 2626 2627
    Status& s = statList[i];
    std::string* value = &(*values)[i];

    LookupKey lkey(keys[i], snapshot);
2628
    if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
2629
      // Done
2630
    } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
2631 2632
      // Done
    } else {
2633
      current->Get(options, lkey, value, &s, &merge_operands, &stats, options_);
2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645
      have_stat_update = true;
    }

    if (s.ok()) {
      bytesRead += value->size();
    }
  }

  // Post processing (decrement reference counts and record statistics)
  mutex_.Lock();
  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
2646
    MaybeScheduleFlushOrCompaction();
2647 2648 2649 2650
  }
  mem->Unref();
  imm.UnrefAll();
  current->Unref();
2651 2652
  mutex_.Unlock();

I
Igor Canadi 已提交
2653
  LogFlush(options_.info_log);
2654 2655 2656 2657 2658 2659 2660
  RecordTick(options_.statistics, NUMBER_MULTIGET_CALLS);
  RecordTick(options_.statistics, NUMBER_MULTIGET_KEYS_READ, numKeys);
  RecordTick(options_.statistics, NUMBER_MULTIGET_BYTES_READ, bytesRead);

  return statList;
}

2661 2662 2663 2664 2665 2666 2667
bool DBImpl::KeyMayExist(const ReadOptions& options,
                         const Slice& key,
                         std::string* value,
                         bool* value_found) {
  if (value_found != nullptr) {
    *value_found = true; // falsify later if key-may-exist but can't fetch value
  }
2668 2669 2670
  ReadOptions roptions = options;
  roptions.read_tier = kBlockCacheTier; // read from block cache only
  return GetImpl(roptions, key, value, value_found).ok();
2671 2672
}

Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  SequenceNumber latest_snapshot;
  Iterator* iter = NewInternalIterator(options, &latest_snapshot);
  iter = NewDBIterator(
             &dbname_, env_, options_, user_comparator(), iter,
             (options.snapshot != nullptr
              ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
              : latest_snapshot));
  if (options.prefix) {
    // use extra wrapper to exclude any keys from the results which
    // don't begin with the prefix
    iter = new PrefixFilterIterator(iter, *options.prefix,
                                    options_.prefix_extractor);
  }
  return iter;
}

const Snapshot* DBImpl::GetSnapshot() {
  MutexLock l(&mutex_);
  return snapshots_.New(versions_->LastSequence());
}

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
}

// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);
}

Status DBImpl::Merge(const WriteOptions& o, const Slice& key,
                     const Slice& val) {
  if (!options_.merge_operator) {
    return Status::NotSupported("Provide a merge_operator when opening DB");
  } else {
    return DB::Merge(o, key, val);
  }
}

Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
}

Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
  Writer w(&mutex_);
  w.batch = my_batch;
  w.sync = options.sync;
  w.disableWAL = options.disableWAL;
  w.done = false;

  StopWatch sw(env_, options_.statistics, DB_WRITE);
  MutexLock l(&mutex_);
  writers_.push_back(&w);
  while (!w.done && &w != writers_.front()) {
    w.cv.Wait();
  }
  if (w.done) {
    return w.status;
  }

  // May temporarily unlock and wait.
  Status status = MakeRoomForWrite(my_batch == nullptr);
  uint64_t last_sequence = versions_->LastSequence();
  Writer* last_writer = &w;
  if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
    // TODO: BuildBatchGroup physically concatenates/copies all write batches
    // into a new one. Mem copy is done with the lock held. Ideally, we only
    // need the lock to obtain the last_writer and the references to all
    // batches. Creation (copy) of the merged batch could have been done
    // outside of the lock-protected region.
    WriteBatch* updates = BuildBatchGroup(&last_writer);

    // Add to log and apply to memtable.  We can release the lock
    // during this phase since &w is currently responsible for logging
    // and protects against concurrent loggers and concurrent writes
    // into mem_.
    {
      mutex_.Unlock();
      const SequenceNumber current_sequence = last_sequence + 1;
      WriteBatchInternal::SetSequence(updates, current_sequence);
      int my_batch_count = WriteBatchInternal::Count(updates);
      last_sequence += my_batch_count;
      // Record statistics
      RecordTick(options_.statistics, NUMBER_KEYS_WRITTEN, my_batch_count);
      RecordTick(options_.statistics,
                 BYTES_WRITTEN,
                 WriteBatchInternal::ByteSize(updates));
      if (options.disableWAL) {
        flush_on_destroy_ = true;
      }

      if (!options.disableWAL) {
        StopWatchNano timer(env_);
        StartPerfTimer(&timer);
        status = log_->AddRecord(WriteBatchInternal::Contents(updates));
        BumpPerfTime(&perf_context.wal_write_time, &timer);
        if (status.ok() && options.sync) {
          if (options_.use_fsync) {
            // Named stop watch so it stays alive across the Fsync call and
            // records the elapsed time under WAL_FILE_SYNC_MICROS.
            StopWatch sync_sw(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
            status = log_->file()->Fsync();
          } else {
            StopWatch sync_sw(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
            status = log_->file()->Sync();
          }
        }
      }
      if (status.ok()) {
        status = WriteBatchInternal::InsertInto(updates, mem_, &options_, this,
                                                options_.filter_deletes);
        if (!status.ok()) {
          // Panic for in-memory corruptions
          // Note that existing logic was not sound. Any partial failure writing
          // into the memtable would result in a state that some write ops might
          // have succeeded in memtable but Status reports error for all writes.
          throw std::runtime_error("In memory WriteBatch corruption!");
        }
        SetTickerCount(options_.statistics, SEQUENCE_NUMBER, last_sequence);
      }
      LogFlush(options_.info_log);
      mutex_.Lock();
      if (status.ok()) {
        versions_->SetLastSequence(last_sequence);
      }
    }
    if (updates == &tmp_batch_) tmp_batch_.Clear();
  }
  if (options_.paranoid_checks && !status.ok() && bg_error_.ok()) {
    bg_error_ = status; // stop compaction & fail any further writes
  }

  while (true) {
    Writer* ready = writers_.front();
    writers_.pop_front();
    if (ready != &w) {
      ready->status = status;
      ready->done = true;
      ready->cv.Signal();
    }
    if (ready == last_writer) break;
  }

  // Notify new head of write queue
  if (!writers_.empty()) {
    writers_.front()->cv.Signal();
  }
  return status;
}

// REQUIRES: Writer list must be non-empty
// REQUIRES: First writer must have a non-nullptr batch
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
  assert(!writers_.empty());
  Writer* first = writers_.front();
  WriteBatch* result = first->batch;
  assert(result != nullptr);

  size_t size = WriteBatchInternal::ByteSize(first->batch);

  // Allow the group to grow up to a maximum size, but if the
  // original write is small, limit the growth so we do not slow
  // down the small write too much.
  size_t max_size = 1 << 20;
  if (size <= (128<<10)) {
    max_size = size + (128<<10);
  }
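  // Worked example (numbers follow directly from the constants above; added
  // purely for illustration): a 64KB leading batch caps the group at
  // 64KB + 128KB = 192KB, while any leading batch larger than 128KB leaves
  // the cap at the full 1MB (1 << 20) limit.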

  *last_writer = first;
  std::deque<Writer*>::iterator iter = writers_.begin();
  ++iter;  // Advance past "first"
  for (; iter != writers_.end(); ++iter) {
    Writer* w = *iter;
    if (w->sync && !first->sync) {
      // Do not include a sync write into a batch handled by a non-sync write.
      break;
    }

    if (!w->disableWAL && first->disableWAL) {
      // Do not include a write that needs WAL into a batch that has
      // WAL disabled.
      break;
    }

    if (w->batch != nullptr) {
      size += WriteBatchInternal::ByteSize(w->batch);
      if (size > max_size) {
        // Do not make batch too big
        break;
      }

      // Append to *result
      if (result == first->batch) {
        // Switch to temporary batch instead of disturbing caller's batch
        result = &tmp_batch_;
        assert(WriteBatchInternal::Count(result) == 0);
        WriteBatchInternal::Append(result, first->batch);
      }
      WriteBatchInternal::Append(result, w->batch);
    }
    *last_writer = w;
  }
  return result;
}

// This function computes the amount of time in microseconds by which a write
// should be delayed based on the number of level-0 files according to the
// following formula:
// if n < bottom, return 0;
// if n >= top, return 1000;
// otherwise, let r = (n - bottom) / (top - bottom)
// and return r^2 * 1000.
// The goal of this formula is to gradually increase the rate at which writes
// are slowed. We also tried linear delay (r * 1000), but it seemed to do
// slightly worse. There is no other particular reason for choosing quadratic.
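// Worked example (purely illustrative; the trigger values below are assumed
// for the sake of the arithmetic, not asserted by this file): with
// bottom = 8, top = 12 and n = 10 level-0 files,
// r = (10 - 8) / (12 - 8) = 0.5, so the delay is 0.5 * 0.5 * 1000 = 250
// microseconds per write.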
uint64_t DBImpl::SlowdownAmount(int n, int top, int bottom) {
  uint64_t delay;
  if (n >= top) {
    delay = 1000;
  }
  else if (n < bottom) {
    delay = 0;
  }
  else {
    // If we are here, we know that:
    //   bottom <= n < top
    // since the previous two conditions are false.
    float how_much =
      (float) (n - bottom) /
              (top - bottom);
    delay = how_much * how_much * 1000;
  }
  assert(delay <= 1000);
  return delay;
}

// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
Status DBImpl::MakeRoomForWrite(bool force) {
  mutex_.AssertHeld();
  assert(!writers_.empty());
  bool allow_delay = !force;
  bool allow_hard_rate_limit_delay = !force;
  bool allow_soft_rate_limit_delay = !force;
  uint64_t rate_limit_delay_millis = 0;
  Status s;
  double score;

  while (true) {
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
    } else if (
        allow_delay &&
        versions_->NumLevelFiles(0) >=
          options_.level0_slowdown_writes_trigger) {
      // We are getting close to hitting a hard limit on the number of
      // L0 files.  Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
      // individual write by 0-1ms to reduce latency variance.  Also,
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
      mutex_.Unlock();
      uint64_t delayed;
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_SLOWDOWN_COUNT);
        env_->SleepForMicroseconds(
          SlowdownAmount(versions_->NumLevelFiles(0),
                         options_.level0_slowdown_writes_trigger,
                         options_.level0_stop_writes_trigger)
        );
        delayed = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_L0_SLOWDOWN_MICROS, delayed);
      stall_level0_slowdown_ += delayed;
      stall_level0_slowdown_count_++;
      allow_delay = false;  // Do not delay a single write more than once
      //Log(options_.info_log,
      //    "delaying write %llu usecs for level0_slowdown_writes_trigger\n",
      //     (long long unsigned int)delayed);
      mutex_.Lock();
      delayed_writes_++;
    } else if (!force &&
               (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
      // There is room in current memtable
      if (allow_delay) {
        DelayLoggingAndReset();
      }
      break;
    } else if (imm_.size() == options_.max_write_buffer_number - 1) {
      // We have filled up the current memtable, but the previous
      // ones are still being compacted, so we wait.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for memtable compaction...\n");
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics,
          STALL_MEMTABLE_COMPACTION_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_MEMTABLE_COMPACTION_MICROS, stall);
      stall_memtable_compaction_ += stall;
      stall_memtable_compaction_count_++;
    } else if (versions_->NumLevelFiles(0) >=
               options_.level0_stop_writes_trigger) {
      // There are too many level-0 files.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for fewer level0 files...\n");
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_NUM_FILES_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_L0_NUM_FILES_MICROS, stall);
      stall_level0_num_files_ += stall;
      stall_level0_num_files_count_++;
    } else if (
        allow_hard_rate_limit_delay &&
        options_.hard_rate_limit > 1.0 &&
        (score = versions_->MaxCompactionScore()) > options_.hard_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      int max_level = versions_->MaxCompactionScoreLevel();
      mutex_.Unlock();
      uint64_t delayed;
      {
        StopWatch sw(env_, options_.statistics, HARD_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(1000);
        delayed = sw.ElapsedMicros();
      }
      stall_leveln_slowdown_[max_level] += delayed;
      stall_leveln_slowdown_count_[max_level]++;
      // Make sure the following value doesn't round to zero.
      uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
      rate_limit_delay_millis += rate_limit;
      RecordTick(options_.statistics, RATE_LIMIT_DELAY_MILLIS, rate_limit);
      if (options_.rate_limit_delay_max_milliseconds > 0 &&
          rate_limit_delay_millis >=
          (unsigned)options_.rate_limit_delay_max_milliseconds) {
        allow_hard_rate_limit_delay = false;
      }
      // Log(options_.info_log,
      //    "delaying write %llu usecs for rate limits with max score %.2f\n",
      //    (long long unsigned int)delayed, score);
      mutex_.Lock();
    } else if (
        allow_soft_rate_limit_delay &&
        options_.soft_rate_limit > 0.0 &&
        (score = versions_->MaxCompactionScore()) > options_.soft_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      // TODO: add statistics
      mutex_.Unlock();
      {
        StopWatch sw(env_, options_.statistics, SOFT_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(SlowdownAmount(
          score,
          options_.soft_rate_limit,
          options_.hard_rate_limit)
        );
        rate_limit_delay_millis += sw.ElapsedMicros();
      }
      allow_soft_rate_limit_delay = false;
      mutex_.Lock();
    } else {
      // Attempt to switch to a new memtable and trigger compaction of old
      DelayLoggingAndReset();
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      unique_ptr<WritableFile> lfile;
      EnvOptions soptions(storage_options_);
      soptions.use_mmap_writes = false;
      s = env_->NewWritableFile(
            LogFileName(options_.wal_dir, new_log_number),
            &lfile,
            soptions
          );
      if (!s.ok()) {
        // Avoid chewing through file number space in a tight loop.
        versions_->ReuseFileNumber(new_log_number);
        break;
      }
      // Our final size should be less than write_buffer_size
      // (compression, etc) but err on the side of caution.
      lfile->SetPreallocationBlockSize(1.1 * options_.write_buffer_size);
      logfile_number_ = new_log_number;
      log_.reset(new log::Writer(std::move(lfile)));
      mem_->SetNextLogNumber(logfile_number_);
      imm_.Add(mem_);
      if (force) {
        imm_.FlushRequested();
      }
      mem_ = new MemTable(
          internal_comparator_, mem_rep_factory_, NumberLevels(), options_);
      mem_->Ref();
      Log(options_.info_log,
          "New memtable created with log file: #%llu\n",
          (unsigned long long)logfile_number_);
      mem_->SetLogNumber(logfile_number_);
      force = false;   // Do not force another compaction if have room
      MaybeScheduleFlushOrCompaction();
    }
  }
  return s;
}
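
// A minimal usage sketch for GetProperty (illustrative only): property names
// are built from the "rocksdb." prefix plus one of the suffixes handled
// below, e.g.
//
//   std::string num_files;
//   db->GetProperty("rocksdb.num-files-at-level0", &num_files);
//   std::string stats;
//   db->GetProperty("rocksdb.stats", &stats);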

bool DBImpl::GetProperty(const Slice& property, std::string* value) {
  value->clear();

  MutexLock l(&mutex_);
  Slice in = property;
  Slice prefix("rocksdb.");
  if (!in.starts_with(prefix)) return false;
  in.remove_prefix(prefix.size());

  if (in.starts_with("num-files-at-level")) {
    in.remove_prefix(strlen("num-files-at-level"));
    uint64_t level;
    bool ok = ConsumeDecimalNumber(&in, &level) && in.empty();
    if (!ok || (int)level >= NumberLevels()) {
      return false;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "%d",
               versions_->NumLevelFiles(static_cast<int>(level)));
      *value = buf;
      return true;
    }
  } else if (in == "levelstats") {
    char buf[1000];
    snprintf(buf, sizeof(buf),
             "Level Files Size(MB)\n"
             "--------------------\n");
    value->append(buf);

    for (int level = 0; level < NumberLevels(); level++) {
      snprintf(buf, sizeof(buf),
               "%3d %8d %8.0f\n",
               level,
               versions_->NumLevelFiles(level),
               versions_->NumLevelBytes(level) / 1048576.0);
      value->append(buf);
    }
    return true;

  } else if (in == "stats") {
    char buf[1000];
    uint64_t total_bytes_written = 0;
    uint64_t total_bytes_read = 0;
    uint64_t micros_up = env_->NowMicros() - started_at_;
    // Add "+1" to make sure seconds_up is > 0 and avoid NaN later
    double seconds_up = (micros_up + 1) / 1000000.0;
    uint64_t total_slowdown = 0;
    uint64_t total_slowdown_count = 0;
    uint64_t interval_bytes_written = 0;
    uint64_t interval_bytes_read = 0;
    uint64_t interval_bytes_new = 0;
    double   interval_seconds_up = 0;

    // Pardon the long line but I think it is easier to read this way.
    snprintf(buf, sizeof(buf),
             "                               Compactions\n"
             "Level  Files Size(MB) Score Time(sec)  Read(MB) Write(MB)    Rn(MB)  Rnp1(MB)  Wnew(MB) RW-Amplify Read(MB/s) Write(MB/s)      Rn     Rnp1     Wnp1     NewW    Count  Ln-stall Stall-cnt\n"
             "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"
             );
    value->append(buf);
    for (int level = 0; level < NumberLevels(); level++) {
      int files = versions_->NumLevelFiles(level);
      if (stats_[level].micros > 0 || files > 0) {
        int64_t bytes_read = stats_[level].bytes_readn +
                             stats_[level].bytes_readnp1;
        int64_t bytes_new = stats_[level].bytes_written -
                            stats_[level].bytes_readnp1;
        double amplify = (stats_[level].bytes_readn == 0)
            ? 0.0
            : (stats_[level].bytes_written +
               stats_[level].bytes_readnp1 +
               stats_[level].bytes_readn) /
                (double) stats_[level].bytes_readn;

        total_bytes_read += bytes_read;
        total_bytes_written += stats_[level].bytes_written;

        snprintf(
            buf, sizeof(buf),
            "%3d %8d %8.0f %5.1f %9.0f %9.0f %9.0f %9.0f %9.0f %9.0f %10.1f %9.1f %11.1f %8d %8d %8d %8d %8d %9.1f %9lu\n",
            level,
            files,
            versions_->NumLevelBytes(level) / 1048576.0,
            versions_->NumLevelBytes(level) /
                versions_->MaxBytesForLevel(level),
            stats_[level].micros / 1e6,
            bytes_read / 1048576.0,
            stats_[level].bytes_written / 1048576.0,
            stats_[level].bytes_readn / 1048576.0,
            stats_[level].bytes_readnp1 / 1048576.0,
            bytes_new / 1048576.0,
            amplify,
            // +1 to avoid division by 0
            (bytes_read / 1048576.0) / ((stats_[level].micros+1) / 1000000.0),
            (stats_[level].bytes_written / 1048576.0) /
                ((stats_[level].micros+1) / 1000000.0),
            stats_[level].files_in_leveln,
            stats_[level].files_in_levelnp1,
            stats_[level].files_out_levelnp1,
            stats_[level].files_out_levelnp1 - stats_[level].files_in_levelnp1,
            stats_[level].count,
            stall_leveln_slowdown_[level] / 1000000.0,
            (unsigned long) stall_leveln_slowdown_count_[level]);
        total_slowdown += stall_leveln_slowdown_[level];
        total_slowdown_count += stall_leveln_slowdown_count_[level];
        value->append(buf);
      }
    }

    interval_bytes_new = stats_[0].bytes_written - last_stats_.bytes_new_;
    interval_bytes_read = total_bytes_read - last_stats_.bytes_read_;
    interval_bytes_written = total_bytes_written - last_stats_.bytes_written_;
    interval_seconds_up = seconds_up - last_stats_.seconds_up_;

    snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
             seconds_up, interval_seconds_up);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (GB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             stats_[0].bytes_written / (1048576.0 * 1024),
             total_bytes_read / (1048576.0 * 1024),
             total_bytes_written / (1048576.0 * 1024),
             (total_bytes_read + total_bytes_written) / (1048576.0 * 1024));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             stats_[0].bytes_written / 1048576.0 / seconds_up,
             total_bytes_read / 1048576.0 / seconds_up,
             total_bytes_written / 1048576.0 / seconds_up,
             (total_bytes_read + total_bytes_written) / 1048576.0 / seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification cumulative: %.1f write, %.1f compaction\n",
             (double) total_bytes_written / (stats_[0].bytes_written+1),
             (double) (total_bytes_written + total_bytes_read)
                  / (stats_[0].bytes_written+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             interval_bytes_new / 1048576.0,
             interval_bytes_read/ 1048576.0,
             interval_bytes_written / 1048576.0,
             (interval_bytes_read + interval_bytes_written) / 1048576.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             interval_bytes_new / 1048576.0 / interval_seconds_up,
             interval_bytes_read / 1048576.0 / interval_seconds_up,
             interval_bytes_written / 1048576.0 / interval_seconds_up,
             (interval_bytes_read + interval_bytes_written)
                 / 1048576.0 / interval_seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification interval: %.1f write, %.1f compaction\n",
             (double) interval_bytes_written / (interval_bytes_new+1),
             (double) (interval_bytes_written + interval_bytes_read) /
                  (interval_bytes_new+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(secs): %.3f level0_slowdown, %.3f level0_numfiles, "
            "%.3f memtable_compaction, %.3f leveln_slowdown\n",
            stall_level0_slowdown_ / 1000000.0,
            stall_level0_num_files_ / 1000000.0,
            stall_memtable_compaction_ / 1000000.0,
            total_slowdown / 1000000.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(count): %lu level0_slowdown, %lu level0_numfiles, "
            "%lu memtable_compaction, %lu leveln_slowdown\n",
            (unsigned long) stall_level0_slowdown_count_,
            (unsigned long) stall_level0_num_files_count_,
            (unsigned long) stall_memtable_compaction_count_,
            (unsigned long) total_slowdown_count);
    value->append(buf);

    last_stats_.bytes_read_ = total_bytes_read;
    last_stats_.bytes_written_ = total_bytes_written;
    last_stats_.bytes_new_ = stats_[0].bytes_written;
    last_stats_.seconds_up_ = seconds_up;

    return true;
  } else if (in == "sstables") {
    *value = versions_->current()->DebugString();
    return true;
  } else if (in == "num-immutable-mem-table") {
    *value = std::to_string(imm_.size());
    return true;
  }

  return false;
}

void DBImpl::GetApproximateSizes(
    const Range* range, int n,
    uint64_t* sizes) {
  // TODO(opt): better implementation
  Version* v;
  {
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();
  }

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
    sizes[i] = (limit >= start ? limit - start : 0);
  }

  {
    MutexLock l(&mutex_);
    v->Unref();
  }
}

inline void DBImpl::DelayLoggingAndReset() {
  if (delayed_writes_ > 0) {
    Log(options_.info_log, "delayed %d write...\n", delayed_writes_ );
    delayed_writes_ = 0;
  }
}

Status DBImpl::DeleteFile(std::string name) {
  uint64_t number;
  FileType type;
  WalFileType log_type;
  if (!ParseFileName(name, &number, &type, &log_type) ||
      (type != kTableFile && type != kLogFile)) {
    Log(options_.info_log, "DeleteFile %s failed.\n", name.c_str());
    return Status::InvalidArgument("Invalid file name");
  }

  Status status;
  if (type == kLogFile) {
    // Only allow deleting archived log files
    if (log_type != kArchivedLogFile) {
      Log(options_.info_log, "DeleteFile %s failed.\n", name.c_str());
      return Status::NotSupported("Delete only supported for archived logs");
    }
    status = env_->DeleteFile(options_.wal_dir + "/" + name.c_str());
    if (!status.ok()) {
      Log(options_.info_log, "DeleteFile %s failed.\n", name.c_str());
    }
    return status;
  }

  int level;
  FileMetaData metadata;
  int maxlevel = NumberLevels();
  VersionEdit edit(maxlevel);
  DeletionState deletion_state;
  {
    MutexLock l(&mutex_);
    status = versions_->GetMetadataForFile(number, &level, &metadata);
    if (!status.ok()) {
      Log(options_.info_log, "DeleteFile %s failed. File not found\n",
                             name.c_str());
      return Status::InvalidArgument("File not found");
    }
    assert((level > 0) && (level < maxlevel));

    // If the file is being compacted no need to delete.
    if (metadata.being_compacted) {
      Log(options_.info_log,
          "DeleteFile %s Skipped. File about to be compacted\n", name.c_str());
      return Status::OK();
    }

    // Only the files in the last level can be deleted externally.
    // This is to make sure that any deletion tombstones are not
    // lost. Check that the level passed is the last level.
    for (int i = level + 1; i < maxlevel; i++) {
      if (versions_->NumLevelFiles(i) != 0) {
        Log(options_.info_log,
            "DeleteFile %s FAILED. File not in last level\n", name.c_str());
        return Status::InvalidArgument("File not in last level");
      }
    }
    edit.DeleteFile(level, number);
    status = versions_->LogAndApply(&edit, &mutex_);
    if (status.ok()) {
      versions_->GetObsoleteFiles(&deletion_state.sst_delete_files);
    }
    FindObsoleteFiles(deletion_state, false);
  } // lock released here
  LogFlush(options_.info_log);

  if (status.ok()) {
    // remove files outside the db-lock
    PurgeObsoleteFiles(deletion_state);
  }
  return status;
}

void DBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata) {
  MutexLock l(&mutex_);
  return versions_->GetLiveFilesMetaData(metadata);
}

// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
  WriteBatch batch;
  batch.Put(key, value);
  return Write(opt, &batch);
}

Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  WriteBatch batch;
  batch.Delete(key);
  return Write(opt, &batch);
}

Status DB::Merge(const WriteOptions& opt, const Slice& key,
                 const Slice& value) {
  WriteBatch batch;
  batch.Merge(key, value);
  return Write(opt, &batch);
}

DB::~DB() { }
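
// A minimal usage sketch for DB::Open (illustrative only; the path and the
// option values are assumptions for the example, and error handling is
// abbreviated):
//
//   DB* db;
//   Options opts;
//   opts.create_if_missing = true;
//   Status s = DB::Open(opts, "/tmp/testdb", &db);
//   if (s.ok()) { /* use db */ delete db; }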

Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
  *dbptr = nullptr;
  EnvOptions soptions;

  if (options.block_cache != nullptr && options.no_block_cache) {
    return Status::InvalidArgument(
        "no_block_cache is true while block_cache is not nullptr");
  }

  DBImpl* impl = new DBImpl(options, dbname);
  Status s = impl->env_->CreateDirIfMissing(impl->options_.wal_dir);
  if (!s.ok()) {
    delete impl;
    return s;
  }

  s = impl->CreateArchivalDirectory();
  if (!s.ok()) {
    delete impl;
    return s;
  }
  impl->mutex_.Lock();
  VersionEdit edit(impl->NumberLevels());
  s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
  if (s.ok()) {
    uint64_t new_log_number = impl->versions_->NewFileNumber();
    unique_ptr<WritableFile> lfile;
    soptions.use_mmap_writes = false;
    s = options.env->NewWritableFile(
      LogFileName(impl->options_.wal_dir, new_log_number),
      &lfile,
      soptions
    );
    if (s.ok()) {
      lfile->SetPreallocationBlockSize(1.1 * options.write_buffer_size);
      edit.SetLogNumber(new_log_number);
      impl->logfile_number_ = new_log_number;
      impl->log_.reset(new log::Writer(std::move(lfile)));
      s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
    }
    if (s.ok()) {
      impl->mem_->SetLogNumber(impl->logfile_number_);
      impl->DeleteObsoleteFiles();
      impl->MaybeScheduleFlushOrCompaction();
      impl->MaybeScheduleLogDBDeployStats();
    }
  }
  impl->mutex_.Unlock();

  if (options.compaction_style == kCompactionStyleUniversal) {
    int num_files;
    for (int i = 1; i < impl->NumberLevels(); i++) {
      num_files = impl->versions_->NumLevelFiles(i);
      if (num_files > 0) {
        s = Status::InvalidArgument("Not all files are at level 0. Cannot "
          "open with universal compaction style.");
        break;
      }
    }
  }

  if (s.ok()) {
    *dbptr = impl;
  } else {
    delete impl;
  }
  return s;
}

Snapshot::~Snapshot() {
}

Status DestroyDB(const std::string& dbname, const Options& options) {
  const InternalKeyComparator comparator(options.comparator);
  const InternalFilterPolicy filter_policy(options.filter_policy);
  const Options& soptions(SanitizeOptions(
    dbname, &comparator, &filter_policy, options));
  Env* env = soptions.env;
  std::vector<std::string> filenames;
  std::vector<std::string> archiveFiles;

  std::string archivedir = ArchivalDirectory(dbname);
  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);

  if (dbname != soptions.wal_dir) {
    std::vector<std::string> logfilenames;
    env->GetChildren(soptions.wal_dir, &logfilenames);
    filenames.insert(filenames.end(), logfilenames.begin(), logfilenames.end());
    archivedir = ArchivalDirectory(soptions.wal_dir);
  }

  if (filenames.empty()) {
    return Status::OK();
  }

  FileLock* lock;
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
  if (result.ok()) {
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          type != kDBLockFile) {  // Lock file will be deleted at end
        Status del;
        if (type == kMetaDatabase) {
          del = DestroyDB(dbname + "/" + filenames[i], options);
        } else if (type == kLogFile) {
          del = env->DeleteFile(soptions.wal_dir + "/" + filenames[i]);
        } else {
          del = env->DeleteFile(dbname + "/" + filenames[i]);
        }
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }

    env->GetChildren(archivedir, &archiveFiles);
    // Delete archival files.
    for (size_t i = 0; i < archiveFiles.size(); ++i) {
      if (ParseFileName(archiveFiles[i], &number, &type) &&
          type == kLogFile) {
        Status del = env->DeleteFile(archivedir + "/" + archiveFiles[i]);
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
    // ignore case where no archival directory is present.
    env->DeleteDir(archivedir);

    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
    env->DeleteDir(soptions.wal_dir);
  }
  return result;
}

//
// A global method that can dump out the build version
void dumpLeveldbBuildVersion(Logger * log) {
  Log(log, "Git sha %s", rocksdb_build_git_sha);
  Log(log, "Compile time %s %s",
      rocksdb_build_compile_time, rocksdb_build_compile_date);
}

}  // namespace rocksdb