//  Copyright (c) 2013, Facebook, Inc.  All rights reserved.
//  This source code is licensed under the BSD-style license found in the
//  LICENSE file in the root directory of this source tree. An additional grant
//  of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/db_impl.h"

#include <algorithm>
#include <climits>
#include <cstdio>
#include <set>
#include <stdexcept>
#include <stdint.h>
#include <string>
#include <unordered_set>
#include <vector>

#include "db/builder.h"
#include "db/dbformat.h"
#include "db/db_iter.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/memtablelist.h"
#include "db/merge_helper.h"
#include "db/prefix_filter_iterator.h"
#include "db/table_cache.h"
#include "db/table_stats_collector.h"
#include "db/transaction_log_impl.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "port/port.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
#include "rocksdb/table.h"
#include "table/block.h"
#include "table/merger.h"
#include "table/two_level_iterator.h"
#include "util/auto_roll_logger.h"
#include "util/build_version.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/perf_context_imp.h"
#include "util/stop_watch.h"

namespace rocksdb {

void dumpLeveldbBuildVersion(Logger * log);

// Information kept for every waiting writer
struct DBImpl::Writer {
  Status status;
  WriteBatch* batch;
  bool sync;
  bool disableWAL;
  bool done;
  port::CondVar cv;

  explicit Writer(port::Mutex* mu) : cv(mu) { }
};

struct DBImpl::CompactionState {
  Compaction* const compaction;

  // If there were two snapshots with seq numbers s1 and
  // s2 and s1 < s2, and if we find two instances of a key k1 that lies
  // entirely within s1 and s2, then the earlier version of k1 can be safely
  // deleted because that version is not visible in any snapshot.
  std::vector<SequenceNumber> existing_snapshots;

  // Files produced by compaction
  struct Output {
    uint64_t number;
    uint64_t file_size;
    InternalKey smallest, largest;
    SequenceNumber smallest_seqno, largest_seqno;
  };
  std::vector<Output> outputs;
  std::list<uint64_t> allocated_file_numbers;

  // State kept for output being generated
  unique_ptr<WritableFile> outfile;
  unique_ptr<TableBuilder> builder;

  uint64_t total_bytes;

  Output* current_output() { return &outputs[outputs.size()-1]; }

  explicit CompactionState(Compaction* c)
      : compaction(c),
        total_bytes(0) {
  }

  // Create a client-visible context of this compaction
  CompactionFilter::Context GetFilterContext() {
    CompactionFilter::Context context;
    context.is_full_compaction = compaction->IsFullCompaction();
    return context;
  }
};

// Fix user-supplied options to be reasonable
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}
Options SanitizeOptions(const std::string& dbname,
                        const InternalKeyComparator* icmp,
                        const InternalFilterPolicy* ipolicy,
                        const Options& src) {
  Options result = src;
  result.comparator = icmp;
  result.filter_policy = (src.filter_policy != nullptr) ? ipolicy : nullptr;
  ClipToRange(&result.max_open_files,            20,     1000000);
  ClipToRange(&result.write_buffer_size,         ((size_t)64)<<10,
                                                 ((size_t)64)<<30);
  ClipToRange(&result.block_size,                1<<10,  4<<20);

  // If the user sets arena_block_size, we trust that value. Otherwise,
  // calculate a proper value from write_buffer_size.
  if (result.arena_block_size <= 0) {
    result.arena_block_size = result.write_buffer_size / 10;
  }

  result.min_write_buffer_number_to_merge = std::min(
    result.min_write_buffer_number_to_merge, result.max_write_buffer_number-1);
  if (result.info_log == nullptr) {
    Status s = CreateLoggerFromOptions(dbname, result.db_log_dir, src.env,
                                       result, &result.info_log);
    if (!s.ok()) {
      // No place suitable for logging
      result.info_log = nullptr;
    }
  }
  if (result.block_cache == nullptr && !result.no_block_cache) {
    result.block_cache = NewLRUCache(8 << 20);
  }
  result.compression_per_level = src.compression_per_level;
  if (result.block_size_deviation < 0 || result.block_size_deviation > 100) {
    result.block_size_deviation = 0;
  }
  if (result.max_mem_compaction_level >= result.num_levels) {
    result.max_mem_compaction_level = result.num_levels - 1;
  }
  if (result.soft_rate_limit > result.hard_rate_limit) {
    result.soft_rate_limit = result.hard_rate_limit;
  }
  if (result.compaction_filter) {
    Log(result.info_log, "Compaction filter specified, ignore factory");
  }
  if (result.prefix_extractor) {
    // If a prefix extractor has been supplied and a PrefixHashRepFactory is
    // being used, make sure that the latter uses the former as its transform
    // function.
    auto factory = dynamic_cast<PrefixHashRepFactory*>(
      result.memtable_factory.get());
    if (factory &&
        factory->GetTransform() != result.prefix_extractor) {
      Log(result.info_log, "A prefix hash representation factory was supplied "
          "whose prefix extractor does not match options.prefix_extractor. "
          "Falling back to skip list representation factory");
      result.memtable_factory = std::make_shared<SkipListFactory>();
    } else if (factory) {
      Log(result.info_log, "Prefix hash memtable rep is in use.");
    }
  }

  if (result.wal_dir.empty()) {
    // Use dbname as default
    result.wal_dir = dbname;
  }

  // -- Sanitize the table stats collectors
  // All user-defined stats collectors are wrapped by
  // UserKeyTableStatsCollector, since they only have knowledge of the user
  // keys; internal keys are invisible to them.
  auto& collectors = result.table_stats_collectors;
  for (size_t i = 0; i < result.table_stats_collectors.size(); ++i) {
    assert(collectors[i]);
    collectors[i] =
      std::make_shared<UserKeyTableStatsCollector>(collectors[i]);
  }

  // Add collector to collect internal key statistics
  collectors.push_back(
      std::make_shared<InternalKeyStatsCollector>()
  );

  if (!result.flush_block_policy_factory) {
    result.SetUpDefaultFlushBlockPolicyFactory();
  }

  return result;
}

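// Pick the compression type for a file produced at the given level: use the
// per-level setting from options.compression_per_level when it is non-empty,
// otherwise fall back to options.compression. enable_compression == false
// forces kNoCompression.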
CompressionType GetCompressionType(const Options& options, int level,
                                   const bool enable_compression) {
  if (!enable_compression) {
    // disable compression
    return kNoCompression;
  }
  // If the user has specified a different compression level for each level,
  // then pick the compression for that level.
  if (!options.compression_per_level.empty()) {
    const int n = options.compression_per_level.size() - 1;
    // It is possible for level_ to be -1; in that case, we use level
    // 0's compression.  This occurs mostly in backwards compatibility
    // situations when the builder doesn't know what level the file
    // belongs to.  Likewise, if level_ is beyond the end of the
    // specified compression levels, use the last value.
    return options.compression_per_level[std::max(0, std::min(level, n))];
  } else {
    return options.compression;
  }
}

DBImpl::DBImpl(const Options& options, const std::string& dbname)
    : env_(options.env),
      dbname_(dbname),
      internal_comparator_(options.comparator),
      options_(SanitizeOptions(
          dbname, &internal_comparator_, &internal_filter_policy_, options)),
      internal_filter_policy_(options.filter_policy),
      owns_info_log_(options_.info_log != options.info_log),
      db_lock_(nullptr),
      mutex_(options.use_adaptive_mutex),
      shutting_down_(nullptr),
      bg_cv_(&mutex_),
      mem_rep_factory_(options_.memtable_factory),
      mem_(new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_)),
      logfile_number_(0),
      tmp_batch_(),
      bg_compaction_scheduled_(0),
      bg_flush_scheduled_(0),
      bg_logstats_scheduled_(false),
      manual_compaction_(nullptr),
      logger_(nullptr),
      disable_delete_obsolete_files_(false),
      delete_obsolete_files_last_run_(0),
      purge_wal_files_last_run_(0),
      last_stats_dump_time_microsec_(0),
      default_interval_to_delete_obsolete_WAL_(600),
      stall_level0_slowdown_(0),
      stall_memtable_compaction_(0),
      stall_level0_num_files_(0),
      stall_level0_slowdown_count_(0),
      stall_memtable_compaction_count_(0),
      stall_level0_num_files_count_(0),
      started_at_(options.env->NowMicros()),
      flush_on_destroy_(false),
      stats_(options.num_levels),
      delayed_writes_(0),
      storage_options_(options),
      bg_work_gate_closed_(false),
      refitting_level_(false) {

  mem_->Ref();

  env_->GetAbsolutePath(dbname, &db_absolute_path_);

  stall_leveln_slowdown_.resize(options.num_levels);
  stall_leveln_slowdown_count_.resize(options.num_levels);
  for (int i = 0; i < options.num_levels; ++i) {
    stall_leveln_slowdown_[i] = 0;
    stall_leveln_slowdown_count_[i] = 0;
  }

  // Reserve ten files or so for other uses and give the rest to TableCache.
  const int table_cache_size = options_.max_open_files - 10;
  table_cache_.reset(new TableCache(dbname_, &options_,
                                    storage_options_, table_cache_size));

  versions_.reset(new VersionSet(dbname_, &options_, storage_options_,
                                 table_cache_.get(), &internal_comparator_));

  dumpLeveldbBuildVersion(options_.info_log.get());
  options_.Dump(options_.info_log.get());

  char name[100];
  Status st = env_->GetHostName(name, 100L);
  if (st.ok()) {
    host_name_ = name;
  } else {
    Log(options_.info_log, "Can't get hostname, use localhost as host name.");
    host_name_ = "localhost";
  }
  last_log_ts = 0;

  LogFlush(options_.info_log);
}

DBImpl::~DBImpl() {
  // Wait for background work to finish
  if (flush_on_destroy_ && mem_->GetFirstSequenceNumber() != 0) {
    FlushMemTable(FlushOptions());
  }
  mutex_.Lock();
  shutting_down_.Release_Store(this);  // Any non-nullptr value is ok
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  if (mem_ != nullptr) mem_->Unref();
  imm_.UnrefAll();
  LogFlush(options_.info_log);
}

// Do not flush and close the database gracefully. Simulate a crash.
void DBImpl::TEST_Destroy_DBImpl() {
  // ensure that no new memtable flushes can occur
  flush_on_destroy_ = false;

  // wait till all background compactions are done.
  mutex_.Lock();
  while (bg_compaction_scheduled_ ||
         bg_flush_scheduled_ ||
         bg_logstats_scheduled_) {
    bg_cv_.Wait();
  }

  // Prevent new compactions from occurring.
  bg_work_gate_closed_ = true;
  const int LargeNumber = 10000000;
  bg_compaction_scheduled_ += LargeNumber;

  mutex_.Unlock();
  LogFlush(options_.info_log);

  // force release the lock file.
  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  log_.reset();
  versions_.reset();
  table_cache_.reset();
}

uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
  return versions_->ManifestFileNumber();
}

Status DBImpl::NewDB() {
  VersionEdit new_db(NumberLevels());
  new_db.SetComparatorName(user_comparator()->Name());
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  const std::string manifest = DescriptorFileName(dbname_, 1);
  unique_ptr<WritableFile> file;
  Status s = env_->NewWritableFile(manifest, &file, storage_options_);
  if (!s.ok()) {
    return s;
  }
  file->SetPreallocationBlockSize(options_.manifest_preallocation_size);
  {
    log::Writer log(std::move(file));
    std::string record;
    new_db.EncodeTo(&record);
    s = log.AddRecord(record);
  }
  if (s.ok()) {
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(env_, dbname_, 1);
  } else {
    env_->DeleteFile(manifest);
  }
  return s;
}

void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || options_.paranoid_checks) {
    // No change needed
  } else {
    Log(options_.info_log, "Ignoring error %s", s->ToString().c_str());
    *s = Status::OK();
  }
}

const Status DBImpl::CreateArchivalDirectory() {
  if (options_.WAL_ttl_seconds > 0 || options_.WAL_size_limit_MB > 0) {
    std::string archivalPath = ArchivalDirectory(options_.wal_dir);
    return env_->CreateDirIfMissing(archivalPath);
  }
  return Status::OK();
}

void DBImpl::PrintStatistics() {
  auto dbstats = options_.statistics;
  if (dbstats) {
    Log(options_.info_log,
        "STATISTICS:\n %s",
        dbstats->ToString().c_str());
  }
}

void DBImpl::MaybeDumpStats() {
  if (options_.stats_dump_period_sec == 0) return;

  const uint64_t now_micros = env_->NowMicros();

  if (last_stats_dump_time_microsec_ +
      options_.stats_dump_period_sec * 1000000
      <= now_micros) {
    // Multiple threads could race in here simultaneously.
    // However, the last one will update last_stats_dump_time_microsec_
    // atomically. We could see more than one dump during one dump
    // period in rare cases.
    last_stats_dump_time_microsec_ = now_micros;
    std::string stats;
    GetProperty("rocksdb.stats", &stats);
    Log(options_.info_log, "%s", stats.c_str());
    PrintStatistics();
  }
}

// Returns the list of live files in 'sstlive' and the list
// of all files in the filesystem in 'allfiles'.
void DBImpl::FindObsoleteFiles(DeletionState& deletion_state, bool force) {
  mutex_.AssertHeld();

  // if deletion is disabled, do nothing
  if (disable_delete_obsolete_files_) {
    return;
  }

  // store the current filenum, lognum, etc
  deletion_state.manifest_file_number = versions_->ManifestFileNumber();
  deletion_state.log_number = versions_->LogNumber();
  deletion_state.prev_log_number = versions_->PrevLogNumber();

  // This method is costly when the number of files is large.
  // Do not allow it to trigger more often than once in
  // delete_obsolete_files_period_micros.
  if (!force && options_.delete_obsolete_files_period_micros != 0) {
    const uint64_t now_micros = env_->NowMicros();
    if (delete_obsolete_files_last_run_ +
        options_.delete_obsolete_files_period_micros > now_micros) {
      return;
    }
    delete_obsolete_files_last_run_ = now_micros;
  }

  // Make a list of all of the live files; set is slow, should not
  // be used.
  deletion_state.sstlive.assign(pending_outputs_.begin(),
                                pending_outputs_.end());
  versions_->AddLiveFiles(&deletion_state.sstlive);

  // set of all files in the directory
  env_->GetChildren(dbname_, &deletion_state.allfiles); // Ignore errors

  // Add log files in wal_dir
  if (options_.wal_dir != dbname_) {
    std::vector<std::string> log_files;
    env_->GetChildren(options_.wal_dir, &log_files); // Ignore errors
    deletion_state.allfiles.insert(
      deletion_state.allfiles.end(),
      log_files.begin(),
      log_files.end()
    );
  }
}

// Diffs the files listed in 'allfiles' against the live set; files that do
// not belong to live files are possibly removed. Also, removes all the
// files in sstdeletefiles and logdeletefiles.
// It is not necessary to hold the mutex when invoking this method.
void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
  // if deletion is disabled, do nothing
  if (disable_delete_obsolete_files_) {
    return;
  }

  uint64_t number;
  FileType type;
  std::vector<std::string> old_log_files;

  // Now, convert live list to an unordered set, WITHOUT mutex held;
  // set is slow.
  std::unordered_set<uint64_t> live_set(state.sstlive.begin(),
                                        state.sstlive.end());

  state.allfiles.reserve(state.allfiles.size() + state.sstdeletefiles.size());
  for (auto filenum : state.sstdeletefiles) {
    state.allfiles.push_back(TableFileName("", filenum));
  }

  state.allfiles.reserve(state.allfiles.size() + state.logdeletefiles.size());
  for (auto filenum : state.logdeletefiles) {
    if (filenum > 0) {
      state.allfiles.push_back(LogFileName("", filenum));
    }
  }

  for (size_t i = 0; i < state.allfiles.size(); i++) {
    if (ParseFileName(state.allfiles[i], &number, &type)) {
      bool keep = true;
      switch (type) {
        case kLogFile:
          keep = ((number >= state.log_number) ||
                  (number == state.prev_log_number));
          break;
        case kDescriptorFile:
          // Keep my manifest file, and any newer incarnations'
          // (in case there is a race that allows other incarnations)
          keep = (number >= state.manifest_file_number);
          break;
        case kTableFile:
          keep = (live_set.find(number) != live_set.end());
          break;
        case kTempFile:
          // Any temp files that are currently being written to must
          // be recorded in pending_outputs_, which is inserted into "live"
          keep = (live_set.find(number) != live_set.end());
          break;
        case kInfoLogFile:
          keep = true;
          if (number != 0) {
            old_log_files.push_back(state.allfiles[i]);
          }
          break;
        case kCurrentFile:
        case kDBLockFile:
        case kIdentityFile:
        case kMetaDatabase:
          keep = true;
          break;
      }

      if (!keep) {
        if (type == kTableFile) {
          // evict from cache
          table_cache_->Evict(number);
        }
        Log(options_.info_log, "Delete type=%d #%lu", int(type), number);

        Status st;
        if (type == kLogFile && (options_.WAL_ttl_seconds > 0 ||
              options_.WAL_size_limit_MB > 0)) {
            st = env_->RenameFile(dbname_ + "/" + state.allfiles[i],
                                  ArchivedLogFileName(options_.wal_dir,
                                                      number));
            if (!st.ok()) {
              Log(options_.info_log, "RenameFile logfile #%lu FAILED", number);
            }
        } else {
          st = env_->DeleteFile(dbname_ + "/" + state.allfiles[i]);
          if (!st.ok()) {
            Log(options_.info_log, "Delete type=%d #%lu FAILED\n",
                int(type), number);
          }
        }
      }
    }
  }

  // Delete old info log files.
  size_t old_log_file_count = old_log_files.size();
  // NOTE: Currently we only support log purge when options_.db_log_dir is
  // located in `dbname` directory.
  if (old_log_file_count >= options_.keep_log_file_num &&
      options_.db_log_dir.empty()) {
    std::sort(old_log_files.begin(), old_log_files.end());
    size_t end = old_log_file_count - options_.keep_log_file_num;
    for (unsigned int i = 0; i <= end; i++) {
      std::string& to_delete = old_log_files.at(i);
      // Log(options_.info_log, "Delete type=%d %s\n",
      //     int(kInfoLogFile), to_delete.c_str());
      env_->DeleteFile(dbname_ + "/" + to_delete);
    }
  }
  PurgeObsoleteWALFiles();
  LogFlush(options_.info_log);
}

void DBImpl::DeleteObsoleteFiles() {
  mutex_.AssertHeld();
  DeletionState deletion_state;
  FindObsoleteFiles(deletion_state, true);
  PurgeObsoleteFiles(deletion_state);
}

// 1. Go through all archived files and
//    a. if ttl is enabled, delete outdated files
//    b. if archive size limit is enabled, delete empty files,
//        compute file number and size.
// 2. If size limit is enabled:
//    a. compute how many files should be deleted
//    b. get sorted non-empty archived logs
//    c. delete what should be deleted
void DBImpl::PurgeObsoleteWALFiles() {
  bool const ttl_enabled = options_.WAL_ttl_seconds > 0;
  bool const size_limit_enabled = options_.WAL_size_limit_MB > 0;
  if (!ttl_enabled && !size_limit_enabled) {
    return;
  }

  int64_t current_time;
  Status s = env_->GetCurrentTime(&current_time);
  if (!s.ok()) {
    Log(options_.info_log, "Can't get current time: %s", s.ToString().c_str());
    assert(false);
    return;
  }
  uint64_t const now_seconds = static_cast<uint64_t>(current_time);
  uint64_t const time_to_check = (ttl_enabled && !size_limit_enabled) ?
    options_.WAL_ttl_seconds / 2 : default_interval_to_delete_obsolete_WAL_;

  if (purge_wal_files_last_run_ + time_to_check > now_seconds) {
    return;
  }

  purge_wal_files_last_run_ = now_seconds;

  std::string archival_dir = ArchivalDirectory(options_.wal_dir);
  std::vector<std::string> files;
  s = env_->GetChildren(archival_dir, &files);
  if (!s.ok()) {
    Log(options_.info_log, "Can't get archive files: %s", s.ToString().c_str());
    assert(false);
    return;
  }

  size_t log_files_num = 0;
  uint64_t log_file_size = 0;

  for (auto& f : files) {
    uint64_t number;
    FileType type;
    if (ParseFileName(f, &number, &type) && type == kLogFile) {
      std::string const file_path = archival_dir + "/" + f;
      if (ttl_enabled) {
        uint64_t file_m_time;
        Status const s = env_->GetFileModificationTime(file_path,
          &file_m_time);
        if (!s.ok()) {
          Log(options_.info_log, "Can't get file mod time: %s: %s",
              file_path.c_str(), s.ToString().c_str());
          continue;
        }
        if (now_seconds - file_m_time > options_.WAL_ttl_seconds) {
          Status const s = env_->DeleteFile(file_path);
          if (!s.ok()) {
            Log(options_.info_log, "Can't delete file: %s: %s",
                file_path.c_str(), s.ToString().c_str());
            continue;
          }
          continue;
        }
      }

      if (size_limit_enabled) {
        uint64_t file_size;
        Status const s = env_->GetFileSize(file_path, &file_size);
        if (!s.ok()) {
          Log(options_.info_log, "Can't get file size: %s: %s",
              file_path.c_str(), s.ToString().c_str());
          return;
        } else {
          if (file_size > 0) {
            log_file_size = std::max(log_file_size, file_size);
            ++log_files_num;
          } else {
            Status s = env_->DeleteFile(file_path);
            if (!s.ok()) {
              Log(options_.info_log, "Can't delete file: %s: %s",
                  file_path.c_str(), s.ToString().c_str());
              continue;
            }
          }
        }
      }
    }
  }

  if (0 == log_files_num || !size_limit_enabled) {
    return;
  }

  size_t const files_keep_num = options_.WAL_size_limit_MB *
    1024 * 1024 / log_file_size;
  if (log_files_num <= files_keep_num) {
    return;
  }

  size_t files_del_num = log_files_num - files_keep_num;
  VectorLogPtr archived_logs;
  AppendSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);

  if (files_del_num > archived_logs.size()) {
    Log(options_.info_log, "Trying to delete more archived log files than "
        "exist. Deleting all");
    files_del_num = archived_logs.size();
  }

  for (size_t i = 0; i < files_del_num; ++i) {
    std::string const file_path = archived_logs[i]->PathName();
    Status const s = DeleteFile(file_path);
    if (!s.ok()) {
      Log(options_.info_log, "Can't delete file: %s: %s",
          file_path.c_str(), s.ToString().c_str());
      continue;
    }
  }
}

// If externalTable is set, then apply recovered transactions
// to that table. This is used for readonly mode.
Status DBImpl::Recover(VersionEdit* edit, MemTable* external_table,
    bool error_if_log_file_exist) {
  mutex_.AssertHeld();

  assert(db_lock_ == nullptr);
  if (!external_table) {
    // We call CreateDirIfMissing() as the directory may already exist (if we
    // are reopening a DB); when this happens we don't want creating the
    // directory to cause an error. However, we need to check if creating the
    // directory fails or else we may get an obscure message about the lock
    // file not existing. One real-world example of this occurring is if
    // env->CreateDirIfMissing() doesn't create intermediate directories, e.g.
    // when dbname_ is "dir/db" but "dir" doesn't exist.
    Status s = env_->CreateDirIfMissing(dbname_);
    if (!s.ok()) {
      return s;
    }

    s = env_->LockFile(LockFileName(dbname_), &db_lock_);
    if (!s.ok()) {
      return s;
    }

    if (!env_->FileExists(CurrentFileName(dbname_))) {
      if (options_.create_if_missing) {
        // TODO: add merge_operator name check
        s = NewDB();
        if (!s.ok()) {
          return s;
        }
      } else {
        return Status::InvalidArgument(
            dbname_, "does not exist (create_if_missing is false)");
      }
    } else {
      if (options_.error_if_exists) {
        return Status::InvalidArgument(
            dbname_, "exists (error_if_exists is true)");
      }
    }
    // Check for the IDENTITY file and create it if not there
    if (!env_->FileExists(IdentityFileName(dbname_))) {
      s = SetIdentityFile(env_, dbname_);
      if (!s.ok()) {
        return s;
      }
    }
  }

  Status s = versions_->Recover();
  if (s.ok()) {
    SequenceNumber max_sequence(0);

    // Recover from all newer log files than the ones named in the
    // descriptor (new log files may have been added by the previous
    // incarnation without registering them in the descriptor).
    //
    // Note that PrevLogNumber() is no longer used, but we pay
    // attention to it in case we are recovering a database
    // produced by an older version of rocksdb.
    const uint64_t min_log = versions_->LogNumber();
    const uint64_t prev_log = versions_->PrevLogNumber();
    std::vector<std::string> filenames;
    s = env_->GetChildren(options_.wal_dir, &filenames);
    if (!s.ok()) {
      return s;
    }
    uint64_t number;
    FileType type;
    std::vector<uint64_t> logs;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type)
          && type == kLogFile
          && ((number >= min_log) || (number == prev_log))) {
        logs.push_back(number);
      }
    }

    if (logs.size() > 0 && error_if_log_file_exist) {
      return Status::Corruption(
          "The db was opened in readonly mode with error_if_log_file_exist "
          "flag but a log file already exists");
    }

    // Recover in the order in which the logs were generated
    std::sort(logs.begin(), logs.end());
    for (size_t i = 0; i < logs.size(); i++) {
      s = RecoverLogFile(logs[i], edit, &max_sequence, external_table);
      // The previous incarnation may not have written any MANIFEST
      // records after allocating this log number.  So we manually
      // update the file number allocation counter in VersionSet.
      versions_->MarkFileNumberUsed(logs[i]);
    }

    if (s.ok()) {
      if (versions_->LastSequence() < max_sequence) {
        versions_->SetLastSequence(max_sequence);
      }
      SetTickerCount(options_.statistics, SEQUENCE_NUMBER,
                     versions_->LastSequence());
    }
  }

  return s;
}

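// Replay one write-ahead log file into a memtable (external_table if
// provided, otherwise a freshly allocated one). When no external table is
// supplied, memtables that grow beyond write_buffer_size are flushed to
// level-0 tables via WriteLevel0TableForRecovery. *max_sequence is updated
// to the largest sequence number seen.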
Status DBImpl::RecoverLogFile(uint64_t log_number,
                              VersionEdit* edit,
                              SequenceNumber* max_sequence,
                              MemTable* external_table) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false or
                     //            options_.skip_log_error_on_recovery==true
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  mutex_.AssertHeld();

  // Open the log file
  std::string fname = LogFileName(options_.wal_dir, log_number);
  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);
  if (!status.ok()) {
    MaybeIgnoreError(&status);
    return status;
  }

  // Create the log reader.
  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks &&
                     !options_.skip_log_error_on_recovery ? &status : nullptr);
  // We intentionally make log::Reader do checksumming even if
  // paranoid_checks==false so that corruptions cause entire commits
  // to be skipped instead of propagating bad information (like overly
  // large sequence numbers).
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  Log(options_.info_log, "Recovering log #%llu",
      (unsigned long long) log_number);

  // Read all the records and add to a memtable
  std::string scratch;
  Slice record;
  WriteBatch batch;
  MemTable* mem = nullptr;
  if (external_table) {
    mem = external_table;
  }
  while (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      continue;
    }
    WriteBatchInternal::SetContents(&batch, record);

    if (mem == nullptr) {
      mem = new MemTable(internal_comparator_, mem_rep_factory_,
        NumberLevels(), options_);
      mem->Ref();
    }
    status = WriteBatchInternal::InsertInto(&batch, mem, &options_);
    MaybeIgnoreError(&status);
    if (!status.ok()) {
      break;
    }
    const SequenceNumber last_seq =
        WriteBatchInternal::Sequence(&batch) +
        WriteBatchInternal::Count(&batch) - 1;
    if (last_seq > *max_sequence) {
      *max_sequence = last_seq;
    }

    if (!external_table &&
        mem->ApproximateMemoryUsage() > options_.write_buffer_size) {
      status = WriteLevel0TableForRecovery(mem, edit);
      if (!status.ok()) {
        // Reflect errors immediately so that conditions like full
        // file-systems cause the DB::Open() to fail.
        break;
      }
      mem->Unref();
      mem = nullptr;
    }
  }

  if (status.ok() && mem != nullptr && !external_table) {
    status = WriteLevel0TableForRecovery(mem, edit);
    // Reflect errors immediately so that conditions like full
    // file-systems cause the DB::Open() to fail.
  }

  if (mem != nullptr && !external_table) mem->Unref();
  return status;
}

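// Used during recovery: build a level-0 table from the contents of *mem and
// record the new file in *edit.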
Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  pending_outputs_.insert(meta.number);
  Iterator* iter = mem->NewIterator();
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mem->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 table #%llu: started",
      (unsigned long long) meta.number);

  Status s;
  {
    mutex_.Unlock();
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable, true);
    LogFlush(options_.info_log);
    mutex_.Lock();
  }

  Log(options_.info_log, "Level-0 table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats.files_out_levelnp1 = 1;
  stats_[level].Add(stats);
  return s;
}

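// Build a single sstable from the contents of *mems. The resulting file is
// recorded in *edit as a level-0 file, or at a higher level when it is safe
// to push it down, and its number is returned through *filenumber.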
Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
                                uint64_t* filenumber) {
  mutex_.AssertHeld();
  const uint64_t start_micros = env_->NowMicros();
  FileMetaData meta;
  meta.number = versions_->NewFileNumber();
  *filenumber = meta.number;
  pending_outputs_.insert(meta.number);

  std::vector<Iterator*> list;
  for (MemTable* m : mems) {
    Log(options_.info_log,
        "Flushing memtable with log file: %lu\n",
        m->GetLogNumber());
    list.push_back(m->NewIterator());
  }
  Iterator* iter = NewMergingIterator(&internal_comparator_, &list[0],
                                      list.size());
  const SequenceNumber newest_snapshot = snapshots_.GetNewest();
  const SequenceNumber earliest_seqno_in_memtable =
    mems[0]->GetFirstSequenceNumber();
  Log(options_.info_log, "Level-0 flush table #%llu: started",
      (unsigned long long) meta.number);

  Version* base = versions_->current();
  base->Ref();          // it is likely that we do not need this reference
  Status s;
  {
    mutex_.Unlock();
    // We skip compression if universal compression is used and the size
    // threshold is set for compression.
    bool enable_compression = (options_.compaction_style
        != kCompactionStyleUniversal ||
        options_.compaction_options_universal.compression_size_percent < 0);
    s = BuildTable(dbname_, env_, options_, storage_options_,
                   table_cache_.get(), iter, &meta,
                   user_comparator(), newest_snapshot,
                   earliest_seqno_in_memtable, enable_compression);
    LogFlush(options_.info_log);
    mutex_.Lock();
  }
  base->Unref();

  Log(options_.info_log, "Level-0 flush table #%llu: %lld bytes %s",
      (unsigned long long) meta.number,
      (unsigned long long) meta.file_size,
      s.ToString().c_str());
  delete iter;

  // re-acquire the most current version
  base = versions_->current();

  // There could be multiple threads writing to their own level-0 files.
  // The pending_outputs cannot be cleared here, otherwise this newly
  // created file might not be considered as a live file by another
  // compaction thread that is concurrently deleting obsolete files.
  // The pending_outputs can be cleared only after the new version is
  // committed so that other threads can recognize this file as a
  // valid one.
  // pending_outputs_.erase(meta.number);

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  int level = 0;
  if (s.ok() && meta.file_size > 0) {
    const Slice min_user_key = meta.smallest.user_key();
    const Slice max_user_key = meta.largest.user_key();
    // if we have more than 1 background thread, then we cannot
    // insert files directly into higher levels because some other
    // threads could be concurrently producing compacted files for
    // that key range.
    if (base != nullptr && options_.max_background_compactions <= 1 &&
        options_.compaction_style == kCompactionStyleLevel) {
      level = base->PickLevelForMemTableOutput(min_user_key, max_user_key);
    }
    edit->AddFile(level, meta.number, meta.file_size,
                  meta.smallest, meta.largest,
                  meta.smallest_seqno, meta.largest_seqno);
  }

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros;
  stats.bytes_written = meta.file_size;
  stats_[level].Add(stats);
  return s;
}

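// Flush the immutable memtables picked by imm_.PickMemtablesToFlush into a
// new table file and install the result in the current version. Write-ahead
// logs made obsolete by the flush may be queued for deletion through
// deletion_state.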
Status DBImpl::FlushMemTableToOutputFile(bool* madeProgress,
                                         DeletionState& deletion_state) {
  mutex_.AssertHeld();
  assert(imm_.size() != 0);

  if (!imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log, "FlushMemTableToOutputFile already in progress");
    Status s = Status::IOError("FlushMemTableToOutputFile already in progress");
    return s;
  }

  // Save the contents of the earliest memtable as a new Table
  uint64_t file_number;
  std::vector<MemTable*> mems;
  imm_.PickMemtablesToFlush(&mems);
  if (mems.empty()) {
    Log(options_.info_log, "Nothing in memstore to flush");
    Status s = Status::IOError("Nothing in memstore to flush");
    return s;
  }

  // record the logfile_number_ before we release the mutex
  // The entries in mems are (implicitly) sorted in ascending order by their
  // creation time. We use the first memtable's `edit` to keep the meta info
  // for this flush.
  MemTable* m = mems[0];
  VersionEdit* edit = m->GetEdits();
  edit->SetPrevLogNumber(0);
  // SetLogNumber(log_num) indicates logs with number smaller than log_num
  // will no longer be picked up for recovery.
  edit->SetLogNumber(
      mems.back()->GetNextLogNumber()
  );

  std::vector<uint64_t> logs_to_delete;
  for (auto mem : mems) {
    logs_to_delete.push_back(mem->GetLogNumber());
  }

  // This will release and re-acquire the mutex.
  Status s = WriteLevel0Table(mems, edit, &file_number);

  if (s.ok() && shutting_down_.Acquire_Load()) {
    s = Status::IOError(
      "Database shutdown started during memtable compaction"
    );
  }

  // Replace immutable memtable with the generated Table
  s = imm_.InstallMemtableFlushResults(
    mems, versions_.get(), s, &mutex_, options_.info_log.get(),
    file_number, pending_outputs_);

  if (s.ok()) {
    if (madeProgress) {
      *madeProgress = 1;
    }

    MaybeScheduleLogDBDeployStats();

    if (options_.purge_log_after_memtable_flush &&
        !disable_delete_obsolete_files_) {
      // add to deletion state
      deletion_state.logdeletefiles.insert(deletion_state.logdeletefiles.end(),
                                           logs_to_delete.begin(),
                                           logs_to_delete.end());
    }
  }
  return s;
}

void DBImpl::CompactRange(const Slice* begin, const Slice* end,
                          bool reduce_level, int target_level) {
  int max_level_with_files = 1;
  {
    MutexLock l(&mutex_);
    Version* base = versions_->current();
    for (int level = 1; level < NumberLevels(); level++) {
      if (base->OverlapInLevel(level, begin, end)) {
        max_level_with_files = level;
      }
    }
  }
  TEST_FlushMemTable(); // TODO(sanjay): Skip if memtable does not overlap
  for (int level = 0; level < max_level_with_files; level++) {
    TEST_CompactRange(level, begin, end);
  }

  if (reduce_level) {
    ReFitLevel(max_level_with_files, target_level);
  }
  LogFlush(options_.info_log);
}

// return the same level if it cannot be moved
int DBImpl::FindMinimumEmptyLevelFitting(int level) {
  mutex_.AssertHeld();
  int minimum_level = level;
  for (int i = level - 1; i > 0; --i) {
    // stop if level i is not empty
    if (versions_->NumLevelFiles(i) > 0) break;

    // stop if level i is too small (cannot fit the level files)
    if (versions_->MaxBytesForLevel(i) < versions_->NumLevelBytes(level)) break;

    minimum_level = i;
  }
  return minimum_level;
}

void DBImpl::ReFitLevel(int level, int target_level) {
  assert(level < NumberLevels());

  MutexLock l(&mutex_);

  // only allow one thread refitting
  if (refitting_level_) {
    Log(options_.info_log, "ReFitLevel: another thread is refitting");
    return;
  }
  refitting_level_ = true;

  // wait for all background threads to stop
  bg_work_gate_closed_ = true;
  while (bg_compaction_scheduled_ > 0 || bg_flush_scheduled_) {
    Log(options_.info_log,
        "RefitLevel: waiting for background threads to stop: %d %d",
        bg_compaction_scheduled_, bg_flush_scheduled_);
    bg_cv_.Wait();
  }

  // move to a smaller level
  int to_level = target_level;
  if (target_level < 0) {
    to_level = FindMinimumEmptyLevelFitting(level);
  }

  assert(to_level <= level);

  if (to_level < level) {
    Log(options_.info_log, "Before refitting:\n%s",
        versions_->current()->DebugString().data());

    VersionEdit edit(NumberLevels());
    for (const auto& f : versions_->current()->files_[level]) {
      edit.DeleteFile(level, f->number);
      edit.AddFile(to_level, f->number, f->file_size, f->smallest, f->largest,
                   f->smallest_seqno, f->largest_seqno);
    }
    Log(options_.info_log, "Apply version edit:\n%s",
        edit.DebugString().data());

    auto status = versions_->LogAndApply(&edit, &mutex_);

    Log(options_.info_log, "LogAndApply: %s\n", status.ToString().data());

    if (status.ok()) {
      Log(options_.info_log, "After refitting:\n%s",
          versions_->current()->DebugString().data());
    }
  }

  refitting_level_ = false;
  bg_work_gate_closed_ = false;
}

int DBImpl::NumberLevels() {
  return options_.num_levels;
}

int DBImpl::MaxMemCompactionLevel() {
  return options_.max_mem_compaction_level;
}

int DBImpl::Level0StopWriteTrigger() {
  return options_.level0_stop_writes_trigger;
}

Status DBImpl::Flush(const FlushOptions& options) {
  Status status = FlushMemTable(options);
  return status;
}

SequenceNumber DBImpl::GetLatestSequenceNumber() const {
  return versions_->LastSequence();
}

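// Return a TransactionLogIterator over the WAL files that may contain
// updates with sequence numbers >= seq.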
Status DBImpl::GetUpdatesSince(SequenceNumber seq,
                               unique_ptr<TransactionLogIterator>* iter) {

  RecordTick(options_.statistics, GET_UPDATES_SINCE_CALLS);
  if (seq > versions_->LastSequence()) {
    return Status::IOError("Requested sequence not yet written in the db");
  }
  //  Get all sorted WAL files.
  //  Do a binary search over them to find the file containing the requested
  //  sequence number.

  std::unique_ptr<VectorLogPtr> wal_files(new VectorLogPtr);
  Status s = GetSortedWalFiles(*wal_files);
  if (!s.ok()) {
    return s;
  }

  s = RetainProbableWalFiles(*wal_files, seq);
  if (!s.ok()) {
    return s;
  }
  iter->reset(
    new TransactionLogIteratorImpl(options_.wal_dir,
                                   &options_,
                                   storage_options_,
                                   seq,
                                   std::move(wal_files),
                                   this));
  return (*iter)->status();
}

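// Trim all_logs (sorted by starting sequence number) down to the suffix of
// files that may contain sequence numbers >= target. Binary search is used
// so that the files themselves do not have to be opened.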
Status DBImpl::RetainProbableWalFiles(VectorLogPtr& all_logs,
                                      const SequenceNumber target) {
  long start = 0; // signed to avoid overflow when target is < first file.
  long end = static_cast<long>(all_logs.size()) - 1;
  // Binary search to avoid opening all files.
  while (end >= start) {
    long mid = start + (end - start) / 2;  // Avoid overflow.
    SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence();
    if (current_seq_num == target) {
      end = mid;
      break;
    } else if (current_seq_num < target) {
      start = mid + 1;
    } else {
      end = mid - 1;
    }
  }
  size_t start_index = std::max(0l, end); // end could be negative.
  // The last wal file is always included
  all_logs.erase(all_logs.begin(), all_logs.begin() + start_index);
  return Status::OK();
}

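// Returns true if the WAL file (looked up in the live or archive directory
// according to 'type') exists and has size zero.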
bool DBImpl::CheckWalFileExistsAndEmpty(const WalFileType type,
                                        const uint64_t number) {
  const std::string fname = (type == kAliveLogFile) ?
    LogFileName(options_.wal_dir, number) :
    ArchivedLogFileName(options_.wal_dir, number);
  uint64_t file_size;
  Status s = env_->GetFileSize(fname, &file_size);
  return (s.ok() && (file_size == 0));
}

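// Read the first WriteBatch recorded in the given WAL file. For an alive log
// file the archive directory is consulted as a fallback, since the file may
// have been moved there in the meantime.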
Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number,
                               WriteBatch* const result) {

  if (type == kAliveLogFile) {
    std::string fname = LogFileName(options_.wal_dir, number);
    Status status = ReadFirstLine(fname, result);
    if (!status.ok()) {
      //  check if the file got moved to archive.
      std::string archived_file =
        ArchivedLogFileName(options_.wal_dir, number);
      Status s = ReadFirstLine(archived_file, result);
      if (!s.ok()) {
        return Status::IOError("Log File has been deleted: " + archived_file);
      }
    }
    return Status::OK();
  } else if (type == kArchivedLogFile) {
    std::string fname = ArchivedLogFileName(options_.wal_dir, number);
    Status status = ReadFirstLine(fname, result);
    return status;
  }
  return Status::NotSupported("File Type Not Known: " +
                              std::to_string(static_cast<int>(type)));
}

Status DBImpl::ReadFirstLine(const std::string& fname,
                             WriteBatch* const batch) {
  struct LogReporter : public log::Reader::Reporter {
    Env* env;
    Logger* info_log;
    const char* fname;
    Status* status;  // nullptr if options_.paranoid_checks==false
    virtual void Corruption(size_t bytes, const Status& s) {
      Log(info_log, "%s%s: dropping %d bytes; %s",
          (this->status == nullptr ? "(ignoring error) " : ""),
          fname, static_cast<int>(bytes), s.ToString().c_str());
      if (this->status != nullptr && this->status->ok()) *this->status = s;
    }
  };

  unique_ptr<SequentialFile> file;
  Status status = env_->NewSequentialFile(fname, &file, storage_options_);

  if (!status.ok()) {
    return status;
  }

  LogReporter reporter;
  reporter.env = env_;
  reporter.info_log = options_.info_log.get();
  reporter.fname = fname.c_str();
  reporter.status = (options_.paranoid_checks ? &status : nullptr);
  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                     0/*initial_offset*/);
  std::string scratch;
  Slice record;

  if (reader.ReadRecord(&record, &scratch) && status.ok()) {
    if (record.size() < 12) {
      reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
      return Status::IOError("Corruption noted");
      //  TODO: read records till the first non-corrupt entry?
    }
    WriteBatchInternal::SetContents(batch, record);
    return Status::OK();
  }
  return Status::IOError("Error reading from file " + fname);
}

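// Comparator that orders LogFile handles using the LogFileImpl comparison
// operator; used below to keep the collected WAL files sorted.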
struct CompareLogByPointer {
  bool operator() (const unique_ptr<LogFile>& a,
                   const unique_ptr<LogFile>& b) {
    LogFileImpl* a_impl = dynamic_cast<LogFileImpl*>(a.get());
    LogFileImpl* b_impl = dynamic_cast<LogFileImpl*>(b.get());
    return *a_impl < *b_impl;
  }
};

Status DBImpl::AppendSortedWalsOfType(const std::string& path,
    VectorLogPtr& log_files, WalFileType log_type) {
  std::vector<std::string> all_files;
  const Status status = env_->GetChildren(path, &all_files);
  if (!status.ok()) {
    return status;
  }
  log_files.reserve(log_files.size() + all_files.size());
  VectorLogPtr::iterator pos_start;
  if (!log_files.empty()) {
    pos_start = log_files.end() - 1;
  } else {
    pos_start = log_files.begin();
  }
  for (const auto& f : all_files) {
    uint64_t number;
    FileType type;
    if (ParseFileName(f, &number, &type) && type == kLogFile) {

      WriteBatch batch;
      Status s = ReadFirstRecord(log_type, number, &batch);
      if (!s.ok()) {
        if (CheckWalFileExistsAndEmpty(log_type, number)) {
          continue;
        }
        return s;
      }

      uint64_t size_bytes;
      s = env_->GetFileSize(LogFileName(path, number), &size_bytes);
      if (!s.ok()) {
        return s;
      }

      log_files.push_back(std::move(unique_ptr<LogFile>(new LogFileImpl(
        number, log_type, WriteBatchInternal::Sequence(&batch), size_bytes))));
    }
  }
  CompareLogByPointer compare_log_files;
  std::sort(pos_start, log_files.end(), compare_log_files);
  return status;
}

void DBImpl::TEST_CompactRange(int level, const Slice* begin,
                               const Slice* end) {
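  // Test helper: synchronously compact the given key range at `level`.
  // Blocks until the manual compaction it schedules has finished.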
  assert(level >= 0);

  InternalKey begin_storage, end_storage;

  ManualCompaction manual;
  manual.level = level;
  manual.done = false;
  manual.in_progress = false;
  // For universal compaction, we enforce every manual compaction to compact
  // all files.
  if (begin == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
    manual.begin = nullptr;
  } else {
    begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
    manual.begin = &begin_storage;
  }
  if (end == nullptr ||
      options_.compaction_style == kCompactionStyleUniversal) {
    manual.end = nullptr;
  } else {
    end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));
    manual.end = &end_storage;
  }

  MutexLock l(&mutex_);

  // When a manual compaction arrives, temporarily throttle down
  // the number of background compaction threads to 1. This is
  // needed to ensure that this manual compaction can compact
  // any range of keys/files. We artificially increase
  // bg_compaction_scheduled_ by a large number, which causes
  // the system to have a single background thread. Now,
  // this manual compaction can progress without stomping
  // on any other concurrent compactions.
  const int LargeNumber = 10000000;
  const int newvalue = options_.max_background_compactions - 1;
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber) {
    Log(options_.info_log,
        "Manual compaction request waiting for background threads to fall"
        " below 1");
    bg_cv_.Wait();
  }
  Log(options_.info_log, "Manual compaction starting");

  while (!manual.done) {
    while (manual_compaction_ != nullptr) {
      bg_cv_.Wait();
    }
    manual_compaction_ = &manual;
    if (bg_compaction_scheduled_ == LargeNumber) {
      bg_compaction_scheduled_ = newvalue;
    }
    MaybeScheduleFlushOrCompaction();
    while (manual_compaction_ == &manual) {
      bg_cv_.Wait();
    }
  }
  assert(!manual.in_progress);

  // Wait until there are no background threads scheduled.
  bg_compaction_scheduled_ += LargeNumber;
  while (bg_compaction_scheduled_ > LargeNumber + newvalue) {
    Log(options_.info_log, "Manual compaction resetting background threads");
    bg_cv_.Wait();
  }
  bg_compaction_scheduled_ = 0;
}

Status DBImpl::FlushMemTable(const FlushOptions& options) {
  // nullptr batch means just wait for earlier writes to be done
  Status s = Write(WriteOptions(), nullptr);
  if (s.ok() && options.wait) {
    // Wait until the flush completes
    s = WaitForFlushMemTable();
  }
  return s;
}

Status DBImpl::WaitForFlushMemTable() {
  Status s;
  // Wait until the flush of all immutable memtables completes
  MutexLock l(&mutex_);
  while (imm_.size() > 0 && bg_error_.ok()) {
    bg_cv_.Wait();
  }
  if (imm_.size() != 0) {
    s = bg_error_;
  }
  return s;
}

Status DBImpl::TEST_FlushMemTable() {
  return FlushMemTable(FlushOptions());
}

Status DBImpl::TEST_WaitForFlushMemTable() {
  return WaitForFlushMemTable();
}

Status DBImpl::TEST_WaitForCompact() {
  // Wait until the compaction completes.

  // TODO: a bug here. This function does not necessarily wait only for a
  // compaction; it waits for any scheduled compaction OR flush to finish.

  MutexLock l(&mutex_);
  while ((bg_compaction_scheduled_ || bg_flush_scheduled_) &&
         bg_error_.ok()) {
    bg_cv_.Wait();
  }
  return bg_error_;
}

void DBImpl::MaybeScheduleFlushOrCompaction() {
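  // Schedule background work if needed: memtable flushes go to the HIGH
  // priority thread pool (when max_background_flushes > 0) and compactions
  // go to the LOW priority pool, each capped by its respective limit.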
  mutex_.AssertHeld();
  if (bg_work_gate_closed_) {
    // gate closed for background work
  } else if (shutting_down_.Acquire_Load()) {
    // DB is being deleted; no more background compactions
  } else {
    bool is_flush_pending =
      imm_.IsFlushPending(options_.min_write_buffer_number_to_merge);
    if (is_flush_pending &&
        (bg_flush_scheduled_ < options_.max_background_flushes)) {
      // memtable flush needed
      bg_flush_scheduled_++;
      env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::HIGH);
    }

    if ((manual_compaction_ ||
         versions_->NeedsCompaction() ||
         (is_flush_pending && (options_.max_background_flushes <= 0))) &&
        bg_compaction_scheduled_ < options_.max_background_compactions) {
      // compaction needed, or memtable flush needed but HIGH pool not enabled.
      bg_compaction_scheduled_++;
      env_->Schedule(&DBImpl::BGWorkCompaction, this, Env::Priority::LOW);
    }
  }
}

void DBImpl::BGWorkFlush(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCallFlush();
}

void DBImpl::BGWorkCompaction(void* db) {
  reinterpret_cast<DBImpl*>(db)->BackgroundCallCompaction();
}

Status DBImpl::BackgroundFlush(bool* madeProgress,
                               DeletionState& deletion_state) {
  Status stat;
  while (stat.ok() &&
         imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log,
        "BackgroundCallFlush doing FlushMemTableToOutputFile, "
        "flush slots available %d",
        options_.max_background_flushes - bg_flush_scheduled_);
    stat = FlushMemTableToOutputFile(madeProgress, deletion_state);
  }
  return stat;
}

void DBImpl::BackgroundCallFlush() {
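  // Entry point for the HIGH priority pool: flush immutable memtables,
  // back off for a second on error, and purge any obsolete files.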
  bool madeProgress = false;
  DeletionState deletion_state;
  assert(bg_flush_scheduled_);
  MutexLock l(&mutex_);

  if (!shutting_down_.Acquire_Load()) {
    Status s = BackgroundFlush(&madeProgress, deletion_state);
    if (!s.ok()) {
      // Wait a little bit before retrying the background flush in
      // case this is an environmental problem and we do not want to
      // chew up resources for failed flushes for the duration of
      // the problem.
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      Log(options_.info_log, "Waiting after background flush error: %s",
          s.ToString().c_str());
      mutex_.Unlock();
      LogFlush(options_.info_log);
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
      // clean up all the files we might have created.
      FindObsoleteFiles(deletion_state, true);
    }
  }

  // delete unnecessary files if any; this is done outside the mutex
  FindObsoleteFiles(deletion_state, false);
  if (deletion_state.HaveSomethingToDelete()) {
    mutex_.Unlock();
    PurgeObsoleteFiles(deletion_state);
    mutex_.Lock();
  }

  bg_flush_scheduled_--;
  if (madeProgress) {
    MaybeScheduleFlushOrCompaction();
  }
  bg_cv_.SignalAll();
}

void DBImpl::TEST_PurgeObsoleteteWAL() {
  PurgeObsoleteWALFiles();
}

void DBImpl::BackgroundCallCompaction() {
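  // Entry point for the LOW priority pool: run one round of compaction
  // (or a pending memtable flush), back off for a second on error, and
  // purge any obsolete files.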
  bool madeProgress = false;
  DeletionState deletion_state;

  MaybeDumpStats();

  MutexLock l(&mutex_);
  // Log(options_.info_log, "XXX BG Thread %llx process new work item", pthread_self());
  assert(bg_compaction_scheduled_);
  if (!shutting_down_.Acquire_Load()) {
    Status s = BackgroundCompaction(&madeProgress, deletion_state);
    if (!s.ok()) {
      // Wait a little bit before retrying background compaction in
      // case this is an environmental problem and we do not want to
      // chew up resources for failed compactions for the duration of
      // the problem.
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      Log(options_.info_log, "Waiting after background compaction error: %s",
          s.ToString().c_str());
      mutex_.Unlock();
      LogFlush(options_.info_log);
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
      // clean up all the files we might have created
      FindObsoleteFiles(deletion_state, true);
    }
  }

  // delete unnecessary files if any; this is done outside the mutex
  FindObsoleteFiles(deletion_state, false);
  if (deletion_state.HaveSomethingToDelete()) {
    mutex_.Unlock();
    PurgeObsoleteFiles(deletion_state);
    mutex_.Lock();
  }

  bg_compaction_scheduled_--;

  MaybeScheduleLogDBDeployStats();

  // Previous compaction may have produced too many files in a level,
  // so reschedule another compaction if we made progress in the
  // last compaction.
  if (madeProgress) {
    MaybeScheduleFlushOrCompaction();
  }
  bg_cv_.SignalAll();
}

Status DBImpl::BackgroundCompaction(bool* madeProgress,
                                    DeletionState& deletion_state) {
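  // Run one unit of background compaction work: flush any pending immutable
  // memtables first, then either service an outstanding manual compaction or
  // let the version set pick one. A trivial move just reassigns the file to
  // the next level without rewriting it.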
  *madeProgress = false;
  mutex_.AssertHeld();

  // TODO: remove memtable flush from formal compaction
  while (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
    Log(options_.info_log,
        "BackgroundCompaction doing FlushMemTableToOutputFile, compaction slots "
        "available %d",
        options_.max_background_compactions - bg_compaction_scheduled_);
    Status stat = FlushMemTableToOutputFile(madeProgress, deletion_state);
    if (!stat.ok()) {
      return stat;
    }
  }

  unique_ptr<Compaction> c;
  bool is_manual = (manual_compaction_ != nullptr) &&
                   (manual_compaction_->in_progress == false);
  InternalKey manual_end;
  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    assert(!m->in_progress);
    m->in_progress = true; // another thread cannot pick up the same work
    c.reset(versions_->CompactRange(m->level, m->begin, m->end));
    if (c) {
      manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
    } else {
      m->done = true;
    }
    Log(options_.info_log,
        "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
        m->level,
        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
        (m->end ? m->end->DebugString().c_str() : "(end)"),
        (m->done ? "(end)" : manual_end.DebugString().c_str()));
  } else if (!options_.disable_auto_compactions) {
    c.reset(versions_->PickCompaction());
  }

  Status status;
  if (!c) {
    // Nothing to do
    Log(options_.info_log, "Compaction nothing to do");
  } else if (!is_manual && c->IsTrivialMove()) {
    // Move file to next level
    assert(c->num_input_files(0) == 1);
    FileMetaData* f = c->input(0, 0);
    c->edit()->DeleteFile(c->level(), f->number);
    c->edit()->AddFile(c->level() + 1, f->number, f->file_size,
                       f->smallest, f->largest,
                       f->smallest_seqno, f->largest_seqno);
    status = versions_->LogAndApply(c->edit(), &mutex_);
    VersionSet::LevelSummaryStorage tmp;
    Log(options_.info_log, "Moved #%lld to level-%d %lld bytes %s: %s\n",
        static_cast<unsigned long long>(f->number),
        c->level() + 1,
        static_cast<unsigned long long>(f->file_size),
        status.ToString().c_str(),
        versions_->LevelSummary(&tmp));
    versions_->ReleaseCompactionFiles(c.get(), status);
    *madeProgress = true;
  } else {
    MaybeScheduleFlushOrCompaction(); // do more compaction work in parallel.
    CompactionState* compact = new CompactionState(c.get());
    status = DoCompactionWork(compact, deletion_state);
    CleanupCompaction(compact, status);
    versions_->ReleaseCompactionFiles(c.get(), status);
    c->ReleaseInputs();
    versions_->GetAndFreeObsoleteFiles(&deletion_state.sstdeletefiles);
    *madeProgress = true;
  }
  c.reset();

  if (status.ok()) {
    // Done
  } else if (shutting_down_.Acquire_Load()) {
    // Ignore compaction errors found during shutdown
  } else {
    Log(options_.info_log,
        "Compaction error: %s", status.ToString().c_str());
    if (options_.paranoid_checks && bg_error_.ok()) {
      bg_error_ = status;
    }
  }

  if (is_manual) {
    ManualCompaction* m = manual_compaction_;
    if (!status.ok()) {
      m->done = true;
    }
    // For universal compaction:
    //   Universal compaction always happens at level 0, so one compaction
    //   picks up all overlapping files. No files are filtered out due to
    //   the size limit and left for a successive compaction, so we can
    //   safely conclude the current compaction here.
    //
    //   Also note that, if we don't stop here, the current compaction
    //   writes a new file back to level 0, which will be used in a
    //   successive compaction. Hence the manual compaction would never
    //   finish.
    if (options_.compaction_style == kCompactionStyleUniversal) {
      m->done = true;
    }
    if (!m->done) {
      // We only compacted part of the requested range.  Update *m
      // to the range that is left to be compacted.
      m->tmp_storage = manual_end;
      m->begin = &m->tmp_storage;
    }
    m->in_progress = false; // not being processed anymore
    manual_compaction_ = nullptr;
  }
  return status;
}

void DBImpl::CleanupCompaction(CompactionState* compact, Status status) {
  mutex_.AssertHeld();
  if (compact->builder != nullptr) {
    // May happen if we get a shutdown call in the middle of compaction
    compact->builder->Abandon();
    compact->builder.reset();
  } else {
    assert(compact->outfile == nullptr);
  }
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    pending_outputs_.erase(out.number);

    // If this file was inserted into the table cache then remove
    // it here because this compaction was not committed.
    if (!status.ok()) {
      table_cache_->Evict(out.number);
    }
  }
  delete compact;
}

// Allocate the file numbers for the output files. We allocate as
// many output file numbers as there are files in level+1.
// Insert them into pending_outputs so that they do not get deleted.
void DBImpl::AllocateCompactionOutputFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
  int filesNeeded = compact->compaction->num_input_files(1);
  for (int i = 0; i < filesNeeded; i++) {
    uint64_t file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    compact->allocated_file_numbers.push_back(file_number);
  }
}

// Frees up unused file numbers.
void DBImpl::ReleaseCompactionUnusedFileNumbers(CompactionState* compact) {
  mutex_.AssertHeld();
  for (const auto file_number : compact->allocated_file_numbers) {
    pending_outputs_.erase(file_number);
    // Log(options_.info_log, "XXX releasing unused file num %d", file_number);
  }
}

Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
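  // Create the next compaction output file and its TableBuilder, preferring
  // a pre-allocated file number so the DB mutex does not have to be taken.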
  assert(compact != nullptr);
  assert(compact->builder == nullptr);
  uint64_t file_number;
  // If we have not yet exhausted the pre-allocated file numbers,
  // then use the one from the front. Otherwise, we have to acquire
  // the heavyweight lock and allocate a new file number.
  if (!compact->allocated_file_numbers.empty()) {
    file_number = compact->allocated_file_numbers.front();
    compact->allocated_file_numbers.pop_front();
  } else {
    mutex_.Lock();
    file_number = versions_->NewFileNumber();
    pending_outputs_.insert(file_number);
    mutex_.Unlock();
  }
  CompactionState::Output out;
  out.number = file_number;
  out.smallest.Clear();
  out.largest.Clear();
  out.smallest_seqno = out.largest_seqno = 0;
  compact->outputs.push_back(out);

  // Make the output file
  std::string fname = TableFileName(dbname_, file_number);
  Status s = env_->NewWritableFile(fname, &compact->outfile, storage_options_);

  if (s.ok()) {
    // Over-estimate slightly so we don't end up just barely crossing
    // the threshold.
    compact->outfile->SetPreallocationBlockSize(
      1.1 * versions_->MaxFileSizeForLevel(compact->compaction->output_level()));

    CompressionType compression_type = GetCompressionType(
        options_, compact->compaction->output_level(),
        compact->compaction->enable_compression());

    compact->builder.reset(
        GetTableBuilder(options_, compact->outfile.get(), compression_type));
  }
  LogFlush(options_.info_log);
  return s;
}

Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                          Iterator* input) {
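  // Finalize the current compaction output: finish the table builder, sync
  // and close the file, and (if it has any entries) verify that the new
  // table is readable through the table cache.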
  assert(compact != nullptr);
  assert(compact->outfile);
  assert(compact->builder != nullptr);

  const uint64_t output_number = compact->current_output()->number;
  assert(output_number != 0);

  // Check for iterator errors
  Status s = input->status();
  const uint64_t current_entries = compact->builder->NumEntries();
  if (s.ok()) {
    s = compact->builder->Finish();
  } else {
    compact->builder->Abandon();
  }
  const uint64_t current_bytes = compact->builder->FileSize();
  compact->current_output()->file_size = current_bytes;
  compact->total_bytes += current_bytes;
  compact->builder.reset();

  // Finish and check for file errors
  if (s.ok() && !options_.disableDataSync) {
    if (options_.use_fsync) {
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
      s = compact->outfile->Fsync();
    } else {
      StopWatch sw(env_, options_.statistics, COMPACTION_OUTFILE_SYNC_MICROS);
      s = compact->outfile->Sync();
    }
  }
  if (s.ok()) {
    s = compact->outfile->Close();
  }
  compact->outfile.reset();

  if (s.ok() && current_entries > 0) {
    // Verify that the table is usable
    Iterator* iter = table_cache_->NewIterator(ReadOptions(),
                                               storage_options_,
                                               output_number,
                                               current_bytes);
    s = iter->status();
    delete iter;
    if (s.ok()) {
      Log(options_.info_log,
          "Generated table #%llu: %lld keys, %lld bytes",
          (unsigned long long) output_number,
          (unsigned long long) current_entries,
          (unsigned long long) current_bytes);
    }
  }
  return s;
}


Status DBImpl::InstallCompactionResults(CompactionState* compact) {
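  // Apply the compaction's edit (input file deletions plus the new output
  // files) to the version set, after re-checking that the input files are
  // still part of the current version.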
  mutex_.AssertHeld();

  // paranoia: verify that the files that we started with
  // still exist in the current version and in the same original level.
  // This ensures that a concurrent compaction did not erroneously
  // pick the same files to compact.
  if (!versions_->VerifyCompactionFileConsistency(compact->compaction)) {
    Log(options_.info_log,  "Compaction %d@%d + %d@%d files aborted",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1);
    return Status::IOError("Compaction input files inconsistent");
  }

  Log(options_.info_log,  "Compacted %d@%d + %d@%d files => %lld bytes",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      static_cast<long long>(compact->total_bytes));

  // Add compaction outputs
  compact->compaction->AddInputDeletions(compact->compaction->edit());
  const int level = compact->compaction->level();
  for (size_t i = 0; i < compact->outputs.size(); i++) {
    const CompactionState::Output& out = compact->outputs[i];
    compact->compaction->edit()->AddFile(
        (options_.compaction_style == kCompactionStyleUniversal) ?
          level : level + 1,
        out.number, out.file_size, out.smallest, out.largest,
        out.smallest_seqno, out.largest_seqno);
  }
  return versions_->LogAndApply(compact->compaction->edit(), &mutex_);
}

//
// Given a sequence number, return the sequence number of the
// earliest snapshot that this sequence number is visible in.
// The snapshots themselves are arranged in ascending order of
// sequence numbers.
// Employ a sequential search because the total number of
// snapshots is typically small.
inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
  SequenceNumber in, std::vector<SequenceNumber>& snapshots,
  SequenceNumber* prev_snapshot) {
  SequenceNumber prev __attribute__((unused)) = 0;
  for (const auto cur : snapshots) {
    assert(prev <= cur);
    if (cur >= in) {
      *prev_snapshot = prev;
      return cur;
    }
    prev = cur; // assignment
    assert(prev);
  }
  Log(options_.info_log,
      "Looking for seqid %ld but maxseqid is %ld", in,
      snapshots[snapshots.size()-1]);
  assert(0);
  return 0;
}

Status DBImpl::DoCompactionWork(CompactionState* compact,
                                DeletionState& deletion_state) {
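  // Main compaction loop: merge the input files key by key, dropping
  // entries that are shadowed by newer versions or are obsolete deletion
  // markers (subject to the live snapshots), applying the compaction
  // filter and merge operator, and writing the survivors to new output
  // files at the target level.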
  assert(compact);
  int64_t imm_micros = 0;  // Micros spent doing imm_ compactions
  Log(options_.info_log,
      "Compacting %d@%d + %d@%d files, score %.2f slots available %d",
      compact->compaction->num_input_files(0),
      compact->compaction->level(),
      compact->compaction->num_input_files(1),
      compact->compaction->level() + 1,
      compact->compaction->score(),
      options_.max_background_compactions - bg_compaction_scheduled_);
  char scratch[256];
  compact->compaction->Summary(scratch, sizeof(scratch));
  Log(options_.info_log, "Compaction start summary: %s\n", scratch);

  assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
  assert(compact->builder == nullptr);
  assert(!compact->outfile);

  SequenceNumber visible_at_tip = 0;
  SequenceNumber earliest_snapshot;
  SequenceNumber latest_snapshot = 0;
  snapshots_.getAll(compact->existing_snapshots);
  if (compact->existing_snapshots.size() == 0) {
    // optimize for fast path if there are no snapshots
    visible_at_tip = versions_->LastSequence();
    earliest_snapshot = visible_at_tip;
  } else {
    latest_snapshot = compact->existing_snapshots.back();
    // Add the current seqno as the 'latest' virtual
    // snapshot to the end of this list.
    compact->existing_snapshots.push_back(versions_->LastSequence());
    earliest_snapshot = compact->existing_snapshots[0];
  }

  // Is this compaction producing files at the bottommost level?
  bool bottommost_level = compact->compaction->BottomMostLevel();

  // Allocate the output file numbers before we release the lock
  AllocateCompactionOutputFileNumbers(compact);

  // Release mutex while we're actually doing the compaction work
  mutex_.Unlock();

  const uint64_t start_micros = env_->NowMicros();
  unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
  input->SeekToFirst();
  Status status;
  ParsedInternalKey ikey;
  std::string current_user_key;
  bool has_current_user_key = false;
  SequenceNumber last_sequence_for_key __attribute__((unused)) =
    kMaxSequenceNumber;
  SequenceNumber visible_in_snapshot = kMaxSequenceNumber;
  std::string compaction_filter_value;
  std::vector<char> delete_key; // for compaction filter
  MergeHelper merge(user_comparator(), options_.merge_operator.get(),
                    options_.info_log.get(),
                    false /* internal key corruption is expected */);
  auto compaction_filter = options_.compaction_filter;
  std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr;
  if (!compaction_filter) {
    auto context = compact->GetFilterContext();
    compaction_filter_from_factory =
      options_.compaction_filter_factory->CreateCompactionFilter(context);
    compaction_filter = compaction_filter_from_factory.get();
  }

  for (; input->Valid() && !shutting_down_.Acquire_Load(); ) {
    // Prioritize immutable compaction work
    // TODO: remove memtable flush from normal compaction work
    if (imm_.imm_flush_needed.NoBarrier_Load() != nullptr) {
      const uint64_t imm_start = env_->NowMicros();
      LogFlush(options_.info_log);
      mutex_.Lock();
      if (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
        FlushMemTableToOutputFile(nullptr, deletion_state);
        bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
      }
      mutex_.Unlock();
      imm_micros += (env_->NowMicros() - imm_start);
    }

    Slice key = input->key();
    Slice value = input->value();

    if (compact->compaction->ShouldStopBefore(key) &&
        compact->builder != nullptr) {
      status = FinishCompactionOutputFile(compact, input.get());
      if (!status.ok()) {
        break;
      }
    }

    // Handle key/value, add to state, etc.
    bool drop = false;
    bool current_entry_is_merging = false;
    if (!ParseInternalKey(key, &ikey)) {
      // Do not hide error keys
      // TODO: error key stays in db forever? Figure out the intention/rationale
      // v10 error v8 : we cannot hide v8 even though it's pretty obvious.
      current_user_key.clear();
      has_current_user_key = false;
      last_sequence_for_key = kMaxSequenceNumber;
      visible_in_snapshot = kMaxSequenceNumber;
    } else {
      if (!has_current_user_key ||
          user_comparator()->Compare(ikey.user_key,
                                     Slice(current_user_key)) != 0) {
        // First occurrence of this user key
        current_user_key.assign(ikey.user_key.data(), ikey.user_key.size());
        has_current_user_key = true;
        last_sequence_for_key = kMaxSequenceNumber;
        visible_in_snapshot = kMaxSequenceNumber;

        // apply the compaction filter to the first occurrence of the user key
        if (compaction_filter &&
            ikey.type == kTypeValue &&
            (visible_at_tip || ikey.sequence > latest_snapshot)) {
          // If the user has specified a compaction filter and the sequence
          // number is greater than any external snapshot, then invoke the
          // filter.
          // If the return value of the compaction filter is true, replace
          // the entry with a delete marker.
          bool value_changed = false;
          compaction_filter_value.clear();
          bool to_delete =
            compaction_filter->Filter(compact->compaction->level(),
                                      ikey.user_key, value,
                                      &compaction_filter_value,
                                      &value_changed);
          if (to_delete) {
            // make a copy of the original key
            delete_key.assign(key.data(), key.data() + key.size());
            // convert it to a delete
            UpdateInternalKey(&delete_key[0], delete_key.size(),
                              ikey.sequence, kTypeDeletion);
            // anchor the key again
            key = Slice(&delete_key[0], delete_key.size());
            // needed because ikey is backed by key
            ParseInternalKey(key, &ikey);
            // no value associated with delete
            value.clear();
            RecordTick(options_.statistics, COMPACTION_KEY_DROP_USER);
          } else if (value_changed) {
            value = compaction_filter_value;
          }
        }

      }

      // If there are no snapshots, then this kv affects visibility at tip.
      // Otherwise, search through all existing snapshots to find
      // the earliest snapshot that is affected by this kv.
      SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot
      SequenceNumber visible = visible_at_tip ?
        visible_at_tip :
        findEarliestVisibleSnapshot(ikey.sequence,
                                    compact->existing_snapshots,
                                    &prev_snapshot);

      if (visible_in_snapshot == visible) {
        // If the earliest snapshot in which this key is visible is the
        // same as the visibility of a previous instance of the same key,
        // then this kv is not visible in any snapshot.
        // Hidden by a newer entry for same user key
        // TODO: why not > ?
        assert(last_sequence_for_key >= ikey.sequence);
        drop = true;    // (A)
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_NEWER_ENTRY);
      } else if (ikey.type == kTypeDeletion &&
                 ikey.sequence <= earliest_snapshot &&
                 compact->compaction->IsBaseLevelForKey(ikey.user_key)) {
        // For this user key:
        // (1) there is no data in higher levels
        // (2) data in lower levels will have larger sequence numbers
        // (3) data in layers that are being compacted here and have
        //     smaller sequence numbers will be dropped in the next
        //     few iterations of this loop (by rule (A) above).
        // Therefore this deletion marker is obsolete and can be dropped.
        drop = true;
        RecordTick(options_.statistics, COMPACTION_KEY_DROP_OBSOLETE);
      } else if (ikey.type == kTypeMerge) {
        // We know the merge type entry is not hidden, otherwise we would
        // have hit (A)
        // We encapsulate the merge related state machine in a different
        // object to minimize change to the existing flow. Turns out this
        // logic could also be nicely re-used for memtable flush purge
        // optimization in BuildTable.
        merge.MergeUntil(input.get(), prev_snapshot, bottommost_level,
                         options_.statistics);
        current_entry_is_merging = true;
        if (merge.IsSuccess()) {
          // Successfully found Put/Delete/(end-of-key-range) while merging
          // Get the merge result
          key = merge.key();
          ParseInternalKey(key, &ikey);
          value = merge.value();
        } else {
          // Did not find a Put/Delete/(end-of-key-range) while merging
          // We now have some stack of merge operands to write out.
          // NOTE: key,value, and ikey are now referring to old entries.
          //       These will be correctly set below.
          assert(!merge.keys().empty());
          assert(merge.keys().size() == merge.values().size());

          // Hack to make sure last_sequence_for_key is correct
          ParseInternalKey(merge.keys().front(), &ikey);
        }
      }

      last_sequence_for_key = ikey.sequence;
      visible_in_snapshot = visible;
    }
#if 0
    Log(options_.info_log,
        "  Compact: %s, seq %d, type: %d %d, drop: %d, is_base: %d, "
        "%d smallest_snapshot: %d level: %d bottommost %d",
        ikey.user_key.ToString().c_str(),
        (int)ikey.sequence, ikey.type, kTypeValue, drop,
        compact->compaction->IsBaseLevelForKey(ikey.user_key),
        (int)last_sequence_for_key, (int)earliest_snapshot,
        compact->compaction->level(), bottommost_level);
#endif

    if (!drop) {
      // We may write a single key (e.g.: for Put/Delete or successful merge).
      // Or we may instead have to write a sequence/list of keys.
      // We have to write a sequence iff we have an unsuccessful merge
      bool has_merge_list = current_entry_is_merging && !merge.IsSuccess();
      const std::deque<std::string>* keys = nullptr;
      const std::deque<std::string>* values = nullptr;
      std::deque<std::string>::const_reverse_iterator key_iter;
      std::deque<std::string>::const_reverse_iterator value_iter;
      if (has_merge_list) {
        keys = &merge.keys();
        values = &merge.values();
        key_iter = keys->rbegin();    // The back (*rbegin()) is the first key
        value_iter = values->rbegin();

        key = Slice(*key_iter);
        value = Slice(*value_iter);
      }

      // If we have a list of keys to write, traverse the list.
      // If we have a single key to write, simply write that key.
      while (true) {
        // Invariant: key,value,ikey will always be the next entry to write
        char* kptr = (char*)key.data();
        std::string kstr;

        // Zeroing out the sequence number leads to better compression.
        // If this is the bottommost level (no files in lower levels)
        // and the earliest snapshot is larger than this seqno
        // then we can squash the seqno to zero.
        if (options_.compaction_style == kCompactionStyleLevel &&
            bottommost_level && ikey.sequence < earliest_snapshot &&
            ikey.type != kTypeMerge) {
          assert(ikey.type != kTypeDeletion);
          // make a copy because updating in place would cause problems
          // with the priority queue that is managing the input key iterator
          kstr.assign(key.data(), key.size());
          kptr = (char *)kstr.c_str();
          UpdateInternalKey(kptr, key.size(), (uint64_t)0, ikey.type);
        }

        Slice newkey(kptr, key.size());
        assert((key.clear(), 1)); // we do not need 'key' anymore

        // Open output file if necessary
        if (compact->builder == nullptr) {
          status = OpenCompactionOutputFile(compact);
          if (!status.ok()) {
            break;
          }
        }

        SequenceNumber seqno = GetInternalKeySeqno(newkey);
        if (compact->builder->NumEntries() == 0) {
          compact->current_output()->smallest.DecodeFrom(newkey);
          compact->current_output()->smallest_seqno = seqno;
        } else {
          compact->current_output()->smallest_seqno =
            std::min(compact->current_output()->smallest_seqno, seqno);
        }
        compact->current_output()->largest.DecodeFrom(newkey);
        compact->builder->Add(newkey, value);
        compact->current_output()->largest_seqno =
          std::max(compact->current_output()->largest_seqno, seqno);

        // Close output file if it is big enough
        if (compact->builder->FileSize() >=
            compact->compaction->MaxOutputFileSize()) {
          status = FinishCompactionOutputFile(compact, input.get());
          if (!status.ok()) {
            break;
          }
        }

        // If we have a list of entries, move to next element
        // If we only had one entry, then break the loop.
        if (has_merge_list) {
          ++key_iter;
          ++value_iter;

          // If at end of list
          if (key_iter == keys->rend() || value_iter == values->rend()) {
            // Sanity Check: if one ends, then both end
            assert(key_iter == keys->rend() && value_iter == values->rend());
            break;
          }

          // Otherwise not at end of list. Update key, value, and ikey.
          key = Slice(*key_iter);
          value = Slice(*value_iter);
          ParseInternalKey(key, &ikey);

        } else {
          // Only had one item to begin with (Put/Delete)
          break;
        }
      }
    }

    // MergeUntil has moved input to the next entry
    if (!current_entry_is_merging) {
      input->Next();
    }
  }

  if (status.ok() && shutting_down_.Acquire_Load()) {
    status = Status::IOError("Database shutdown started during compaction");
  }
  if (status.ok() && compact->builder != nullptr) {
    status = FinishCompactionOutputFile(compact, input.get());
  }
  if (status.ok()) {
    status = input->status();
  }
  input.reset();

  CompactionStats stats;
  stats.micros = env_->NowMicros() - start_micros - imm_micros;
  if (options_.statistics) {
    options_.statistics->measureTime(COMPACTION_TIME, stats.micros);
  }
  stats.files_in_leveln = compact->compaction->num_input_files(0);
  stats.files_in_levelnp1 = compact->compaction->num_input_files(1);

  int num_output_files = compact->outputs.size();
  if (compact->builder != nullptr) {
    // An error occurred so ignore the last output.
    assert(num_output_files > 0);
    --num_output_files;
  }
  stats.files_out_levelnp1 = num_output_files;

  for (int i = 0; i < compact->compaction->num_input_files(0); i++)
    stats.bytes_readn += compact->compaction->input(0, i)->file_size;

  for (int i = 0; i < compact->compaction->num_input_files(1); i++)
    stats.bytes_readnp1 += compact->compaction->input(1, i)->file_size;

  for (int i = 0; i < num_output_files; i++) {
    stats.bytes_written += compact->outputs[i].file_size;
  }

  LogFlush(options_.info_log);
  mutex_.Lock();
  stats_[compact->compaction->output_level()].Add(stats);

  // if there were any unused file numbers (mostly in case of a
  // compaction error), free up the entries from pending_outputs_
  ReleaseCompactionUnusedFileNumbers(compact);

  if (status.ok()) {
    status = InstallCompactionResults(compact);
  }
  VersionSet::LevelSummaryStorage tmp;
  Log(options_.info_log,
      "compacted to: %s, %.1f MB/sec, level %d, files in(%d, %d) out(%d) "
      "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) "
      "write-amplify(%.1f) %s\n",
      versions_->LevelSummary(&tmp),
      (stats.bytes_readn + stats.bytes_readnp1 + stats.bytes_written) /
          (double) stats.micros,
      compact->compaction->output_level(),
      stats.files_in_leveln, stats.files_in_levelnp1, stats.files_out_levelnp1,
      stats.bytes_readn / 1048576.0,
      stats.bytes_readnp1 / 1048576.0,
      stats.bytes_written / 1048576.0,
      (stats.bytes_written + stats.bytes_readnp1 + stats.bytes_readn) /
          (double) stats.bytes_readn,
      stats.bytes_written / (double) stats.bytes_readn,
      status.ToString().c_str());

  return status;
}

namespace {
struct IterState {
  port::Mutex* mu;
  Version* version;
  std::vector<MemTable*> mem; // includes both mem_ and imm_
};

static void CleanupIteratorState(void* arg1, void* arg2) {
  IterState* state = reinterpret_cast<IterState*>(arg1);
  state->mu->Lock();
  for (unsigned int i = 0; i < state->mem.size(); i++) {
    state->mem[i]->Unref();
  }
  state->version->Unref();
  state->mu->Unlock();
  delete state;
}
}  // namespace

Iterator* DBImpl::NewInternalIterator(const ReadOptions& options,
                                      SequenceNumber* latest_snapshot) {
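  // Build a merged iterator over the mutable memtable, all immutable
  // memtables, and every level of the current version. The IterState
  // cleanup callback drops the references taken here.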
  IterState* cleanup = new IterState;
  mutex_.Lock();
  *latest_snapshot = versions_->LastSequence();

  // Collect together all needed child iterators for mem
  std::vector<Iterator*> list;
  mem_->Ref();
  list.push_back(mem_->NewIterator(options));

  cleanup->mem.push_back(mem_);

  // Collect together all needed child iterators for imm_
  std::vector<MemTable*> immutables;
  imm_.GetMemTables(&immutables);
  for (unsigned int i = 0; i < immutables.size(); i++) {
    MemTable* m = immutables[i];
    m->Ref();
    list.push_back(m->NewIterator(options));
    cleanup->mem.push_back(m);
  }

  // Collect iterators for files in L0 - Ln
  versions_->current()->AddIterators(options, storage_options_, &list);
  Iterator* internal_iter =
      NewMergingIterator(&internal_comparator_, &list[0], list.size());
  versions_->current()->Ref();

  cleanup->mu = &mutex_;
  cleanup->version = versions_->current();
  internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);

  mutex_.Unlock();
  LogFlush(options_.info_log);
  return internal_iter;
}

Iterator* DBImpl::TEST_NewInternalIterator() {
  SequenceNumber ignored;
  return NewInternalIterator(ReadOptions(), &ignored);
}

int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes() {
  MutexLock l(&mutex_);
  return versions_->MaxNextLevelOverlappingBytes();
}

Status DBImpl::Get(const ReadOptions& options,
                   const Slice& key,
                   std::string* value) {
  return GetImpl(options, key, value);
}

Status DBImpl::GetImpl(const ReadOptions& options,
                       const Slice& key,
                       std::string* value,
                       bool* value_found) {
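  // Look up `key` at the chosen snapshot: first the mutable memtable, then
  // the immutable memtables, and finally the current version's files.
  // Any merge operands encountered are accumulated in merge_operands.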
  Status s;

  StopWatch sw(env_, options_.statistics, DB_GET);
  SequenceNumber snapshot;
  mutex_.Lock();
  if (options.snapshot != nullptr) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTableList imm = imm_;
  Version* current = versions_->current();
  mem->Ref();
  imm.RefAll();
  current->Ref();

  // Unlock while reading from files and memtables
  mutex_.Unlock();
  bool have_stat_update = false;
  Version::GetStats stats;

  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
  // merge_operands will contain the sequence of merges in the latter case.
  LookupKey lkey(key, snapshot);
  if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
    // Done
  } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
    // Done
  } else {
    current->Get(options, lkey, value, &s, &merge_operands, &stats,
                 options_, value_found);
    have_stat_update = true;
  }
  mutex_.Lock();

  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
    MaybeScheduleFlushOrCompaction();
  }
  mem->Unref();
  imm.UnrefAll();
  current->Unref();
  mutex_.Unlock();

  LogFlush(options_.info_log);
  // Note, tickers are atomic now - no lock protection needed any more.
  RecordTick(options_.statistics, NUMBER_KEYS_READ);
  RecordTick(options_.statistics, BYTES_READ, value->size());
  return s;
}

std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
                                     const std::vector<Slice>& keys,
                                     std::vector<std::string>* values) {
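  // Batched counterpart of GetImpl: take the snapshot and references once,
  // then run the memtable / immutable memtable / current version lookup for
  // every key, returning one Status per key.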

  StopWatch sw(env_, options_.statistics, DB_MULTIGET);
  SequenceNumber snapshot;
  mutex_.Lock();
  if (options.snapshot != nullptr) {
    snapshot = reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_;
  } else {
    snapshot = versions_->LastSequence();
  }

  MemTable* mem = mem_;
  MemTableList imm = imm_;
  Version* current = versions_->current();
  mem->Ref();
  imm.RefAll();
  current->Ref();

  // Unlock while reading from files and memtables

  mutex_.Unlock();
  bool have_stat_update = false;
  Version::GetStats stats;

  // Prepare to store a list of merge operations if merge occurs.
  std::deque<std::string> merge_operands;

  // Note: this always resizes the values array
  int numKeys = keys.size();
  std::vector<Status> statList(numKeys);
  values->resize(numKeys);

  // Keep track of bytes that we read for statistics-recording later
  uint64_t bytesRead = 0;

  // For each of the given keys, apply the entire "get" process as follows:
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
  // merge_operands will contain the sequence of merges in the latter case.
  for (int i = 0; i < numKeys; ++i) {
    merge_operands.clear();
    Status& s = statList[i];
    std::string* value = &(*values)[i];

    LookupKey lkey(keys[i], snapshot);
    if (mem->Get(lkey, value, &s, &merge_operands, options_)) {
      // Done
    } else if (imm.Get(lkey, value, &s, &merge_operands, options_)) {
      // Done
    } else {
      current->Get(options, lkey, value, &s, &merge_operands, &stats, options_);
      have_stat_update = true;
    }

    if (s.ok()) {
      bytesRead += value->size();
    }
  }

  // Post processing (decrement reference counts and record statistics)
  mutex_.Lock();
  if (!options_.disable_seek_compaction &&
      have_stat_update && current->UpdateStats(stats)) {
    MaybeScheduleFlushOrCompaction();
  }
  mem->Unref();
  imm.UnrefAll();
  current->Unref();
  mutex_.Unlock();

  LogFlush(options_.info_log);
  RecordTick(options_.statistics, NUMBER_MULTIGET_CALLS);
  RecordTick(options_.statistics, NUMBER_MULTIGET_KEYS_READ, numKeys);
  RecordTick(options_.statistics, NUMBER_MULTIGET_BYTES_READ, bytesRead);

  return statList;
}

bool DBImpl::KeyMayExist(const ReadOptions& options,
                         const Slice& key,
                         std::string* value,
                         bool* value_found) {
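  // Best-effort existence check: the read is restricted to the block cache
  // (kBlockCacheTier) so that no disk I/O is issued; *value_found is set to
  // false when the value could not be fetched from cache alone.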
  if (value_found != nullptr) {
    *value_found = true; // falsify later if key-may-exist but can't fetch value
  }
  ReadOptions roptions = options;
  roptions.read_tier = kBlockCacheTier; // read from block cache only
  return GetImpl(roptions, key, value, value_found).ok();
}

Iterator* DBImpl::NewIterator(const ReadOptions& options) {
  SequenceNumber latest_snapshot;
  Iterator* iter = NewInternalIterator(options, &latest_snapshot);
  iter = NewDBIterator(
             &dbname_, env_, options_, user_comparator(), iter,
             (options.snapshot != nullptr
              ? reinterpret_cast<const SnapshotImpl*>(options.snapshot)->number_
              : latest_snapshot));
  if (options.prefix) {
    // use extra wrapper to exclude any keys from the results which
    // don't begin with the prefix
    iter = new PrefixFilterIterator(iter, *options.prefix,
                                    options_.prefix_extractor);
  }
  return iter;
}

const Snapshot* DBImpl::GetSnapshot() {
  MutexLock l(&mutex_);
  return snapshots_.New(versions_->LastSequence());
}

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  MutexLock l(&mutex_);
  snapshots_.Delete(reinterpret_cast<const SnapshotImpl*>(s));
}

// Convenience methods
Status DBImpl::Put(const WriteOptions& o, const Slice& key, const Slice& val) {
  return DB::Put(o, key, val);
}

Status DBImpl::Merge(const WriteOptions& o, const Slice& key,
                     const Slice& val) {
  if (!options_.merge_operator) {
    return Status::NotSupported("Provide a merge_operator when opening DB");
  } else {
    return DB::Merge(o, key, val);
  }
}

Status DBImpl::Delete(const WriteOptions& options, const Slice& key) {
  return DB::Delete(options, key);
}

Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
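  // Writers queue up on writers_; the writer at the front of the queue
  // builds a group batch (BuildBatchGroup), appends it to the WAL unless
  // disableWAL is set, and applies it to the memtable with the mutex
  // released.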
  Writer w(&mutex_);
  w.batch = my_batch;
  w.sync = options.sync;
H
heyongqiang 已提交
2708
  w.disableWAL = options.disableWAL;
2709
  w.done = false;
2710

2711
  StopWatch sw(env_, options_.statistics, DB_WRITE);
2712
  MutexLock l(&mutex_);
2713 2714 2715 2716 2717 2718
  writers_.push_back(&w);
  while (!w.done && &w != writers_.front()) {
    w.cv.Wait();
  }
  if (w.done) {
    return w.status;
  }

  // May temporarily unlock and wait.
  Status status = MakeRoomForWrite(my_batch == nullptr);
  uint64_t last_sequence = versions_->LastSequence();
  Writer* last_writer = &w;
  if (status.ok() && my_batch != nullptr) {  // nullptr batch is for compactions
    // TODO: BuildBatchGroup physically concatenate/copy all write batches into
    // a new one. Mem copy is done with the lock held. Ideally, we only need
    // the lock to obtain the last_writer and the references to all batches.
    // Creation (copy) of the merged batch could have been done outside of the
    // lock protected region.
    WriteBatch* updates = BuildBatchGroup(&last_writer);

    // Add to log and apply to memtable.  We can release the lock
    // during this phase since &w is currently responsible for logging
    // and protects against concurrent loggers and concurrent writes
    // into mem_.
    {
      mutex_.Unlock();
      const SequenceNumber current_sequence = last_sequence + 1;
      WriteBatchInternal::SetSequence(updates, current_sequence);
      int my_batch_count = WriteBatchInternal::Count(updates);
      last_sequence += my_batch_count;
      // Record statistics
      RecordTick(options_.statistics, NUMBER_KEYS_WRITTEN, my_batch_count);
      RecordTick(options_.statistics,
                 BYTES_WRITTEN,
                 WriteBatchInternal::ByteSize(updates));
      if (options.disableWAL) {
        flush_on_destroy_ = true;
      }

      if (!options.disableWAL) {
        StopWatchNano timer(env_);
        StartPerfTimer(&timer);
        status = log_->AddRecord(WriteBatchInternal::Contents(updates));
        BumpPerfTime(&perf_context.wal_write_time, &timer);
        if (status.ok() && options.sync) {
          if (options_.use_fsync) {
            // Use a named StopWatch so the timer spans the sync call; an
            // unnamed temporary is destroyed immediately and records nothing.
            StopWatch sync_sw(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
            status = log_->file()->Fsync();
          } else {
            StopWatch sync_sw(env_, options_.statistics, WAL_FILE_SYNC_MICROS);
            status = log_->file()->Sync();
          }
        }
      }
      if (status.ok()) {
        status = WriteBatchInternal::InsertInto(updates, mem_, &options_, this,
                                                options_.filter_deletes);
        if (!status.ok()) {
          // Panic for in-memory corruptions
          // Note that existing logic was not sound. Any partial failure writing
          // into the memtable would result in a state that some write ops might
          // have succeeded in memtable but Status reports error for all writes.
          throw std::runtime_error("In memory WriteBatch corruption!");
        }
        SetTickerCount(options_.statistics, SEQUENCE_NUMBER, last_sequence);
      }
      LogFlush(options_.info_log);
      mutex_.Lock();
      if (status.ok()) {
        versions_->SetLastSequence(last_sequence);
      }
    }
    if (updates == &tmp_batch_) tmp_batch_.Clear();
  }
  if (options_.paranoid_checks && !status.ok() && bg_error_.ok()) {
    bg_error_ = status; // stop compaction & fail any further writes
  }

  while (true) {
    Writer* ready = writers_.front();
    writers_.pop_front();
    if (ready != &w) {
      ready->status = status;
      ready->done = true;
      ready->cv.Signal();
    }
    if (ready == last_writer) break;
  }

  // Notify new head of write queue
  if (!writers_.empty()) {
    writers_.front()->cv.Signal();
  }
  return status;
}

// REQUIRES: Writer list must be non-empty
// REQUIRES: First writer must have a non-nullptr batch
WriteBatch* DBImpl::BuildBatchGroup(Writer** last_writer) {
  assert(!writers_.empty());
  Writer* first = writers_.front();
  WriteBatch* result = first->batch;
  assert(result != nullptr);

  size_t size = WriteBatchInternal::ByteSize(first->batch);

  // Allow the group to grow up to a maximum size, but if the
  // original write is small, limit the growth so we do not slow
  // down the small write too much.
  size_t max_size = 1 << 20;
  if (size <= (128<<10)) {
    max_size = size + (128<<10);
  }
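  // Illustrative example (numbers not from the source): a 64KB leading batch
  // caps the group at 192KB (64KB + 128KB), while a leading batch larger than
  // 128KB may let the group grow all the way to the 1MB ceiling.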

  *last_writer = first;
  std::deque<Writer*>::iterator iter = writers_.begin();
  ++iter;  // Advance past "first"
  for (; iter != writers_.end(); ++iter) {
    Writer* w = *iter;
    if (w->sync && !first->sync) {
      // Do not include a sync write into a batch handled by a non-sync write.
      break;
    }

    if (!w->disableWAL && first->disableWAL) {
      // Do not include a write that needs WAL into a batch that has
      // WAL disabled.
      break;
    }

    if (w->batch != nullptr) {
      size += WriteBatchInternal::ByteSize(w->batch);
      if (size > max_size) {
        // Do not make batch too big
        break;
      }

      // Append to *result
      if (result == first->batch) {
        // Switch to temporary batch instead of disturbing caller's batch
        result = &tmp_batch_;
        assert(WriteBatchInternal::Count(result) == 0);
        WriteBatchInternal::Append(result, first->batch);
      }
      WriteBatchInternal::Append(result, w->batch);
    }
    *last_writer = w;
  }
  return result;
}

// This function computes the amount of time in microseconds by which a write
// should be delayed based on the number of level-0 files according to the
// following formula:
// if n < bottom, return 0;
// if n >= top, return 1000;
// otherwise, let r = (n - bottom) /
//                    (top - bottom)
//  and return r^2 * 1000.
// The goal of this formula is to gradually increase the rate at which writes
// are slowed. We also tried linear delay (r * 1000), but it seemed to do
// slightly worse. There is no other particular reason for choosing quadratic.
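// Worked example (illustrative numbers only): with bottom = 8, top = 20 and
// n = 14, r = (14 - 8) / (20 - 8) = 0.5, so the returned delay is
// 0.5 * 0.5 * 1000 = 250 microseconds.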
uint64_t DBImpl::SlowdownAmount(int n, int bottom, int top) {
  uint64_t delay;
  if (n >= top) {
    delay = 1000;
  }
  else if (n < bottom) {
    delay = 0;
  }
  else {
    // If we are here, we know that:
    //   bottom <= n < top
    // since the previous two conditions are false.
    float how_much =
      (float) (n - bottom) /
              (top - bottom);
    delay = how_much * how_much * 1000;
  }
  assert(delay <= 1000);
  return delay;
}

// REQUIRES: mutex_ is held
// REQUIRES: this thread is currently at the front of the writer queue
Status DBImpl::MakeRoomForWrite(bool force) {
  mutex_.AssertHeld();
  assert(!writers_.empty());
  bool allow_delay = !force;
  bool allow_hard_rate_limit_delay = !force;
  bool allow_soft_rate_limit_delay = !force;
  uint64_t rate_limit_delay_millis = 0;
  Status s;
  double score;

  while (true) {
    if (!bg_error_.ok()) {
      // Yield previous error
      s = bg_error_;
      break;
    } else if (
        allow_delay &&
        versions_->NumLevelFiles(0) >=
          options_.level0_slowdown_writes_trigger) {
      // We are getting close to hitting a hard limit on the number of
      // L0 files.  Rather than delaying a single write by several
      // seconds when we hit the hard limit, start delaying each
      // individual write by 0-1ms to reduce latency variance.  Also,
      // this delay hands over some CPU to the compaction thread in
      // case it is sharing the same core as the writer.
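      // The per-write delay below comes from SlowdownAmount() applied to the
      // current level-0 file count: roughly zero at the slowdown trigger and
      // approaching 1ms as the count nears the stop trigger.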
      mutex_.Unlock();
      uint64_t delayed;
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_SLOWDOWN_COUNT);
        env_->SleepForMicroseconds(
          SlowdownAmount(versions_->NumLevelFiles(0),
                         options_.level0_slowdown_writes_trigger,
                         options_.level0_stop_writes_trigger)
        );
        delayed = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_L0_SLOWDOWN_MICROS, delayed);
      stall_level0_slowdown_ += delayed;
      stall_level0_slowdown_count_++;
      allow_delay = false;  // Do not delay a single write more than once
      //Log(options_.info_log,
      //    "delaying write %llu usecs for level0_slowdown_writes_trigger\n",
      //     (long long unsigned int)delayed);
      mutex_.Lock();
      delayed_writes_++;
    } else if (!force &&
               (mem_->ApproximateMemoryUsage() <= options_.write_buffer_size)) {
      // There is room in current memtable
      if (allow_delay) {
        DelayLoggingAndReset();
      }
      break;
    } else if (imm_.size() == options_.max_write_buffer_number - 1) {
      // We have filled up the current memtable, but the previous
      // ones are still being compacted, so we wait.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for memtable compaction...\n");
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics,
          STALL_MEMTABLE_COMPACTION_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_MEMTABLE_COMPACTION_MICROS, stall);
      stall_memtable_compaction_ += stall;
      stall_memtable_compaction_count_++;
    } else if (versions_->NumLevelFiles(0) >=
               options_.level0_stop_writes_trigger) {
      // There are too many level-0 files.
      DelayLoggingAndReset();
      Log(options_.info_log, "wait for fewer level0 files...\n");
      uint64_t stall;
      {
        StopWatch sw(env_, options_.statistics, STALL_L0_NUM_FILES_COUNT);
        bg_cv_.Wait();
        stall = sw.ElapsedMicros();
      }
      RecordTick(options_.statistics, STALL_L0_NUM_FILES_MICROS, stall);
      stall_level0_num_files_ += stall;
      stall_level0_num_files_count_++;
    } else if (
        allow_hard_rate_limit_delay &&
        options_.hard_rate_limit > 1.0 &&
        (score = versions_->MaxCompactionScore()) > options_.hard_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      int max_level = versions_->MaxCompactionScoreLevel();
      mutex_.Unlock();
      uint64_t delayed;
      {
        StopWatch sw(env_, options_.statistics, HARD_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(1000);
        delayed = sw.ElapsedMicros();
      }
      stall_leveln_slowdown_[max_level] += delayed;
      stall_leveln_slowdown_count_[max_level]++;
      // Make sure the following value doesn't round to zero.
      uint64_t rate_limit = std::max((delayed / 1000), (uint64_t) 1);
      rate_limit_delay_millis += rate_limit;
      RecordTick(options_.statistics, RATE_LIMIT_DELAY_MILLIS, rate_limit);
      if (options_.rate_limit_delay_max_milliseconds > 0 &&
          rate_limit_delay_millis >=
          (unsigned)options_.rate_limit_delay_max_milliseconds) {
        allow_hard_rate_limit_delay = false;
      }
      // Log(options_.info_log,
      //    "delaying write %llu usecs for rate limits with max score %.2f\n",
      //    (long long unsigned int)delayed, score);
      mutex_.Lock();
    } else if (
        allow_soft_rate_limit_delay &&
        options_.soft_rate_limit > 0.0 &&
        (score = versions_->MaxCompactionScore()) > options_.soft_rate_limit) {
      // Delay a write when the compaction score for any level is too large.
      // TODO: add statistics
      mutex_.Unlock();
      {
        StopWatch sw(env_, options_.statistics, SOFT_RATE_LIMIT_DELAY_COUNT);
        env_->SleepForMicroseconds(SlowdownAmount(
          score,
          options_.soft_rate_limit,
          options_.hard_rate_limit)
        );
        rate_limit_delay_millis += sw.ElapsedMicros();
      }
      allow_soft_rate_limit_delay = false;
      mutex_.Lock();
    } else {
      // Attempt to switch to a new memtable and trigger compaction of old
      DelayLoggingAndReset();
      assert(versions_->PrevLogNumber() == 0);
      uint64_t new_log_number = versions_->NewFileNumber();
      unique_ptr<WritableFile> lfile;
      EnvOptions soptions(storage_options_);
      soptions.use_mmap_writes = false;
      s = env_->NewWritableFile(
            LogFileName(options_.wal_dir, new_log_number),
            &lfile,
            soptions
          );
      if (!s.ok()) {
        // Avoid chewing through file number space in a tight loop.
        versions_->ReuseFileNumber(new_log_number);
        break;
      }
      // Our final size should be less than write_buffer_size
      // (compression, etc) but err on the side of caution.
      lfile->SetPreallocationBlockSize(1.1 * options_.write_buffer_size);
      logfile_number_ = new_log_number;
      log_.reset(new log::Writer(std::move(lfile)));
      mem_->SetNextLogNumber(logfile_number_);
      imm_.Add(mem_);
      if (force) {
        imm_.FlushRequested();
      }
      mem_ = new MemTable(
          internal_comparator_, mem_rep_factory_, NumberLevels(), options_);
      mem_->Ref();
      Log(options_.info_log,
          "New memtable created with log file: #%lu\n",
          logfile_number_);
      mem_->SetLogNumber(logfile_number_);
      force = false;   // Do not force another compaction if have room
      MaybeScheduleFlushOrCompaction();
    }
  }
  return s;
}

bool DBImpl::GetProperty(const Slice& property, std::string* value) {
  value->clear();

  MutexLock l(&mutex_);
  Slice in = property;
  Slice prefix("rocksdb.");
  if (!in.starts_with(prefix)) return false;
  in.remove_prefix(prefix.size());

  if (in.starts_with("num-files-at-level")) {
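    // Illustrative usage (hypothetical caller): GetProperty(
    //     "rocksdb.num-files-at-level0", &v) fills v with the number of
    //     level-0 files, formatted as a decimal string.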
    in.remove_prefix(strlen("num-files-at-level"));
    uint64_t level;
    bool ok = ConsumeDecimalNumber(&in, &level) && in.empty();
    if (!ok || (int)level >= NumberLevels()) {
      return false;
    } else {
      char buf[100];
      snprintf(buf, sizeof(buf), "%d",
               versions_->NumLevelFiles(static_cast<int>(level)));
      *value = buf;
      return true;
    }
  } else if (in == "levelstats") {
    char buf[1000];
    snprintf(buf, sizeof(buf),
             "Level Files Size(MB)\n"
             "--------------------\n");
    value->append(buf);

    for (int level = 0; level < NumberLevels(); level++) {
      snprintf(buf, sizeof(buf),
               "%3d %8d %8.0f\n",
               level,
               versions_->NumLevelFiles(level),
               versions_->NumLevelBytes(level) / 1048576.0);
      value->append(buf);
    }
    return true;

  } else if (in == "stats") {
    char buf[1000];
    uint64_t total_bytes_written = 0;
    uint64_t total_bytes_read = 0;
    uint64_t micros_up = env_->NowMicros() - started_at_;
    // Add "+1" to make sure seconds_up is > 0 and avoid NaN later
    double seconds_up = (micros_up + 1) / 1000000.0;
    uint64_t total_slowdown = 0;
    uint64_t total_slowdown_count = 0;
    uint64_t interval_bytes_written = 0;
    uint64_t interval_bytes_read = 0;
    uint64_t interval_bytes_new = 0;
    double   interval_seconds_up = 0;

    // Pardon the long line but I think it is easier to read this way.
    snprintf(buf, sizeof(buf),
             "                               Compactions\n"
             "Level  Files Size(MB) Score Time(sec)  Read(MB) Write(MB)    Rn(MB)  Rnp1(MB)  Wnew(MB) RW-Amplify Read(MB/s) Write(MB/s)      Rn     Rnp1     Wnp1     NewW    Count  Ln-stall Stall-cnt\n"
             "--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n"
             );
    value->append(buf);
    for (int level = 0; level < NumberLevels(); level++) {
      int files = versions_->NumLevelFiles(level);
      if (stats_[level].micros > 0 || files > 0) {
        int64_t bytes_read = stats_[level].bytes_readn +
                             stats_[level].bytes_readnp1;
        int64_t bytes_new = stats_[level].bytes_written -
                            stats_[level].bytes_readnp1;
        double amplify = (stats_[level].bytes_readn == 0)
            ? 0.0
            : (stats_[level].bytes_written +
               stats_[level].bytes_readnp1 +
               stats_[level].bytes_readn) /
                (double) stats_[level].bytes_readn;

        total_bytes_read += bytes_read;
        total_bytes_written += stats_[level].bytes_written;

        snprintf(
            buf, sizeof(buf),
            "%3d %8d %8.0f %5.1f %9.0f %9.0f %9.0f %9.0f %9.0f %9.0f %10.1f %9.1f %11.1f %8d %8d %8d %8d %8d %9.1f %9lu\n",
            level,
            files,
            versions_->NumLevelBytes(level) / 1048576.0,
            versions_->NumLevelBytes(level) /
                versions_->MaxBytesForLevel(level),
            stats_[level].micros / 1e6,
            bytes_read / 1048576.0,
            stats_[level].bytes_written / 1048576.0,
            stats_[level].bytes_readn / 1048576.0,
            stats_[level].bytes_readnp1 / 1048576.0,
            bytes_new / 1048576.0,
            amplify,
            // +1 to avoid division by 0
            (bytes_read / 1048576.0) / ((stats_[level].micros+1) / 1000000.0),
            (stats_[level].bytes_written / 1048576.0) /
                ((stats_[level].micros+1) / 1000000.0),
            stats_[level].files_in_leveln,
            stats_[level].files_in_levelnp1,
            stats_[level].files_out_levelnp1,
            stats_[level].files_out_levelnp1 - stats_[level].files_in_levelnp1,
            stats_[level].count,
            stall_leveln_slowdown_[level] / 1000000.0,
            (unsigned long) stall_leveln_slowdown_count_[level]);
        total_slowdown += stall_leveln_slowdown_[level];
        total_slowdown_count += stall_leveln_slowdown_count_[level];
        value->append(buf);
      }
    }

    interval_bytes_new = stats_[0].bytes_written - last_stats_.bytes_new_;
    interval_bytes_read = total_bytes_read - last_stats_.bytes_read_;
    interval_bytes_written = total_bytes_written - last_stats_.bytes_written_;
    interval_seconds_up = seconds_up - last_stats_.seconds_up_;

    snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
             seconds_up, interval_seconds_up);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (GB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             stats_[0].bytes_written / (1048576.0 * 1024),
             total_bytes_read / (1048576.0 * 1024),
             total_bytes_written / (1048576.0 * 1024),
             (total_bytes_read + total_bytes_written) / (1048576.0 * 1024));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO cumulative (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             stats_[0].bytes_written / 1048576.0 / seconds_up,
             total_bytes_read / 1048576.0 / seconds_up,
             total_bytes_written / 1048576.0 / seconds_up,
             (total_bytes_read + total_bytes_written) / 1048576.0 / seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification cumulative: %.1f write, %.1f compaction\n",
             (double) total_bytes_written / (stats_[0].bytes_written+1),
             (double) (total_bytes_written + total_bytes_read)
                  / (stats_[0].bytes_written+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB): "
             "%.2f new, %.2f read, %.2f write, %.2f read+write\n",
             interval_bytes_new / 1048576.0,
             interval_bytes_read/ 1048576.0,
             interval_bytes_written / 1048576.0,
             (interval_bytes_read + interval_bytes_written) / 1048576.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
             "Compaction IO interval (MB/sec): "
             "%.1f new, %.1f read, %.1f write, %.1f read+write\n",
             interval_bytes_new / 1048576.0 / interval_seconds_up,
             interval_bytes_read / 1048576.0 / interval_seconds_up,
             interval_bytes_written / 1048576.0 / interval_seconds_up,
             (interval_bytes_read + interval_bytes_written)
                 / 1048576.0 / interval_seconds_up);
    value->append(buf);

    // +1 to avoid divide by 0 and NaN
    snprintf(buf, sizeof(buf),
             "Amplification interval: %.1f write, %.1f compaction\n",
             (double) interval_bytes_written / (interval_bytes_new+1),
             (double) (interval_bytes_written + interval_bytes_read) /
                  (interval_bytes_new+1));
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(secs): %.3f level0_slowdown, %.3f level0_numfiles, "
            "%.3f memtable_compaction, %.3f leveln_slowdown\n",
            stall_level0_slowdown_ / 1000000.0,
            stall_level0_num_files_ / 1000000.0,
            stall_memtable_compaction_ / 1000000.0,
            total_slowdown / 1000000.0);
    value->append(buf);

    snprintf(buf, sizeof(buf),
            "Stalls(count): %lu level0_slowdown, %lu level0_numfiles, "
            "%lu memtable_compaction, %lu leveln_slowdown\n",
            (unsigned long) stall_level0_slowdown_count_,
            (unsigned long) stall_level0_num_files_count_,
            (unsigned long) stall_memtable_compaction_count_,
            (unsigned long) total_slowdown_count);
    value->append(buf);

    last_stats_.bytes_read_ = total_bytes_read;
    last_stats_.bytes_written_ = total_bytes_written;
    last_stats_.bytes_new_ = stats_[0].bytes_written;
    last_stats_.seconds_up_ = seconds_up;

    return true;
  } else if (in == "sstables") {
    *value = versions_->current()->DebugString();
    return true;
  } else if (in == "num-immutable-mem-table") {
    *value = std::to_string(imm_.size());
    return true;
  }

  return false;
}

void DBImpl::GetApproximateSizes(
    const Range* range, int n,
    uint64_t* sizes) {
  // TODO(opt): better implementation
  Version* v;
  {
    MutexLock l(&mutex_);
    versions_->current()->Ref();
    v = versions_->current();
  }

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    uint64_t start = versions_->ApproximateOffsetOf(v, k1);
    uint64_t limit = versions_->ApproximateOffsetOf(v, k2);
    sizes[i] = (limit >= start ? limit - start : 0);
  }

  {
    MutexLock l(&mutex_);
    v->Unref();
  }
}

inline void DBImpl::DelayLoggingAndReset() {
  if (delayed_writes_ > 0) {
    Log(options_.info_log, "delayed %d write...\n", delayed_writes_ );
    delayed_writes_ = 0;
  }
}

Status DBImpl::DeleteFile(std::string name) {
  uint64_t number;
  FileType type;
  WalFileType log_type;
  if (!ParseFileName(name, &number, &type, &log_type) ||
      (type != kTableFile && type != kLogFile)) {
    Log(options_.info_log, "DeleteFile %s failed.\n", name.c_str());
    return Status::InvalidArgument("Invalid file name");
  }

  Status status;
  if (type == kLogFile) {
    // Only allow deleting archived log files
    if (log_type != kArchivedLogFile) {
      Log(options_.info_log, "DeleteFile %s failed.\n", name.c_str());
      return Status::NotSupported("Delete only supported for archived logs");
    }
    status = env_->DeleteFile(options_.wal_dir + "/" + name.c_str());
    if (!status.ok()) {
      Log(options_.info_log, "DeleteFile %s failed.\n", name.c_str());
    }
    return status;
  }

  int level;
  FileMetaData metadata;
  int maxlevel = NumberLevels();
  VersionEdit edit(maxlevel);
  DeletionState deletion_state;
  {
    MutexLock l(&mutex_);
    status = versions_->GetMetadataForFile(number, &level, &metadata);
    if (!status.ok()) {
      Log(options_.info_log, "DeleteFile %s failed. File not found\n",
                             name.c_str());
      return Status::InvalidArgument("File not found");
    }
    assert((level > 0) && (level < maxlevel));

    // If the file is being compacted no need to delete.
    if (metadata.being_compacted) {
      Log(options_.info_log,
          "DeleteFile %s Skipped. File about to be compacted\n", name.c_str());
      return Status::OK();
    }

    // Only the files in the last level can be deleted externally.
    // This is to make sure that any deletion tombstones are not
    // lost. Check that the level passed is the last level.
    for (int i = level + 1; i < maxlevel; i++) {
      if (versions_->NumLevelFiles(i) != 0) {
        Log(options_.info_log,
            "DeleteFile %s FAILED. File not in last level\n", name.c_str());
        return Status::InvalidArgument("File not in last level");
      }
    }
    edit.DeleteFile(level, number);
    status = versions_->LogAndApply(&edit, &mutex_);
    if (status.ok()) {
      versions_->GetAndFreeObsoleteFiles(&deletion_state.sstdeletefiles);
    }
    FindObsoleteFiles(deletion_state, false);
  } // lock released here
  LogFlush(options_.info_log);

  if (status.ok()) {
    // remove files outside the db-lock
    PurgeObsoleteFiles(deletion_state);
  }
  return status;
}

void DBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData> *metadata) {
  MutexLock l(&mutex_);
  return versions_->GetLiveFilesMetaData(metadata);
}

// Default implementations of convenience methods that subclasses of DB
// can call if they wish
Status DB::Put(const WriteOptions& opt, const Slice& key, const Slice& value) {
  WriteBatch batch;
  batch.Put(key, value);
  return Write(opt, &batch);
}

Status DB::Delete(const WriteOptions& opt, const Slice& key) {
  WriteBatch batch;
  batch.Delete(key);
  return Write(opt, &batch);
}

Status DB::Merge(const WriteOptions& opt, const Slice& key,
                 const Slice& value) {
  WriteBatch batch;
  batch.Merge(key, value);
  return Write(opt, &batch);
}

DB::~DB() { }

Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
  *dbptr = nullptr;
  EnvOptions soptions;

  if (options.block_cache != nullptr && options.no_block_cache) {
    return Status::InvalidArgument(
        "no_block_cache is true while block_cache is not nullptr");
  }

  DBImpl* impl = new DBImpl(options, dbname);
  Status s = impl->env_->CreateDirIfMissing(impl->options_.wal_dir);
  if (!s.ok()) {
    delete impl;
    return s;
  }

  s = impl->CreateArchivalDirectory();
  if (!s.ok()) {
    delete impl;
    return s;
  }
  impl->mutex_.Lock();
  VersionEdit edit(impl->NumberLevels());
  s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
  if (s.ok()) {
    uint64_t new_log_number = impl->versions_->NewFileNumber();
    unique_ptr<WritableFile> lfile;
    soptions.use_mmap_writes = false;
    s = options.env->NewWritableFile(
      LogFileName(impl->options_.wal_dir, new_log_number),
      &lfile,
      soptions
    );
    if (s.ok()) {
      lfile->SetPreallocationBlockSize(1.1 * options.write_buffer_size);
      edit.SetLogNumber(new_log_number);
      impl->logfile_number_ = new_log_number;
      impl->log_.reset(new log::Writer(std::move(lfile)));
      s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
    }
    if (s.ok()) {
      impl->mem_->SetLogNumber(impl->logfile_number_);
      impl->DeleteObsoleteFiles();
      impl->MaybeScheduleFlushOrCompaction();
      impl->MaybeScheduleLogDBDeployStats();
    }
  }
  impl->mutex_.Unlock();

  if (options.compaction_style == kCompactionStyleUniversal) {
    int num_files;
    for (int i = 1; i < impl->NumberLevels(); i++) {
      num_files = impl->versions_->NumLevelFiles(i);
      if (num_files > 0) {
        s = Status::InvalidArgument("Not all files are at level 0. Cannot "
          "open with universal compaction style.");
        break;
      }
    }
  }

  if (s.ok()) {
    *dbptr = impl;
  } else {
    delete impl;
  }
  return s;
}

Snapshot::~Snapshot() {
}

Status DestroyDB(const std::string& dbname, const Options& options) {
  const InternalKeyComparator comparator(options.comparator);
  const InternalFilterPolicy filter_policy(options.filter_policy);
  const Options& soptions(SanitizeOptions(
    dbname, &comparator, &filter_policy, options));
  Env* env = soptions.env;
  std::vector<std::string> filenames;
  std::vector<std::string> archiveFiles;

  std::string archivedir = ArchivalDirectory(dbname);
  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);

  if (dbname != soptions.wal_dir) {
    std::vector<std::string> logfilenames;
    env->GetChildren(soptions.wal_dir, &logfilenames);
    filenames.insert(filenames.end(), logfilenames.begin(), logfilenames.end());
    archivedir = ArchivalDirectory(soptions.wal_dir);
  }

  if (filenames.empty()) {
    return Status::OK();
  }

  FileLock* lock;
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
  if (result.ok()) {
    uint64_t number;
    FileType type;
    for (size_t i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          type != kDBLockFile) {  // Lock file will be deleted at end
        Status del;
        if (type == kMetaDatabase) {
          del = DestroyDB(dbname + "/" + filenames[i], options);
        } else if (type == kLogFile) {
          del = env->DeleteFile(soptions.wal_dir + "/" + filenames[i]);
        } else {
          del = env->DeleteFile(dbname + "/" + filenames[i]);
        }
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }

    env->GetChildren(archivedir, &archiveFiles);
    // Delete archival files.
    for (size_t i = 0; i < archiveFiles.size(); ++i) {
      if (ParseFileName(archiveFiles[i], &number, &type) &&
          type == kLogFile) {
        Status del = env->DeleteFile(archivedir + "/" + archiveFiles[i]);
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }
    // ignore case where no archival directory is present.
    env->DeleteDir(archivedir);

    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
    env->DeleteDir(soptions.wal_dir);
  }
  return result;
}

//
// A global method that can dump out the build version
void dumpLeveldbBuildVersion(Logger * log) {
  Log(log, "Git sha %s", rocksdb_build_git_sha);
  Log(log, "Compile time %s %s",
      rocksdb_build_compile_time, rocksdb_build_compile_date);
}

}  // namespace rocksdb