//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/db_impl/db_impl.h"
J
jorlow@chromium.org 已提交
10

11
#include <stdint.h>
D
David Bernard 已提交
12
#ifdef OS_SOLARIS
D
David Bernard 已提交
13
#include <alloca.h>
D
David Bernard 已提交
14
#endif
15

J
jorlow@chromium.org 已提交
16
#include <algorithm>
17
#include <cinttypes>
18
#include <cstdio>
19
#include <map>
J
jorlow@chromium.org 已提交
20
#include <set>
21
#include <stdexcept>
22
#include <string>
23
#include <unordered_map>
24
#include <unordered_set>
T
Tomislav Novak 已提交
25
#include <utility>
26
#include <vector>
27

J
jorlow@chromium.org 已提交
28
#include "db/builder.h"
29
#include "db/compaction/compaction_job.h"
30
#include "db/db_info_dumper.h"
31
#include "db/db_iter.h"
K
kailiu 已提交
32
#include "db/dbformat.h"
33
#include "db/error_handler.h"
34
#include "db/event_helpers.h"
35
#include "db/external_sst_file_ingestion_job.h"
36 37
#include "db/flush_job.h"
#include "db/forward_iterator.h"
38
#include "db/import_column_family_job.h"
I
Igor Canadi 已提交
39
#include "db/job_context.h"
J
jorlow@chromium.org 已提交
40 41
#include "db/log_reader.h"
#include "db/log_writer.h"
42
#include "db/malloc_stats.h"
J
jorlow@chromium.org 已提交
43
#include "db/memtable.h"
K
kailiu 已提交
44
#include "db/memtable_list.h"
45
#include "db/merge_context.h"
46
#include "db/merge_helper.h"
47
#include "db/range_tombstone_fragmenter.h"
J
jorlow@chromium.org 已提交
48
#include "db/table_cache.h"
K
kailiu 已提交
49
#include "db/table_properties_collector.h"
50
#include "db/transaction_log_impl.h"
J
jorlow@chromium.org 已提交
51 52
#include "db/version_set.h"
#include "db/write_batch_internal.h"
A
agiardullo 已提交
53
#include "db/write_callback.h"
54 55 56
#include "file/file_util.h"
#include "file/filename.h"
#include "file/sst_file_manager_impl.h"
57 58 59
#include "logging/auto_roll_logger.h"
#include "logging/log_buffer.h"
#include "logging/logging.h"
60 61
#include "memtable/hash_linklist_rep.h"
#include "memtable/hash_skiplist_rep.h"
62
#include "monitoring/in_memory_stats_history.h"
63 64
#include "monitoring/iostats_context_imp.h"
#include "monitoring/perf_context_imp.h"
65
#include "monitoring/persistent_stats_history.h"
66 67 68 69 70
#include "monitoring/thread_status_updater.h"
#include "monitoring/thread_status_util.h"
#include "options/cf_options.h"
#include "options/options_helper.h"
#include "options/options_parser.h"
71
#include "port/port.h"
I
Igor Canadi 已提交
72
#include "rocksdb/cache.h"
73
#include "rocksdb/compaction_filter.h"
A
Aaron G 已提交
74
#include "rocksdb/convenience.h"
75 76 77 78
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/statistics.h"
79
#include "rocksdb/stats_history.h"
80
#include "rocksdb/status.h"
S
Siying Dong 已提交
81
#include "rocksdb/table.h"
82
#include "rocksdb/write_buffer_manager.h"
83 84
#include "table/block_based/block.h"
#include "table/block_based/block_based_table_factory.h"
85
#include "table/get_context.h"
86
#include "table/merging_iterator.h"
87
#include "table/multiget_context.h"
K
kailiu 已提交
88
#include "table/table_builder.h"
J
jorlow@chromium.org 已提交
89
#include "table/two_level_iterator.h"
90
#include "test_util/sync_point.h"
A
Aaron G 已提交
91
#include "tools/sst_dump_tool_imp.h"
K
kailiu 已提交
92
#include "util/autovector.h"
93
#include "util/build_version.h"
94
#include "util/cast_util.h"
J
jorlow@chromium.org 已提交
95
#include "util/coding.h"
I
Igor Canadi 已提交
96
#include "util/compression.h"
97
#include "util/crc32c.h"
98
#include "util/file_reader_writer.h"
J
jorlow@chromium.org 已提交
99
#include "util/mutexlock.h"
100
#include "util/stop_watch.h"
101
#include "util/string_util.h"
J
jorlow@chromium.org 已提交
102

namespace rocksdb {
const std::string kDefaultColumnFamilyName("default");
const std::string kPersistentStatsColumnFamilyName(
    "___rocksdb_stats_history___");
void DumpRocksDBBuildVersion(Logger* log);

CompressionType GetCompressionFlush(
    const ImmutableCFOptions& ioptions,
    const MutableCFOptions& mutable_cf_options) {
  // Compressing memtable flushes might not help unless the sequential load
  // optimization is used for leveled compaction. Otherwise the CPU and
  // latency overhead is not offset by saving much space.
  if (ioptions.compaction_style == kCompactionStyleUniversal) {
    if (mutable_cf_options.compaction_options_universal
            .compression_size_percent < 0) {
      return mutable_cf_options.compression;
    } else {
      return kNoCompression;
    }
  } else if (!ioptions.compression_per_level.empty()) {
    // For leveled compaction, compress when min_level_to_compress != 0.
    return ioptions.compression_per_level[0];
  } else {
    return mutable_cf_options.compression;
  }
}
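
// Illustrative example (not part of the upstream code): with leveled
// compaction and compression_per_level = {kNoCompression, kNoCompression,
// kSnappyCompression} -- i.e. min_level_to_compress == 2 -- the branch above
// returns compression_per_level[0], so memtable flushes into L0 are written
// uncompressed and data only gets compressed once compaction moves it to L2
// or deeper.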

namespace {
void DumpSupportInfo(Logger* logger) {
  ROCKS_LOG_HEADER(logger, "Compression algorithms supported:");
  for (auto& compression : OptionsHelper::compression_type_string_map) {
    if (compression.second != kNoCompression &&
        compression.second != kDisableCompressionOption) {
      ROCKS_LOG_HEADER(logger, "\t%s supported: %d", compression.first.c_str(),
                       CompressionTypeSupported(compression.second));
    }
  }
  ROCKS_LOG_HEADER(logger, "Fast CRC32 supported: %s",
                   crc32c::IsFastCrc32Supported().c_str());
}

int64_t kDefaultLowPriThrottledRate = 2 * 1024 * 1024;
}  // namespace

DBImpl::DBImpl(const DBOptions& options, const std::string& dbname,
               const bool seq_per_batch, const bool batch_per_txn)
    : env_(options.env),
      dbname_(dbname),
      own_info_log_(options.info_log == nullptr),
      initial_db_options_(SanitizeOptions(dbname, options)),
      immutable_db_options_(initial_db_options_),
      mutable_db_options_(initial_db_options_),
      stats_(immutable_db_options_.statistics.get()),
      mutex_(stats_, env_, DB_MUTEX_WAIT_MICROS,
             immutable_db_options_.use_adaptive_mutex),
      default_cf_handle_(nullptr),
      max_total_in_memory_state_(0),
      env_options_(BuildDBOptions(immutable_db_options_, mutable_db_options_)),
      env_options_for_compaction_(env_->OptimizeForCompactionTableWrite(
          env_options_, immutable_db_options_)),
      seq_per_batch_(seq_per_batch),
      batch_per_txn_(batch_per_txn),
      db_lock_(nullptr),
      shutting_down_(false),
      bg_cv_(&mutex_),
      logfile_number_(0),
      log_dir_synced_(false),
      log_empty_(true),
      persist_stats_cf_handle_(nullptr),
      log_sync_cv_(&mutex_),
      total_log_size_(0),
      is_snapshot_supported_(true),
      write_buffer_manager_(immutable_db_options_.write_buffer_manager.get()),
      write_thread_(immutable_db_options_),
      nonmem_write_thread_(immutable_db_options_),
      write_controller_(mutable_db_options_.delayed_write_rate),
      // Use delayed_write_rate as a base line to determine the initial
      // low pri write rate limit. It may be adjusted later.
      low_pri_write_rate_limiter_(NewGenericRateLimiter(std::min(
          static_cast<int64_t>(mutable_db_options_.delayed_write_rate / 8),
          kDefaultLowPriThrottledRate))),
      last_batch_group_size_(0),
      unscheduled_flushes_(0),
      unscheduled_compactions_(0),
      bg_bottom_compaction_scheduled_(0),
      bg_compaction_scheduled_(0),
      num_running_compactions_(0),
      bg_flush_scheduled_(0),
      num_running_flushes_(0),
      bg_purge_scheduled_(0),
      disable_delete_obsolete_files_(0),
      pending_purge_obsolete_files_(0),
      delete_obsolete_files_last_run_(env_->NowMicros()),
      last_stats_dump_time_microsec_(0),
      next_job_id_(1),
      has_unpersisted_data_(false),
      unable_to_release_oldest_log_(false),
      num_running_ingest_file_(0),
#ifndef ROCKSDB_LITE
      wal_manager_(immutable_db_options_, env_options_, seq_per_batch),
#endif  // ROCKSDB_LITE
      event_logger_(immutable_db_options_.info_log.get()),
      bg_work_paused_(0),
      bg_compaction_paused_(0),
      refitting_level_(false),
      opened_successfully_(false),
      two_write_queues_(options.two_write_queues),
      manual_wal_flush_(options.manual_wal_flush),
      // last_sequence_ is always maintained by the main queue that also writes
      // to the memtable. When two_write_queues_ is disabled, last seq in
      // memtable is the same as last seq published to the readers. When it is
      // enabled but seq_per_batch_ is disabled, last seq in memtable still
      // indicates last published seq since wal-only writes that go to the 2nd
      // queue do not consume a sequence number. Otherwise writes performed by
      // the 2nd queue could change what is visible to the readers. In that
      // case, last_seq_same_as_publish_seq_==false and the 2nd queue maintains
      // a separate variable to indicate the last published sequence.
      last_seq_same_as_publish_seq_(
          !(seq_per_batch && options.two_write_queues)),
      // Since seq_per_batch_ is currently set only by WritePreparedTxn which
      // requires a custom gc for compaction, we use that to set use_custom_gc_
      // as well.
      use_custom_gc_(seq_per_batch),
      shutdown_initiated_(false),
      own_sfm_(options.sst_file_manager == nullptr),
      preserve_deletes_(options.preserve_deletes),
      closed_(false),
      error_handler_(this, immutable_db_options_, &mutex_),
      atomic_flush_install_cv_(&mutex_) {
  // !batch_per_txn_ implies seq_per_batch_ because it is only unset for
  // WriteUnprepared, which should use seq_per_batch_.
  assert(batch_per_txn_ || seq_per_batch_);
  env_->GetAbsolutePath(dbname, &db_absolute_path_);

  // Reserve ten files or so for other uses and give the rest to TableCache.
  // Give a large number for setting of "infinite" open files.
  const int table_cache_size = (mutable_db_options_.max_open_files == -1)
                                   ? TableCache::kInfiniteCapacity
                                   : mutable_db_options_.max_open_files - 10;
  table_cache_ = NewLRUCache(table_cache_size,
                             immutable_db_options_.table_cache_numshardbits);

  versions_.reset(new VersionSet(dbname_, &immutable_db_options_, env_options_,
                                 table_cache_.get(), write_buffer_manager_,
                                 &write_controller_, &block_cache_tracer_));
  column_family_memtables_.reset(
      new ColumnFamilyMemTablesImpl(versions_->GetColumnFamilySet()));

  DumpRocksDBBuildVersion(immutable_db_options_.info_log.get());
  DumpDBFileSummary(immutable_db_options_, dbname_);
  immutable_db_options_.Dump(immutable_db_options_.info_log.get());
  mutable_db_options_.Dump(immutable_db_options_.info_log.get());
  DumpSupportInfo(immutable_db_options_.info_log.get());

  // Always open the DB with 0 here, which means if preserve_deletes_==true
  // we won't drop any deletion markers until SetPreserveDeletesSequenceNumber()
  // is called by the client and this seqnum is advanced.
  preserve_deletes_seqnum_.store(0);
}

Status DBImpl::Resume() {
  ROCKS_LOG_INFO(immutable_db_options_.info_log, "Resuming DB");

  InstrumentedMutexLock db_mutex(&mutex_);

  if (!error_handler_.IsDBStopped() && !error_handler_.IsBGWorkStopped()) {
    // Nothing to do
    return Status::OK();
  }

  if (error_handler_.IsRecoveryInProgress()) {
    // Don't allow a mix of manual and automatic recovery
    return Status::Busy();
  }

  mutex_.Unlock();
  Status s = error_handler_.RecoverFromBGError(true);
  mutex_.Lock();
  return s;
}
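
// Example usage (illustrative sketch, not part of the upstream code): an
// application that learns about a background error -- e.g. through
// EventListener::OnBackgroundError -- can attempt manual recovery once the
// underlying cause (such as a full disk) has been addressed:
//
//   rocksdb::DB* db = ...;           // already-open DB that hit a BG error
//   rocksdb::Status s = db->Resume();
//   if (!s.ok()) {
//     // Still stopped; retry later or close and reopen the DB.
//   }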

// This function implements the guts of recovery from a background error. It
// is eventually called for both manual as well as automatic recovery. It does
// the following -
// 1. Wait for currently scheduled background flush/compaction to exit, in
//    order to avoid inadvertently causing an error and concluding that the
//    recovery failed
// 2. Flush memtables if there's any data for all the CFs. This may result in
//    another error, which will be saved by error_handler_ and reported later
//    as the recovery status
// 3. Find and delete any obsolete files
// 4. Schedule compactions if needed for all the CFs. This is needed as the
//    flush in the prior step might have been a no-op for some CFs, which
//    means a new super version wouldn't have been installed
Status DBImpl::ResumeImpl() {
  mutex_.AssertHeld();
  WaitForBackgroundWork();

  Status bg_error = error_handler_.GetBGError();
  Status s;
  if (shutdown_initiated_) {
    // Returning shutdown status to SFM during auto recovery will cause it
    // to abort the recovery and allow the shutdown to progress
    s = Status::ShutdownInProgress();
  }
  if (s.ok() && bg_error.severity() > Status::Severity::kHardError) {
    ROCKS_LOG_INFO(
        immutable_db_options_.info_log,
        "DB resume requested but failed due to Fatal/Unrecoverable error");
    s = bg_error;
  }

  // We cannot guarantee consistency of the WAL. So force flush Memtables of
  // all the column families
  if (s.ok()) {
    FlushOptions flush_opts;
    // We allow flush to stall write since we are trying to resume from error.
    flush_opts.allow_write_stall = true;
    if (immutable_db_options_.atomic_flush) {
      autovector<ColumnFamilyData*> cfds;
      SelectColumnFamiliesForAtomicFlush(&cfds);
      mutex_.Unlock();
      s = AtomicFlushMemTables(cfds, flush_opts, FlushReason::kErrorRecovery);
      mutex_.Lock();
    } else {
      for (auto cfd : *versions_->GetColumnFamilySet()) {
        if (cfd->IsDropped()) {
          continue;
        }
        cfd->Ref();
        mutex_.Unlock();
        s = FlushMemTable(cfd, flush_opts, FlushReason::kErrorRecovery);
        mutex_.Lock();
        cfd->Unref();
        if (!s.ok()) {
          break;
        }
      }
    }
    if (!s.ok()) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "DB resume requested but failed due to Flush failure [%s]",
                     s.ToString().c_str());
    }
  }

  JobContext job_context(0);
  FindObsoleteFiles(&job_context, true);
  if (s.ok()) {
    s = error_handler_.ClearBGError();
  }
  mutex_.Unlock();

  job_context.manifest_file_number = 1;
  if (job_context.HaveSomethingToDelete()) {
    PurgeObsoleteFiles(job_context);
  }
  job_context.Clean();

  if (s.ok()) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log, "Successfully resumed DB");
  }
  mutex_.Lock();
  // Check for shutdown again before scheduling further compactions,
  // since we released and re-acquired the lock above
  if (shutdown_initiated_) {
    s = Status::ShutdownInProgress();
  }
  if (s.ok()) {
    for (auto cfd : *versions_->GetColumnFamilySet()) {
      SchedulePendingCompaction(cfd);
    }
    MaybeScheduleFlushOrCompaction();
  }

  // Wake up any waiters - in this case, it could be the shutdown thread
  bg_cv_.SignalAll();

  // No need to check BGError again. If something happened, event listener would
  // be notified and the operation causing it would have failed
  return s;
}

void DBImpl::WaitForBackgroundWork() {
  // Wait for background work to finish
  while (bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
         bg_flush_scheduled_) {
    bg_cv_.Wait();
  }
}

// Will lock the mutex_, will wait for completion if wait is true
void DBImpl::CancelAllBackgroundWork(bool wait) {
  ROCKS_LOG_INFO(immutable_db_options_.info_log,
                 "Shutdown: canceling all background work");

  if (thread_dump_stats_ != nullptr) {
    thread_dump_stats_->cancel();
    thread_dump_stats_.reset();
  }
  if (thread_persist_stats_ != nullptr) {
    thread_persist_stats_->cancel();
    thread_persist_stats_.reset();
  }
  InstrumentedMutexLock l(&mutex_);
  if (!shutting_down_.load(std::memory_order_acquire) &&
      has_unpersisted_data_.load(std::memory_order_relaxed) &&
      !mutable_db_options_.avoid_flush_during_shutdown) {
    if (immutable_db_options_.atomic_flush) {
      autovector<ColumnFamilyData*> cfds;
      SelectColumnFamiliesForAtomicFlush(&cfds);
      mutex_.Unlock();
      AtomicFlushMemTables(cfds, FlushOptions(), FlushReason::kShutDown);
      mutex_.Lock();
    } else {
      for (auto cfd : *versions_->GetColumnFamilySet()) {
        if (!cfd->IsDropped() && cfd->initialized() && !cfd->mem()->IsEmpty()) {
          cfd->Ref();
          mutex_.Unlock();
          FlushMemTable(cfd, FlushOptions(), FlushReason::kShutDown);
          mutex_.Lock();
          cfd->Unref();
        }
      }
    }
    versions_->GetColumnFamilySet()->FreeDeadColumnFamilies();
  }

  shutting_down_.store(true, std::memory_order_release);
  bg_cv_.SignalAll();
  if (!wait) {
    return;
  }
  WaitForBackgroundWork();
}
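
// Usage note (descriptive comment, not part of the upstream code): this is
// also reachable through the rocksdb::CancelAllBackgroundWork(db, wait)
// helper declared in rocksdb/convenience.h; with wait == true it blocks until
// the already-running flushes and compactions have finished via
// WaitForBackgroundWork().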

Status DBImpl::CloseHelper() {
  // Guarantee that there is no background error recovery in progress before
  // continuing with the shutdown
  mutex_.Lock();
  shutdown_initiated_ = true;
  error_handler_.CancelErrorRecovery();
  while (error_handler_.IsRecoveryInProgress()) {
    bg_cv_.Wait();
  }
  mutex_.Unlock();

  // CancelAllBackgroundWork called with false means we just set the shutdown
  // marker. After this we do a variant of the waiting and unschedule work
  // (to consider: moving all the waiting into CancelAllBackgroundWork(true))
  CancelAllBackgroundWork(false);
  int bottom_compactions_unscheduled =
      env_->UnSchedule(this, Env::Priority::BOTTOM);
  int compactions_unscheduled = env_->UnSchedule(this, Env::Priority::LOW);
  int flushes_unscheduled = env_->UnSchedule(this, Env::Priority::HIGH);
  Status ret;
  mutex_.Lock();
  bg_bottom_compaction_scheduled_ -= bottom_compactions_unscheduled;
  bg_compaction_scheduled_ -= compactions_unscheduled;
  bg_flush_scheduled_ -= flushes_unscheduled;

  // Wait for background work to finish
  while (bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
         bg_flush_scheduled_ || bg_purge_scheduled_ ||
         pending_purge_obsolete_files_ ||
         error_handler_.IsRecoveryInProgress()) {
    TEST_SYNC_POINT("DBImpl::~DBImpl:WaitJob");
    bg_cv_.Wait();
  }
  TEST_SYNC_POINT_CALLBACK("DBImpl::CloseHelper:PendingPurgeFinished",
                           &files_grabbed_for_purge_);
  EraseThreadStatusDbInfo();
  flush_scheduler_.Clear();

  while (!flush_queue_.empty()) {
    const FlushRequest& flush_req = PopFirstFromFlushQueue();
    for (const auto& iter : flush_req) {
      ColumnFamilyData* cfd = iter.first;
      if (cfd->Unref()) {
        delete cfd;
      }
    }
  }
  while (!compaction_queue_.empty()) {
    auto cfd = PopFirstFromCompactionQueue();
    if (cfd->Unref()) {
      delete cfd;
    }
  }

  if (default_cf_handle_ != nullptr || persist_stats_cf_handle_ != nullptr) {
    // we need to delete handle outside of lock because it does its own locking
    mutex_.Unlock();
    if (default_cf_handle_) {
      delete default_cf_handle_;
      default_cf_handle_ = nullptr;
    }
    if (persist_stats_cf_handle_) {
      delete persist_stats_cf_handle_;
      persist_stats_cf_handle_ = nullptr;
    }
    mutex_.Lock();
  }

  // Clean up obsolete files due to SuperVersion release.
  // (1) Need to delete obsolete files before closing because RepairDB()
  // scans all existing files in the file system and builds manifest file.
  // Keeping obsolete files confuses the repair process.
  // (2) Need to check if we Open()/Recover() the DB successfully before
  // deleting because if VersionSet recover fails (may be due to corrupted
  // manifest file), it is not able to identify live files correctly. As a
  // result, all "live" files can get deleted by accident. However, corrupted
  // manifest is recoverable by RepairDB().
  if (opened_successfully_) {
    JobContext job_context(next_job_id_.fetch_add(1));
    FindObsoleteFiles(&job_context, true);

    mutex_.Unlock();
    // manifest number starting from 2
    job_context.manifest_file_number = 1;
    if (job_context.HaveSomethingToDelete()) {
      PurgeObsoleteFiles(job_context);
    }
    job_context.Clean();
    mutex_.Lock();
  }

  for (auto l : logs_to_free_) {
    delete l;
  }
  for (auto& log : logs_) {
    uint64_t log_number = log.writer->get_log_number();
    Status s = log.ClearWriter();
    if (!s.ok()) {
      ROCKS_LOG_WARN(
          immutable_db_options_.info_log,
          "Unable to Sync WAL file %s with error -- %s",
          LogFileName(immutable_db_options_.wal_dir, log_number).c_str(),
          s.ToString().c_str());
      // Retain the first error
      if (ret.ok()) {
        ret = s;
      }
    }
  }
  logs_.clear();

  // Table cache may have table handles holding blocks from the block cache.
  // We need to release them before the block cache is destroyed. The block
  // cache may be destroyed inside versions_.reset(), when column family data
  // list is destroyed, so leaving handles in table cache after
  // versions_.reset() may cause issues.
  // Here we clean all unreferenced handles in table cache.
  // Now we assume all user queries have finished, so only version set itself
  // can possibly hold the blocks from block cache. After releasing unreferenced
  // handles here, only handles held by the version set are left, and inside
  // versions_.reset() we will release them. There, we need to make sure every
  // time a handle is released, we erase it from the cache too. By doing that,
  // we can guarantee that after versions_.reset(), table cache is empty
  // so the cache can be safely destroyed.
  table_cache_->EraseUnRefEntries();

  for (auto& txn_entry : recovered_transactions_) {
    delete txn_entry.second;
  }

  // versions need to be destroyed before table_cache since it can hold
  // references to table_cache.
  versions_.reset();
  mutex_.Unlock();
  if (db_lock_ != nullptr) {
    env_->UnlockFile(db_lock_);
  }

  ROCKS_LOG_INFO(immutable_db_options_.info_log, "Shutdown complete");
  LogFlush(immutable_db_options_.info_log);

#ifndef ROCKSDB_LITE
  // If the sst_file_manager was allocated by us during DB::Open(), call
  // Close() on it before closing the info_log. Otherwise, the background
  // thread in SstFileManagerImpl might try to log something
  if (immutable_db_options_.sst_file_manager && own_sfm_) {
    auto sfm = static_cast<SstFileManagerImpl*>(
        immutable_db_options_.sst_file_manager.get());
    sfm->Close();
  }
#endif  // ROCKSDB_LITE

  if (immutable_db_options_.info_log && own_info_log_) {
    Status s = immutable_db_options_.info_log->Close();
    if (ret.ok()) {
      ret = s;
    }
  }
  if (ret.IsAborted()) {
    // Reserve IsAborted() error for those where users didn't release
    // certain resource and they can release them and come back and
    // retry. In this case, we wrap this exception to something else.
    return Status::Incomplete(ret.ToString());
  }
  return ret;
}

Status DBImpl::CloseImpl() { return CloseHelper(); }

DBImpl::~DBImpl() {
  if (!closed_) {
    closed_ = true;
    CloseHelper();
  }
}
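
// Note (descriptive comment, not part of the upstream code): the destructor
// above calls CloseHelper() but discards its Status. Callers that care about
// errors surfaced during shutdown (e.g. a failing WAL sync) can call
// DB::Close(), which reaches CloseImpl()/CloseHelper() and returns that
// Status, before deleting the DB object.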

void DBImpl::MaybeIgnoreError(Status* s) const {
  if (s->ok() || immutable_db_options_.paranoid_checks) {
    // No change needed
  } else {
    ROCKS_LOG_WARN(immutable_db_options_.info_log, "Ignoring error %s",
                   s->ToString().c_str());
    *s = Status::OK();
  }
}

const Status DBImpl::CreateArchivalDirectory() {
  if (immutable_db_options_.wal_ttl_seconds > 0 ||
      immutable_db_options_.wal_size_limit_mb > 0) {
    std::string archivalPath = ArchivalDirectory(immutable_db_options_.wal_dir);
    return env_->CreateDirIfMissing(archivalPath);
  }
  return Status::OK();
}

void DBImpl::PrintStatistics() {
  auto dbstats = immutable_db_options_.statistics.get();
  if (dbstats) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log, "STATISTICS:\n %s",
                   dbstats->ToString().c_str());
  }
}

void DBImpl::StartTimedTasks() {
  unsigned int stats_dump_period_sec = 0;
  unsigned int stats_persist_period_sec = 0;
  {
    InstrumentedMutexLock l(&mutex_);
    stats_dump_period_sec = mutable_db_options_.stats_dump_period_sec;
    if (stats_dump_period_sec > 0) {
      if (!thread_dump_stats_) {
        thread_dump_stats_.reset(new rocksdb::RepeatableThread(
            [this]() { DBImpl::DumpStats(); }, "dump_st", env_,
            static_cast<uint64_t>(stats_dump_period_sec) * kMicrosInSecond));
      }
    }
    stats_persist_period_sec = mutable_db_options_.stats_persist_period_sec;
    if (stats_persist_period_sec > 0) {
      if (!thread_persist_stats_) {
        thread_persist_stats_.reset(new rocksdb::RepeatableThread(
            [this]() { DBImpl::PersistStats(); }, "pst_st", env_,
            static_cast<uint64_t>(stats_persist_period_sec) * kMicrosInSecond));
      }
    }
  }
}

// estimate the total size of stats_history_
size_t DBImpl::EstimateInMemoryStatsHistorySize() const {
  size_t size_total =
      sizeof(std::map<uint64_t, std::map<std::string, uint64_t>>);
  if (stats_history_.size() == 0) return size_total;
  size_t size_per_slice =
      sizeof(uint64_t) + sizeof(std::map<std::string, uint64_t>);
  // non-empty map, stats_history_.begin() guaranteed to exist
  std::map<std::string, uint64_t> sample_slice(stats_history_.begin()->second);
  for (const auto& pairs : sample_slice) {
    size_per_slice +=
        pairs.first.capacity() + sizeof(pairs.first) + sizeof(pairs.second);
  }
  size_total = size_per_slice * stats_history_.size();
  return size_total;
}

void DBImpl::PersistStats() {
  TEST_SYNC_POINT("DBImpl::PersistStats:Entry");
#ifndef ROCKSDB_LITE
  if (shutdown_initiated_) {
    return;
  }
  uint64_t now_seconds = env_->NowMicros() / kMicrosInSecond;
  Statistics* statistics = immutable_db_options_.statistics.get();
  if (!statistics) {
    return;
  }
  size_t stats_history_size_limit = 0;
  {
    InstrumentedMutexLock l(&mutex_);
    stats_history_size_limit = mutable_db_options_.stats_history_buffer_size;
  }

  std::map<std::string, uint64_t> stats_map;
  if (!statistics->getTickerMap(&stats_map)) {
    return;
  }
  ROCKS_LOG_INFO(immutable_db_options_.info_log,
                 "------- PERSISTING STATS -------");

  if (immutable_db_options_.persist_stats_to_disk) {
    WriteBatch batch;
    if (stats_slice_initialized_) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "Reading %" ROCKSDB_PRIszt " stats from statistics\n",
                     stats_slice_.size());
      for (const auto& stat : stats_map) {
        char key[100];
        int length =
            EncodePersistentStatsKey(now_seconds, stat.first, 100, key);
        // calculate the delta from last time
        if (stats_slice_.find(stat.first) != stats_slice_.end()) {
          uint64_t delta = stat.second - stats_slice_[stat.first];
          batch.Put(persist_stats_cf_handle_, Slice(key, std::min(100, length)),
                    ToString(delta));
        }
      }
    }
    stats_slice_initialized_ = true;
    std::swap(stats_slice_, stats_map);
    WriteOptions wo;
    wo.low_pri = true;
    wo.no_slowdown = true;
    wo.sync = false;
    Status s = Write(wo, &batch);
    if (!s.ok()) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "Writing to persistent stats CF failed -- %s",
                     s.ToString().c_str());
    } else {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "Writing %" ROCKSDB_PRIszt " stats with timestamp %" PRIu64
                     " to persistent stats CF succeeded",
                     stats_slice_.size(), now_seconds);
    }
    // TODO(Zhongyi): add purging for persisted data
  } else {
    InstrumentedMutexLock l(&stats_history_mutex_);
    // calculate the delta from last time
    if (stats_slice_initialized_) {
      std::map<std::string, uint64_t> stats_delta;
      for (const auto& stat : stats_map) {
        if (stats_slice_.find(stat.first) != stats_slice_.end()) {
          stats_delta[stat.first] = stat.second - stats_slice_[stat.first];
        }
      }
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "Storing %" ROCKSDB_PRIszt " stats with timestamp %" PRIu64
                     " to in-memory stats history",
                     stats_slice_.size(), now_seconds);
      stats_history_[now_seconds] = stats_delta;
    }
    stats_slice_initialized_ = true;
    std::swap(stats_slice_, stats_map);
    TEST_SYNC_POINT("DBImpl::PersistStats:StatsCopied");

    // delete older stats snapshots to control memory consumption
    size_t stats_history_size = EstimateInMemoryStatsHistorySize();
    bool purge_needed = stats_history_size > stats_history_size_limit;
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "[Pre-GC] In-memory stats history size: %" ROCKSDB_PRIszt
                   " bytes, slice count: %" ROCKSDB_PRIszt,
                   stats_history_size, stats_history_.size());
    while (purge_needed && !stats_history_.empty()) {
      stats_history_.erase(stats_history_.begin());
      purge_needed =
          EstimateInMemoryStatsHistorySize() > stats_history_size_limit;
    }
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "[Post-GC] In-memory stats history size: %" ROCKSDB_PRIszt
                   " bytes, slice count: %" ROCKSDB_PRIszt,
                   stats_history_size, stats_history_.size());
  }
#endif  // !ROCKSDB_LITE
}

bool DBImpl::FindStatsByTime(uint64_t start_time, uint64_t end_time,
                             uint64_t* new_time,
                             std::map<std::string, uint64_t>* stats_map) {
  assert(new_time);
  assert(stats_map);
  if (!new_time || !stats_map) return false;
  // lock when search for start_time
  {
    InstrumentedMutexLock l(&stats_history_mutex_);
    auto it = stats_history_.lower_bound(start_time);
    if (it != stats_history_.end() && it->first < end_time) {
      // make a copy for timestamp and stats_map
      *new_time = it->first;
      *stats_map = it->second;
      return true;
    } else {
      return false;
    }
  }
}

Status DBImpl::GetStatsHistory(
    uint64_t start_time, uint64_t end_time,
    std::unique_ptr<StatsHistoryIterator>* stats_iterator) {
  if (!stats_iterator) {
    return Status::InvalidArgument("stats_iterator not preallocated.");
  }
  if (immutable_db_options_.persist_stats_to_disk) {
    stats_iterator->reset(
        new PersistentStatsHistoryIterator(start_time, end_time, this));
  } else {
    stats_iterator->reset(
        new InMemoryStatsHistoryIterator(start_time, end_time, this));
  }
  return (*stats_iterator)->status();
}
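
// Example usage (illustrative sketch, not part of the upstream code): with
// stats_persist_period_sec > 0 the snapshots collected by PersistStats() can
// be walked through the public DB::GetStatsHistory() API; db below is an
// assumed open DB handle:
//
//   std::unique_ptr<rocksdb::StatsHistoryIterator> it;
//   db->GetStatsHistory(0 /* start_time */,
//                       std::numeric_limits<uint64_t>::max() /* end_time */,
//                       &it);
//   for (; it->Valid(); it->Next()) {
//     uint64_t when = it->GetStatsTime();  // snapshot time in seconds
//     const std::map<std::string, uint64_t>& stats = it->GetStatsMap();
//     // each entry is the delta of a ticker since the previous snapshot
//   }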

void DBImpl::DumpStats() {
  TEST_SYNC_POINT("DBImpl::DumpStats:1");
#ifndef ROCKSDB_LITE
  const DBPropertyInfo* cf_property_info =
      GetPropertyInfo(DB::Properties::kCFStats);
  assert(cf_property_info != nullptr);
  const DBPropertyInfo* db_property_info =
      GetPropertyInfo(DB::Properties::kDBStats);
  assert(db_property_info != nullptr);

  std::string stats;
  if (shutdown_initiated_) {
    return;
  }
  {
    InstrumentedMutexLock l(&mutex_);
    default_cf_internal_stats_->GetStringProperty(
        *db_property_info, DB::Properties::kDBStats, &stats);
    for (auto cfd : *versions_->GetColumnFamilySet()) {
      if (cfd->initialized()) {
        cfd->internal_stats()->GetStringProperty(
            *cf_property_info, DB::Properties::kCFStatsNoFileHistogram, &stats);
      }
    }
    for (auto cfd : *versions_->GetColumnFamilySet()) {
      if (cfd->initialized()) {
        cfd->internal_stats()->GetStringProperty(
            *cf_property_info, DB::Properties::kCFFileHistogram, &stats);
      }
    }
  }
  TEST_SYNC_POINT("DBImpl::DumpStats:2");
  ROCKS_LOG_INFO(immutable_db_options_.info_log,
                 "------- DUMPING STATS -------");
  ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s", stats.c_str());
  if (immutable_db_options_.dump_malloc_stats) {
    stats.clear();
    DumpMallocStats(&stats);
    if (!stats.empty()) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "------- Malloc STATS -------");
      ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s", stats.c_str());
    }
  }
#endif  // !ROCKSDB_LITE

  PrintStatistics();
}

Status DBImpl::TablesRangeTombstoneSummary(ColumnFamilyHandle* column_family,
                                           int max_entries_to_print,
                                           std::string* out_str) {
  auto* cfh =
      static_cast_with_check<ColumnFamilyHandleImpl, ColumnFamilyHandle>(
          column_family);
  ColumnFamilyData* cfd = cfh->cfd();

  SuperVersion* super_version = cfd->GetReferencedSuperVersion(&mutex_);
  Version* version = super_version->current;

  Status s =
      version->TablesRangeTombstoneSummary(max_entries_to_print, out_str);

  CleanupSuperVersion(super_version);
  return s;
}

void DBImpl::ScheduleBgLogWriterClose(JobContext* job_context) {
  if (!job_context->logs_to_free.empty()) {
    for (auto l : job_context->logs_to_free) {
      AddToLogsToFreeQueue(l);
    }
    job_context->logs_to_free.clear();
    SchedulePurge();
  }
}

Directory* DBImpl::GetDataDir(ColumnFamilyData* cfd, size_t path_id) const {
  assert(cfd);
  Directory* ret_dir = cfd->GetDataDir(path_id);
  if (ret_dir == nullptr) {
    return directories_.GetDataDir(path_id);
  }
  return ret_dir;
}

Status DBImpl::SetOptions(
    ColumnFamilyHandle* column_family,
    const std::unordered_map<std::string, std::string>& options_map) {
#ifdef ROCKSDB_LITE
  (void)column_family;
  (void)options_map;
  return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
  auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
  if (options_map.empty()) {
    ROCKS_LOG_WARN(immutable_db_options_.info_log,
                   "SetOptions() on column family [%s], empty input",
                   cfd->GetName().c_str());
    return Status::InvalidArgument("empty input");
  }

  MutableCFOptions new_options;
  Status s;
  Status persist_options_status;
  SuperVersionContext sv_context(/* create_superversion */ true);
  {
    auto db_options = GetDBOptions();
    InstrumentedMutexLock l(&mutex_);
    s = cfd->SetOptions(db_options, options_map);
    if (s.ok()) {
      new_options = *cfd->GetLatestMutableCFOptions();
      // Append new version to recompute compaction score.
      VersionEdit dummy_edit;
      versions_->LogAndApply(cfd, new_options, &dummy_edit, &mutex_,
                             directories_.GetDbDir());
      // Trigger possible flush/compactions. This has to be before we persist
      // options to file, otherwise there will be a deadlock with writer
      // thread.
      InstallSuperVersionAndScheduleWork(cfd, &sv_context, new_options);

      persist_options_status = WriteOptionsFile(
          false /*need_mutex_lock*/, true /*need_enter_write_thread*/);
      bg_cv_.SignalAll();
    }
  }
  sv_context.Clean();

  ROCKS_LOG_INFO(
      immutable_db_options_.info_log,
      "SetOptions() on column family [%s], inputs:", cfd->GetName().c_str());
  for (const auto& o : options_map) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
                   o.second.c_str());
  }
  if (s.ok()) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "[%s] SetOptions() succeeded", cfd->GetName().c_str());
    new_options.Dump(immutable_db_options_.info_log.get());
    if (!persist_options_status.ok()) {
      s = persist_options_status;
    }
  } else {
    ROCKS_LOG_WARN(immutable_db_options_.info_log, "[%s] SetOptions() failed",
                   cfd->GetName().c_str());
  }
  LogFlush(immutable_db_options_.info_log);
  return s;
#endif  // ROCKSDB_LITE
}
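
// Example usage (illustrative sketch, not part of the upstream code): only
// dynamically changeable column family options can be updated this way; db
// and cf_handle below are assumed handles:
//
//   db->SetOptions(cf_handle,
//                  {{"write_buffer_size", "67108864"},
//                   {"level0_file_num_compaction_trigger", "4"}});
//
// A non-OK Status is returned if a key is unknown or not mutable.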

Status DBImpl::SetDBOptions(
    const std::unordered_map<std::string, std::string>& options_map) {
#ifdef ROCKSDB_LITE
  (void)options_map;
  return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
  if (options_map.empty()) {
    ROCKS_LOG_WARN(immutable_db_options_.info_log,
                   "SetDBOptions(), empty input.");
    return Status::InvalidArgument("empty input");
  }

  MutableDBOptions new_options;
  Status s;
  Status persist_options_status;
  bool wal_changed = false;
  WriteContext write_context;
  {
    InstrumentedMutexLock l(&mutex_);
    s = GetMutableDBOptionsFromStrings(mutable_db_options_, options_map,
                                       &new_options);
    if (new_options.bytes_per_sync == 0) {
      new_options.bytes_per_sync = 1024 * 1024;
    }
    DBOptions new_db_options =
        BuildDBOptions(immutable_db_options_, new_options);
    if (s.ok()) {
      s = ValidateOptions(new_db_options);
    }
    if (s.ok()) {
      for (auto c : *versions_->GetColumnFamilySet()) {
        if (!c->IsDropped()) {
          auto cf_options = c->GetLatestCFOptions();
          s = ColumnFamilyData::ValidateOptions(new_db_options, cf_options);
          if (!s.ok()) {
            break;
          }
        }
      }
    }
    if (s.ok()) {
      if (new_options.max_background_compactions >
          mutable_db_options_.max_background_compactions) {
        env_->IncBackgroundThreadsIfNeeded(
            new_options.max_background_compactions, Env::Priority::LOW);
        MaybeScheduleFlushOrCompaction();
      }
      if (new_options.stats_dump_period_sec !=
          mutable_db_options_.stats_dump_period_sec) {
        if (thread_dump_stats_) {
          mutex_.Unlock();
          thread_dump_stats_->cancel();
          mutex_.Lock();
        }
        if (new_options.stats_dump_period_sec > 0) {
          thread_dump_stats_.reset(new rocksdb::RepeatableThread(
              [this]() { DBImpl::DumpStats(); }, "dump_st", env_,
              static_cast<uint64_t>(new_options.stats_dump_period_sec) *
                  kMicrosInSecond));
        } else {
          thread_dump_stats_.reset();
        }
      }
      if (new_options.stats_persist_period_sec !=
          mutable_db_options_.stats_persist_period_sec) {
        if (thread_persist_stats_) {
          mutex_.Unlock();
          thread_persist_stats_->cancel();
          mutex_.Lock();
        }
        if (new_options.stats_persist_period_sec > 0) {
          thread_persist_stats_.reset(new rocksdb::RepeatableThread(
              [this]() { DBImpl::PersistStats(); }, "pst_st", env_,
              static_cast<uint64_t>(new_options.stats_persist_period_sec) *
                  kMicrosInSecond));
        } else {
          thread_persist_stats_.reset();
        }
      }
      write_controller_.set_max_delayed_write_rate(
          new_options.delayed_write_rate);
      table_cache_.get()->SetCapacity(new_options.max_open_files == -1
                                          ? TableCache::kInfiniteCapacity
                                          : new_options.max_open_files - 10);
      wal_changed = mutable_db_options_.wal_bytes_per_sync !=
                    new_options.wal_bytes_per_sync;
      mutable_db_options_ = new_options;
      env_options_for_compaction_ = EnvOptions(new_db_options);
      env_options_for_compaction_ = env_->OptimizeForCompactionTableWrite(
          env_options_for_compaction_, immutable_db_options_);
      versions_->ChangeEnvOptions(mutable_db_options_);
      // TODO(xiez): clarify why apply optimize for read to write options
      env_options_for_compaction_ = env_->OptimizeForCompactionTableRead(
          env_options_for_compaction_, immutable_db_options_);
      env_options_for_compaction_.compaction_readahead_size =
          mutable_db_options_.compaction_readahead_size;
      WriteThread::Writer w;
      write_thread_.EnterUnbatched(&w, &mutex_);
      if (total_log_size_ > GetMaxTotalWalSize() || wal_changed) {
        Status purge_wal_status = SwitchWAL(&write_context);
        if (!purge_wal_status.ok()) {
          ROCKS_LOG_WARN(immutable_db_options_.info_log,
                         "Unable to purge WAL files in SetDBOptions() -- %s",
                         purge_wal_status.ToString().c_str());
        }
      }
      persist_options_status = WriteOptionsFile(
          false /*need_mutex_lock*/, false /*need_enter_write_thread*/);
      write_thread_.ExitUnbatched(&w);
    }
  }
  ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions(), inputs:");
  for (const auto& o : options_map) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
                   o.second.c_str());
  }
  if (s.ok()) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions() succeeded");
    new_options.Dump(immutable_db_options_.info_log.get());
    if (!persist_options_status.ok()) {
      if (immutable_db_options_.fail_if_options_file_error) {
        s = Status::IOError(
            "SetDBOptions() succeeded, but unable to persist options",
            persist_options_status.ToString());
      }
      ROCKS_LOG_WARN(immutable_db_options_.info_log,
                     "Unable to persist options in SetDBOptions() -- %s",
                     persist_options_status.ToString().c_str());
    }
  } else {
    ROCKS_LOG_WARN(immutable_db_options_.info_log, "SetDBOptions failed");
  }
  LogFlush(immutable_db_options_.info_log);
  return s;
#endif  // ROCKSDB_LITE
}
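
// Example usage (illustrative sketch, not part of the upstream code):
//
//   db->SetDBOptions({{"max_background_compactions", "4"},
//                     {"stats_dump_period_sec", "0"}});
//
// Both keys are mutable DB options handled by the function above: the first
// may grow the LOW-priority thread pool, the second stops the periodic
// stats-dump thread.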

// return the same level if it cannot be moved
int DBImpl::FindMinimumEmptyLevelFitting(
    ColumnFamilyData* cfd, const MutableCFOptions& /*mutable_cf_options*/,
    int level) {
  mutex_.AssertHeld();
  const auto* vstorage = cfd->current()->storage_info();
  int minimum_level = level;
  for (int i = level - 1; i > 0; --i) {
    // stop if level i is not empty
    if (vstorage->NumLevelFiles(i) > 0) break;
    // stop if level i is too small (cannot fit the level files)
    if (vstorage->MaxBytesForLevel(i) < vstorage->NumLevelBytes(level)) {
      break;
    }

    minimum_level = i;
  }
  return minimum_level;
}

Status DBImpl::FlushWAL(bool sync) {
  if (manual_wal_flush_) {
    Status s;
    {
      // We need to lock log_write_mutex_ since logs_ might change concurrently
      InstrumentedMutexLock wl(&log_write_mutex_);
      log::Writer* cur_log_writer = logs_.back().writer;
      s = cur_log_writer->WriteBuffer();
    }
    if (!s.ok()) {
      ROCKS_LOG_ERROR(immutable_db_options_.info_log, "WAL flush error %s",
                      s.ToString().c_str());
      // In case there is a fs error we should set it globally to prevent the
      // future writes
      WriteStatusCheck(s);
      // whether sync or not, we should abort the rest of function upon error
      return s;
    }
    if (!sync) {
      ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "FlushWAL sync=false");
      return s;
    }
  }
  if (!sync) {
    return Status::OK();
  }
  // sync = true
  ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "FlushWAL sync=true");
  return SyncWAL();
}
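
// Behavior summary (descriptive comment, not part of the upstream code):
// FlushWAL(false) only moves buffered WAL data to the file system (relevant
// when manual_wal_flush is enabled), while FlushWAL(true) additionally calls
// SyncWAL() so the WAL files are synced, e.g.
//
//   db->FlushWAL(false);  // write the WAL buffer out, no fsync
//   db->FlushWAL(true);   // write the buffer and sync the WAL files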

Status DBImpl::SyncWAL() {
  autovector<log::Writer*, 1> logs_to_sync;
  bool need_log_dir_sync;
  uint64_t current_log_number;

  {
    InstrumentedMutexLock l(&mutex_);
    assert(!logs_.empty());

    // This SyncWAL() call only cares about logs up to this number.
    current_log_number = logfile_number_;

    while (logs_.front().number <= current_log_number &&
           logs_.front().getting_synced) {
      log_sync_cv_.Wait();
    }
    // First check that logs are safe to sync in background.
    for (auto it = logs_.begin();
         it != logs_.end() && it->number <= current_log_number; ++it) {
      if (!it->writer->file()->writable_file()->IsSyncThreadSafe()) {
        return Status::NotSupported(
            "SyncWAL() is not supported for this implementation of WAL file",
            immutable_db_options_.allow_mmap_writes
                ? "try setting Options::allow_mmap_writes to false"
                : Slice());
      }
    }
    for (auto it = logs_.begin();
         it != logs_.end() && it->number <= current_log_number; ++it) {
      auto& log = *it;
      assert(!log.getting_synced);
      log.getting_synced = true;
      logs_to_sync.push_back(log.writer);
    }

    need_log_dir_sync = !log_dir_synced_;
  }

  TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:1");
  RecordTick(stats_, WAL_FILE_SYNCED);
  Status status;
  for (log::Writer* log : logs_to_sync) {
    status = log->file()->SyncWithoutFlush(immutable_db_options_.use_fsync);
    if (!status.ok()) {
      break;
    }
  }
  if (status.ok() && need_log_dir_sync) {
    status = directories_.GetWalDir()->Fsync();
  }
  TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:2");

  TEST_SYNC_POINT("DBImpl::SyncWAL:BeforeMarkLogsSynced:1");
  {
    InstrumentedMutexLock l(&mutex_);
    MarkLogsSynced(current_log_number, need_log_dir_sync, status);
  }
  TEST_SYNC_POINT("DBImpl::SyncWAL:BeforeMarkLogsSynced:2");

  return status;
}

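// Descriptive note (not part of the upstream code): LockWAL() flushes any
// buffered WAL data and returns with log_write_mutex_ still held; UnlockWAL()
// releases it. Callers are expected to pair the two calls around a section
// during which the WAL should not be concurrently modified.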
Status DBImpl::LockWAL() {
  log_write_mutex_.Lock();
  auto cur_log_writer = logs_.back().writer;
  auto status = cur_log_writer->WriteBuffer();
  if (!status.ok()) {
    ROCKS_LOG_ERROR(immutable_db_options_.info_log, "WAL flush error %s",
                    status.ToString().c_str());
    // In case there is a fs error we should set it globally to prevent the
    // future writes
    WriteStatusCheck(status);
  }
  return status;
}

Status DBImpl::UnlockWAL() {
  log_write_mutex_.Unlock();
  return Status::OK();
}

void DBImpl::MarkLogsSynced(uint64_t up_to, bool synced_dir,
                            const Status& status) {
  mutex_.AssertHeld();
  if (synced_dir && logfile_number_ == up_to && status.ok()) {
    log_dir_synced_ = true;
  }
  for (auto it = logs_.begin(); it != logs_.end() && it->number <= up_to;) {
    auto& log = *it;
    assert(log.getting_synced);
    if (status.ok() && logs_.size() > 1) {
      logs_to_free_.push_back(log.ReleaseWriter());
      // To modify logs_ both mutex_ and log_write_mutex_ must be held
      InstrumentedMutexLock l(&log_write_mutex_);
      it = logs_.erase(it);
    } else {
      log.getting_synced = false;
      ++it;
    }
  }
  assert(!status.ok() || logs_.empty() || logs_[0].number > up_to ||
         (logs_.size() == 1 && !logs_[0].getting_synced));
  log_sync_cv_.SignalAll();
}

SequenceNumber DBImpl::GetLatestSequenceNumber() const {
  return versions_->LastSequence();
}

void DBImpl::SetLastPublishedSequence(SequenceNumber seq) {
  versions_->SetLastPublishedSequence(seq);
}

bool DBImpl::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) {
  if (seqnum > preserve_deletes_seqnum_.load()) {
    preserve_deletes_seqnum_.store(seqnum);
    return true;
  } else {
    return false;
  }
}
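
// Usage note (descriptive comment, not part of the upstream code): this only
// has a useful effect when the DB was opened with Options::preserve_deletes
// == true (see preserve_deletes_ in the constructor). The sequence number can
// only move forward; passing a value not greater than the current one returns
// false and leaves the setting unchanged.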

InternalIterator* DBImpl::NewInternalIterator(
    Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
    ColumnFamilyHandle* column_family) {
  ColumnFamilyData* cfd;
  if (column_family == nullptr) {
    cfd = default_cf_handle_->cfd();
  } else {
    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
    cfd = cfh->cfd();
  }

  mutex_.Lock();
  SuperVersion* super_version = cfd->GetSuperVersion()->Ref();
  mutex_.Unlock();
  ReadOptions roptions;
  return NewInternalIterator(roptions, cfd, super_version, arena, range_del_agg,
                             sequence);
}

void DBImpl::SchedulePurge() {
  mutex_.AssertHeld();
  assert(opened_successfully_);

  // Purge operations are put into High priority queue
  bg_purge_scheduled_++;
  env_->Schedule(&DBImpl::BGWorkPurge, this, Env::Priority::HIGH, nullptr);
}

void DBImpl::BackgroundCallPurge() {
  mutex_.Lock();

  // We use one single loop to clear both queues so that after exiting the loop
  // both queues are empty. This is stricter than what is needed, but can make
  // it easier for us to reason about the correctness.
  while (!purge_queue_.empty() || !logs_to_free_queue_.empty()) {
    // Check logs_to_free_queue_ first and close log writers.
    if (!logs_to_free_queue_.empty()) {
      assert(!logs_to_free_queue_.empty());
      log::Writer* log_writer = *(logs_to_free_queue_.begin());
      logs_to_free_queue_.pop_front();
      mutex_.Unlock();
      delete log_writer;
      mutex_.Lock();
    } else {
      auto purge_file = purge_queue_.begin();
      auto fname = purge_file->fname;
      auto dir_to_sync = purge_file->dir_to_sync;
      auto type = purge_file->type;
      auto number = purge_file->number;
      auto job_id = purge_file->job_id;
      purge_queue_.pop_front();

      mutex_.Unlock();
      DeleteObsoleteFileImpl(job_id, fname, dir_to_sync, type, number);
      mutex_.Lock();
    }
  }
  bg_purge_scheduled_--;

  bg_cv_.SignalAll();
  // IMPORTANT: there should be no code after calling SignalAll. This call may
  // signal the DB destructor that it's OK to proceed with destruction. In
  // that case, all DB variables will be deallocated and referencing them
  // will cause trouble.
  mutex_.Unlock();
}

namespace {
struct IterState {
  IterState(DBImpl* _db, InstrumentedMutex* _mu, SuperVersion* _super_version,
            bool _background_purge)
      : db(_db),
        mu(_mu),
        super_version(_super_version),
        background_purge(_background_purge) {}

  DBImpl* db;
  InstrumentedMutex* mu;
  SuperVersion* super_version;
  bool background_purge;
};

static void CleanupIteratorState(void* arg1, void* /*arg2*/) {
  IterState* state = reinterpret_cast<IterState*>(arg1);

  if (state->super_version->Unref()) {
    // Job id == 0 means that this is not our background process, but rather
    // a user thread
    JobContext job_context(0);

    state->mu->Lock();
    state->super_version->Cleanup();
    state->db->FindObsoleteFiles(&job_context, false, true);
    if (state->background_purge) {
      state->db->ScheduleBgLogWriterClose(&job_context);
    }
    state->mu->Unlock();

    delete state->super_version;
    if (job_context.HaveSomethingToDelete()) {
      if (state->background_purge) {
        // PurgeObsoleteFiles here does not delete files. Instead, it adds the
        // files to be deleted to a job queue, and deletes them in a separate
        // background thread.
        state->db->PurgeObsoleteFiles(job_context, true /* schedule only */);
        state->mu->Lock();
        state->db->SchedulePurge();
        state->mu->Unlock();
      } else {
        state->db->PurgeObsoleteFiles(job_context);
      }
    }
    job_context.Clean();
  }

  delete state;
}
}  // namespace

InternalIterator* DBImpl::NewInternalIterator(const ReadOptions& read_options,
                                              ColumnFamilyData* cfd,
                                              SuperVersion* super_version,
                                              Arena* arena,
                                              RangeDelAggregator* range_del_agg,
                                              SequenceNumber sequence) {
  InternalIterator* internal_iter;
  assert(arena != nullptr);
  assert(range_del_agg != nullptr);
  // Need to create internal iterator from the arena.
  MergeIteratorBuilder merge_iter_builder(
      &cfd->internal_comparator(), arena,
      !read_options.total_order_seek &&
          super_version->mutable_cf_options.prefix_extractor != nullptr);
  // Collect iterator for mutable mem
  merge_iter_builder.AddIterator(
      super_version->mem->NewIterator(read_options, arena));
  std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter;
  Status s;
  if (!read_options.ignore_range_deletions) {
    range_del_iter.reset(
        super_version->mem->NewRangeTombstoneIterator(read_options, sequence));
    range_del_agg->AddTombstones(std::move(range_del_iter));
  }
  // Collect all needed child iterators for immutable memtables
  if (s.ok()) {
    super_version->imm->AddIterators(read_options, &merge_iter_builder);
    if (!read_options.ignore_range_deletions) {
      s = super_version->imm->AddRangeTombstoneIterators(read_options, arena,
                                                         range_del_agg);
    }
  }
  TEST_SYNC_POINT_CALLBACK("DBImpl::NewInternalIterator:StatusCallback", &s);
  if (s.ok()) {
    // Collect iterators for files in L0 - Ln
    if (read_options.read_tier != kMemtableTier) {
      super_version->current->AddIterators(read_options, env_options_,
                                           &merge_iter_builder, range_del_agg);
    }
    internal_iter = merge_iter_builder.Finish();
    IterState* cleanup =
        new IterState(this, &mutex_, super_version,
                      read_options.background_purge_on_iterator_cleanup ||
                      immutable_db_options_.avoid_unnecessary_blocking_io);
    internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr);

    return internal_iter;
  } else {
    CleanupSuperVersion(super_version);
  }
  return NewErrorInternalIterator<Slice>(s, arena);
}

ColumnFamilyHandle* DBImpl::DefaultColumnFamily() const {
  return default_cf_handle_;
}

ColumnFamilyHandle* DBImpl::PersistentStatsColumnFamily() const {
  return persist_stats_cf_handle_;
}

Status DBImpl::Get(const ReadOptions& read_options,
                   ColumnFamilyHandle* column_family, const Slice& key,
                   PinnableSlice* value) {
  GetImplOptions get_impl_options;
  get_impl_options.column_family = column_family;
  get_impl_options.value = value;
  return GetImpl(read_options, key, get_impl_options);
}
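
// Illustrative usage sketch (not part of this translation unit): how a caller
// typically reaches the Get() entry point above. Assumes `db` is an
// already-opened DB*.
//
//   PinnableSlice value;
//   Status s = db->Get(ReadOptions(), db->DefaultColumnFamily(), Slice("k1"),
//                      &value);
//   if (s.ok()) {
//     // value.data()/value.size() remain valid until `value` is reset.
//   } else if (s.IsNotFound()) {
//     // the key is absent
//   }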

Status DBImpl::GetImpl(const ReadOptions& read_options, const Slice& key,
                       GetImplOptions get_impl_options) {
  assert(get_impl_options.value != nullptr ||
         get_impl_options.merge_operands != nullptr);
  PERF_CPU_TIMER_GUARD(get_cpu_nanos, env_);
  StopWatch sw(env_, stats_, DB_GET);
  PERF_TIMER_GUARD(get_snapshot_time);

  auto cfh =
      reinterpret_cast<ColumnFamilyHandleImpl*>(get_impl_options.column_family);
  auto cfd = cfh->cfd();

  if (tracer_) {
    // TODO: This mutex should be removed later, to improve performance when
    // tracing is enabled.
    InstrumentedMutexLock lock(&trace_mutex_);
    if (tracer_) {
      tracer_->Get(get_impl_options.column_family, key);
    }
  }

  // Acquire SuperVersion
  SuperVersion* sv = GetAndRefSuperVersion(cfd);

  TEST_SYNC_POINT("DBImpl::GetImpl:1");
  TEST_SYNC_POINT("DBImpl::GetImpl:2");

  SequenceNumber snapshot;
  if (read_options.snapshot != nullptr) {
    if (get_impl_options.callback) {
      // Already calculated based on read_options.snapshot
      snapshot = get_impl_options.callback->max_visible_seq();
    } else {
      snapshot =
          reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)->number_;
    }
  } else {
    // Note that the snapshot is assigned AFTER referencing the super
    // version because otherwise a flush happening in between may compact away
    // data for the snapshot, so the reader would see neither data that was
    // visible to the snapshot before compaction nor the newer data inserted
    // afterwards.
    snapshot = last_seq_same_as_publish_seq_
                   ? versions_->LastSequence()
                   : versions_->LastPublishedSequence();
    if (get_impl_options.callback) {
      // The unprep_seqs are not published for write unprepared, so it could be
      // that max_visible_seq is larger. Seek to the std::max of the two.
      // However, we still want our callback to contain the actual snapshot so
      // that it can do the correct visibility filtering.
      get_impl_options.callback->Refresh(snapshot);

      // Internally, WriteUnpreparedTxnReadCallback::Refresh would set
      // max_visible_seq = max(max_visible_seq, snapshot)
      //
      // Currently, the commented out assert is broken by
      // InvalidSnapshotReadCallback, but if write unprepared recovery followed
      // the regular transaction flow, then this special read callback would not
      // be needed.
      //
      // assert(callback->max_visible_seq() >= snapshot);
      snapshot = get_impl_options.callback->max_visible_seq();
    }
  }
  TEST_SYNC_POINT("DBImpl::GetImpl:3");
  TEST_SYNC_POINT("DBImpl::GetImpl:4");

  // Prepare to store a list of merge operations if merge occurs.
  MergeContext merge_context;
  SequenceNumber max_covering_tombstone_seq = 0;

  Status s;
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
  // merge_operands will contain the sequence of merges in the latter case.
  LookupKey lkey(key, snapshot, read_options.timestamp);
  PERF_TIMER_STOP(get_snapshot_time);

  bool skip_memtable = (read_options.read_tier == kPersistedTier &&
                        has_unpersisted_data_.load(std::memory_order_relaxed));
  bool done = false;
  if (!skip_memtable) {
    // Get value associated with key
    if (get_impl_options.get_value) {
      if (sv->mem->Get(lkey, get_impl_options.value->GetSelf(), &s,
                       &merge_context, &max_covering_tombstone_seq,
                       read_options, get_impl_options.callback,
                       get_impl_options.is_blob_index)) {
        done = true;
        get_impl_options.value->PinSelf();
        RecordTick(stats_, MEMTABLE_HIT);
      } else if ((s.ok() || s.IsMergeInProgress()) &&
                 sv->imm->Get(lkey, get_impl_options.value->GetSelf(), &s,
                              &merge_context, &max_covering_tombstone_seq,
                              read_options, get_impl_options.callback,
                              get_impl_options.is_blob_index)) {
        done = true;
        get_impl_options.value->PinSelf();
        RecordTick(stats_, MEMTABLE_HIT);
      }
    } else {
      // Get the merge operands associated with the key. Merge operands should
      // not be merged; raw values should be returned to the user.
      if (sv->mem->Get(lkey, nullptr, &s, &merge_context,
                       &max_covering_tombstone_seq, read_options, nullptr,
                       nullptr, false)) {
        done = true;
        RecordTick(stats_, MEMTABLE_HIT);
      } else if ((s.ok() || s.IsMergeInProgress()) &&
                 sv->imm->GetMergeOperands(lkey, &s, &merge_context,
                                           &max_covering_tombstone_seq,
                                           read_options)) {
        done = true;
        RecordTick(stats_, MEMTABLE_HIT);
      }
    }
    if (!done && !s.ok() && !s.IsMergeInProgress()) {
      ReturnAndCleanupSuperVersion(cfd, sv);
      return s;
    }
  }
  if (!done) {
    PERF_TIMER_GUARD(get_from_output_files_time);
    sv->current->Get(
        read_options, lkey, get_impl_options.value, &s, &merge_context,
        &max_covering_tombstone_seq,
        get_impl_options.get_value ? get_impl_options.value_found : nullptr,
        nullptr, nullptr,
        get_impl_options.get_value ? get_impl_options.callback : nullptr,
        get_impl_options.get_value ? get_impl_options.is_blob_index : nullptr,
        get_impl_options.get_value);
    RecordTick(stats_, MEMTABLE_MISS);
  }

  {
    PERF_TIMER_GUARD(get_post_process_time);

    ReturnAndCleanupSuperVersion(cfd, sv);

    RecordTick(stats_, NUMBER_KEYS_READ);
    size_t size = 0;
    if (s.ok()) {
      if (get_impl_options.get_value) {
        size = get_impl_options.value->size();
      } else {
        // Return all merge operands for get_impl_options.key
        *get_impl_options.number_of_operands =
            static_cast<int>(merge_context.GetNumOperands());
        if (*get_impl_options.number_of_operands >
            get_impl_options.get_merge_operands_options
                ->expected_max_number_of_operands) {
          s = Status::Incomplete(
              Status::SubCode::KMergeOperandsInsufficientCapacity);
        } else {
          for (const Slice& sl : merge_context.GetOperands()) {
            size += sl.size();
            get_impl_options.merge_operands->PinSelf(sl);
            get_impl_options.merge_operands++;
          }
        }
      }
      RecordTick(stats_, BYTES_READ, size);
      PERF_COUNTER_ADD(get_read_bytes, size);
    }
    RecordInHistogram(stats_, BYTES_PER_READ, size);
  }
  return s;
}
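
// Illustrative sketch of the merge-operand path handled by GetImpl() above.
// Assumes the public DB::GetMergeOperands() API (see include/rocksdb/db.h for
// the authoritative signature) and an already-opened DB* `db`.
//
//   GetMergeOperandsOptions merge_opts;
//   merge_opts.expected_max_number_of_operands = 4;
//   std::vector<PinnableSlice> operands(merge_opts.expected_max_number_of_operands);
//   int num_operands = 0;
//   Status s = db->GetMergeOperands(ReadOptions(), db->DefaultColumnFamily(),
//                                   Slice("counter"), operands.data(),
//                                   &merge_opts, &num_operands);
//   // On success, operands[0..num_operands) hold the unmerged operand values.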

std::vector<Status> DBImpl::MultiGet(
    const ReadOptions& read_options,
    const std::vector<ColumnFamilyHandle*>& column_family,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  PERF_CPU_TIMER_GUARD(get_cpu_nanos, env_);
  StopWatch sw(env_, stats_, DB_MULTIGET);
  PERF_TIMER_GUARD(get_snapshot_time);

  SequenceNumber snapshot;

  struct MultiGetColumnFamilyData {
    ColumnFamilyData* cfd;
    SuperVersion* super_version;
    MultiGetColumnFamilyData(ColumnFamilyData* cf, SuperVersion* sv)
        : cfd(cf), super_version(sv) {}
  };
  std::unordered_map<uint32_t, MultiGetColumnFamilyData> multiget_cf_data(
      column_family.size());
  for (auto cf : column_family) {
    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(cf);
    auto cfd = cfh->cfd();
    if (multiget_cf_data.find(cfd->GetID()) == multiget_cf_data.end()) {
      multiget_cf_data.emplace(cfd->GetID(),
                               MultiGetColumnFamilyData(cfd, nullptr));
    }
  }

  bool last_try = false;
  {
    // If we end up with the same issue of the memtable getting sealed during 2
    // consecutive retries, it means the write rate is very high. In that case
    // it's probably OK to take the mutex on the 3rd try so we can succeed for
    // sure.
    static const int num_retries = 3;
    for (auto i = 0; i < num_retries; ++i) {
      last_try = (i == num_retries - 1);
      bool retry = false;

      if (i > 0) {
        for (auto mgd_iter = multiget_cf_data.begin();
             mgd_iter != multiget_cf_data.end(); ++mgd_iter) {
          auto super_version = mgd_iter->second.super_version;
          auto cfd = mgd_iter->second.cfd;
          if (super_version != nullptr) {
            ReturnAndCleanupSuperVersion(cfd, super_version);
          }
          mgd_iter->second.super_version = nullptr;
        }
      }

      if (read_options.snapshot == nullptr) {
        if (last_try) {
          TEST_SYNC_POINT("DBImpl::MultiGet::LastTry");
          // We're close to max number of retries. For the last retry,
          // acquire the lock so we're sure to succeed
          mutex_.Lock();
        }
        snapshot = last_seq_same_as_publish_seq_
                       ? versions_->LastSequence()
                       : versions_->LastPublishedSequence();
      } else {
        snapshot = reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
                       ->number_;
      }

      for (auto mgd_iter = multiget_cf_data.begin();
           mgd_iter != multiget_cf_data.end(); ++mgd_iter) {
        if (!last_try) {
          mgd_iter->second.super_version =
              GetAndRefSuperVersion(mgd_iter->second.cfd);
        } else {
          mgd_iter->second.super_version =
              mgd_iter->second.cfd->GetSuperVersion()->Ref();
        }
        TEST_SYNC_POINT("DBImpl::MultiGet::AfterRefSV");
        if (read_options.snapshot != nullptr || last_try) {
          // If user passed a snapshot, then we don't care if a memtable is
          // sealed or compaction happens because the snapshot would ensure
          // that older key versions are kept around. If this is the last
          // retry, then we have the lock so nothing bad can happen
          continue;
        }
        // We could get the earliest sequence number for the whole list of
        // memtables, which will include immutable memtables as well, but that
        // might be tricky to maintain in case we decide, in future, to do
        // memtable compaction.
        if (!last_try) {
          auto seq =
              mgd_iter->second.super_version->mem->GetEarliestSequenceNumber();
          if (seq > snapshot) {
            retry = true;
            break;
          }
        }
      }
      if (!retry) {
        if (last_try) {
          mutex_.Unlock();
        }
        break;
      }
    }
  }

  // Contains a list of merge operations if merge occurs.
  MergeContext merge_context;

  // Note: this always resizes the values array
  size_t num_keys = keys.size();
  std::vector<Status> stat_list(num_keys);
  values->resize(num_keys);

  // Keep track of bytes that we read for statistics-recording later
  uint64_t bytes_read = 0;
  PERF_TIMER_STOP(get_snapshot_time);

  // For each of the given keys, apply the entire "get" process as follows:
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
  // merge_operands will contain the sequence of merges in the latter case.
  size_t num_found = 0;
  for (size_t i = 0; i < num_keys; ++i) {
    merge_context.Clear();
    Status& s = stat_list[i];
    std::string* value = &(*values)[i];

    LookupKey lkey(keys[i], snapshot);
    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family[i]);
    SequenceNumber max_covering_tombstone_seq = 0;
    auto mgd_iter = multiget_cf_data.find(cfh->cfd()->GetID());
    assert(mgd_iter != multiget_cf_data.end());
    auto mgd = mgd_iter->second;
    auto super_version = mgd.super_version;
    bool skip_memtable =
        (read_options.read_tier == kPersistedTier &&
         has_unpersisted_data_.load(std::memory_order_relaxed));
    bool done = false;
    if (!skip_memtable) {
      if (super_version->mem->Get(lkey, value, &s, &merge_context,
                                  &max_covering_tombstone_seq, read_options)) {
        done = true;
        RecordTick(stats_, MEMTABLE_HIT);
      } else if (super_version->imm->Get(lkey, value, &s, &merge_context,
                                         &max_covering_tombstone_seq,
                                         read_options)) {
        done = true;
        RecordTick(stats_, MEMTABLE_HIT);
      }
    }
    if (!done) {
      PinnableSlice pinnable_val;
      PERF_TIMER_GUARD(get_from_output_files_time);
      super_version->current->Get(read_options, lkey, &pinnable_val, &s,
                                  &merge_context, &max_covering_tombstone_seq);
      value->assign(pinnable_val.data(), pinnable_val.size());
      RecordTick(stats_, MEMTABLE_MISS);
    }

    if (s.ok()) {
      bytes_read += value->size();
      num_found++;
    }
  }

  // Post processing (decrement reference counts and record statistics)
  PERF_TIMER_GUARD(get_post_process_time);
  autovector<SuperVersion*> superversions_to_delete;

  for (auto mgd_iter : multiget_cf_data) {
    auto mgd = mgd_iter.second;
    if (!last_try) {
      ReturnAndCleanupSuperVersion(mgd.cfd, mgd.super_version);
    } else {
      mgd.cfd->GetSuperVersion()->Unref();
    }
  }
  RecordTick(stats_, NUMBER_MULTIGET_CALLS);
  RecordTick(stats_, NUMBER_MULTIGET_KEYS_READ, num_keys);
  RecordTick(stats_, NUMBER_MULTIGET_KEYS_FOUND, num_found);
  RecordTick(stats_, NUMBER_MULTIGET_BYTES_READ, bytes_read);
  RecordInHistogram(stats_, BYTES_PER_MULTIGET, bytes_read);
  PERF_COUNTER_ADD(multiget_read_bytes, bytes_read);
  PERF_TIMER_STOP(get_post_process_time);

  return stat_list;
}
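
// Illustrative usage sketch for the std::vector-based MultiGet() above
// (assumes an already-opened DB* `db`); one Status is returned per key.
//
//   std::vector<ColumnFamilyHandle*> cfs(2, db->DefaultColumnFamily());
//   std::vector<Slice> keys = {Slice("k1"), Slice("k2")};
//   std::vector<std::string> values;
//   std::vector<Status> statuses =
//       db->MultiGet(ReadOptions(), cfs, keys, &values);
//   // statuses[i] and values[i] correspond to keys[i].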

// Order keys by key contents, using the column family's user comparator
struct CompareKeyContext {
  inline bool operator()(const KeyContext* lhs, const KeyContext* rhs) {
    const Comparator* comparator = cfd->user_comparator();
    int cmp = comparator->Compare(*(lhs->key), *(rhs->key));
    if (cmp < 0) {
      return true;
    }
    return false;
  }
  const ColumnFamilyData* cfd;
};

void DBImpl::MultiGet(const ReadOptions& read_options,
                      ColumnFamilyHandle* column_family, const size_t num_keys,
                      const Slice* keys, PinnableSlice* values,
                      Status* statuses, const bool sorted_input) {
  autovector<KeyContext, MultiGetContext::MAX_BATCH_SIZE> key_context;
  for (size_t i = 0; i < num_keys; ++i) {
    key_context.emplace_back(keys[i], &values[i], &statuses[i]);
  }

  MultiGetImpl(read_options, column_family, key_context, sorted_input, nullptr,
               nullptr);
}

void DBImpl::MultiGetImpl(
    const ReadOptions& read_options, ColumnFamilyHandle* column_family,
    autovector<KeyContext, MultiGetContext::MAX_BATCH_SIZE>& key_context,
    bool sorted_input, ReadCallback* callback, bool* is_blob_index) {
  PERF_CPU_TIMER_GUARD(get_cpu_nanos, env_);
  StopWatch sw(env_, stats_, DB_MULTIGET);
  size_t num_keys = key_context.size();

  PERF_TIMER_GUARD(get_snapshot_time);

  ColumnFamilyHandleImpl* cfh =
      reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  ColumnFamilyData* cfd = cfh->cfd();

  autovector<KeyContext*, MultiGetContext::MAX_BATCH_SIZE> sorted_keys;
  sorted_keys.resize(num_keys);
  {
    size_t index = 0;
    for (KeyContext& key : key_context) {
#ifndef NDEBUG
      if (index > 0 && sorted_input) {
        KeyContext* lhs = &key_context[index-1];
        KeyContext* rhs = &key_context[index];
        const Comparator* comparator = cfd->user_comparator();
        int cmp = comparator->Compare(*(lhs->key), *(rhs->key));
        assert(cmp <= 0);
      }
#endif

      sorted_keys[index] = &key;
      index++;
    }
    if (!sorted_input) {
      CompareKeyContext sort_comparator;
      sort_comparator.cfd = cfd;
      std::sort(sorted_keys.begin(), sorted_keys.begin() + index,
                sort_comparator);
    }
  }

  // Keep track of bytes that we read for statistics-recording later
  PERF_TIMER_STOP(get_snapshot_time);

  // Acquire SuperVersion
  SuperVersion* super_version = GetAndRefSuperVersion(cfd);
  SequenceNumber snapshot;
  if (read_options.snapshot != nullptr) {
    // Note: In WritePrepared txns this is not necessary but not harmful
    // either.  Because prep_seq > snapshot => commit_seq > snapshot so if
    // a snapshot is specified we should be fine with skipping seq numbers
    // that are greater than that.
    //
    // In WriteUnprepared, we cannot set snapshot in the lookup key because we
    // may skip uncommitted data that should be visible to the transaction for
    // reading own writes.
    snapshot =
        reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)->number_;
    if (callback) {
      snapshot = std::max(snapshot, callback->max_visible_seq());
    }
  } else {
    // Since we get and reference the super version before getting
    // the snapshot number, without a mutex protection, it is possible
    // that a memtable switch happened in the middle and not all the
    // data for this snapshot is available. But it will contain all
    // the data available in the super version we have, which is also
    // a valid snapshot to read from.
    // We shouldn't get snapshot before finding and referencing the super
    // version because a flush happening in between may compact away data for
    // the snapshot, but the snapshot is earlier than the data overwriting it,
    // so users may see wrong results.
    snapshot = last_seq_same_as_publish_seq_
                   ? versions_->LastSequence()
                   : versions_->LastPublishedSequence();
    if (callback) {
      // The unprep_seqs are not published for write unprepared, so it could be
      // that max_visible_seq is larger. Seek to the std::max of the two.
      // However, we still want our callback to contain the actual snapshot so
      // that it can do the correct visibility filtering.
      callback->Refresh(snapshot);

      // Internally, WriteUnpreparedTxnReadCallback::Refresh would set
      // max_visible_seq = max(max_visible_seq, snapshot)
      //
      // Currently, the commented out assert is broken by
      // InvalidSnapshotReadCallback, but if write unprepared recovery followed
      // the regular transaction flow, then this special read callback would not
      // be needed.
      //
      // assert(callback->max_visible_seq() >= snapshot);
      snapshot = callback->max_visible_seq();
    }
  }

  // For each of the given keys, apply the entire "get" process as follows:
  // First look in the memtable, then in the immutable memtable (if any).
  // s is both in/out. When in, s could either be OK or MergeInProgress.
  // merge_operands will contain the sequence of merges in the latter case.
  size_t keys_left = num_keys;
  while (keys_left) {
    size_t batch_size = (keys_left > MultiGetContext::MAX_BATCH_SIZE)
                            ? MultiGetContext::MAX_BATCH_SIZE
                            : keys_left;
    MultiGetContext ctx(&sorted_keys[num_keys - keys_left], batch_size,
                        snapshot);
    MultiGetRange range = ctx.GetMultiGetRange();
    bool lookup_current = false;

    keys_left -= batch_size;
    for (auto mget_iter = range.begin(); mget_iter != range.end();
         ++mget_iter) {
      MergeContext& merge_context = mget_iter->merge_context;
      merge_context.Clear();
      Status& s = *mget_iter->s;
      PinnableSlice* value = mget_iter->value;
      s = Status::OK();

      bool skip_memtable =
          (read_options.read_tier == kPersistedTier &&
           has_unpersisted_data_.load(std::memory_order_relaxed));
      bool done = false;
      if (!skip_memtable) {
        if (super_version->mem->Get(*(mget_iter->lkey), value->GetSelf(), &s,
                                    &merge_context,
                                    &mget_iter->max_covering_tombstone_seq,
                                    read_options, callback, is_blob_index)) {
          done = true;
          value->PinSelf();
          RecordTick(stats_, MEMTABLE_HIT);
        } else if (super_version->imm->Get(
                       *(mget_iter->lkey), value->GetSelf(), &s, &merge_context,
                       &mget_iter->max_covering_tombstone_seq, read_options,
                       callback, is_blob_index)) {
          done = true;
          value->PinSelf();
          RecordTick(stats_, MEMTABLE_HIT);
        }
      }
      if (done) {
        range.MarkKeyDone(mget_iter);
      } else {
        RecordTick(stats_, MEMTABLE_MISS);
        lookup_current = true;
      }
    }

    if (lookup_current) {
      PERF_TIMER_GUARD(get_from_output_files_time);
      super_version->current->MultiGet(read_options, &range, callback,
                                       is_blob_index);
    }
  }

  // Post processing (decrement reference counts and record statistics)
  PERF_TIMER_GUARD(get_post_process_time);
  size_t num_found = 0;
  uint64_t bytes_read = 0;
  for (KeyContext& key : key_context) {
    if (key.s->ok()) {
      bytes_read += key.value->size();
      num_found++;
    }
  }

  ReturnAndCleanupSuperVersion(cfd, super_version);

  RecordTick(stats_, NUMBER_MULTIGET_CALLS);
  RecordTick(stats_, NUMBER_MULTIGET_KEYS_READ, num_keys);
  RecordTick(stats_, NUMBER_MULTIGET_KEYS_FOUND, num_found);
  RecordTick(stats_, NUMBER_MULTIGET_BYTES_READ, bytes_read);
  RecordInHistogram(stats_, BYTES_PER_MULTIGET, bytes_read);
  PERF_COUNTER_ADD(multiget_read_bytes, bytes_read);
  PERF_TIMER_STOP(get_post_process_time);
}
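
// Illustrative usage sketch for the batched, single-column-family MultiGet()
// overload that feeds MultiGetImpl() above. Assumes an already-opened DB* `db`
// and keys that are not pre-sorted (sorted_input == false).
//
//   constexpr size_t kNumKeys = 2;
//   Slice keys[kNumKeys] = {Slice("k1"), Slice("k2")};
//   PinnableSlice values[kNumKeys];
//   Status statuses[kNumKeys];
//   db->MultiGet(ReadOptions(), db->DefaultColumnFamily(), kNumKeys, keys,
//                values, statuses, /*sorted_input=*/false);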

Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
                                  const std::string& column_family,
                                  ColumnFamilyHandle** handle) {
  assert(handle != nullptr);
  Status s = CreateColumnFamilyImpl(cf_options, column_family, handle);
  if (s.ok()) {
    s = WriteOptionsFile(true /*need_mutex_lock*/,
                         true /*need_enter_write_thread*/);
  }
  return s;
}

Status DBImpl::CreateColumnFamilies(
    const ColumnFamilyOptions& cf_options,
    const std::vector<std::string>& column_family_names,
    std::vector<ColumnFamilyHandle*>* handles) {
  assert(handles != nullptr);
  handles->clear();
  size_t num_cf = column_family_names.size();
  Status s;
  bool success_once = false;
  for (size_t i = 0; i < num_cf; i++) {
    ColumnFamilyHandle* handle;
    s = CreateColumnFamilyImpl(cf_options, column_family_names[i], &handle);
    if (!s.ok()) {
      break;
    }
    handles->push_back(handle);
    success_once = true;
  }
  if (success_once) {
    Status persist_options_status = WriteOptionsFile(
        true /*need_mutex_lock*/, true /*need_enter_write_thread*/);
    if (s.ok() && !persist_options_status.ok()) {
      s = persist_options_status;
    }
  }
  return s;
}

Status DBImpl::CreateColumnFamilies(
    const std::vector<ColumnFamilyDescriptor>& column_families,
    std::vector<ColumnFamilyHandle*>* handles) {
  assert(handles != nullptr);
  handles->clear();
  size_t num_cf = column_families.size();
  Status s;
  bool success_once = false;
  for (size_t i = 0; i < num_cf; i++) {
    ColumnFamilyHandle* handle;
    s = CreateColumnFamilyImpl(column_families[i].options,
                               column_families[i].name, &handle);
    if (!s.ok()) {
      break;
    }
    handles->push_back(handle);
    success_once = true;
  }
  if (success_once) {
    Status persist_options_status = WriteOptionsFile(
        true /*need_mutex_lock*/, true /*need_enter_write_thread*/);
    if (s.ok() && !persist_options_status.ok()) {
      s = persist_options_status;
    }
  }
  return s;
}
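
// Illustrative usage sketch for the column family creation APIs above.
// Assumes an already-opened DB* `db`; returned handles should eventually be
// released via DB::DestroyColumnFamilyHandle().
//
//   ColumnFamilyHandle* cf = nullptr;
//   Status s = db->CreateColumnFamily(ColumnFamilyOptions(), "new_cf", &cf);
//
//   std::vector<ColumnFamilyHandle*> handles;
//   s = db->CreateColumnFamilies(ColumnFamilyOptions(), {"cf_a", "cf_b"},
//                                &handles);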

Status DBImpl::CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
                                      const std::string& column_family_name,
                                      ColumnFamilyHandle** handle) {
  Status s;
  Status persist_options_status;
  *handle = nullptr;

  DBOptions db_options =
      BuildDBOptions(immutable_db_options_, mutable_db_options_);
  s = ColumnFamilyData::ValidateOptions(db_options, cf_options);
  if (s.ok()) {
    for (auto& cf_path : cf_options.cf_paths) {
      s = env_->CreateDirIfMissing(cf_path.path);
      if (!s.ok()) {
        break;
      }
    }
  }
  if (!s.ok()) {
    return s;
  }

  SuperVersionContext sv_context(/* create_superversion */ true);
  {
    InstrumentedMutexLock l(&mutex_);

    if (versions_->GetColumnFamilySet()->GetColumnFamily(column_family_name) !=
        nullptr) {
      return Status::InvalidArgument("Column family already exists");
    }
    VersionEdit edit;
    edit.AddColumnFamily(column_family_name);
    uint32_t new_id = versions_->GetColumnFamilySet()->GetNextColumnFamilyID();
    edit.SetColumnFamily(new_id);
    edit.SetLogNumber(logfile_number_);
    edit.SetComparatorName(cf_options.comparator->Name());

    // LogAndApply will both write the creation in MANIFEST and create
    // ColumnFamilyData object
    {  // write thread
      WriteThread::Writer w;
      write_thread_.EnterUnbatched(&w, &mutex_);
      s = versions_->LogAndApply(nullptr, MutableCFOptions(cf_options), &edit,
                                 &mutex_, directories_.GetDbDir(), false,
                                 &cf_options);
      write_thread_.ExitUnbatched(&w);
    }
    if (s.ok()) {
      auto* cfd =
          versions_->GetColumnFamilySet()->GetColumnFamily(column_family_name);
      assert(cfd != nullptr);
      s = cfd->AddDirectories();
    }
    if (s.ok()) {
      single_column_family_mode_ = false;
      auto* cfd =
          versions_->GetColumnFamilySet()->GetColumnFamily(column_family_name);
      assert(cfd != nullptr);
      InstallSuperVersionAndScheduleWork(cfd, &sv_context,
                                         *cfd->GetLatestMutableCFOptions());

      if (!cfd->mem()->IsSnapshotSupported()) {
        is_snapshot_supported_ = false;
      }

      cfd->set_initialized();

      *handle = new ColumnFamilyHandleImpl(cfd, this, &mutex_);
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "Created column family [%s] (ID %u)",
                     column_family_name.c_str(), (unsigned)cfd->GetID());
    } else {
      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                      "Creating column family [%s] FAILED -- %s",
                      column_family_name.c_str(), s.ToString().c_str());
    }
  }  // InstrumentedMutexLock l(&mutex_)

  sv_context.Clean();
  // this is outside the mutex
  if (s.ok()) {
    NewThreadStatusCfInfo(
        reinterpret_cast<ColumnFamilyHandleImpl*>(*handle)->cfd());
  }
  return s;
}

Status DBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) {
  assert(column_family != nullptr);
  Status s = DropColumnFamilyImpl(column_family);
  if (s.ok()) {
    s = WriteOptionsFile(true /*need_mutex_lock*/,
                         true /*need_enter_write_thread*/);
  }
  return s;
}

Status DBImpl::DropColumnFamilies(
    const std::vector<ColumnFamilyHandle*>& column_families) {
  Status s;
  bool success_once = false;
  for (auto* handle : column_families) {
    s = DropColumnFamilyImpl(handle);
    if (!s.ok()) {
      break;
    }
    success_once = true;
  }
  if (success_once) {
    Status persist_options_status = WriteOptionsFile(
        true /*need_mutex_lock*/, true /*need_enter_write_thread*/);
    if (s.ok() && !persist_options_status.ok()) {
      s = persist_options_status;
    }
  }
  return s;
}

Status DBImpl::DropColumnFamilyImpl(ColumnFamilyHandle* column_family) {
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  auto cfd = cfh->cfd();
  if (cfd->GetID() == 0) {
    return Status::InvalidArgument("Can't drop default column family");
  }

  bool cf_support_snapshot = cfd->mem()->IsSnapshotSupported();

  VersionEdit edit;
  edit.DropColumnFamily();
  edit.SetColumnFamily(cfd->GetID());

  Status s;
  {
    InstrumentedMutexLock l(&mutex_);
    if (cfd->IsDropped()) {
      s = Status::InvalidArgument("Column family already dropped!\n");
    }
    if (s.ok()) {
      // we drop column family from a single write thread
      WriteThread::Writer w;
      write_thread_.EnterUnbatched(&w, &mutex_);
      s = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(), &edit,
                                 &mutex_);
      write_thread_.ExitUnbatched(&w);
    }
    if (s.ok()) {
      auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
      max_total_in_memory_state_ -= mutable_cf_options->write_buffer_size *
                                    mutable_cf_options->max_write_buffer_number;
    }

    if (!cf_support_snapshot) {
      // Dropped Column Family doesn't support snapshot. Need to recalculate
      // is_snapshot_supported_.
      bool new_is_snapshot_supported = true;
      for (auto c : *versions_->GetColumnFamilySet()) {
        if (!c->IsDropped() && !c->mem()->IsSnapshotSupported()) {
          new_is_snapshot_supported = false;
          break;
        }
      }
      is_snapshot_supported_ = new_is_snapshot_supported;
    }
    bg_cv_.SignalAll();
  }

  if (s.ok()) {
    // Note that here we erase the associated cf_info of the to-be-dropped
    // cfd before its ref-count goes to zero to avoid having to erase cf_info
    // later inside db_mutex.
    EraseThreadStatusCfInfo(cfd);
    assert(cfd->IsDropped());
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "Dropped column family with id %u\n", cfd->GetID());
  } else {
    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                    "Dropping column family with id %u FAILED -- %s\n",
                    cfd->GetID(), s.ToString().c_str());
  }

  return s;
}

bool DBImpl::KeyMayExist(const ReadOptions& read_options,
                         ColumnFamilyHandle* column_family, const Slice& key,
                         std::string* value, bool* value_found) {
  assert(value != nullptr);
  if (value_found != nullptr) {
    // falsify later if key-may-exist but can't fetch value
    *value_found = true;
  }
  ReadOptions roptions = read_options;
  roptions.read_tier = kBlockCacheTier;  // read from block cache only
  PinnableSlice pinnable_val;
  GetImplOptions get_impl_options;
  get_impl_options.column_family = column_family;
  get_impl_options.value = &pinnable_val;
  get_impl_options.value_found = value_found;
  auto s = GetImpl(roptions, key, get_impl_options);
  value->assign(pinnable_val.data(), pinnable_val.size());

  // If block_cache is enabled and the index block of the table is not
  // present in block_cache, the return value will be Status::Incomplete.
  // In this case, the key may still exist in the table.
  return s.ok() || s.IsIncomplete();
}
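
// Illustrative usage sketch for KeyMayExist() above: a cheap, conservative
// membership check that can return false positives but never false negatives.
// Assumes an already-opened DB* `db`.
//
//   std::string value;
//   bool value_found = false;
//   if (db->KeyMayExist(ReadOptions(), db->DefaultColumnFamily(), Slice("k1"),
//                       &value, &value_found)) {
//     // The key may exist; `value` is only meaningful when value_found is true.
//   }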

Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
                              ColumnFamilyHandle* column_family) {
  if (read_options.managed) {
    return NewErrorIterator(
        Status::NotSupported("Managed iterator is not supported anymore."));
  }
  Iterator* result = nullptr;
  if (read_options.read_tier == kPersistedTier) {
    return NewErrorIterator(Status::NotSupported(
        "ReadTier::kPersistedData is not yet supported in iterators."));
  }
  // if iterator wants internal keys, we can only proceed if
  // we can guarantee the deletes haven't been processed yet
  if (immutable_db_options_.preserve_deletes &&
      read_options.iter_start_seqnum > 0 &&
      read_options.iter_start_seqnum < preserve_deletes_seqnum_.load()) {
    return NewErrorIterator(Status::InvalidArgument(
        "Iterator requested internal keys which are too old and are not"
        " guaranteed to be preserved, try larger iter_start_seqnum opt."));
  }
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  auto cfd = cfh->cfd();
  ReadCallback* read_callback = nullptr;  // No read callback provided.
  if (read_options.tailing) {
#ifdef ROCKSDB_LITE
    // not supported in lite version
    result = nullptr;

#else
    SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
    auto iter = new ForwardIterator(this, read_options, cfd, sv);
    result = NewDBIterator(
        env_, read_options, *cfd->ioptions(), sv->mutable_cf_options,
        cfd->user_comparator(), iter, kMaxSequenceNumber,
        sv->mutable_cf_options.max_sequential_skip_in_iterations, read_callback,
        this, cfd);
#endif
  } else {
    // Note: no need to consider the special case of
    // last_seq_same_as_publish_seq_==false since NewIterator is overridden in
    // WritePreparedTxnDB
    auto snapshot = read_options.snapshot != nullptr
                        ? read_options.snapshot->GetSequenceNumber()
                        : versions_->LastSequence();
    result = NewIteratorImpl(read_options, cfd, snapshot, read_callback);
  }
  return result;
}
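
// Illustrative usage sketch for NewIterator() above. Assumes an already-opened
// DB* `db`; the iterator must be deleted before the column family handle and
// the DB it was created from.
//
//   ReadOptions ropts;
//   std::unique_ptr<Iterator> it(
//       db->NewIterator(ropts, db->DefaultColumnFamily()));
//   for (it->SeekToFirst(); it->Valid(); it->Next()) {
//     // it->key() / it->value()
//   }
//   // Check it->status() after the loop for scan errors.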

ArenaWrappedDBIter* DBImpl::NewIteratorImpl(const ReadOptions& read_options,
                                            ColumnFamilyData* cfd,
                                            SequenceNumber snapshot,
                                            ReadCallback* read_callback,
                                            bool allow_blob,
                                            bool allow_refresh) {
  SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);

  // Try to generate a DB iterator tree in continuous memory area to be
  // cache friendly. Here is an example of result:
  // +-------------------------------+
  // |                               |
  // | ArenaWrappedDBIter            |
  // |  +                            |
  // |  +---> Inner Iterator   ------------+
  // |  |                            |     |
  // |  |    +-- -- -- -- -- -- -- --+     |
  // |  +--- | Arena                 |     |
  // |       |                       |     |
  // |          Allocated Memory:    |     |
  // |       |   +-------------------+     |
  // |       |   | DBIter            | <---+
  // |           |  +                |
  // |       |   |  +-> iter_  ------------+
  // |       |   |                   |     |
  // |       |   +-------------------+     |
  // |       |   | MergingIterator   | <---+
  // |           |  +                |
  // |       |   |  +->child iter1  ------------+
  // |       |   |  |                |          |
  // |           |  +->child iter2  ----------+ |
  // |       |   |  |                |        | |
  // |       |   |  +->child iter3  --------+ | |
  // |           |                   |      | | |
  // |       |   +-------------------+      | | |
  // |       |   | Iterator1         | <--------+
  // |       |   +-------------------+      | |
  // |       |   | Iterator2         | <------+
  // |       |   +-------------------+      |
  // |       |   | Iterator3         | <----+
  // |       |   +-------------------+
  // |       |                       |
  // +-------+-----------------------+
  //
  // ArenaWrappedDBIter inlines an arena area where all the iterators in
  // the iterator tree are allocated in the order of being accessed when
  // querying.
  // Laying out the iterators in the order of being accessed makes it more
  // likely that any iterator pointer is close to the iterator it points to so
  // that they are likely to be in the same cache line and/or page.
  ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator(
      env_, read_options, *cfd->ioptions(), sv->mutable_cf_options, snapshot,
      sv->mutable_cf_options.max_sequential_skip_in_iterations,
      sv->version_number, read_callback, this, cfd, allow_blob,
      ((read_options.snapshot != nullptr) ? false : allow_refresh));

  InternalIterator* internal_iter =
      NewInternalIterator(read_options, cfd, sv, db_iter->GetArena(),
                          db_iter->GetRangeDelAggregator(), snapshot);
  db_iter->SetIterUnderDBIter(internal_iter);

  return db_iter;
}

Status DBImpl::NewIterators(
    const ReadOptions& read_options,
    const std::vector<ColumnFamilyHandle*>& column_families,
    std::vector<Iterator*>* iterators) {
  if (read_options.managed) {
    return Status::NotSupported("Managed iterator is not supported anymore.");
  }
  if (read_options.read_tier == kPersistedTier) {
    return Status::NotSupported(
        "ReadTier::kPersistedData is not yet supported in iterators.");
  }
  ReadCallback* read_callback = nullptr;  // No read callback provided.
  iterators->clear();
  iterators->reserve(column_families.size());
  if (read_options.tailing) {
#ifdef ROCKSDB_LITE
    return Status::InvalidArgument(
        "Tailing iterator not supported in RocksDB lite");
#else
    for (auto cfh : column_families) {
      auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
      SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
      auto iter = new ForwardIterator(this, read_options, cfd, sv);
      iterators->push_back(NewDBIterator(
          env_, read_options, *cfd->ioptions(), sv->mutable_cf_options,
          cfd->user_comparator(), iter, kMaxSequenceNumber,
          sv->mutable_cf_options.max_sequential_skip_in_iterations,
          read_callback, this, cfd));
    }
#endif
  } else {
    // Note: no need to consider the special case of
    // last_seq_same_as_publish_seq_==false since NewIterators is overridden in
    // WritePreparedTxnDB
    auto snapshot = read_options.snapshot != nullptr
                        ? read_options.snapshot->GetSequenceNumber()
                        : versions_->LastSequence();
    for (size_t i = 0; i < column_families.size(); ++i) {
      auto* cfd =
          reinterpret_cast<ColumnFamilyHandleImpl*>(column_families[i])->cfd();
      iterators->push_back(
          NewIteratorImpl(read_options, cfd, snapshot, read_callback));
    }
  }

  return Status::OK();
}
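
// Illustrative usage sketch for NewIterators() above, which returns iterators
// over several column families that share one consistent snapshot. Assumes an
// already-opened DB* `db` and existing handles `cf1` and `cf2`.
//
//   std::vector<ColumnFamilyHandle*> cfs = {cf1, cf2};
//   std::vector<Iterator*> iters;
//   Status s = db->NewIterators(ReadOptions(), cfs, &iters);
//   // ... use iters[0] and iters[1], then delete each iterator ...
//   for (Iterator* it : iters) {
//     delete it;
//   }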

const Snapshot* DBImpl::GetSnapshot() { return GetSnapshotImpl(false); }

#ifndef ROCKSDB_LITE
const Snapshot* DBImpl::GetSnapshotForWriteConflictBoundary() {
  return GetSnapshotImpl(true);
}
#endif  // ROCKSDB_LITE

SnapshotImpl* DBImpl::GetSnapshotImpl(bool is_write_conflict_boundary,
                                      bool lock) {
  int64_t unix_time = 0;
  env_->GetCurrentTime(&unix_time);  // Ignore error
  SnapshotImpl* s = new SnapshotImpl;

  if (lock) {
    mutex_.Lock();
  }
  // returns null if the underlying memtable does not support snapshot.
  if (!is_snapshot_supported_) {
    if (lock) {
      mutex_.Unlock();
    }
    delete s;
    return nullptr;
  }
  auto snapshot_seq = last_seq_same_as_publish_seq_
                          ? versions_->LastSequence()
                          : versions_->LastPublishedSequence();
  SnapshotImpl* snapshot =
      snapshots_.New(s, snapshot_seq, unix_time, is_write_conflict_boundary);
  if (lock) {
    mutex_.Unlock();
  }
  return snapshot;
}

namespace {
typedef autovector<ColumnFamilyData*, 2> CfdList;
bool CfdListContains(const CfdList& list, ColumnFamilyData* cfd) {
  for (const ColumnFamilyData* t : list) {
    if (t == cfd) {
      return true;
    }
  }
  return false;
}
}  //  namespace

void DBImpl::ReleaseSnapshot(const Snapshot* s) {
  const SnapshotImpl* casted_s = reinterpret_cast<const SnapshotImpl*>(s);
  {
    InstrumentedMutexLock l(&mutex_);
    snapshots_.Delete(casted_s);
    uint64_t oldest_snapshot;
    if (snapshots_.empty()) {
      oldest_snapshot = last_seq_same_as_publish_seq_
                            ? versions_->LastSequence()
                            : versions_->LastPublishedSequence();
    } else {
      oldest_snapshot = snapshots_.oldest()->number_;
    }
    // Avoid going through every column family by checking a global threshold
    // first.
    if (oldest_snapshot > bottommost_files_mark_threshold_) {
      CfdList cf_scheduled;
      for (auto* cfd : *versions_->GetColumnFamilySet()) {
        cfd->current()->storage_info()->UpdateOldestSnapshot(oldest_snapshot);
        if (!cfd->current()
                 ->storage_info()
                 ->BottommostFilesMarkedForCompaction()
                 .empty()) {
          SchedulePendingCompaction(cfd);
          MaybeScheduleFlushOrCompaction();
          cf_scheduled.push_back(cfd);
        }
      }

      // Calculate a new threshold, skipping those CFs where compactions are
      // scheduled. We do not do the same pass as the previous loop because
      // mutex might be unlocked during the loop, making the result inaccurate.
      SequenceNumber new_bottommost_files_mark_threshold = kMaxSequenceNumber;
      for (auto* cfd : *versions_->GetColumnFamilySet()) {
        if (CfdListContains(cf_scheduled, cfd)) {
          continue;
        }
        new_bottommost_files_mark_threshold = std::min(
            new_bottommost_files_mark_threshold,
            cfd->current()->storage_info()->bottommost_files_mark_threshold());
      }
      bottommost_files_mark_threshold_ = new_bottommost_files_mark_threshold;
    }
  }
  delete casted_s;
}
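
// Illustrative usage sketch for GetSnapshot()/ReleaseSnapshot() above.
// Assumes an already-opened DB* `db`.
//
//   const Snapshot* snap = db->GetSnapshot();
//   ReadOptions ropts;
//   ropts.snapshot = snap;
//   PinnableSlice value;
//   Status s = db->Get(ropts, db->DefaultColumnFamily(), Slice("k1"), &value);
//   db->ReleaseSnapshot(snap);  // lets compaction drop old versions again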

#ifndef ROCKSDB_LITE
Status DBImpl::GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
                                        TablePropertiesCollection* props) {
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  auto cfd = cfh->cfd();

  // Increment the ref count
  mutex_.Lock();
  auto version = cfd->current();
  version->Ref();
  mutex_.Unlock();

  auto s = version->GetPropertiesOfAllTables(props);

  // Decrement the ref count
  mutex_.Lock();
  version->Unref();
  mutex_.Unlock();

  return s;
}
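
// Illustrative usage sketch for GetPropertiesOfAllTables() above. Assumes an
// already-opened DB* `db`; TablePropertiesCollection maps SST file names to
// their TableProperties.
//
//   TablePropertiesCollection props;
//   Status s = db->GetPropertiesOfAllTables(db->DefaultColumnFamily(), &props);
//   if (s.ok()) {
//     for (const auto& entry : props) {
//       // entry.first is the file name; entry.second->num_entries, etc.
//     }
//   }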

Status DBImpl::GetPropertiesOfTablesInRange(ColumnFamilyHandle* column_family,
                                            const Range* range, std::size_t n,
                                            TablePropertiesCollection* props) {
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  auto cfd = cfh->cfd();

  // Increment the ref count
  mutex_.Lock();
  auto version = cfd->current();
  version->Ref();
  mutex_.Unlock();

  auto s = version->GetPropertiesOfTablesInRange(range, n, props);

  // Decrement the ref count
  mutex_.Lock();
  version->Unref();
  mutex_.Unlock();

  return s;
}

#endif  // ROCKSDB_LITE

const std::string& DBImpl::GetName() const { return dbname_; }

Env* DBImpl::GetEnv() const { return env_; }

Options DBImpl::GetOptions(ColumnFamilyHandle* column_family) const {
  InstrumentedMutexLock l(&mutex_);
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  return Options(BuildDBOptions(immutable_db_options_, mutable_db_options_),
                 cfh->cfd()->GetLatestCFOptions());
}

DBOptions DBImpl::GetDBOptions() const {
  InstrumentedMutexLock l(&mutex_);
  return BuildDBOptions(immutable_db_options_, mutable_db_options_);
}

bool DBImpl::GetProperty(ColumnFamilyHandle* column_family,
                         const Slice& property, std::string* value) {
  const DBPropertyInfo* property_info = GetPropertyInfo(property);
  value->clear();
  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
  if (property_info == nullptr) {
    return false;
  } else if (property_info->handle_int) {
    uint64_t int_value;
    bool ret_value =
        GetIntPropertyInternal(cfd, *property_info, false, &int_value);
    if (ret_value) {
      *value = ToString(int_value);
    }
    return ret_value;
  } else if (property_info->handle_string) {
    InstrumentedMutexLock l(&mutex_);
    return cfd->internal_stats()->GetStringProperty(*property_info, property,
                                                    value);
  } else if (property_info->handle_string_dbimpl) {
    std::string tmp_value;
    bool ret_value = (this->*(property_info->handle_string_dbimpl))(&tmp_value);
    if (ret_value) {
      *value = tmp_value;
    }
    return ret_value;
  }
  // Shouldn't reach here since exactly one of handle_string and handle_int
  // should be non-nullptr.
  assert(false);
  return false;
}
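
// Illustrative usage sketch for the property getters above. Assumes an
// already-opened DB* `db`; property names are listed under DB::Properties in
// include/rocksdb/db.h.
//
//   std::string num_keys;
//   if (db->GetProperty(db->DefaultColumnFamily(), "rocksdb.estimate-num-keys",
//                       &num_keys)) {
//     // num_keys holds the value formatted as a string
//   }
//   uint64_t mem_usage = 0;
//   db->GetIntProperty(db->DefaultColumnFamily(),
//                      "rocksdb.cur-size-all-mem-tables", &mem_usage);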

bool DBImpl::GetMapProperty(ColumnFamilyHandle* column_family,
                            const Slice& property,
                            std::map<std::string, std::string>* value) {
  const DBPropertyInfo* property_info = GetPropertyInfo(property);
  value->clear();
  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
  if (property_info == nullptr) {
    return false;
  } else if (property_info->handle_map) {
    InstrumentedMutexLock l(&mutex_);
    return cfd->internal_stats()->GetMapProperty(*property_info, property,
                                                 value);
  }
  // If we reach this point it means that handle_map is not provided for the
  // requested property
  return false;
}

bool DBImpl::GetIntProperty(ColumnFamilyHandle* column_family,
                            const Slice& property, uint64_t* value) {
  const DBPropertyInfo* property_info = GetPropertyInfo(property);
  if (property_info == nullptr || property_info->handle_int == nullptr) {
    return false;
  }
  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
  return GetIntPropertyInternal(cfd, *property_info, false, value);
}
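// Example (illustrative sketch): string and integer properties are queried by
// name; GetIntProperty() skips the string conversion for numeric properties.
// db and cf_handle stand for an open DB and column family handle.
//
//   std::string stats;
//   db->GetProperty(cf_handle, "rocksdb.stats", &stats);
//   uint64_t num_keys = 0;
//   db->GetIntProperty(cf_handle, "rocksdb.estimate-num-keys", &num_keys);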

bool DBImpl::GetIntPropertyInternal(ColumnFamilyData* cfd,
                                    const DBPropertyInfo& property_info,
                                    bool is_locked, uint64_t* value) {
  assert(property_info.handle_int != nullptr);
  if (!property_info.need_out_of_mutex) {
    if (is_locked) {
      mutex_.AssertHeld();
      return cfd->internal_stats()->GetIntProperty(property_info, value, this);
    } else {
      InstrumentedMutexLock l(&mutex_);
      return cfd->internal_stats()->GetIntProperty(property_info, value, this);
    }
  } else {
    SuperVersion* sv = nullptr;
    if (!is_locked) {
      sv = GetAndRefSuperVersion(cfd);
    } else {
      sv = cfd->GetSuperVersion();
    }

    bool ret = cfd->internal_stats()->GetIntPropertyOutOfMutex(
        property_info, sv->current, value);

    if (!is_locked) {
      ReturnAndCleanupSuperVersion(cfd, sv);
    }

    return ret;
  }
}

bool DBImpl::GetPropertyHandleOptionsStatistics(std::string* value) {
  assert(value != nullptr);
  Statistics* statistics = immutable_db_options_.statistics.get();
  if (!statistics) {
    return false;
  }
  *value = statistics->ToString();
  return true;
}

#ifndef ROCKSDB_LITE
Status DBImpl::ResetStats() {
  InstrumentedMutexLock l(&mutex_);
  for (auto* cfd : *versions_->GetColumnFamilySet()) {
    if (cfd->initialized()) {
      cfd->internal_stats()->Clear();
    }
  }
  return Status::OK();
}
#endif  // ROCKSDB_LITE

bool DBImpl::GetAggregatedIntProperty(const Slice& property,
                                      uint64_t* aggregated_value) {
  const DBPropertyInfo* property_info = GetPropertyInfo(property);
  if (property_info == nullptr || property_info->handle_int == nullptr) {
    return false;
  }

  uint64_t sum = 0;
  {
    // Needs mutex to protect the list of column families.
    InstrumentedMutexLock l(&mutex_);
    uint64_t value;
    for (auto* cfd : *versions_->GetColumnFamilySet()) {
      if (!cfd->initialized()) {
        continue;
      }
      if (GetIntPropertyInternal(cfd, *property_info, true, &value)) {
        sum += value;
      } else {
        return false;
      }
    }
  }
  *aggregated_value = sum;
  return true;
}
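// Example (illustrative sketch): aggregated properties sum an integer property
// over every column family in the DB (db stands for an open DB handle).
//
//   uint64_t total_mem = 0;
//   db->GetAggregatedIntProperty("rocksdb.estimate-table-readers-mem",
//                                &total_mem);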

SuperVersion* DBImpl::GetAndRefSuperVersion(ColumnFamilyData* cfd) {
  // TODO(ljin): consider using GetReferencedSuperVersion() directly
  return cfd->GetThreadLocalSuperVersion(&mutex_);
}

// REQUIRED: this function should only be called on the write thread or if the
// mutex is held.
SuperVersion* DBImpl::GetAndRefSuperVersion(uint32_t column_family_id) {
  auto column_family_set = versions_->GetColumnFamilySet();
  auto cfd = column_family_set->GetColumnFamily(column_family_id);
  if (!cfd) {
    return nullptr;
  }

  return GetAndRefSuperVersion(cfd);
}

void DBImpl::CleanupSuperVersion(SuperVersion* sv) {
  // Release SuperVersion
  if (sv->Unref()) {
    {
      InstrumentedMutexLock l(&mutex_);
      sv->Cleanup();
    }
    delete sv;
    RecordTick(stats_, NUMBER_SUPERVERSION_CLEANUPS);
  }
  RecordTick(stats_, NUMBER_SUPERVERSION_RELEASES);
}

void DBImpl::ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd,
                                          SuperVersion* sv) {
  if (!cfd->ReturnThreadLocalSuperVersion(sv)) {
    CleanupSuperVersion(sv);
  }
}

// REQUIRED: this function should only be called on the write thread.
void DBImpl::ReturnAndCleanupSuperVersion(uint32_t column_family_id,
                                          SuperVersion* sv) {
  auto column_family_set = versions_->GetColumnFamilySet();
  auto cfd = column_family_set->GetColumnFamily(column_family_id);

  // If SuperVersion is held, and we successfully fetched a cfd using
  // GetAndRefSuperVersion(), it must still exist.
  assert(cfd != nullptr);
  ReturnAndCleanupSuperVersion(cfd, sv);
}

// REQUIRED: this function should only be called on the write thread or if the
// mutex is held.
ColumnFamilyHandle* DBImpl::GetColumnFamilyHandle(uint32_t column_family_id) {
  ColumnFamilyMemTables* cf_memtables = column_family_memtables_.get();

  if (!cf_memtables->Seek(column_family_id)) {
    return nullptr;
  }

  return cf_memtables->GetColumnFamilyHandle();
}

// REQUIRED: mutex is NOT held.
std::unique_ptr<ColumnFamilyHandle> DBImpl::GetColumnFamilyHandleUnlocked(
    uint32_t column_family_id) {
  InstrumentedMutexLock l(&mutex_);

  auto* cfd =
      versions_->GetColumnFamilySet()->GetColumnFamily(column_family_id);
  if (cfd == nullptr) {
    return nullptr;
  }

  return std::unique_ptr<ColumnFamilyHandleImpl>(
      new ColumnFamilyHandleImpl(cfd, this, &mutex_));
}

void DBImpl::GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
                                         const Range& range,
                                         uint64_t* const count,
                                         uint64_t* const size) {
  ColumnFamilyHandleImpl* cfh =
      reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  ColumnFamilyData* cfd = cfh->cfd();
  SuperVersion* sv = GetAndRefSuperVersion(cfd);

  // Convert user_key into a corresponding internal key.
  InternalKey k1(range.start, kMaxSequenceNumber, kValueTypeForSeek);
  InternalKey k2(range.limit, kMaxSequenceNumber, kValueTypeForSeek);
  MemTable::MemTableStats memStats =
      sv->mem->ApproximateStats(k1.Encode(), k2.Encode());
  MemTable::MemTableStats immStats =
      sv->imm->ApproximateStats(k1.Encode(), k2.Encode());
  *count = memStats.count + immStats.count;
  *size = memStats.size + immStats.size;

  ReturnAndCleanupSuperVersion(cfd, sv);
}
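// Example (illustrative sketch): estimating how many keys and bytes of a key
// range are still held only in the memtables (db/cf_handle assumed open).
//
//   rocksdb::Range r("user_0000", "user_9999");
//   uint64_t count = 0, size = 0;
//   db->GetApproximateMemTableStats(cf_handle, r, &count, &size);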

Status DBImpl::GetApproximateSizes(const SizeApproximationOptions& options,
                                   ColumnFamilyHandle* column_family,
                                   const Range* range, int n, uint64_t* sizes) {
  if (!options.include_memtabtles && !options.include_files) {
    return Status::InvalidArgument("Invalid options");
  }

  Version* v;
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  auto cfd = cfh->cfd();
  SuperVersion* sv = GetAndRefSuperVersion(cfd);
  v = sv->current;

  for (int i = 0; i < n; i++) {
    // Convert user_key into a corresponding internal key.
    InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
    InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
    sizes[i] = 0;
    if (options.include_files) {
      sizes[i] += versions_->ApproximateSize(
          options, v, k1.Encode(), k2.Encode(), /*start_level=*/0,
          /*end_level=*/-1, TableReaderCaller::kUserApproximateSize);
    }
    if (options.include_memtabtles) {
      sizes[i] += sv->mem->ApproximateStats(k1.Encode(), k2.Encode()).size;
      sizes[i] += sv->imm->ApproximateStats(k1.Encode(), k2.Encode()).size;
    }
  }

  ReturnAndCleanupSuperVersion(cfd, sv);
  return Status::OK();
}
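// Example (illustrative sketch): estimating on-disk plus in-memory size for a
// key range. The SizeApproximationOptions flags mirror the checks above; note
// the memtable flag is spelled include_memtabtles in this API version.
//
//   rocksdb::SizeApproximationOptions opts;
//   opts.include_files = true;
//   opts.include_memtabtles = true;
//   rocksdb::Range r("a", "z");
//   uint64_t size = 0;
//   db->GetApproximateSizes(opts, cf_handle, &r, 1, &size);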

std::list<uint64_t>::iterator
DBImpl::CaptureCurrentFileNumberInPendingOutputs() {
  // We need to remember the iterator of our insert, because after the
  // background job is done, we need to remove that element from
  // pending_outputs_.
  pending_outputs_.push_back(versions_->current_next_file_number());
  auto pending_outputs_inserted_elem = pending_outputs_.end();
  --pending_outputs_inserted_elem;
  return pending_outputs_inserted_elem;
}

void DBImpl::ReleaseFileNumberFromPendingOutputs(
    std::list<uint64_t>::iterator v) {
  pending_outputs_.erase(v);
}

#ifndef ROCKSDB_LITE
Status DBImpl::GetUpdatesSince(
    SequenceNumber seq, std::unique_ptr<TransactionLogIterator>* iter,
    const TransactionLogIterator::ReadOptions& read_options) {
  RecordTick(stats_, GET_UPDATES_SINCE_CALLS);
  if (seq > versions_->LastSequence()) {
    return Status::NotFound("Requested sequence not yet written in the db");
  }
  return wal_manager_.GetUpdatesSince(seq, iter, read_options, versions_.get());
}
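// Example (illustrative sketch): tailing the WAL from a known sequence number;
// this requires the WAL files to still be available (e.g. via WAL TTL/archival
// settings). db and seq_number are assumptions of the sketch.
//
//   std::unique_ptr<rocksdb::TransactionLogIterator> iter;
//   rocksdb::Status s = db->GetUpdatesSince(seq_number, &iter);
//   for (; s.ok() && iter->Valid(); iter->Next()) {
//     rocksdb::BatchResult batch = iter->GetBatch();
//     // batch.sequence and batch.writeBatchPtr describe one write batch.
//   }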

Status DBImpl::DeleteFile(std::string name) {
  uint64_t number;
  FileType type;
  WalFileType log_type;
  if (!ParseFileName(name, &number, &type, &log_type) ||
      (type != kTableFile && type != kLogFile)) {
    ROCKS_LOG_ERROR(immutable_db_options_.info_log, "DeleteFile %s failed.\n",
                    name.c_str());
    return Status::InvalidArgument("Invalid file name");
  }

  Status status;
  if (type == kLogFile) {
    // Only allow deleting archived log files
    if (log_type != kArchivedLogFile) {
      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                      "DeleteFile %s failed - not archived log.\n",
                      name.c_str());
      return Status::NotSupported("Delete only supported for archived logs");
    }
    status = wal_manager_.DeleteFile(name, number);
    if (!status.ok()) {
      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                      "DeleteFile %s failed -- %s.\n", name.c_str(),
                      status.ToString().c_str());
    }
    return status;
  }

  int level;
  FileMetaData* metadata;
  ColumnFamilyData* cfd;
  VersionEdit edit;
  JobContext job_context(next_job_id_.fetch_add(1), true);
  {
    InstrumentedMutexLock l(&mutex_);
    status = versions_->GetMetadataForFile(number, &level, &metadata, &cfd);
    if (!status.ok()) {
      ROCKS_LOG_WARN(immutable_db_options_.info_log,
                     "DeleteFile %s failed. File not found\n", name.c_str());
      job_context.Clean();
      return Status::InvalidArgument("File not found");
    }
    assert(level < cfd->NumberLevels());

    // If the file is being compacted no need to delete.
    if (metadata->being_compacted) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "DeleteFile %s Skipped. File about to be compacted\n",
                     name.c_str());
      job_context.Clean();
      return Status::OK();
    }

    // Only the files in the last level can be deleted externally.
    // This is to make sure that any deletion tombstones are not
    // lost. Check that the level passed is the last level.
    auto* vstorage = cfd->current()->storage_info();
    for (int i = level + 1; i < cfd->NumberLevels(); i++) {
      if (vstorage->NumLevelFiles(i) != 0) {
        ROCKS_LOG_WARN(immutable_db_options_.info_log,
                       "DeleteFile %s FAILED. File not in last level\n",
                       name.c_str());
        job_context.Clean();
        return Status::InvalidArgument("File not in last level");
      }
    }
    // if level == 0, it has to be the oldest file
    if (level == 0 &&
        vstorage->LevelFiles(0).back()->fd.GetNumber() != number) {
      ROCKS_LOG_WARN(immutable_db_options_.info_log,
                     "DeleteFile %s failed ---"
                     " target file in level 0 must be the oldest.",
                     name.c_str());
      job_context.Clean();
      return Status::InvalidArgument("File in level 0, but not oldest");
    }
    edit.SetColumnFamily(cfd->GetID());
    edit.DeleteFile(level, number);
    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
                                    &edit, &mutex_, directories_.GetDbDir());
    if (status.ok()) {
      InstallSuperVersionAndScheduleWork(cfd,
                                         &job_context.superversion_contexts[0],
                                         *cfd->GetLatestMutableCFOptions());
    }
    FindObsoleteFiles(&job_context, false);
  }  // lock released here

  LogFlush(immutable_db_options_.info_log);
  // remove files outside the db-lock
  if (job_context.HaveSomethingToDelete()) {
    // Call PurgeObsoleteFiles() without holding mutex.
    PurgeObsoleteFiles(job_context);
  }
  job_context.Clean();
  return status;
}

Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
                                   const RangePtr* ranges, size_t n,
                                   bool include_end) {
  Status status;
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
  ColumnFamilyData* cfd = cfh->cfd();
  VersionEdit edit;
  std::set<FileMetaData*> deleted_files;
  JobContext job_context(next_job_id_.fetch_add(1), true);
  {
    InstrumentedMutexLock l(&mutex_);
    Version* input_version = cfd->current();

    auto* vstorage = input_version->storage_info();
    for (size_t r = 0; r < n; r++) {
      auto begin = ranges[r].start, end = ranges[r].limit;
      for (int i = 1; i < cfd->NumberLevels(); i++) {
        if (vstorage->LevelFiles(i).empty() ||
            !vstorage->OverlapInLevel(i, begin, end)) {
          continue;
        }
        std::vector<FileMetaData*> level_files;
        InternalKey begin_storage, end_storage, *begin_key, *end_key;
        if (begin == nullptr) {
          begin_key = nullptr;
        } else {
          begin_storage.SetMinPossibleForUserKey(*begin);
          begin_key = &begin_storage;
        }
        if (end == nullptr) {
          end_key = nullptr;
        } else {
          end_storage.SetMaxPossibleForUserKey(*end);
          end_key = &end_storage;
        }

        vstorage->GetCleanInputsWithinInterval(
            i, begin_key, end_key, &level_files, -1 /* hint_index */,
            nullptr /* file_index */);
        FileMetaData* level_file;
        for (uint32_t j = 0; j < level_files.size(); j++) {
          level_file = level_files[j];
          if (level_file->being_compacted) {
            continue;
          }
          if (deleted_files.find(level_file) != deleted_files.end()) {
            continue;
          }
          if (!include_end && end != nullptr &&
              cfd->user_comparator()->Compare(level_file->largest.user_key(),
                                              *end) == 0) {
            continue;
          }
          edit.SetColumnFamily(cfd->GetID());
          edit.DeleteFile(i, level_file->fd.GetNumber());
          deleted_files.insert(level_file);
          level_file->being_compacted = true;
        }
      }
    }
    if (edit.GetDeletedFiles().empty()) {
      job_context.Clean();
      return Status::OK();
    }
    input_version->Ref();
    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
                                    &edit, &mutex_, directories_.GetDbDir());
    if (status.ok()) {
      InstallSuperVersionAndScheduleWork(cfd,
                                         &job_context.superversion_contexts[0],
                                         *cfd->GetLatestMutableCFOptions());
    }
    for (auto* deleted_file : deleted_files) {
      deleted_file->being_compacted = false;
    }
    input_version->Unref();
    FindObsoleteFiles(&job_context, false);
  }  // lock released here

  LogFlush(immutable_db_options_.info_log);
  // remove files outside the db-lock
  if (job_context.HaveSomethingToDelete()) {
    // Call PurgeObsoleteFiles() without holding mutex.
    PurgeObsoleteFiles(job_context);
  }
  job_context.Clean();
  return status;
}
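// Example (illustrative sketch): the public entry points for this path are the
// DeleteFilesInRange()/DeleteFilesInRanges() helpers in rocksdb/convenience.h,
// which drop whole SST files fully contained in the given ranges (db assumed
// open; the key bounds are placeholders).
//
//   rocksdb::Slice begin("a"), end("m");
//   rocksdb::RangePtr range(&begin, &end);
//   rocksdb::Status s = DeleteFilesInRanges(db, db->DefaultColumnFamily(),
//                                           &range, 1, /*include_end=*/true);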

void DBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
  InstrumentedMutexLock l(&mutex_);
  versions_->GetLiveFilesMetaData(metadata);
}

void DBImpl::GetColumnFamilyMetaData(ColumnFamilyHandle* column_family,
                                     ColumnFamilyMetaData* cf_meta) {
  assert(column_family);
  auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
  auto* sv = GetAndRefSuperVersion(cfd);
  sv->current->GetColumnFamilyMetaData(cf_meta);
  ReturnAndCleanupSuperVersion(cfd, sv);
}

#endif  // ROCKSDB_LITE

Status DBImpl::CheckConsistency() {
  mutex_.AssertHeld();
  std::vector<LiveFileMetaData> metadata;
  versions_->GetLiveFilesMetaData(&metadata);
  TEST_SYNC_POINT("DBImpl::CheckConsistency:AfterGetLiveFilesMetaData");

  std::string corruption_messages;
  for (const auto& md : metadata) {
    // md.name has a leading "/".
    std::string file_path = md.db_path + md.name;

    uint64_t fsize = 0;
    TEST_SYNC_POINT("DBImpl::CheckConsistency:BeforeGetFileSize");
    Status s = env_->GetFileSize(file_path, &fsize);
    if (!s.ok() &&
        env_->GetFileSize(Rocks2LevelTableFileName(file_path), &fsize).ok()) {
      s = Status::OK();
    }
    if (!s.ok()) {
      corruption_messages +=
          "Can't access " + md.name + ": " + s.ToString() + "\n";
    } else if (fsize != md.size) {
      corruption_messages += "Sst file size mismatch: " + file_path +
                             ". Size recorded in manifest " +
                             ToString(md.size) + ", actual size " +
                             ToString(fsize) + "\n";
    }
  }
  if (corruption_messages.size() == 0) {
    return Status::OK();
  } else {
    return Status::Corruption(corruption_messages);
  }
}

Status DBImpl::GetDbIdentity(std::string& identity) const {
  std::string idfilename = IdentityFileName(dbname_);
  const EnvOptions soptions;
  std::unique_ptr<SequentialFileReader> id_file_reader;
  Status s;
  {
    std::unique_ptr<SequentialFile> idfile;
    s = env_->NewSequentialFile(idfilename, &idfile, soptions);
    if (!s.ok()) {
      return s;
    }
    id_file_reader.reset(
        new SequentialFileReader(std::move(idfile), idfilename));
  }

  uint64_t file_size;
  s = env_->GetFileSize(idfilename, &file_size);
  if (!s.ok()) {
    return s;
  }
  char* buffer =
      reinterpret_cast<char*>(alloca(static_cast<size_t>(file_size)));
  Slice id;
  s = id_file_reader->Read(static_cast<size_t>(file_size), &id, buffer);
  if (!s.ok()) {
    return s;
  }
  identity.assign(id.ToString());
  // If last character is '\n' remove it from identity
  if (identity.size() > 0 && identity.back() == '\n') {
    identity.pop_back();
  }
  return s;
}
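// Example (illustrative sketch): the identity string read above comes from the
// IDENTITY file and uniquely identifies this database instance.
//
//   std::string id;
//   rocksdb::Status s = db->GetDbIdentity(id);
//   // id holds the IDENTITY file contents, stripped of a trailing '\n'.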

// Default implementation -- returns not supported status
Status DB::CreateColumnFamily(const ColumnFamilyOptions& /*cf_options*/,
                              const std::string& /*column_family_name*/,
                              ColumnFamilyHandle** /*handle*/) {
  return Status::NotSupported("");
}

Status DB::CreateColumnFamilies(
    const ColumnFamilyOptions& /*cf_options*/,
    const std::vector<std::string>& /*column_family_names*/,
    std::vector<ColumnFamilyHandle*>* /*handles*/) {
  return Status::NotSupported("");
}

Status DB::CreateColumnFamilies(
    const std::vector<ColumnFamilyDescriptor>& /*column_families*/,
    std::vector<ColumnFamilyHandle*>* /*handles*/) {
  return Status::NotSupported("");
}

Status DB::DropColumnFamily(ColumnFamilyHandle* /*column_family*/) {
  return Status::NotSupported("");
}

Status DB::DropColumnFamilies(
    const std::vector<ColumnFamilyHandle*>& /*column_families*/) {
  return Status::NotSupported("");
}

Status DB::DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) {
  delete column_family;
  return Status::OK();
}

DB::~DB() {}

Status DBImpl::Close() {
  if (!closed_) {
    {
      InstrumentedMutexLock l(&mutex_);
      // If there is unreleased snapshot, fail the close call
      if (!snapshots_.empty()) {
        return Status::Aborted("Cannot close DB with unreleased snapshot.");
      }
    }

    closed_ = true;
    return CloseImpl();
  }
  return Status::OK();
}
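// Example (illustrative sketch): Close() surfaces shutdown errors that the
// destructor would otherwise swallow, and it refuses to close while snapshots
// are still outstanding.
//
//   db->ReleaseSnapshot(snapshot);   // release all snapshots first
//   rocksdb::Status s = db->Close();
//   delete db;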

Status DB::ListColumnFamilies(const DBOptions& db_options,
                              const std::string& name,
                              std::vector<std::string>* column_families) {
  return VersionSet::ListColumnFamilies(column_families, name, db_options.env);
}

Snapshot::~Snapshot() {}

Status DestroyDB(const std::string& dbname, const Options& options,
                 const std::vector<ColumnFamilyDescriptor>& column_families) {
  ImmutableDBOptions soptions(SanitizeOptions(dbname, options));
  Env* env = soptions.env;
  std::vector<std::string> filenames;
  bool wal_in_db_path = IsWalDirSameAsDBPath(&soptions);

  // Reset the logger because it holds a handle to the
  // log file and prevents cleanup and directory removal
  soptions.info_log.reset();
  // Ignore error in case directory does not exist
  env->GetChildren(dbname, &filenames);

  FileLock* lock;
  const std::string lockname = LockFileName(dbname);
  Status result = env->LockFile(lockname, &lock);
  if (result.ok()) {
    uint64_t number;
    FileType type;
    InfoLogPrefix info_log_prefix(!soptions.db_log_dir.empty(), dbname);
    for (const auto& fname : filenames) {
      if (ParseFileName(fname, &number, info_log_prefix.prefix, &type) &&
          type != kDBLockFile) {  // Lock file will be deleted at end
        Status del;
        std::string path_to_delete = dbname + "/" + fname;
        if (type == kMetaDatabase) {
          del = DestroyDB(path_to_delete, options);
        } else if (type == kTableFile || type == kLogFile) {
          del =
              DeleteDBFile(&soptions, path_to_delete, dbname,
                           /*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
        } else {
          del = env->DeleteFile(path_to_delete);
        }
        if (result.ok() && !del.ok()) {
          result = del;
        }
      }
    }

    std::vector<std::string> paths;

    for (const auto& path : options.db_paths) {
      paths.emplace_back(path.path);
    }
    for (const auto& cf : column_families) {
      for (const auto& path : cf.options.cf_paths) {
        paths.emplace_back(path.path);
      }
    }

    // Remove duplicate paths.
    // Note that we compare only the actual paths but not path ids.
    // This reason is that same path can appear at different path_ids
    // for different column families.
    std::sort(paths.begin(), paths.end());
    paths.erase(std::unique(paths.begin(), paths.end()), paths.end());

    for (const auto& path : paths) {
      if (env->GetChildren(path, &filenames).ok()) {
        for (const auto& fname : filenames) {
          if (ParseFileName(fname, &number, &type) &&
              type == kTableFile) {  // Lock file will be deleted at end
            std::string table_path = path + "/" + fname;
            Status del = DeleteDBFile(&soptions, table_path, dbname,
                                      /*force_bg=*/false, /*force_fg=*/false);
            if (result.ok() && !del.ok()) {
              result = del;
            }
          }
        }
        env->DeleteDir(path);
      }
    }

    std::vector<std::string> walDirFiles;
    std::string archivedir = ArchivalDirectory(dbname);
    bool wal_dir_exists = false;
    if (dbname != soptions.wal_dir) {
      wal_dir_exists = env->GetChildren(soptions.wal_dir, &walDirFiles).ok();
      archivedir = ArchivalDirectory(soptions.wal_dir);
    }

    // Archive dir may be inside wal dir or dbname and should be
    // processed and removed before those otherwise we have issues
    // removing them
    std::vector<std::string> archiveFiles;
    if (env->GetChildren(archivedir, &archiveFiles).ok()) {
      // Delete archival files.
      for (const auto& file : archiveFiles) {
        if (ParseFileName(file, &number, &type) && type == kLogFile) {
          Status del =
              DeleteDBFile(&soptions, archivedir + "/" + file, archivedir,
                           /*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
          if (result.ok() && !del.ok()) {
            result = del;
          }
        }
      }
      env->DeleteDir(archivedir);
    }

    // Delete log files in the WAL dir
    if (wal_dir_exists) {
      for (const auto& file : walDirFiles) {
        if (ParseFileName(file, &number, &type) && type == kLogFile) {
          Status del =
              DeleteDBFile(&soptions, LogFileName(soptions.wal_dir, number),
                           soptions.wal_dir, /*force_bg=*/false,
                           /*force_fg=*/!wal_in_db_path);
          if (result.ok() && !del.ok()) {
            result = del;
          }
        }
      }
      env->DeleteDir(soptions.wal_dir);
    }

    env->UnlockFile(lock);  // Ignore error since state is already gone
    env->DeleteFile(lockname);
    env->DeleteDir(dbname);  // Ignore error in case dir contains other files
  }
  return result;
}
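// Example (illustrative sketch): DestroyDB() removes every file belonging to a
// database (SSTs in all db_paths/cf_paths, WALs, archived logs, lock file), so
// it must only be called on a closed DB. The path below is a placeholder.
//
//   rocksdb::Options options;
//   // ... open, use, and close the DB first ...
//   rocksdb::Status s = rocksdb::DestroyDB("/tmp/testdb", options);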

Status DBImpl::WriteOptionsFile(bool need_mutex_lock,
                                bool need_enter_write_thread) {
#ifndef ROCKSDB_LITE
  WriteThread::Writer w;
  if (need_mutex_lock) {
    mutex_.Lock();
  } else {
    mutex_.AssertHeld();
  }
  if (need_enter_write_thread) {
    write_thread_.EnterUnbatched(&w, &mutex_);
  }

  std::vector<std::string> cf_names;
  std::vector<ColumnFamilyOptions> cf_opts;

  // This part requires mutex to protect the column family options
  for (auto cfd : *versions_->GetColumnFamilySet()) {
    if (cfd->IsDropped()) {
      continue;
    }
    cf_names.push_back(cfd->GetName());
    cf_opts.push_back(cfd->GetLatestCFOptions());
  }

  // Unlock during expensive operations.  New writes cannot get here
  // because the single write thread ensures all new writes get queued.
  DBOptions db_options =
      BuildDBOptions(immutable_db_options_, mutable_db_options_);
  mutex_.Unlock();

  TEST_SYNC_POINT("DBImpl::WriteOptionsFile:1");
  TEST_SYNC_POINT("DBImpl::WriteOptionsFile:2");

  std::string file_name =
      TempOptionsFileName(GetName(), versions_->NewFileNumber());
  Status s =
      PersistRocksDBOptions(db_options, cf_names, cf_opts, file_name, GetEnv());

  if (s.ok()) {
    s = RenameTempFileToOptionsFile(file_name);
  }
  // restore lock
  if (!need_mutex_lock) {
    mutex_.Lock();
  }
  if (need_enter_write_thread) {
    write_thread_.ExitUnbatched(&w);
  }
  if (!s.ok()) {
    ROCKS_LOG_WARN(immutable_db_options_.info_log,
                   "Unnable to persist options -- %s", s.ToString().c_str());
    if (immutable_db_options_.fail_if_options_file_error) {
      return Status::IOError("Unable to persist options.",
                             s.ToString().c_str());
    }
  }
#else
  (void)need_mutex_lock;
  (void)need_enter_write_thread;
#endif  // !ROCKSDB_LITE
  return Status::OK();
}

#ifndef ROCKSDB_LITE
namespace {
void DeleteOptionsFilesHelper(const std::map<uint64_t, std::string>& filenames,
                              const size_t num_files_to_keep,
                              const std::shared_ptr<Logger>& info_log,
                              Env* env) {
  if (filenames.size() <= num_files_to_keep) {
    return;
  }
  for (auto iter = std::next(filenames.begin(), num_files_to_keep);
       iter != filenames.end(); ++iter) {
    if (!env->DeleteFile(iter->second).ok()) {
      ROCKS_LOG_WARN(info_log, "Unable to delete options file %s",
                     iter->second.c_str());
    }
  }
}
}  // namespace
#endif  // !ROCKSDB_LITE

Status DBImpl::DeleteObsoleteOptionsFiles() {
#ifndef ROCKSDB_LITE
  std::vector<std::string> filenames;
  // use an ordered map to keep the filenames sorted from the newest
  // to the oldest.
  std::map<uint64_t, std::string> options_filenames;
  Status s;
  s = GetEnv()->GetChildren(GetName(), &filenames);
  if (!s.ok()) {
    return s;
  }
  for (auto& filename : filenames) {
    uint64_t file_number;
    FileType type;
    if (ParseFileName(filename, &file_number, &type) && type == kOptionsFile) {
      options_filenames.insert(
          {std::numeric_limits<uint64_t>::max() - file_number,
           GetName() + "/" + filename});
    }
  }

  // Keeps the latest 2 Options files
  const size_t kNumOptionsFilesKept = 2;
  DeleteOptionsFilesHelper(options_filenames, kNumOptionsFilesKept,
                           immutable_db_options_.info_log, GetEnv());
  return Status::OK();
#else
  return Status::OK();
#endif  // !ROCKSDB_LITE
}

Status DBImpl::RenameTempFileToOptionsFile(const std::string& file_name) {
#ifndef ROCKSDB_LITE
  Status s;

  uint64_t options_file_number = versions_->NewFileNumber();
  std::string options_file_name =
      OptionsFileName(GetName(), options_file_number);
  // Retry if the file name happens to conflict with an existing one.
  s = GetEnv()->RenameFile(file_name, options_file_name);
  if (s.ok()) {
    InstrumentedMutexLock l(&mutex_);
    versions_->options_file_number_ = options_file_number;
  }

  if (0 == disable_delete_obsolete_files_) {
    DeleteObsoleteOptionsFiles();
  }
  return s;
#else
  (void)file_name;
  return Status::OK();
#endif  // !ROCKSDB_LITE
}

#ifdef ROCKSDB_USING_THREAD_STATUS

void DBImpl::NewThreadStatusCfInfo(ColumnFamilyData* cfd) const {
  if (immutable_db_options_.enable_thread_tracking) {
    ThreadStatusUtil::NewColumnFamilyInfo(this, cfd, cfd->GetName(),
                                          cfd->ioptions()->env);
  }
}

void DBImpl::EraseThreadStatusCfInfo(ColumnFamilyData* cfd) const {
  if (immutable_db_options_.enable_thread_tracking) {
    ThreadStatusUtil::EraseColumnFamilyInfo(cfd);
  }
}

void DBImpl::EraseThreadStatusDbInfo() const {
  if (immutable_db_options_.enable_thread_tracking) {
    ThreadStatusUtil::EraseDatabaseInfo(this);
  }
}

#else
void DBImpl::NewThreadStatusCfInfo(ColumnFamilyData* /*cfd*/) const {}

void DBImpl::EraseThreadStatusCfInfo(ColumnFamilyData* /*cfd*/) const {}

void DBImpl::EraseThreadStatusDbInfo() const {}
#endif  // ROCKSDB_USING_THREAD_STATUS

//
// A global method that can dump out the build version
void DumpRocksDBBuildVersion(Logger* log) {
#if !defined(IOS_CROSS_COMPILE)
  // if we compile with Xcode, we don't run build_detect_version, so we don't
  // generate util/build_version.cc
  ROCKS_LOG_HEADER(log, "RocksDB version: %d.%d.%d\n", ROCKSDB_MAJOR,
                   ROCKSDB_MINOR, ROCKSDB_PATCH);
  ROCKS_LOG_HEADER(log, "Git sha %s", rocksdb_build_git_sha);
  ROCKS_LOG_HEADER(log, "Compile date %s", rocksdb_build_compile_date);
#else
  (void)log;  // ignore "-Wunused-parameter"
#endif
}

#ifndef ROCKSDB_LITE
SequenceNumber DBImpl::GetEarliestMemTableSequenceNumber(SuperVersion* sv,
                                                         bool include_history) {
  // Find the earliest sequence number that we know we can rely on reading
  // from the memtable without needing to check sst files.
  SequenceNumber earliest_seq =
      sv->imm->GetEarliestSequenceNumber(include_history);
  if (earliest_seq == kMaxSequenceNumber) {
    earliest_seq = sv->mem->GetEarliestSequenceNumber();
  }
  assert(sv->mem->GetEarliestSequenceNumber() >= earliest_seq);

  return earliest_seq;
}
#endif  // ROCKSDB_LITE

#ifndef ROCKSDB_LITE
Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
                                       bool cache_only,
                                       SequenceNumber lower_bound_seq,
                                       SequenceNumber* seq,
                                       bool* found_record_for_key,
                                       bool* is_blob_index) {
  Status s;
  MergeContext merge_context;
  SequenceNumber max_covering_tombstone_seq = 0;

  ReadOptions read_options;
  SequenceNumber current_seq = versions_->LastSequence();
  LookupKey lkey(key, current_seq);

  *seq = kMaxSequenceNumber;
  *found_record_for_key = false;

  // Check if there is a record for this key in the latest memtable
  sv->mem->Get(lkey, nullptr, &s, &merge_context, &max_covering_tombstone_seq,
               seq, read_options, nullptr /*read_callback*/, is_blob_index);

  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
    // unexpected error reading memtable.
    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                    "Unexpected status returned from MemTable::Get: %s\n",
                    s.ToString().c_str());

    return s;
  }

  if (*seq != kMaxSequenceNumber) {
    // Found a sequence number, no need to check immutable memtables
    *found_record_for_key = true;
    return Status::OK();
  }

  SequenceNumber lower_bound_in_mem = sv->mem->GetEarliestSequenceNumber();
  if (lower_bound_in_mem != kMaxSequenceNumber &&
      lower_bound_in_mem < lower_bound_seq) {
    *found_record_for_key = false;
    return Status::OK();
  }

  // Check if there is a record for this key in the immutable memtables
  sv->imm->Get(lkey, nullptr, &s, &merge_context, &max_covering_tombstone_seq,
               seq, read_options, nullptr /*read_callback*/, is_blob_index);

  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
    // unexpected error reading memtable.
    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                    "Unexpected status returned from MemTableList::Get: %s\n",
                    s.ToString().c_str());

    return s;
  }

  if (*seq != kMaxSequenceNumber) {
    // Found a sequence number, no need to check memtable history
    *found_record_for_key = true;
    return Status::OK();
  }

  SequenceNumber lower_bound_in_imm = sv->imm->GetEarliestSequenceNumber();
  if (lower_bound_in_imm != kMaxSequenceNumber &&
      lower_bound_in_imm < lower_bound_seq) {
    *found_record_for_key = false;
    return Status::OK();
  }

  // Check if there is a record for this key in the memtable history
  sv->imm->GetFromHistory(lkey, nullptr, &s, &merge_context,
                          &max_covering_tombstone_seq, seq, read_options,
                          is_blob_index);

  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
    // unexpected error reading memtable.
    ROCKS_LOG_ERROR(
        immutable_db_options_.info_log,
        "Unexpected status returned from MemTableList::GetFromHistory: %s\n",
        s.ToString().c_str());

    return s;
  }

  if (*seq != kMaxSequenceNumber) {
    // Found a sequence number, no need to check SST files
    *found_record_for_key = true;
    return Status::OK();
  }

  // We could do a sv->imm->GetEarliestSequenceNumber(/*include_history*/ true)
  // check here to skip the history if possible. But currently the caller
  // already does that. Maybe we should move the logic here later.

  // TODO(agiardullo): possible optimization: consider checking cached
  // SST files if cache_only=true?
  if (!cache_only) {
    // Check tables
    sv->current->Get(read_options, lkey, nullptr, &s, &merge_context,
                     &max_covering_tombstone_seq, nullptr /* value_found */,
                     found_record_for_key, seq, nullptr /*read_callback*/,
                     is_blob_index);

    if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
      // unexpected error reading SST files
      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                      "Unexpected status returned from Version::Get: %s\n",
                      s.ToString().c_str());
    }
  }

  return s;
}

Status DBImpl::IngestExternalFile(
    ColumnFamilyHandle* column_family,
    const std::vector<std::string>& external_files,
    const IngestExternalFileOptions& ingestion_options) {
  IngestExternalFileArg arg;
  arg.column_family = column_family;
  arg.external_files = external_files;
  arg.options = ingestion_options;
  return IngestExternalFiles({arg});
}
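// Example (illustrative sketch): ingesting externally built SST files (e.g.
// produced with SstFileWriter) into a column family. The file paths below are
// placeholders; db and cf_handle stand for an open DB and handle.
//
//   rocksdb::IngestExternalFileOptions ifo;
//   ifo.move_files = true;  // link/move instead of copy when possible
//   rocksdb::Status s = db->IngestExternalFile(
//       cf_handle, {"/path/file1.sst", "/path/file2.sst"}, ifo);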

Status DBImpl::IngestExternalFiles(
    const std::vector<IngestExternalFileArg>& args) {
  if (args.empty()) {
    return Status::InvalidArgument("ingestion arg list is empty");
  }
  {
    std::unordered_set<ColumnFamilyHandle*> unique_cfhs;
    for (const auto& arg : args) {
      if (arg.column_family == nullptr) {
        return Status::InvalidArgument("column family handle is null");
      } else if (unique_cfhs.count(arg.column_family) > 0) {
        return Status::InvalidArgument(
            "ingestion args have duplicate column families");
      }
      unique_cfhs.insert(arg.column_family);
    }
  }
  // Ingest multiple external SST files atomically.
  size_t num_cfs = args.size();
  for (size_t i = 0; i != num_cfs; ++i) {
    if (args[i].external_files.empty()) {
      char err_msg[128] = {0};
      snprintf(err_msg, 128, "external_files[%zu] is empty", i);
      return Status::InvalidArgument(err_msg);
    }
  }
  for (const auto& arg : args) {
    const IngestExternalFileOptions& ingest_opts = arg.options;
    if (ingest_opts.ingest_behind &&
        !immutable_db_options_.allow_ingest_behind) {
      return Status::InvalidArgument(
          "can't ingest_behind file in DB with allow_ingest_behind=false");
    }
  }

  // TODO (yanqin) maybe handle the case in which column_families have
  // duplicates
  std::list<uint64_t>::iterator pending_output_elem;
  size_t total = 0;
  for (const auto& arg : args) {
    total += arg.external_files.size();
  }
  uint64_t next_file_number = 0;
  Status status = ReserveFileNumbersBeforeIngestion(
      static_cast<ColumnFamilyHandleImpl*>(args[0].column_family)->cfd(), total,
      &pending_output_elem, &next_file_number);
  if (!status.ok()) {
    InstrumentedMutexLock l(&mutex_);
    ReleaseFileNumberFromPendingOutputs(pending_output_elem);
    return status;
  }

  std::vector<ExternalSstFileIngestionJob> ingestion_jobs;
  for (const auto& arg : args) {
    auto* cfd = static_cast<ColumnFamilyHandleImpl*>(arg.column_family)->cfd();
    ingestion_jobs.emplace_back(env_, versions_.get(), cfd,
                                immutable_db_options_, env_options_,
                                &snapshots_, arg.options, &directories_);
  }
  std::vector<std::pair<bool, Status>> exec_results;
  for (size_t i = 0; i != num_cfs; ++i) {
    exec_results.emplace_back(false, Status::OK());
  }
  // TODO(yanqin) maybe make jobs run in parallel
  for (size_t i = 1; i != num_cfs; ++i) {
    uint64_t start_file_number =
        next_file_number + args[i - 1].external_files.size();
    auto* cfd =
        static_cast<ColumnFamilyHandleImpl*>(args[i].column_family)->cfd();
    SuperVersion* super_version = cfd->GetReferencedSuperVersion(&mutex_);
    exec_results[i].second = ingestion_jobs[i].Prepare(
        args[i].external_files, start_file_number, super_version);
    exec_results[i].first = true;
    CleanupSuperVersion(super_version);
  }
  TEST_SYNC_POINT("DBImpl::IngestExternalFiles:BeforeLastJobPrepare:0");
  TEST_SYNC_POINT("DBImpl::IngestExternalFiles:BeforeLastJobPrepare:1");
  {
    auto* cfd =
        static_cast<ColumnFamilyHandleImpl*>(args[0].column_family)->cfd();
    SuperVersion* super_version = cfd->GetReferencedSuperVersion(&mutex_);
    exec_results[0].second = ingestion_jobs[0].Prepare(
        args[0].external_files, next_file_number, super_version);
    exec_results[0].first = true;
    CleanupSuperVersion(super_version);
  }
  for (const auto& exec_result : exec_results) {
    if (!exec_result.second.ok()) {
      status = exec_result.second;
      break;
    }
  }
  if (!status.ok()) {
    for (size_t i = 0; i != num_cfs; ++i) {
      if (exec_results[i].first) {
        ingestion_jobs[i].Cleanup(status);
      }
    }
    InstrumentedMutexLock l(&mutex_);
    ReleaseFileNumberFromPendingOutputs(pending_output_elem);
    return status;
  }

  std::vector<SuperVersionContext> sv_ctxs;
  for (size_t i = 0; i != num_cfs; ++i) {
    sv_ctxs.emplace_back(true /* create_superversion */);
  }
  TEST_SYNC_POINT("DBImpl::IngestExternalFiles:BeforeJobsRun:0");
  TEST_SYNC_POINT("DBImpl::IngestExternalFiles:BeforeJobsRun:1");
  TEST_SYNC_POINT("DBImpl::AddFile:Start");
  {
    InstrumentedMutexLock l(&mutex_);
    TEST_SYNC_POINT("DBImpl::AddFile:MutexLock");

    // Stop writes to the DB by entering both write threads
    WriteThread::Writer w;
    write_thread_.EnterUnbatched(&w, &mutex_);
    WriteThread::Writer nonmem_w;
    if (two_write_queues_) {
      nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
    }

    num_running_ingest_file_ += static_cast<int>(num_cfs);
    TEST_SYNC_POINT("DBImpl::IngestExternalFile:AfterIncIngestFileCounter");

    bool at_least_one_cf_need_flush = false;
    std::vector<bool> need_flush(num_cfs, false);
    for (size_t i = 0; i != num_cfs; ++i) {
      auto* cfd =
          static_cast<ColumnFamilyHandleImpl*>(args[i].column_family)->cfd();
      if (cfd->IsDropped()) {
        // TODO (yanqin) investigate whether we should abort ingestion or
        // proceed with other non-dropped column families.
        status = Status::InvalidArgument(
            "cannot ingest an external file into a dropped CF");
        break;
      }
      bool tmp = false;
      status = ingestion_jobs[i].NeedsFlush(&tmp, cfd->GetSuperVersion());
      need_flush[i] = tmp;
      at_least_one_cf_need_flush = (at_least_one_cf_need_flush || tmp);
      if (!status.ok()) {
        break;
      }
    }
    TEST_SYNC_POINT_CALLBACK("DBImpl::IngestExternalFile:NeedFlush",
                             &at_least_one_cf_need_flush);

    if (status.ok() && at_least_one_cf_need_flush) {
      FlushOptions flush_opts;
      flush_opts.allow_write_stall = true;
      if (immutable_db_options_.atomic_flush) {
        autovector<ColumnFamilyData*> cfds_to_flush;
        SelectColumnFamiliesForAtomicFlush(&cfds_to_flush);
        mutex_.Unlock();
        status = AtomicFlushMemTables(cfds_to_flush, flush_opts,
                                      FlushReason::kExternalFileIngestion,
                                      true /* writes_stopped */);
        mutex_.Lock();
      } else {
        for (size_t i = 0; i != num_cfs; ++i) {
          if (need_flush[i]) {
            mutex_.Unlock();
            auto* cfd =
                static_cast<ColumnFamilyHandleImpl*>(args[i].column_family)
                    ->cfd();
            status = FlushMemTable(cfd, flush_opts,
                                   FlushReason::kExternalFileIngestion,
                                   true /* writes_stopped */);
            mutex_.Lock();
            if (!status.ok()) {
              break;
            }
          }
        }
      }
    }
    // Run ingestion jobs.
    if (status.ok()) {
      for (size_t i = 0; i != num_cfs; ++i) {
        status = ingestion_jobs[i].Run();
        if (!status.ok()) {
          break;
        }
      }
    }
    if (status.ok()) {
      bool should_increment_last_seqno =
          ingestion_jobs[0].ShouldIncrementLastSequence();
#ifndef NDEBUG
      for (size_t i = 1; i != num_cfs; ++i) {
        assert(should_increment_last_seqno ==
               ingestion_jobs[i].ShouldIncrementLastSequence());
      }
#endif
      if (should_increment_last_seqno) {
        const SequenceNumber last_seqno = versions_->LastSequence();
        versions_->SetLastAllocatedSequence(last_seqno + 1);
        versions_->SetLastPublishedSequence(last_seqno + 1);
        versions_->SetLastSequence(last_seqno + 1);
      }
      autovector<ColumnFamilyData*> cfds_to_commit;
      autovector<const MutableCFOptions*> mutable_cf_options_list;
      autovector<autovector<VersionEdit*>> edit_lists;
      uint32_t num_entries = 0;
      for (size_t i = 0; i != num_cfs; ++i) {
        auto* cfd =
            static_cast<ColumnFamilyHandleImpl*>(args[i].column_family)->cfd();
        if (cfd->IsDropped()) {
          continue;
        }
        cfds_to_commit.push_back(cfd);
        mutable_cf_options_list.push_back(cfd->GetLatestMutableCFOptions());
        autovector<VersionEdit*> edit_list;
        edit_list.push_back(ingestion_jobs[i].edit());
        edit_lists.push_back(edit_list);
        ++num_entries;
      }
      // Mark the version edits as an atomic group if the number of version
      // edits exceeds 1.
      if (cfds_to_commit.size() > 1) {
        for (auto& edits : edit_lists) {
          assert(edits.size() == 1);
          edits[0]->MarkAtomicGroup(--num_entries);
        }
        assert(0 == num_entries);
      }
      status =
          versions_->LogAndApply(cfds_to_commit, mutable_cf_options_list,
                                 edit_lists, &mutex_, directories_.GetDbDir());
    }

    if (status.ok()) {
      for (size_t i = 0; i != num_cfs; ++i) {
        auto* cfd =
            static_cast<ColumnFamilyHandleImpl*>(args[i].column_family)->cfd();
        if (!cfd->IsDropped()) {
          InstallSuperVersionAndScheduleWork(cfd, &sv_ctxs[i],
                                             *cfd->GetLatestMutableCFOptions());
#ifndef NDEBUG
          if (0 == i && num_cfs > 1) {
            TEST_SYNC_POINT(
                "DBImpl::IngestExternalFiles:InstallSVForFirstCF:0");
            TEST_SYNC_POINT(
                "DBImpl::IngestExternalFiles:InstallSVForFirstCF:1");
          }
#endif  // !NDEBUG
        }
      }
    }

    // Resume writes to the DB
    if (two_write_queues_) {
      nonmem_write_thread_.ExitUnbatched(&nonmem_w);
    }
    write_thread_.ExitUnbatched(&w);

    if (status.ok()) {
      for (auto& job : ingestion_jobs) {
        job.UpdateStats();
      }
    }
    ReleaseFileNumberFromPendingOutputs(pending_output_elem);
    num_running_ingest_file_ -= static_cast<int>(num_cfs);
    if (0 == num_running_ingest_file_) {
      bg_cv_.SignalAll();
    }
    TEST_SYNC_POINT("DBImpl::AddFile:MutexUnlock");
  }
  // mutex_ is unlocked here

  // Cleanup
  for (size_t i = 0; i != num_cfs; ++i) {
    sv_ctxs[i].Clean();
    // This may rollback jobs that have completed successfully. This is
    // intended for atomicity.
    ingestion_jobs[i].Cleanup(status);
  }
  if (status.ok()) {
    for (size_t i = 0; i != num_cfs; ++i) {
      auto* cfd =
          static_cast<ColumnFamilyHandleImpl*>(args[i].column_family)->cfd();
      if (!cfd->IsDropped()) {
        NotifyOnExternalFileIngested(cfd, ingestion_jobs[i]);
      }
    }
  }
  return status;
}
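// Example (illustrative sketch): CreateColumnFamilyWithImport() below pairs
// with Checkpoint::ExportColumnFamily(); metadata exported from a source DB is
// used to rebuild a column family from the exported SST files. The names
// "imported_cf" and exported_metadata are placeholders of the sketch.
//
//   rocksdb::ImportColumnFamilyOptions import_opts;
//   import_opts.move_files = false;
//   rocksdb::ColumnFamilyHandle* new_cf = nullptr;
//   rocksdb::Status s = db->CreateColumnFamilyWithImport(
//       rocksdb::ColumnFamilyOptions(), "imported_cf", import_opts,
//       *exported_metadata, &new_cf);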

Status DBImpl::CreateColumnFamilyWithImport(
    const ColumnFamilyOptions& options, const std::string& column_family_name,
    const ImportColumnFamilyOptions& import_options,
    const ExportImportFilesMetaData& metadata,
    ColumnFamilyHandle** handle) {
  assert(handle != nullptr);
  assert(*handle == nullptr);
  std::string cf_comparator_name = options.comparator->Name();
  if (cf_comparator_name != metadata.db_comparator_name) {
    return Status::InvalidArgument("Comparator name mismatch");
  }

  // Create column family.
  auto status = CreateColumnFamily(options, column_family_name, handle);
  if (!status.ok()) {
    return status;
  }

  // Import sst files from metadata.
  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(*handle);
  auto cfd = cfh->cfd();
  ImportColumnFamilyJob import_job(env_, versions_.get(), cfd,
                                   immutable_db_options_, env_options_,
                                   import_options, metadata.files);

  SuperVersionContext dummy_sv_ctx(/* create_superversion */ true);
  VersionEdit dummy_edit;
  uint64_t next_file_number = 0;
  std::list<uint64_t>::iterator pending_output_elem;
  {
    // Lock db mutex
    InstrumentedMutexLock l(&mutex_);
    if (error_handler_.IsDBStopped()) {
      // Don't import files when there is a bg_error
      status = error_handler_.GetBGError();
    }

    // Make sure that background cleanup won't delete the files we are importing
    pending_output_elem = CaptureCurrentFileNumberInPendingOutputs();

    if (status.ok()) {
      // If a crash happens after a hard link has been established, the Recover
      // function may reuse a file number that has already been assigned to the
      // internal file, which would overwrite the external file. To protect the
      // external file, we must make sure the file number is never reused.
      next_file_number =
          versions_->FetchAddFileNumber(metadata.files.size());
      auto cf_options = cfd->GetLatestMutableCFOptions();
      status = versions_->LogAndApply(cfd, *cf_options, &dummy_edit, &mutex_,
                                      directories_.GetDbDir());
      if (status.ok()) {
        InstallSuperVersionAndScheduleWork(cfd, &dummy_sv_ctx, *cf_options);
      }
    }
  }
  dummy_sv_ctx.Clean();

  if (status.ok()) {
    SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
    status = import_job.Prepare(next_file_number, sv);
    CleanupSuperVersion(sv);
  }

  if (status.ok()) {
    SuperVersionContext sv_context(true /*create_superversion*/);
    {
      // Lock db mutex
      InstrumentedMutexLock l(&mutex_);

      // Stop writes to the DB by entering both write threads
      WriteThread::Writer w;
      write_thread_.EnterUnbatched(&w, &mutex_);
      WriteThread::Writer nonmem_w;
      if (two_write_queues_) {
        nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
      }

      num_running_ingest_file_++;
      assert(!cfd->IsDropped());
      status = import_job.Run();

      // Install job edit [Mutex will be unlocked here]
      if (status.ok()) {
        auto cf_options = cfd->GetLatestMutableCFOptions();
        status = versions_->LogAndApply(cfd, *cf_options, import_job.edit(),
                                        &mutex_, directories_.GetDbDir());
        if (status.ok()) {
          InstallSuperVersionAndScheduleWork(cfd, &sv_context, *cf_options);
        }
      }

      // Resume writes to the DB
      if (two_write_queues_) {
        nonmem_write_thread_.ExitUnbatched(&nonmem_w);
      }
      write_thread_.ExitUnbatched(&w);

      num_running_ingest_file_--;
      if (num_running_ingest_file_ == 0) {
        bg_cv_.SignalAll();
      }
    }
    // mutex_ is unlocked here

    sv_context.Clean();
  }

  {
    InstrumentedMutexLock l(&mutex_);
    ReleaseFileNumberFromPendingOutputs(pending_output_elem);
  }

  import_job.Cleanup(status);
  if (!status.ok()) {
    DropColumnFamily(*handle);
    DestroyColumnFamilyHandle(*handle);
    *handle = nullptr;
  }
  return status;
}
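
// A hedged usage sketch for CreateColumnFamilyWithImport. The metadata is
// typically produced by Checkpoint::ExportColumnFamily on the source DB;
// `src_db`, `dst_db`, `src_cf` and the export directory are assumptions made
// for illustration only.
//
//   Checkpoint* checkpoint = nullptr;
//   Status s = Checkpoint::Create(src_db, &checkpoint);
//   ExportImportFilesMetaData* metadata = nullptr;
//   if (s.ok()) {
//     s = checkpoint->ExportColumnFamily(src_cf, "/tmp/cf_export", &metadata);
//   }
//   ColumnFamilyHandle* imported_cf = nullptr;
//   if (s.ok()) {
//     ImportColumnFamilyOptions import_opts;
//     import_opts.move_files = true;
//     s = dst_db->CreateColumnFamilyWithImport(ColumnFamilyOptions(),
//                                              "imported_cf", import_opts,
//                                              *metadata, &imported_cf);
//   }
//   delete metadata;
//   delete checkpoint;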

Status DBImpl::VerifyChecksum() {
  Status s;
  std::vector<ColumnFamilyData*> cfd_list;
  {
    InstrumentedMutexLock l(&mutex_);
    for (auto cfd : *versions_->GetColumnFamilySet()) {
      if (!cfd->IsDropped() && cfd->initialized()) {
        cfd->Ref();
        cfd_list.push_back(cfd);
      }
    }
  }
  std::vector<SuperVersion*> sv_list;
  for (auto cfd : cfd_list) {
    sv_list.push_back(cfd->GetReferencedSuperVersion(&mutex_));
  }
  for (auto& sv : sv_list) {
    VersionStorageInfo* vstorage = sv->current->storage_info();
    ColumnFamilyData* cfd = sv->current->cfd();
    Options opts;
    {
      InstrumentedMutexLock l(&mutex_);
      opts = Options(BuildDBOptions(immutable_db_options_, mutable_db_options_),
                     cfd->GetLatestCFOptions());
    }
    for (int i = 0; i < vstorage->num_non_empty_levels() && s.ok(); i++) {
      for (size_t j = 0; j < vstorage->LevelFilesBrief(i).num_files && s.ok();
           j++) {
        const auto& fd = vstorage->LevelFilesBrief(i).files[j].fd;
        std::string fname = TableFileName(cfd->ioptions()->cf_paths,
                                          fd.GetNumber(), fd.GetPathId());
        s = rocksdb::VerifySstFileChecksum(opts, env_options_, fname);
      }
    }
    if (!s.ok()) {
      break;
    }
  }
  {
    InstrumentedMutexLock l(&mutex_);
    for (auto sv : sv_list) {
      if (sv && sv->Unref()) {
        sv->Cleanup();
        delete sv;
      }
    }
    for (auto cfd : cfd_list) {
      cfd->Unref();
    }
  }
  return s;
}
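
// A minimal sketch of calling the checksum verification implemented above;
// `db` is an assumed, already-opened DB*.
//
//   Status s = db->VerifyChecksum();
//   if (!s.ok()) {
//     // At least one table file failed block checksum verification.
//   }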

void DBImpl::NotifyOnExternalFileIngested(
    ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job) {
  if (immutable_db_options_.listeners.empty()) {
    return;
  }

  for (const IngestedFileInfo& f : ingestion_job.files_to_ingest()) {
    ExternalFileIngestionInfo info;
    info.cf_name = cfd->GetName();
    info.external_file_path = f.external_file_path;
    info.internal_file_path = f.internal_file_path;
    info.global_seqno = f.assigned_seqno;
    info.table_properties = f.table_properties;
    for (auto listener : immutable_db_options_.listeners) {
      listener->OnExternalFileIngested(this, info);
    }
  }
}

void DBImpl::WaitForIngestFile() {
  mutex_.AssertHeld();
  while (num_running_ingest_file_ > 0) {
    bg_cv_.Wait();
  }
}

Status DBImpl::StartTrace(const TraceOptions& trace_options,
                          std::unique_ptr<TraceWriter>&& trace_writer) {
  InstrumentedMutexLock lock(&trace_mutex_);
  tracer_.reset(new Tracer(env_, trace_options, std::move(trace_writer)));
  return Status::OK();
}

Status DBImpl::EndTrace() {
  InstrumentedMutexLock lock(&trace_mutex_);
  Status s;
  if (tracer_ != nullptr) {
    s = tracer_->Close();
    tracer_.reset();
  } else {
    return Status::IOError("No trace file to close");
  }
  return s;
}
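
// A hedged sketch of driving StartTrace/EndTrace from user code. The trace
// writer comes from NewFileTraceWriter; the output path and the surrounding
// workload are assumptions for illustration.
//
//   std::unique_ptr<TraceWriter> trace_writer;
//   Status s = NewFileTraceWriter(db->GetEnv(), EnvOptions(),
//                                 "/tmp/rocksdb_query_trace", &trace_writer);
//   if (s.ok()) {
//     s = db->StartTrace(TraceOptions(), std::move(trace_writer));
//   }
//   // ... run the workload that should be captured ...
//   if (s.ok()) {
//     s = db->EndTrace();
//   }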

Status DBImpl::StartBlockCacheTrace(
    const TraceOptions& trace_options,
    std::unique_ptr<TraceWriter>&& trace_writer) {
  return block_cache_tracer_.StartTrace(env_, trace_options,
                                        std::move(trace_writer));
}

Status DBImpl::EndBlockCacheTrace() {
  block_cache_tracer_.EndTrace();
  return Status::OK();
}
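
// Block cache tracing follows the same pattern as the query trace above; a
// short hedged sketch, with the output path assumed:
//
//   std::unique_ptr<TraceWriter> bc_writer;
//   Status s = NewFileTraceWriter(db->GetEnv(), EnvOptions(),
//                                 "/tmp/rocksdb_block_cache_trace", &bc_writer);
//   if (s.ok()) {
//     s = db->StartBlockCacheTrace(TraceOptions(), std::move(bc_writer));
//   }
//   // ... workload ...
//   if (s.ok()) {
//     s = db->EndBlockCacheTrace();
//   }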

Status DBImpl::TraceIteratorSeek(const uint32_t& cf_id, const Slice& key) {
  Status s;
  if (tracer_) {
    InstrumentedMutexLock lock(&trace_mutex_);
    if (tracer_) {
      s = tracer_->IteratorSeek(cf_id, key);
    }
  }
  return s;
}

Status DBImpl::TraceIteratorSeekForPrev(const uint32_t& cf_id,
                                        const Slice& key) {
  Status s;
  if (tracer_) {
    InstrumentedMutexLock lock(&trace_mutex_);
    if (tracer_) {
      s = tracer_->IteratorSeekForPrev(cf_id, key);
    }
  }
  return s;
}

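// Reserves `num` consecutive file numbers for an upcoming ingestion into
// `cfd`. It records the current file number in the pending-outputs list so
// that background cleanup will not delete the files about to be linked in, and
// persists the bumped file number through a dummy VersionEdit so that a crash
// followed by recovery cannot hand out the same numbers again.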
Status DBImpl::ReserveFileNumbersBeforeIngestion(
    ColumnFamilyData* cfd, uint64_t num,
    std::list<uint64_t>::iterator* pending_output_elem,
    uint64_t* next_file_number) {
  Status s;
  SuperVersionContext dummy_sv_ctx(true /* create_superversion */);
  assert(nullptr != pending_output_elem);
  assert(nullptr != next_file_number);
  InstrumentedMutexLock l(&mutex_);
  if (error_handler_.IsDBStopped()) {
    // Do not ingest files when there is a bg_error
    return error_handler_.GetBGError();
  }
  *pending_output_elem = CaptureCurrentFileNumberInPendingOutputs();
  *next_file_number = versions_->FetchAddFileNumber(static_cast<uint64_t>(num));
  auto cf_options = cfd->GetLatestMutableCFOptions();
  VersionEdit dummy_edit;
  // If a crash happens after a hard link has been established, the Recover
  // function may reuse a file number that has already been assigned to the
  // internal file, which would overwrite the external file. To protect the
  // external file, we must make sure the file number is never reused.
  s = versions_->LogAndApply(cfd, *cf_options, &dummy_edit, &mutex_,
                             directories_.GetDbDir());
  if (s.ok()) {
    InstallSuperVersionAndScheduleWork(cfd, &dummy_sv_ctx, *cf_options);
  }
  dummy_sv_ctx.Clean();
  return s;
}
#endif  // ROCKSDB_LITE

}  // namespace rocksdb