db_impl.h 51.9 KB
Newer Older
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
S
Siying Dong 已提交
2 3 4
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
5
//
J
jorlow@chromium.org 已提交
6 7 8
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
9
#pragma once
K
Kai Liu 已提交
10

H
Haobo Xu 已提交
11
#include <atomic>
12
#include <deque>
13
#include <functional>
14
#include <limits>
I
Igor Canadi 已提交
15
#include <list>
16
#include <map>
17
#include <queue>
18
#include <set>
I
Igor Canadi 已提交
19
#include <string>
20 21
#include <utility>
#include <vector>
K
kailiu 已提交
22

23
#include "db/column_family.h"
24
#include "db/compaction_job.h"
25
#include "db/dbformat.h"
26
#include "db/external_sst_file_ingestion_job.h"
27
#include "db/flush_job.h"
28 29
#include "db/flush_scheduler.h"
#include "db/internal_stats.h"
A
agiardullo 已提交
30 31
#include "db/log_writer.h"
#include "db/snapshot_impl.h"
32
#include "db/version_edit.h"
I
Igor Canadi 已提交
33
#include "db/wal_manager.h"
34 35
#include "db/write_controller.h"
#include "db/write_thread.h"
K
Kai Liu 已提交
36
#include "memtable_list.h"
37 38
#include "monitoring/instrumented_mutex.h"
#include "options/db_options.h"
K
Kai Liu 已提交
39
#include "port/port.h"
40 41 42
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
S
Siying Dong 已提交
43
#include "rocksdb/status.h"
44
#include "rocksdb/transaction_log.h"
45
#include "rocksdb/write_buffer_manager.h"
S
sdong 已提交
46
#include "table/scoped_arena_iterator.h"
47
#include "util/autovector.h"
I
Igor Canadi 已提交
48 49
#include "util/event_logger.h"
#include "util/hash.h"
50 51
#include "util/stop_watch.h"
#include "util/thread_local.h"
52

53
namespace rocksdb {
J
jorlow@chromium.org 已提交
54 55 56 57 58 59

class MemTable;
class TableCache;
class Version;
class VersionEdit;
class VersionSet;
60
class Arena;
A
agiardullo 已提交
61
class WriteCallback;
I
Igor Canadi 已提交
62
struct JobContext;
63
struct ExternalSstFileInfo;
64
struct MemTableInfo;
J
jorlow@chromium.org 已提交
65 66 67

class DBImpl : public DB {
 public:
I
Igor Canadi 已提交
68
  DBImpl(const DBOptions& options, const std::string& dbname);
J
jorlow@chromium.org 已提交
69 70 71
  virtual ~DBImpl();

  // Implementations of the DB interface
72 73
  using DB::Put;
  virtual Status Put(const WriteOptions& options,
74
                     ColumnFamilyHandle* column_family, const Slice& key,
I
Igor Sugak 已提交
75
                     const Slice& value) override;
76 77
  using DB::Merge;
  virtual Status Merge(const WriteOptions& options,
78
                       ColumnFamilyHandle* column_family, const Slice& key,
I
Igor Sugak 已提交
79
                       const Slice& value) override;
80 81
  using DB::Delete;
  virtual Status Delete(const WriteOptions& options,
I
Igor Sugak 已提交
82 83
                        ColumnFamilyHandle* column_family,
                        const Slice& key) override;
A
Andres Noetzli 已提交
84 85 86 87
  using DB::SingleDelete;
  virtual Status SingleDelete(const WriteOptions& options,
                              ColumnFamilyHandle* column_family,
                              const Slice& key) override;
88
  using DB::Write;
I
Igor Sugak 已提交
89 90
  virtual Status Write(const WriteOptions& options,
                       WriteBatch* updates) override;
A
agiardullo 已提交
91

92
  using DB::Get;
J
jorlow@chromium.org 已提交
93
  virtual Status Get(const ReadOptions& options,
94
                     ColumnFamilyHandle* column_family, const Slice& key,
M
Maysam Yabandeh 已提交
95
                     PinnableSlice* value) override;
96 97 98
  using DB::MultiGet;
  virtual std::vector<Status> MultiGet(
      const ReadOptions& options,
99
      const std::vector<ColumnFamilyHandle*>& column_family,
I
Igor Sugak 已提交
100 101
      const std::vector<Slice>& keys,
      std::vector<std::string>* values) override;
102

Y
Yi Wu 已提交
103
  virtual Status CreateColumnFamily(const ColumnFamilyOptions& cf_options,
104
                                    const std::string& column_family,
I
Igor Sugak 已提交
105
                                    ColumnFamilyHandle** handle) override;
Y
Yi Wu 已提交
106 107 108 109 110 111 112
  virtual Status CreateColumnFamilies(
      const ColumnFamilyOptions& cf_options,
      const std::vector<std::string>& column_family_names,
      std::vector<ColumnFamilyHandle*>* handles) override;
  virtual Status CreateColumnFamilies(
      const std::vector<ColumnFamilyDescriptor>& column_families,
      std::vector<ColumnFamilyHandle*>* handles) override;
I
Igor Sugak 已提交
113
  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override;
Y
Yi Wu 已提交
114 115
  virtual Status DropColumnFamilies(
      const std::vector<ColumnFamilyHandle*>& column_families) override;
116

117 118 119 120
  // Returns false if key doesn't exist in the database and true if it may.
  // If value_found is not passed in as null, then return the value if found in
  // memory. On return, if value was found, then value_found will be set to true
  // , otherwise false.
121
  using DB::KeyMayExist;
122
  virtual bool KeyMayExist(const ReadOptions& options,
123
                           ColumnFamilyHandle* column_family, const Slice& key,
I
Igor Sugak 已提交
124 125
                           std::string* value,
                           bool* value_found = nullptr) override;
126 127
  using DB::NewIterator;
  virtual Iterator* NewIterator(const ReadOptions& options,
I
Igor Sugak 已提交
128
                                ColumnFamilyHandle* column_family) override;
129 130
  virtual Status NewIterators(
      const ReadOptions& options,
I
Igor Canadi 已提交
131
      const std::vector<ColumnFamilyHandle*>& column_families,
I
Igor Sugak 已提交
132 133 134
      std::vector<Iterator*>* iterators) override;
  virtual const Snapshot* GetSnapshot() override;
  virtual void ReleaseSnapshot(const Snapshot* snapshot) override;
135
  using DB::GetProperty;
136
  virtual bool GetProperty(ColumnFamilyHandle* column_family,
I
Igor Sugak 已提交
137
                           const Slice& property, std::string* value) override;
138
  using DB::GetMapProperty;
139 140 141
  virtual bool GetMapProperty(
      ColumnFamilyHandle* column_family, const Slice& property,
      std::map<std::string, std::string>* value) override;
142 143 144
  using DB::GetIntProperty;
  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
                              const Slice& property, uint64_t* value) override;
145 146 147
  using DB::GetAggregatedIntProperty;
  virtual bool GetAggregatedIntProperty(const Slice& property,
                                        uint64_t* aggregated_value) override;
148
  using DB::GetApproximateSizes;
149
  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
150
                                   const Range* range, int n, uint64_t* sizes,
151
                                   uint8_t include_flags
152
                                   = INCLUDE_FILES) override;
153 154 155 156 157
  using DB::GetApproximateMemTableStats;
  virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
                                           const Range& range,
                                           uint64_t* const count,
                                           uint64_t* const size) override;
158
  using DB::CompactRange;
159 160 161
  virtual Status CompactRange(const CompactRangeOptions& options,
                              ColumnFamilyHandle* column_family,
                              const Slice* begin, const Slice* end) override;
162

163
  using DB::CompactFiles;
I
Igor Sugak 已提交
164 165 166 167 168
  virtual Status CompactFiles(const CompactionOptions& compact_options,
                              ColumnFamilyHandle* column_family,
                              const std::vector<std::string>& input_file_names,
                              const int output_level,
                              const int output_path_id = -1) override;
169

170 171 172
  virtual Status PauseBackgroundWork() override;
  virtual Status ContinueBackgroundWork() override;

173 174 175
  virtual Status EnableAutoCompaction(
      const std::vector<ColumnFamilyHandle*>& column_family_handles) override;

176
  using DB::SetOptions;
I
Igor Sugak 已提交
177 178 179
  Status SetOptions(
      ColumnFamilyHandle* column_family,
      const std::unordered_map<std::string, std::string>& options_map) override;
180

181 182 183
  virtual Status SetDBOptions(
      const std::unordered_map<std::string, std::string>& options_map) override;

184
  using DB::NumberLevels;
I
Igor Sugak 已提交
185
  virtual int NumberLevels(ColumnFamilyHandle* column_family) override;
186
  using DB::MaxMemCompactionLevel;
I
Igor Sugak 已提交
187
  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) override;
188
  using DB::Level0StopWriteTrigger;
I
Igor Sugak 已提交
189 190 191 192
  virtual int Level0StopWriteTrigger(
      ColumnFamilyHandle* column_family) override;
  virtual const std::string& GetName() const override;
  virtual Env* GetEnv() const override;
193
  using DB::GetOptions;
194
  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override;
195
  using DB::GetDBOptions;
196
  virtual DBOptions GetDBOptions() const override;
197 198
  using DB::Flush;
  virtual Status Flush(const FlushOptions& options,
I
Igor Sugak 已提交
199
                       ColumnFamilyHandle* column_family) override;
200
  virtual Status FlushWAL(bool sync) override;
201
  virtual Status SyncWAL() override;
I
Igor Canadi 已提交
202

I
Igor Sugak 已提交
203
  virtual SequenceNumber GetLatestSequenceNumber() const override;
I
Igor Canadi 已提交
204

A
Anirban Rahut 已提交
205 206
  bool HasActiveSnapshotLaterThanSN(SequenceNumber sn);

I
Igor Canadi 已提交
207
#ifndef ROCKSDB_LITE
S
Siying Dong 已提交
208 209
  using DB::ResetStats;
  virtual Status ResetStats() override;
I
Igor Sugak 已提交
210 211
  virtual Status DisableFileDeletions() override;
  virtual Status EnableFileDeletions(bool force) override;
212
  virtual int IsFileDeletionsEnabled() const;
I
Igor Canadi 已提交
213
  // All the returned filenames start with "/"
214
  virtual Status GetLiveFiles(std::vector<std::string>&,
215
                              uint64_t* manifest_file_size,
I
Igor Sugak 已提交
216 217
                              bool flush_memtable = true) override;
  virtual Status GetSortedWalFiles(VectorLogPtr& files) override;
I
Igor Canadi 已提交
218

219 220 221
  virtual Status GetUpdatesSince(
      SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter,
      const TransactionLogIterator::ReadOptions&
I
Igor Sugak 已提交
222 223
          read_options = TransactionLogIterator::ReadOptions()) override;
  virtual Status DeleteFile(std::string name) override;
224 225
  Status DeleteFilesInRange(ColumnFamilyHandle* column_family,
                            const Slice* begin, const Slice* end);
226

I
Igor Sugak 已提交
227 228
  virtual void GetLiveFilesMetaData(
      std::vector<LiveFileMetaData>* metadata) override;
229 230 231 232 233 234 235 236 237

  // Obtains the meta data of the specified column family of the DB.
  // Status::NotFound() will be returned if the current DB does not have
  // any column family match the specified name.
  // TODO(yhchiang): output parameter is placed in the end in this codebase.
  virtual void GetColumnFamilyMetaData(
      ColumnFamilyHandle* column_family,
      ColumnFamilyMetaData* metadata) override;

238
  Status SuggestCompactRange(ColumnFamilyHandle* column_family,
S
Siying Dong 已提交
239
                             const Slice* begin, const Slice* end) override;
240

S
Siying Dong 已提交
241 242
  Status PromoteL0(ColumnFamilyHandle* column_family,
                   int target_level) override;
243

A
agiardullo 已提交
244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263
  // Similar to Write() but will call the callback once on the single write
  // thread to determine whether it is safe to perform the write.
  virtual Status WriteWithCallback(const WriteOptions& write_options,
                                   WriteBatch* my_batch,
                                   WriteCallback* callback);

  // Returns the sequence number that is guaranteed to be smaller than or equal
  // to the sequence number of any key that could be inserted into the current
  // memtables. It can then be assumed that any write with a larger(or equal)
  // sequence number will be present in this memtable or a later memtable.
  //
  // If the earliest sequence number could not be determined,
  // kMaxSequenceNumber will be returned.
  //
  // If include_history=true, will also search Memtables in MemTableList
  // History.
  SequenceNumber GetEarliestMemTableSequenceNumber(SuperVersion* sv,
                                                   bool include_history);

  // For a given key, check to see if there are any records for this key
264 265 266 267
  // in the memtables, including memtable history.  If cache_only is false,
  // SST files will also be checked.
  //
  // If a key is found, *found_record_for_key will be set to true and
268
  // *seq will be set to the stored sequence number for the latest
269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284
  // operation on this key or kMaxSequenceNumber if unknown.
  // If no key is found, *found_record_for_key will be set to false.
  //
  // Note: If cache_only=false, it is possible for *seq to be set to 0 if
  // the sequence number has been cleared from the record.  If the caller is
  // holding an active db snapshot, we know the missing sequence must be less
  // than the snapshot's sequence number (sequence numbers are only cleared
  // when there are no earlier active snapshots).
  //
  // If NotFound is returned and found_record_for_key is set to false, then no
  // record for this key was found.  If the caller is holding an active db
  // snapshot, we know that no key could have existed after this snapshot
  // (since we do not compact keys that have an earlier snapshot).
  //
  // Returns OK or NotFound on success,
  // other status on unexpected error.
285
  // TODO(andrewkr): this API need to be aware of range deletion operations
286 287 288
  Status GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
                                 bool cache_only, SequenceNumber* seq,
                                 bool* found_record_for_key);
A
agiardullo 已提交
289

290 291 292 293 294
  using DB::IngestExternalFile;
  virtual Status IngestExternalFile(
      ColumnFamilyHandle* column_family,
      const std::vector<std::string>& external_files,
      const IngestExternalFileOptions& ingestion_options) override;
295

A
Aaron G 已提交
296 297
  virtual Status VerifyChecksum() override;

I
Igor Canadi 已提交
298
#endif  // ROCKSDB_LITE
299

300 301 302 303 304 305
  // Similar to GetSnapshot(), but also lets the db know that this snapshot
  // will be used for transaction write-conflict checking.  The DB can then
  // make sure not to compact any keys that would prevent a write-conflict from
  // being detected.
  const Snapshot* GetSnapshotForWriteConflictBoundary();

I
Igor Canadi 已提交
306 307 308 309
  // checks if all live files exist on file system and that their file sizes
  // match to our in-memory records
  virtual Status CheckConsistency();

310
  virtual Status GetDbIdentity(std::string& identity) const override;
311

I
Igor Canadi 已提交
312
  Status RunManualCompaction(ColumnFamilyData* cfd, int input_level,
313
                             int output_level, uint32_t output_path_id,
314
                             const Slice* begin, const Slice* end,
315
                             bool exclusive,
316
                             bool disallow_trivial_move = false);
317

318 319 320
  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
S
sdong 已提交
321
  InternalIterator* NewInternalIterator(
A
Andrew Kryczka 已提交
322 323
      Arena* arena, RangeDelAggregator* range_del_agg,
      ColumnFamilyHandle* column_family = nullptr);
324

325
#ifndef NDEBUG
J
jorlow@chromium.org 已提交
326
  // Extra methods (for testing) that are not in the public DB interface
I
Igor Canadi 已提交
327
  // Implemented in db_impl_debug.cc
J
jorlow@chromium.org 已提交
328

329
  // Compact any files in the named level that overlap [*begin, *end]
330
  Status TEST_CompactRange(int level, const Slice* begin, const Slice* end,
331 332
                           ColumnFamilyHandle* column_family = nullptr,
                           bool disallow_trivial_move = false);
J
jorlow@chromium.org 已提交
333

334
  void TEST_HandleWALFull();
335 336 337 338 339 340 341 342 343

  // Test hook: true when the oldest alive WAL could not be flushed away.
  bool TEST_UnableToFlushOldestLog() { return unable_to_flush_oldest_log_; }

  // Test hook: whether the oldest alive log file is marked for flushing.
  bool TEST_IsLogGettingFlushed() {
    return alive_log_files_.begin()->getting_flushed;
  }

344
  // Force current memtable contents to be flushed.
345 346
  Status TEST_FlushMemTable(bool wait = true,
                            ColumnFamilyHandle* cfh = nullptr);
J
jorlow@chromium.org 已提交
347

348
  // Wait for memtable compaction
349
  Status TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family = nullptr);
350 351 352 353

  // Wait for any compaction
  Status TEST_WaitForCompact();

354 355
  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
356 357
  int64_t TEST_MaxNextLevelOverlappingBytes(ColumnFamilyHandle* column_family =
                                                nullptr);
358

A
Abhishek Kona 已提交
359 360
  // Return the current manifest file no.
  uint64_t TEST_Current_Manifest_FileNo();
361

362
  // get total level0 file size. Only for testing.
363
  uint64_t TEST_GetLevel0TotalSize();
364

365 366
  void TEST_GetFilesMetaData(ColumnFamilyHandle* column_family,
                             std::vector<std::vector<FileMetaData>>* metadata);
367

368 369 370 371 372 373 374 375 376 377
  void TEST_LockMutex();

  void TEST_UnlockMutex();

  // REQUIRES: mutex locked
  void* TEST_BeginWrite();

  // REQUIRES: mutex locked
  // pass the pointer that you got from TEST_BeginWrite()
  void TEST_EndWrite(void* w);
378

379
  uint64_t TEST_MaxTotalInMemoryState() const {
380 381
    return max_total_in_memory_state_;
  }
382

383 384
  size_t TEST_LogsToFreeSize();

385 386
  uint64_t TEST_LogfileNumber();

S
sdong 已提交
387 388
  uint64_t TEST_total_log_size() const { return total_log_size_; }

389 390 391 392
  // Returns column family name to ImmutableCFOptions map.
  Status TEST_GetAllImmutableCFOptions(
      std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map);

H
hyunwoo 已提交
393
  // Return the latest MutableCFOptions of a column family
A
Aaron Gao 已提交
394
  Status TEST_GetLatestMutableCFOptions(ColumnFamilyHandle* column_family,
Y
yizhu.sun 已提交
395
                                        MutableCFOptions* mutable_cf_options);
A
Aaron Gao 已提交
396

397 398
  Cache* TEST_table_cache() { return table_cache_.get(); }

399
  WriteController& TEST_write_controler() { return write_controller_; }
400

401 402 403
  uint64_t TEST_FindMinLogContainingOutstandingPrep();
  uint64_t TEST_FindMinPrepLogReferencedByMemTable();

404
  int TEST_BGCompactionsAllowed() const;
405
  int TEST_BGFlushesAllowed() const;
406

407
#endif  // NDEBUG
I
Igor Canadi 已提交
408

409 410 411 412 413 414 415 416 417 418 419
  // Limits on how many background jobs may be scheduled concurrently,
  // as computed by GetBGJobLimits() below.
  struct BGJobLimits {
    int max_flushes;      // maximum background flush jobs
    int max_compactions;  // maximum background compaction jobs
  };
  // Returns maximum background flushes and compactions allowed to be scheduled
  BGJobLimits GetBGJobLimits() const;
  // Need a static version that can be called during SanitizeOptions().
  static BGJobLimits GetBGJobLimits(int max_background_flushes,
                                    int max_background_compactions,
                                    int max_background_jobs,
                                    bool parallelize_compactions);
420

421 422 423 424
  // move logs pending closing from job_context to the DB queue and
  // schedule a purge
  void ScheduleBgLogWriterClose(JobContext* job_context);

425 426
  uint64_t MinLogNumberToKeep();

I
Igor Canadi 已提交
427
  // Returns the list of live files in 'live' and the list
K
kailiu 已提交
428
  // of all files in the filesystem in 'candidate_files'.
I
Igor Canadi 已提交
429
  // If force == false and the last call was less than
430
  // db_options_.delete_obsolete_files_period_micros microseconds ago,
I
Igor Canadi 已提交
431 432
  // it will not fill up the job_context
  void FindObsoleteFiles(JobContext* job_context, bool force,
I
Igor Canadi 已提交
433 434 435 436 437 438
                         bool no_full_scan = false);

  // Diffs the files listed in filenames and those that do not
  // belong to live files are possibly removed. Also, removes all the
  // files in sst_delete_files and log_delete_files.
  // It is not necessary to hold the mutex when invoking this method.
439 440 441 442
  void PurgeObsoleteFiles(const JobContext& background_contet,
                          bool schedule_only = false);

  void SchedulePurge();
I
Igor Canadi 已提交
443

I
Igor Sugak 已提交
444
  ColumnFamilyHandle* DefaultColumnFamily() const override;
445

446 447
  const SnapshotList& snapshots() const { return snapshots_; }

448 449 450 451
  // Read-only access to the DB options fixed at open time.
  const ImmutableDBOptions& immutable_db_options() const {
    return immutable_db_options_;
  }

452
  void CancelAllBackgroundWork(bool wait);
453

A
agiardullo 已提交
454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478
  // Find Super version and reference it. Based on options, it might return
  // the thread local cached one.
  // Call ReturnAndCleanupSuperVersion() when it is no longer needed.
  SuperVersion* GetAndRefSuperVersion(ColumnFamilyData* cfd);

  // Similar to the previous function but looks up based on a column family id.
  // nullptr will be returned if this column family no longer exists.
  // REQUIRED: this function should only be called on the write thread or if the
  // mutex is held.
  SuperVersion* GetAndRefSuperVersion(uint32_t column_family_id);

  // Un-reference the super version and return it to thread local cache if
  // needed. If it is the last reference of the super version. Clean it up
  // after un-referencing it.
  void ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd, SuperVersion* sv);

  // Similar to the previous function but looks up based on a column family id.
  // nullptr will be returned if this column family no longer exists.
  // REQUIRED: this function should only be called on the write thread.
  void ReturnAndCleanupSuperVersion(uint32_t colun_family_id, SuperVersion* sv);

  // REQUIRED: this function should only be called on the write thread or if the
  // mutex is held.  Return value only valid until next call to this function or
  // mutex is released.
  ColumnFamilyHandle* GetColumnFamilyHandle(uint32_t column_family_id);
A
agiardullo 已提交
479

A
Anirban Rahut 已提交
480 481 482
  // Same as above, should called without mutex held and not on write thread.
  ColumnFamilyHandle* GetColumnFamilyHandleUnlocked(uint32_t column_family_id);

483 484 485 486 487 488 489 490 491 492 493 494 495 496
  // Returns the number of flush jobs currently executing.
  // REQUIREMENT: mutex_ must be held when calling this function.
  int num_running_flushes() {
    mutex_.AssertHeld();
    return num_running_flushes_;
  }

  // Returns the number of compaction jobs currently executing.
  // REQUIREMENT: mutex_ must be held when calling this function.
  int num_running_compactions() {
    mutex_.AssertHeld();
    return num_running_compactions_;
  }

497 498
  const WriteController& write_controller() { return write_controller_; }

S
Siying Dong 已提交
499 500 501 502 503 504
  InternalIterator* NewInternalIterator(const ReadOptions&,
                                        ColumnFamilyData* cfd,
                                        SuperVersion* super_version,
                                        Arena* arena,
                                        RangeDelAggregator* range_del_agg);

505 506 507 508 509 510 511 512 513 514 515 516 517 518
  // Hollow shell of a prepared transaction recovered from the WAL during
  // DB open.  These are handed to TransactionDB so that locks can be
  // reacquired before writing resumes.  Owns its WriteBatch.
  struct RecoveredTransaction {
    uint64_t log_number_;  // WAL file containing the prepare section
    std::string name_;     // transaction name (2PC identifier)
    WriteBatch* batch_;    // owned; released by the destructor
    explicit RecoveredTransaction(const uint64_t log, const std::string& name,
                                  WriteBatch* batch)
        : log_number_(log), name_(name), batch_(batch) {}

    // batch_ is an owning raw pointer; an implicitly generated copy would
    // lead to a double delete, so copying is disallowed (Rule of Five).
    RecoveredTransaction(const RecoveredTransaction&) = delete;
    RecoveredTransaction& operator=(const RecoveredTransaction&) = delete;

    ~RecoveredTransaction() { delete batch_; }
  };

519
  bool allow_2pc() const { return immutable_db_options_.allow_2pc; }
520

R
Reid Horuff 已提交
521 522 523 524 525
  std::unordered_map<std::string, RecoveredTransaction*>
  recovered_transactions() {
    return recovered_transactions_;
  }

526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549
  // Look up a recovered transaction by name; nullptr when not registered.
  RecoveredTransaction* GetRecoveredTransaction(const std::string& name) {
    const auto it = recovered_transactions_.find(name);
    return (it == recovered_transactions_.end()) ? nullptr : it->second;
  }

  // Register a transaction recovered from WAL file `log`.  Takes ownership
  // of `batch` and pins the log (via MarkLogAsContainingPrepSection) so it
  // is not freed before the transaction is resolved.
  void InsertRecoveredTransaction(const uint64_t log, const std::string& name,
                                  WriteBatch* batch) {
    // Names must be unique: silently overwriting the map slot would leak
    // the previously registered RecoveredTransaction.
    assert(recovered_transactions_.count(name) == 0);
    recovered_transactions_[name] = new RecoveredTransaction(log, name, batch);
    MarkLogAsContainingPrepSection(log);
  }

  void DeleteRecoveredTransaction(const std::string& name) {
    auto it = recovered_transactions_.find(name);
    assert(it != recovered_transactions_.end());
    auto* trx = it->second;
    recovered_transactions_.erase(it);
    MarkLogAsHavingPrepSectionFlushed(trx->log_number_);
    delete trx;
  }

R
Reid Horuff 已提交
550 551 552 553 554 555 556 557
  // Destroy every registered recovered transaction and clear the registry.
  void DeleteAllRecoveredTransactions() {
    for (auto& entry : recovered_transactions_) {
      delete entry.second;
    }
    recovered_transactions_.clear();
  }

558 559
  void MarkLogAsHavingPrepSectionFlushed(uint64_t log);
  void MarkLogAsContainingPrepSection(uint64_t log);
560 561 562
  // Queue a retired WAL writer to be freed later by the purge path.
  void AddToLogsToFreeQueue(log::Writer* log_writer) {
    logs_to_free_queue_.push_back(log_writer);
  }
S
Siying Dong 已提交
563
  InstrumentedMutex* mutex() { return &mutex_; }
564

565 566
  Status NewDB();

567
 protected:
H
heyongqiang 已提交
568 569
  Env* const env_;
  const std::string dbname_;
570
  unique_ptr<VersionSet> versions_;
571
  const DBOptions initial_db_options_;
572 573
  const ImmutableDBOptions immutable_db_options_;
  MutableDBOptions mutable_db_options_;
L
Lei Jin 已提交
574
  Statistics* stats_;
575 576
  std::unordered_map<std::string, RecoveredTransaction*>
      recovered_transactions_;
H
heyongqiang 已提交
577

578
  // Except in DB::Open(), WriteOptionsFile can only be called when:
Y
Yi Wu 已提交
579 580 581 582
  // Persist options to options file.
  // If need_mutex_lock = false, the method will lock DB mutex.
  // If need_enter_write_thread = false, the method will enter write thread.
  Status WriteOptionsFile(bool need_mutex_lock, bool need_enter_write_thread);
583 584 585 586

  // The following two functions can only be called when:
  // 1. WriteThread::Writer::EnterUnbatched() is used.
  // 2. db_mutex is NOT held
587 588 589
  Status RenameTempFileToOptionsFile(const std::string& file_name);
  Status DeleteObsoleteOptionsFiles();

Y
Yi Wu 已提交
590 591 592 593
  void NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
                          const MutableCFOptions& mutable_cf_options,
                          int job_id, TableProperties prop);

594
  void NotifyOnFlushCompleted(ColumnFamilyData* cfd, FileMetaData* file_meta,
595
                              const MutableCFOptions& mutable_cf_options,
596
                              int job_id, TableProperties prop);
597

O
Ori Bernstein 已提交
598
  void NotifyOnCompactionCompleted(ColumnFamilyData* cfd,
599 600
                                   Compaction *c, const Status &st,
                                   const CompactionJobStats& job_stats,
601
                                   int job_id);
W
Wanning Jiang 已提交
602
  void NotifyOnMemTableSealed(ColumnFamilyData* cfd,
603
                              const MemTableInfo& mem_table_info);
O
Ori Bernstein 已提交
604

605
#ifndef ROCKSDB_LITE
606 607
  void NotifyOnExternalFileIngested(
      ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job);
608
#endif  // !ROCKSDB_LITE
609

Y
Yueh-Hsuan Chiang 已提交
610 611 612 613 614 615
  void NewThreadStatusCfInfo(ColumnFamilyData* cfd) const;

  void EraseThreadStatusCfInfo(ColumnFamilyData* cfd) const;

  void EraseThreadStatusDbInfo() const;

A
agiardullo 已提交
616
  Status WriteImpl(const WriteOptions& options, WriteBatch* updates,
617 618
                   WriteCallback* callback = nullptr,
                   uint64_t* log_used = nullptr, uint64_t log_ref = 0,
619
                   bool disable_memtable = false, uint64_t* seq_used = nullptr);
620

621 622 623
  Status PipelinedWriteImpl(const WriteOptions& options, WriteBatch* updates,
                            WriteCallback* callback = nullptr,
                            uint64_t* log_used = nullptr, uint64_t log_ref = 0,
624 625
                            bool disable_memtable = false,
                            uint64_t* seq_used = nullptr);
626

627 628
  Status WriteImplWALOnly(const WriteOptions& options, WriteBatch* updates,
                          WriteCallback* callback = nullptr,
629 630
                          uint64_t* log_used = nullptr, uint64_t log_ref = 0,
                          uint64_t* seq_used = nullptr);
631

632 633
  uint64_t FindMinLogContainingOutstandingPrep();
  uint64_t FindMinPrepLogReferencedByMemTable();
A
agiardullo 已提交
634

J
jorlow@chromium.org 已提交
635 636
 private:
  friend class DB;
637
  friend class InternalStats;
638 639 640
  friend class PessimisticTransaction;
  friend class WriteCommittedTxn;
  friend class WritePreparedTxn;
I
Igor Canadi 已提交
641
#ifndef ROCKSDB_LITE
L
Lei Jin 已提交
642
  friend class ForwardIterator;
I
Igor Canadi 已提交
643
#endif
644
  friend struct SuperVersion;
L
Lei Jin 已提交
645
  friend class CompactedDBImpl;
A
agiardullo 已提交
646 647 648
#ifndef NDEBUG
  friend class XFTransactionWriteHandler;
#endif
649
  struct CompactionState;
650

651 652 653 654 655 656 657 658 659 660 661 662 663
  struct WriteContext {
    autovector<SuperVersion*> superversions_to_free_;
    autovector<MemTable*> memtables_to_free_;

    ~WriteContext() {
      for (auto& sv : superversions_to_free_) {
        delete sv;
      }
      for (auto& m : memtables_to_free_) {
        delete m;
      }
    }
  };
J
jorlow@chromium.org 已提交
664

665
  struct PrepickedCompaction;
666 667
  struct PurgeFileInfo;

J
jorlow@chromium.org 已提交
668 669 670
  // Recover the descriptor from persistent storage.  May do a significant
  // amount of work to recover recently logged updates.  Any changes to
  // be made to the descriptor are added to *edit.
671
  Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
672 673
                 bool read_only = false, bool error_if_log_file_exist = false,
                 bool error_if_data_exists_in_logs = false);
J
jorlow@chromium.org 已提交
674 675 676

  void MaybeIgnoreError(Status* s) const;

677 678
  const Status CreateArchivalDirectory();

Y
Yi Wu 已提交
679 680 681 682 683 684
  Status CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
                                const std::string& cf_name,
                                ColumnFamilyHandle** handle);

  Status DropColumnFamilyImpl(ColumnFamilyHandle* column_family);

J
jorlow@chromium.org 已提交
685 686
  // Delete any unneeded files and stale in-memory entries.
  void DeleteObsoleteFiles();
687 688 689 690
  // Delete obsolete files and log status and information of file deletion
  void DeleteObsoleteFileImpl(Status file_deletion_status, int job_id,
                              const std::string& fname, FileType type,
                              uint64_t number, uint32_t path_id);
J
jorlow@chromium.org 已提交
691

I
Igor Canadi 已提交
692 693
  // Background process needs to call
  //     auto x = CaptureCurrentFileNumberInPendingOutputs()
694
  //     auto file_num = versions_->NewFileNumber();
I
Igor Canadi 已提交
695 696
  //     <do something>
  //     ReleaseFileNumberFromPendingOutputs(x)
697 698
  // This will protect any file with number `file_num` or greater from being
  // deleted while <do something> is running.
I
Igor Canadi 已提交
699 700 701 702 703 704 705 706 707 708 709 710
  // -----------
  // This function will capture current file number and append it to
  // pending_outputs_. This will prevent any background process to delete any
  // file created after this point.
  std::list<uint64_t>::iterator CaptureCurrentFileNumberInPendingOutputs();
  // This function should be called with the result of
  // CaptureCurrentFileNumberInPendingOutputs(). It then marks that any file
  // created between the calls CaptureCurrentFileNumberInPendingOutputs() and
  // ReleaseFileNumberFromPendingOutputs() can now be deleted (if it's not live
  // and blocked by any other pending_outputs_ calls)
  void ReleaseFileNumberFromPendingOutputs(std::list<uint64_t>::iterator v);

711 712
  Status SyncClosedLogs(JobContext* job_context);

713
  // Flush the in-memory write buffer to storage.  Switches to a new
J
jorlow@chromium.org 已提交
714
  // log-file/memtable and writes a new descriptor iff successful.
I
Igor Canadi 已提交
715 716 717 718
  Status FlushMemTableToOutputFile(ColumnFamilyData* cfd,
                                   const MutableCFOptions& mutable_cf_options,
                                   bool* madeProgress, JobContext* job_context,
                                   LogBuffer* log_buffer);
J
jorlow@chromium.org 已提交
719

S
Stanislau Hlebik 已提交
720 721
  // REQUIRES: log_numbers are sorted in ascending order
  Status RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
722
                         SequenceNumber* next_sequence, bool read_only);
J
jorlow@chromium.org 已提交
723

724
  // The following two methods are used to flush a memtable to
Y
Yueh-Hsuan Chiang 已提交
725
  // storage. The first one is used at database RecoveryTime (when the
726 727 728
  // database is opened) and is heavyweight because it holds the mutex
  // for the entire period. The second method WriteLevel0Table supports
  // concurrent flush memtables to storage.
729 730
  Status WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
                                     MemTable* mem, VersionEdit* edit);
S
sdong 已提交
731 732 733

  // num_bytes: for slowdown case, delay time is calculated based on
  //            `num_bytes` going through.
M
Maysam Yabandeh 已提交
734
  Status DelayWrite(uint64_t num_bytes, const WriteOptions& write_options);
735

736 737 738
  Status ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
                                      WriteBatch* my_batch);

I
Igor Canadi 已提交
739
  Status ScheduleFlushes(WriteContext* context);
740

I
Igor Canadi 已提交
741
  Status SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context);
S
Stanislau Hlebik 已提交
742

H
heyongqiang 已提交
743
  // Force current memtable contents to be flushed.
744 745
  Status FlushMemTable(ColumnFamilyData* cfd, const FlushOptions& options,
                       bool writes_stopped = false);
H
heyongqiang 已提交
746

747
  // Wait for memtable flushed
748
  Status WaitForFlushMemTable(ColumnFamilyData* cfd);
H
heyongqiang 已提交
749

750 751 752 753 754 755 756
  // REQUIRES: mutex locked
  Status HandleWALFull(WriteContext* write_context);

  // REQUIRES: mutex locked
  Status HandleWriteBufferFull(WriteContext* write_context);

  // REQUIRES: mutex locked
757 758
  Status PreprocessWrite(const WriteOptions& write_options, bool* need_log_sync,
                         WriteContext* write_context);
759

760 761 762 763 764 765
  WriteBatch* MergeBatch(const WriteThread::WriteGroup& write_group,
                         WriteBatch* tmp_batch, size_t* write_with_wal);

  Status WriteToWAL(const WriteBatch& merged_batch, log::Writer* log_writer,
                    uint64_t* log_used, uint64_t* log_size);

766
  Status WriteToWAL(const WriteThread::WriteGroup& write_group,
767 768 769 770 771 772 773
                    log::Writer* log_writer, uint64_t* log_used,
                    bool need_log_sync, bool need_log_dir_sync,
                    SequenceNumber sequence);

  Status ConcurrentWriteToWAL(const WriteThread::WriteGroup& write_group,
                              uint64_t* log_used, SequenceNumber* last_sequence,
                              int total_count);
774

775
  // Used by WriteImpl to update bg_error_ if paranoid check is enabled.
776
  void WriteCallbackStatusCheck(const Status& status);
777 778 779

  // Used by WriteImpl to update bg_error_ in case of memtable insert error.
  void MemTableInsertStatusCheck(const Status& memtable_insert_status);
Y
Yi Wu 已提交
780

I
Igor Canadi 已提交
781
#ifndef ROCKSDB_LITE
782

783 784 785 786 787 788
  Status CompactFilesImpl(const CompactionOptions& compact_options,
                          ColumnFamilyData* cfd, Version* version,
                          const std::vector<std::string>& input_file_names,
                          const int output_level, int output_path_id,
                          JobContext* job_context, LogBuffer* log_buffer);

789 790 791
  // Wait for current IngestExternalFile() calls to finish.
  // REQUIRES: mutex_ held
  void WaitForIngestFile();
792

793
#else
794
  // IngestExternalFile is not supported in ROCKSDB_LITE so this function
795
  // will be no-op
796
  void WaitForIngestFile() {}
I
Igor Canadi 已提交
797
#endif  // ROCKSDB_LITE
798 799 800

  ColumnFamilyData* GetColumnFamilyDataByName(const std::string& cf_name);

801
  void MaybeScheduleFlushOrCompaction();
802 803
  void SchedulePendingFlush(ColumnFamilyData* cfd);
  void SchedulePendingCompaction(ColumnFamilyData* cfd);
804 805
  void SchedulePendingPurge(std::string fname, FileType type, uint64_t number,
                            uint32_t path_id, int job_id);
806
  static void BGWorkCompaction(void* arg);
807 808 809
  // Runs a pre-chosen universal compaction involving bottom level in a
  // separate, bottom-pri thread pool.
  static void BGWorkBottomCompaction(void* arg);
810
  static void BGWorkFlush(void* db);
811
  static void BGWorkPurge(void* arg);
812
  static void UnscheduleCallback(void* arg);
813 814
  void BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
                                Env::Priority bg_thread_pri);
815
  void BackgroundCallFlush();
816
  void BackgroundCallPurge();
I
Igor Canadi 已提交
817
  Status BackgroundCompaction(bool* madeProgress, JobContext* job_context,
818 819
                              LogBuffer* log_buffer,
                              PrepickedCompaction* prepicked_compaction);
I
Igor Canadi 已提交
820
  Status BackgroundFlush(bool* madeProgress, JobContext* job_context,
H
Haobo Xu 已提交
821
                         LogBuffer* log_buffer);
J
jorlow@chromium.org 已提交
822

823 824
  void PrintStatistics();

825
  // dump rocksdb.stats to LOG
826 827
  void MaybeDumpStats();

828 829
  // Return the minimum empty level that could hold the total data in the
  // input level. Return the input level, if such level could not be found.
830 831
  int FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
      const MutableCFOptions& mutable_cf_options, int level);
832

833 834 835
  // Move the files in the input level to the target level.
  // If target_level < 0, automatically calculate the minimum level that could
  // hold the data set.
I
Igor Canadi 已提交
836
  Status ReFitLevel(ColumnFamilyData* cfd, int level, int target_level = -1);
837

838 839 840 841 842 843
  // helper functions for adding and removing from flush & compaction queues
  void AddToCompactionQueue(ColumnFamilyData* cfd);
  ColumnFamilyData* PopFirstFromCompactionQueue();
  void AddToFlushQueue(ColumnFamilyData* cfd);
  ColumnFamilyData* PopFirstFromFlushQueue();

844 845 846
  // helper function to call after some of the logs_ were synced
  void MarkLogsSynced(uint64_t up_to, bool synced_dir, const Status& status);

847 848
  const Snapshot* GetSnapshotImpl(bool is_write_conflict_boundary);

849 850
  uint64_t GetMaxTotalWalSize() const;

J
jorlow@chromium.org 已提交
851
  // table_cache_ provides its own synchronization
I
Igor Canadi 已提交
852
  std::shared_ptr<Cache> table_cache_;
J
jorlow@chromium.org 已提交
853

854
  // Lock over the persistent DB state.  Non-nullptr iff successfully acquired.
J
jorlow@chromium.org 已提交
855 856
  FileLock* db_lock_;

857 858 859 860
  // In addition to mutex_, log_write_mutex_ protected writes to logs_ and
  // logfile_number_. With concurrent_prepare it also protects alive_log_files_,
  // and log_empty_. Refer to the definition of each variable below for more
  // details.
861
  InstrumentedMutex log_write_mutex_;
J
jorlow@chromium.org 已提交
862
  // State below is protected by mutex_
863 864 865 866
  // With concurrent_prepare enabled, some of the variables that accessed during
  // WriteToWAL need different synchronization: log_empty_, alive_log_files_,
  // logs_, logfile_number_. Refer to the definition of each variable below for
  // more description.
867
  mutable InstrumentedMutex mutex_;
868

I
Igor Canadi 已提交
869
  std::atomic<bool> shutting_down_;
870 871
  // This condition variable is signaled on these conditions:
  // * whenever bg_compaction_scheduled_ goes down to 0
872
  // * if AnyManualCompaction, whenever a compaction finishes, even if it hasn't
873 874
  // made any progress
  // * whenever a compaction made any progress
875 876 877
  // * whenever bg_flush_scheduled_ or bg_purge_scheduled_ value decreases
  // (i.e. whenever a flush is done, even if it didn't make any progress)
  // * whenever there is an error in background purge, flush or compaction
878
  // * whenever num_running_ingest_file_ goes to 0.
879
  InstrumentedCondVar bg_cv_;
880 881 882 883
  // Writes are protected by locking both mutex_ and log_write_mutex_, and reads
  // must be under either mutex_ or log_write_mutex_. Since after ::Open,
  // logfile_number_ is currently updated only in write_thread_, it can be read
  // from the same write_thread_ without any locks.
884
  uint64_t logfile_number_;
S
Sage Weil 已提交
885 886
  std::deque<uint64_t>
      log_recycle_files;  // a list of log files that we can recycle
887
  bool log_dir_synced_;
888 889 890 891 892 893
  // Without concurrent_prepare, read and writes to log_empty_ are protected by
  // mutex_. Since it is currently updated/read only in write_thread_, it can be
  // accessed from the same write_thread_ without any locks. With
  // concurrent_prepare writes, where it can be updated in different threads,
  // read and writes are protected by log_write_mutex_ instead. This is to avoid
  // expensive mutex_ lock during WAL write, which updates log_empty_.
I
Igor Canadi 已提交
894
  bool log_empty_;
895
  ColumnFamilyHandleImpl* default_cf_handle_;
896
  InternalStats* default_cf_internal_stats_;
897
  unique_ptr<ColumnFamilyMemTablesImpl> column_family_memtables_;
I
Igor Canadi 已提交
898 899
  // Bookkeeping entry for one WAL file: its number, the number of bytes
  // written to it so far, and a flag recording whether it is being flushed.
  struct LogFileNumberSize {
    explicit LogFileNumberSize(uint64_t _number) : number(_number) {}
    // Accumulate `new_size` bytes into the running total for this log.
    void AddSize(uint64_t new_size) { size += new_size; }
    uint64_t number;
    uint64_t size = 0;
    bool getting_flushed = false;
  };
  struct LogWriterNumber {
907 908 909 910 911 912 913 914 915 916 917 918 919 920
    // pass ownership of _writer
    LogWriterNumber(uint64_t _number, log::Writer* _writer)
        : number(_number), writer(_writer) {}

    log::Writer* ReleaseWriter() {
      auto* w = writer;
      writer = nullptr;
      return w;
    }
    void ClearWriter() {
      delete writer;
      writer = nullptr;
    }

921
    uint64_t number;
922 923 924
    // Visual Studio doesn't support deque's member to be noncopyable because
    // of a unique_ptr as a member.
    log::Writer* writer;  // own
925 926
    // true for some prefix of logs_
    bool getting_synced = false;
I
Igor Canadi 已提交
927
  };
928 929 930 931 932 933
  // Without concurrent_prepare, read and writes to alive_log_files_ are
  // protected by mutex_. However since back() is never popped, and push_back()
  // is done only from write_thread_, the same thread can access the item
  // referred to by back() without mutex_. With concurrent_prepare_, writes
  // are protected by locking both mutex_ and log_write_mutex_, and reads must
  // be under either mutex_ or log_write_mutex_.
I
Igor Canadi 已提交
934
  std::deque<LogFileNumberSize> alive_log_files_;
935 936
  // Log files that aren't fully synced, and the current log file.
  // Synchronization:
937 938 939 940 941
  //  - push_back() is done from write_thread_ with locked mutex_ and
  //  log_write_mutex_
  //  - pop_front() is done from any thread with locked mutex_ and
  //  log_write_mutex_
  //  - reads are done with either locked mutex_ or log_write_mutex_
942
  //  - back() and items with getting_synced=true are not popped,
943 944 945 946 947
  //  - The same thread that sets getting_synced=true will reset it.
  //  - it follows that the object referred by back() can be safely read from
  //  the write_thread_ without using mutex
  //  - it follows that the items with getting_synced=true can be safely read
  //  from the same thread that has set getting_synced=true
948 949 950
  std::deque<LogWriterNumber> logs_;
  // Signaled when getting_synced becomes false for some of the logs_.
  InstrumentedCondVar log_sync_cv_;
A
Aaron Gao 已提交
951
  std::atomic<uint64_t> total_log_size_;
I
Igor Canadi 已提交
952 953 954
  // only used for dynamically adjusting max_total_wal_size. it is a sum of
  // [write_buffer_size * max_write_buffer_number] over all column families
  uint64_t max_total_in_memory_state_;
955 956 957
  // If true, we have only one (default) column family. We use this to optimize
  // some code-paths
  bool single_column_family_mode_;
958 959 960
  // If this is non-empty, we need to delete these log files in background
  // threads. Protected by db mutex.
  autovector<log::Writer*> logs_to_free_;
I
Igor Canadi 已提交
961

S
sdong 已提交
962 963
  bool is_snapshot_supported_;

964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991
  // Class to maintain directories for all database paths other than main one.
  class Directories {
   public:
    Status SetDirectories(Env* env, const std::string& dbname,
                          const std::string& wal_dir,
                          const std::vector<DbPath>& data_paths);

    Directory* GetDataDir(size_t path_id);

    // WAL directory if a dedicated one was set up, else the main DB directory.
    Directory* GetWalDir() {
      return wal_dir_ ? wal_dir_.get() : db_dir_.get();
    }

    Directory* GetDbDir() { return db_dir_.get(); }

   private:
    std::unique_ptr<Directory> db_dir_;
    std::vector<std::unique_ptr<Directory>> data_dirs_;
    std::unique_ptr<Directory> wal_dir_;

    Status CreateAndNewDirectory(Env* env, const std::string& dirname,
                                 std::unique_ptr<Directory>* directory) const;
  };

  Directories directories_;
992

993
  WriteBufferManager* write_buffer_manager_;
994

I
Igor Canadi 已提交
995
  WriteThread write_thread_;
996
  WriteBatch tmp_batch_;
997 998 999
  // The write thread when the writers have no memtable write. This will be used
  // in 2PC to batch the prepares separately from the serial commit.
  WriteThread nonmem_write_thread_;
1000

1001
  WriteController write_controller_;
S
sdong 已提交
1002

1003 1004
  unique_ptr<RateLimiter> low_pri_write_rate_limiter_;

S
sdong 已提交
1005 1006
  // Size of the last batch group. In slowdown mode, next write needs to
  // sleep if it uses up the quota.
1007 1008
  // Note: This is to protect memtable and compaction. If the batch only writes
  // to the WAL its size need not to be included in this.
S
sdong 已提交
1009 1010
  uint64_t last_batch_group_size_;

I
Igor Canadi 已提交
1011
  FlushScheduler flush_scheduler_;
1012

J
jorlow@chromium.org 已提交
1013 1014
  SnapshotList snapshots_;

I
Igor Canadi 已提交
1015 1016 1017 1018 1019 1020 1021 1022 1023 1024
  // For each background job, pending_outputs_ keeps the current file number at
  // the time that background job started.
  // FindObsoleteFiles()/PurgeObsoleteFiles() never deletes any file that has
  // number bigger than any of the file number in pending_outputs_. Since file
  // numbers grow monotonically, this also means that pending_outputs_ is always
  // sorted. After a background job is done executing, its file number is
  // deleted from pending_outputs_, which allows PurgeObsoleteFiles() to clean
  // it up.
  // State is protected with db mutex.
  std::list<uint64_t> pending_outputs_;
J
jorlow@chromium.org 已提交
1025

1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038
  // PurgeFileInfo holds the identity of one obsolete file that has been
  // queued in purge_queue_ for deletion by a background thread.
  struct PurgeFileInfo {
    std::string fname;  // name of the file to delete
    FileType type;
    uint64_t number;    // file number
    uint32_t path_id;
    int job_id;         // id of the job that scheduled the purge
    // `fn` is taken by value and moved into `fname` (the original copied it,
    // paying for an extra string allocation on every enqueue).
    PurgeFileInfo(std::string fn, FileType t, uint64_t num, uint32_t pid,
                  int jid)
        : fname(std::move(fn)),
          type(t),
          number(num),
          path_id(pid),
          job_id(jid) {}
  };

1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062
  // flush_queue_ and compaction_queue_ hold column families that we need to
  // flush and compact, respectively.
  // A column family is inserted into flush_queue_ when it satisfies condition
  // cfd->imm()->IsFlushPending()
  // A column family is inserted into compaction_queue_ when it satisfied
  // condition cfd->NeedsCompaction()
  // Column families in this list are all Ref()-erenced
  // TODO(icanadi) Provide some kind of ReferencedColumnFamily class that will
  // do RAII on ColumnFamilyData
  // Column families are in this queue when they need to be flushed or
  // compacted. Consumers of these queues are flush and compaction threads. When
  // column family is put on this queue, we increase unscheduled_flushes_ and
  // unscheduled_compactions_. When these variables are bigger than zero, that
  // means we need to schedule background threads for compaction and thread.
  // Once the background threads are scheduled, we decrease unscheduled_flushes_
  // and unscheduled_compactions_. That way we keep track of number of
  // compaction and flush threads we need to schedule. This scheduling is done
  // in MaybeScheduleFlushOrCompaction()
  // invariant(column family present in flush_queue_ <==>
  // ColumnFamilyData::pending_flush_ == true)
  std::deque<ColumnFamilyData*> flush_queue_;
  // invariant(column family present in compaction_queue_ <==>
  // ColumnFamilyData::pending_compaction_ == true)
  std::deque<ColumnFamilyData*> compaction_queue_;
1063 1064 1065

  // A queue to store filenames of the files to be purged
  std::deque<PurgeFileInfo> purge_queue_;
1066 1067 1068

  // A queue to store log writers to close
  std::deque<log::Writer*> logs_to_free_queue_;
1069 1070
  int unscheduled_flushes_;
  int unscheduled_compactions_;
1071

1072 1073 1074 1075
  // count how many background compactions are running or have been scheduled in
  // the BOTTOM pool
  int bg_bottom_compaction_scheduled_;

1076
  // count how many background compactions are running or have been scheduled
1077
  int bg_compaction_scheduled_;
J
jorlow@chromium.org 已提交
1078

1079 1080 1081
  // stores the number of compactions are currently running
  int num_running_compactions_;

1082 1083 1084
  // number of background memtable flush jobs, submitted to the HIGH pool
  int bg_flush_scheduled_;

1085 1086 1087
  // stores the number of flushes are currently running
  int num_running_flushes_;

1088 1089 1090
  // number of background obsolete file purge jobs, submitted to the HIGH pool
  int bg_purge_scheduled_;

H
hans@chromium.org 已提交
1091
  // Information for a manual compaction
1092
  struct ManualCompactionState {
I
Igor Canadi 已提交
1093
    ColumnFamilyData* cfd;
1094 1095
    int input_level;
    int output_level;
1096
    uint32_t output_path_id;
L
Lei Jin 已提交
1097
    Status status;
1098
    bool done;
1099
    bool in_progress;             // compaction request being processed?
1100 1101 1102
    bool incomplete;              // only part of requested range compacted
    bool exclusive;               // current behavior of only one manual
    bool disallow_trivial_move;   // Force actual compaction to run
1103 1104
    const InternalKey* begin;     // nullptr means beginning of key range
    const InternalKey* end;       // nullptr means end of key range
1105
    InternalKey* manual_end;      // how far we are compacting
1106
    InternalKey tmp_storage;      // Used to keep track of compaction progress
1107
    InternalKey tmp_storage1;     // Used to keep track of compaction progress
1108 1109 1110
  };
  struct PrepickedCompaction {
    // background compaction takes ownership of `compaction`.
1111
    Compaction* compaction;
1112 1113 1114
    // caller retains ownership of `manual_compaction_state` as it is reused
    // across background compactions.
    ManualCompactionState* manual_compaction_state;  // nullptr if non-manual
1115
  };
1116
  std::deque<ManualCompactionState*> manual_compaction_dequeue_;
1117 1118

  struct CompactionArg {
1119
    // caller retains ownership of `db`.
1120
    DBImpl* db;
1121 1122
    // background compaction takes ownership of `prepicked_compaction`.
    PrepickedCompaction* prepicked_compaction;
H
hans@chromium.org 已提交
1123
  };
J
jorlow@chromium.org 已提交
1124 1125 1126 1127

  // Have we encountered a background error in paranoid mode?
  Status bg_error_;

1128
  // shall we disable deletion of obsolete files
1129 1130 1131 1132 1133 1134
  // if 0 the deletion is enabled.
  // if non-zero, files will not be getting deleted
  // This enables two different threads to call
  // EnableFileDeletions() and DisableFileDeletions()
  // without any synchronization
  int disable_delete_obsolete_files_;
1135

1136 1137 1138
  // last time when DeleteObsoleteFiles with full scan was executed. Originally
  // initialized with startup time.
  uint64_t delete_obsolete_files_last_run_;
1139

1140
  // last time stats were dumped to LOG
H
Haobo Xu 已提交
1141
  std::atomic<uint64_t> last_stats_dump_time_microsec_;
1142

1143 1144 1145 1146
  // Each flush or compaction gets its own job id. this counter makes sure
  // they're unique
  std::atomic<int> next_job_id_;

1147 1148 1149
  // A flag indicating whether the current rocksdb database has any
  // data that is not yet persisted into either WAL or SST file.
  // Used when disableWAL is true.
1150
  std::atomic<bool> has_unpersisted_data_;
1151 1152 1153 1154 1155 1156 1157 1158 1159

  // if an attempt was made to flush all column families that
  // the oldest log depends on but uncommitted data in the oldest
  // log prevents the log from being released.
  // We must attempt to free the dependent memtables again
  // at a later time after the transaction in the oldest
  // log is fully committed.
  bool unable_to_flush_oldest_log_;

H
heyongqiang 已提交
1160
  static const int KEEP_LOG_FILE_NUM = 1000;
D
Dmitri Smirnov 已提交
1161
  // MSVC version 1800 still does not have constexpr for ::max()
1162
  static const uint64_t kNoTimeOut = port::kMaxUint64;
D
Dmitri Smirnov 已提交
1163

H
heyongqiang 已提交
1164
  std::string db_absolute_path_;
H
heyongqiang 已提交
1165

1166
  // The options to access storage files
L
Lei Jin 已提交
1167
  const EnvOptions env_options_;
1168

1169
  // Number of running IngestExternalFile() calls.
1170
  // REQUIRES: mutex held
1171
  int num_running_ingest_file_;
1172

I
Igor Canadi 已提交
1173 1174 1175 1176
#ifndef ROCKSDB_LITE
  WalManager wal_manager_;
#endif  // ROCKSDB_LITE

I
Igor Canadi 已提交
1177 1178 1179
  // Unified interface for logging events
  EventLogger event_logger_;

1180
  // A value of > 0 temporarily disables scheduling of background work
1181
  int bg_work_paused_;
1182

1183 1184 1185
  // A value of > 0 temporarily disables scheduling of background compaction
  int bg_compaction_paused_;

1186 1187 1188
  // Guard against multiple concurrent refitting
  bool refitting_level_;

1189 1190 1191
  // Indicate DB was opened successfully
  bool opened_successfully_;

Y
yizhu.sun 已提交
1192
  // minimum log number still containing prepared data.
1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204
  // this is used by FindObsoleteFiles to determine which
  // flushed logs we must keep around because they still
  // contain prepared data which has not been flushed or rolled back
  std::priority_queue<uint64_t, std::vector<uint64_t>, std::greater<uint64_t>>
      min_log_with_prep_;

  // to be used in conjunction with min_log_with_prep_.
  // once a transaction with data in log L is committed or rolled back
  // rather than removing the value from the heap we add that value
  // to prepared_section_completed_ which maps LOG -> instance_count
  // since a log could contain multiple prepared sections
  //
Y
yizhu.sun 已提交
1205
  // when trying to determine the minimum log still active we first
1206 1207 1208 1209 1210 1211 1212 1213
  // consult min_log_with_prep_. while that root value maps to
  // a value > 0 in prepared_section_completed_ we decrement the
  // instance_count for that log and pop the root value in
  // min_log_with_prep_. This will work the same as a min_heap
  // where we are deleting arbitrary elements and then up-heaping.
  std::unordered_map<uint64_t, uint64_t> prepared_section_completed_;
  std::mutex prep_heap_mutex_;

J
jorlow@chromium.org 已提交
1214 1215 1216 1217
  // No copying allowed
  DBImpl(const DBImpl&);
  void operator=(const DBImpl&);

I
Igor Canadi 已提交
1218
  // Background threads call this function, which is just a wrapper around
I
Igor Canadi 已提交
1219 1220 1221
  // the InstallSuperVersion() function. Background threads carry
  // job_context which can have new_superversion already
  // allocated.
I
Igor Canadi 已提交
1222
  void InstallSuperVersionAndScheduleWorkWrapper(
I
Igor Canadi 已提交
1223 1224
      ColumnFamilyData* cfd, JobContext* job_context,
      const MutableCFOptions& mutable_cf_options);
L
Lei Jin 已提交
1225

1226 1227 1228
  // All ColumnFamily state changes go through this function. Here we analyze
  // the new state and we schedule background work if we detect that the new
  // state needs flush or compaction.
I
Igor Canadi 已提交
1229 1230 1231
  SuperVersion* InstallSuperVersionAndScheduleWork(
      ColumnFamilyData* cfd, SuperVersion* new_sv,
      const MutableCFOptions& mutable_cf_options);
I
Igor Canadi 已提交
1232

I
Igor Canadi 已提交
1233
#ifndef ROCKSDB_LITE
I
Igor Canadi 已提交
1234 1235 1236
  using DB::GetPropertiesOfAllTables;
  virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
                                          TablePropertiesCollection* props)
1237
      override;
1238
  virtual Status GetPropertiesOfTablesInRange(
1239
      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
1240 1241
      TablePropertiesCollection* props) override;

I
Igor Canadi 已提交
1242
#endif  // ROCKSDB_LITE
1243

1244 1245
  // Function that Get and KeyMayExist call with no_io true or false
  // Note: 'value_found' from KeyMayExist propagates here
1246
  Status GetImpl(const ReadOptions& options, ColumnFamilyHandle* column_family,
M
Maysam Yabandeh 已提交
1247
                 const Slice& key, PinnableSlice* value,
1248
                 bool* value_found = nullptr);
1249

1250
  bool GetIntPropertyInternal(ColumnFamilyData* cfd,
1251 1252
                              const DBPropertyInfo& property_info,
                              bool is_locked, uint64_t* value);
1253 1254 1255

  bool HasPendingManualCompaction();
  bool HasExclusiveManualCompaction();
1256 1257 1258
  void AddManualCompaction(ManualCompactionState* m);
  void RemoveManualCompaction(ManualCompactionState* m);
  bool ShouldntRunManualCompaction(ManualCompactionState* m);
1259
  bool HaveManualCompaction(ColumnFamilyData* cfd);
1260
  bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1);
1261

1262
  size_t GetWalPreallocateBlockSize(uint64_t write_buffer_size) const;
1263 1264 1265 1266 1267

  // When set, we use a separate queue for writes that don't write to memtable.
  // 2PC these are the writes at Prepare phase.
  const bool concurrent_prepare_;
  const bool manual_wal_flush_;
J
jorlow@chromium.org 已提交
1268 1269 1270 1271
};

extern Options SanitizeOptions(const std::string& db,
                               const Options& src);
1272

1273
extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
S
Siying Dong 已提交
1274

S
Siying Dong 已提交
1275 1276 1277
extern CompressionType GetCompressionFlush(
    const ImmutableCFOptions& ioptions,
    const MutableCFOptions& mutable_cf_options);
1278

M
miguelportilla 已提交
1279 1280 1281 1282 1283 1284 1285
// Fix user-supplied options to be reasonable: clamp *ptr into
// [minvalue, maxvalue]. The upper bound is applied first and *ptr is
// re-read, so if minvalue > maxvalue the lower bound wins.
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) {
    *ptr = maxvalue;
  }
  if (static_cast<V>(*ptr) < minvalue) {
    *ptr = minvalue;
  }
}

1286
}  // namespace rocksdb