db_impl.h 77.1 KB
Newer Older
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
S
Siying Dong 已提交
2 3 4
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
5
//
J
jorlow@chromium.org 已提交
6 7 8
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
9
#pragma once
K
Kai Liu 已提交
10

H
Haobo Xu 已提交
11
#include <atomic>
12
#include <deque>
13
#include <functional>
14
#include <limits>
I
Igor Canadi 已提交
15
#include <list>
16
#include <map>
17
#include <set>
I
Igor Canadi 已提交
18
#include <string>
19 20
#include <utility>
#include <vector>
K
kailiu 已提交
21

22
#include "db/column_family.h"
23
#include "db/compaction_job.h"
24
#include "db/dbformat.h"
25 26
#include "db/error_handler.h"
#include "db/event_helpers.h"
27
#include "db/external_sst_file_ingestion_job.h"
28
#include "db/flush_job.h"
29 30
#include "db/flush_scheduler.h"
#include "db/internal_stats.h"
A
agiardullo 已提交
31
#include "db/log_writer.h"
S
Siying Dong 已提交
32
#include "db/logs_with_prep_tracker.h"
33
#include "db/pre_release_callback.h"
34
#include "db/range_del_aggregator.h"
35
#include "db/read_callback.h"
Y
Yi Wu 已提交
36
#include "db/snapshot_checker.h"
A
agiardullo 已提交
37
#include "db/snapshot_impl.h"
38
#include "db/version_edit.h"
I
Igor Canadi 已提交
39
#include "db/wal_manager.h"
40 41
#include "db/write_controller.h"
#include "db/write_thread.h"
K
Kai Liu 已提交
42
#include "memtable_list.h"
43 44
#include "monitoring/instrumented_mutex.h"
#include "options/db_options.h"
K
Kai Liu 已提交
45
#include "port/port.h"
46 47 48
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/memtablerep.h"
赵明 已提交
49
#include "rocksdb/metrics_reporter.h"
S
Siying Dong 已提交
50
#include "rocksdb/status.h"
51
#include "rocksdb/trace_reader_writer.h"
52
#include "rocksdb/transaction_log.h"
53
#include "rocksdb/write_buffer_manager.h"
S
sdong 已提交
54
#include "table/scoped_arena_iterator.h"
55
#include "util/autovector.h"
I
Igor Canadi 已提交
56 57
#include "util/event_logger.h"
#include "util/hash.h"
58
#include "util/repeatable_thread.h"
59 60
#include "util/stop_watch.h"
#include "util/thread_local.h"
61
#include "util/trace_replay.h"
L
linyuanjin 已提交
62
#include "utilities/console/server.h"
63

64 65
#include "rocksdb/terark_namespace.h"
namespace TERARKDB_NAMESPACE {
J
jorlow@chromium.org 已提交
66

Y
Yi Wu 已提交
67
class Arena;
Y
Yi Wu 已提交
68
class ArenaWrappedDBIter;
69
class InMemoryStatsHistoryIterator;
J
jorlow@chromium.org 已提交
70
class MemTable;
W
wangyi.ywq 已提交
71 72 73 74 75
class PersistentStatsHistoryIterator;
class StatsDumpScheduler;
#ifndef NDEBUG
class StatsDumpTestScheduler;
#endif  // !NDEBUG
Z
Zhongyi Xie 已提交
76

J
jorlow@chromium.org 已提交
77 78 79 80
class TableCache;
class Version;
class VersionEdit;
class VersionSet;
A
agiardullo 已提交
81
class WriteCallback;
I
Igor Canadi 已提交
82
struct JobContext;
83
struct ExternalSstFileInfo;
84
struct MemTableInfo;
J
jorlow@chromium.org 已提交
85

W
wangyi.ywq 已提交
86 87
const uint64_t kMicrosInSecond = 1000 * 1000;

J
jorlow@chromium.org 已提交
88 89
class DBImpl : public DB {
 public:
90
  DBImpl(const DBOptions& options, const std::string& dbname,
91
         const bool seq_per_batch = false, const bool batch_per_txn = true);
J
jorlow@chromium.org 已提交
92 93
  virtual ~DBImpl();

94 95 96
  using DB::Resume;
  virtual Status Resume() override;

J
jorlow@chromium.org 已提交
97
  // Implementations of the DB interface
98 99
  using DB::Put;
  virtual Status Put(const WriteOptions& options,
100
                     ColumnFamilyHandle* column_family, const Slice& key,
I
Igor Sugak 已提交
101
                     const Slice& value) override;
102 103
  using DB::Merge;
  virtual Status Merge(const WriteOptions& options,
104
                       ColumnFamilyHandle* column_family, const Slice& key,
I
Igor Sugak 已提交
105
                       const Slice& value) override;
106 107
  using DB::Delete;
  virtual Status Delete(const WriteOptions& options,
I
Igor Sugak 已提交
108 109
                        ColumnFamilyHandle* column_family,
                        const Slice& key) override;
A
Andres Noetzli 已提交
110 111 112 113
  using DB::SingleDelete;
  virtual Status SingleDelete(const WriteOptions& options,
                              ColumnFamilyHandle* column_family,
                              const Slice& key) override;
114
  using DB::Write;
I
Igor Sugak 已提交
115 116
  virtual Status Write(const WriteOptions& options,
                       WriteBatch* updates) override;
A
agiardullo 已提交
117

118
  using DB::Get;
J
jorlow@chromium.org 已提交
119
  virtual Status Get(const ReadOptions& options,
120
                     ColumnFamilyHandle* column_family, const Slice& key,
Z
ZhaoMing 已提交
121
                     LazyBuffer* value) override;
122 123 124 125

  // Function that Get and KeyMayExist call with no_io true or false
  // Note: 'value_found' from KeyMayExist propagates here
  Status GetImpl(const ReadOptions& options, ColumnFamilyHandle* column_family,
Z
ZhaoMing 已提交
126
                 const Slice& key, LazyBuffer* value,
赵明 已提交
127
                 bool* value_found = nullptr, ReadCallback* callback = nullptr);
128

129 130 131
  using DB::MultiGet;
  virtual std::vector<Status> MultiGet(
      const ReadOptions& options,
132
      const std::vector<ColumnFamilyHandle*>& column_family,
I
Igor Sugak 已提交
133 134
      const std::vector<Slice>& keys,
      std::vector<std::string>* values) override;
135

Y
Yi Wu 已提交
136
  virtual Status CreateColumnFamily(const ColumnFamilyOptions& cf_options,
137
                                    const std::string& column_family,
I
Igor Sugak 已提交
138
                                    ColumnFamilyHandle** handle) override;
Y
Yi Wu 已提交
139 140 141 142 143 144 145
  virtual Status CreateColumnFamilies(
      const ColumnFamilyOptions& cf_options,
      const std::vector<std::string>& column_family_names,
      std::vector<ColumnFamilyHandle*>* handles) override;
  virtual Status CreateColumnFamilies(
      const std::vector<ColumnFamilyDescriptor>& column_families,
      std::vector<ColumnFamilyHandle*>* handles) override;
I
Igor Sugak 已提交
146
  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override;
Y
Yi Wu 已提交
147 148
  virtual Status DropColumnFamilies(
      const std::vector<ColumnFamilyHandle*>& column_families) override;
149

150 151 152 153
  // Returns false if key doesn't exist in the database and true if it may.
  // If value_found is not passed in as null, then return the value if found in
  // memory. On return, if value was found, then value_found will be set to true
  // , otherwise false.
154
  using DB::KeyMayExist;
155
  virtual bool KeyMayExist(const ReadOptions& options,
156
                           ColumnFamilyHandle* column_family, const Slice& key,
I
Igor Sugak 已提交
157 158
                           std::string* value,
                           bool* value_found = nullptr) override;
Y
Yi Wu 已提交
159

160 161
  using DB::NewIterator;
  virtual Iterator* NewIterator(const ReadOptions& options,
I
Igor Sugak 已提交
162
                                ColumnFamilyHandle* column_family) override;
163 164
  virtual Status NewIterators(
      const ReadOptions& options,
I
Igor Canadi 已提交
165
      const std::vector<ColumnFamilyHandle*>& column_families,
I
Igor Sugak 已提交
166
      std::vector<Iterator*>* iterators) override;
Y
Yi Wu 已提交
167 168 169
  ArenaWrappedDBIter* NewIteratorImpl(const ReadOptions& options,
                                      ColumnFamilyData* cfd,
                                      SequenceNumber snapshot,
Y
Yi Wu 已提交
170
                                      ReadCallback* read_callback,
171
                                      bool allow_refresh = true);
Y
Yi Wu 已提交
172

I
Igor Sugak 已提交
173 174
  virtual const Snapshot* GetSnapshot() override;
  virtual void ReleaseSnapshot(const Snapshot* snapshot) override;
175
  using DB::GetProperty;
176
  virtual bool GetProperty(ColumnFamilyHandle* column_family,
I
Igor Sugak 已提交
177
                           const Slice& property, std::string* value) override;
178
  using DB::GetMapProperty;
179 180 181
  virtual bool GetMapProperty(
      ColumnFamilyHandle* column_family, const Slice& property,
      std::map<std::string, std::string>* value) override;
182 183 184
  using DB::GetIntProperty;
  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
                              const Slice& property, uint64_t* value) override;
185 186 187
  using DB::GetAggregatedIntProperty;
  virtual bool GetAggregatedIntProperty(const Slice& property,
                                        uint64_t* aggregated_value) override;
188
  using DB::GetApproximateSizes;
赵明 已提交
189 190 191
  virtual void GetApproximateSizes(
      ColumnFamilyHandle* column_family, const Range* range, int n,
      uint64_t* sizes, uint8_t include_flags = INCLUDE_FILES) override;
192 193 194 195 196
  using DB::GetApproximateMemTableStats;
  virtual void GetApproximateMemTableStats(ColumnFamilyHandle* column_family,
                                           const Range& range,
                                           uint64_t* const count,
                                           uint64_t* const size) override;
197
  using DB::CompactRange;
198 199 200
  virtual Status CompactRange(const CompactRangeOptions& options,
                              ColumnFamilyHandle* column_family,
                              const Slice* begin, const Slice* end) override;
201

202
  using DB::CompactFiles;
赵明 已提交
203 204 205 206 207 208
  virtual Status CompactFiles(
      const CompactionOptions& compact_options,
      ColumnFamilyHandle* column_family,
      const std::vector<std::string>& input_file_names, const int output_level,
      const int output_path_id = -1,
      std::vector<std::string>* const output_file_names = nullptr) override;
209

210 211 212
  virtual Status PauseBackgroundWork() override;
  virtual Status ContinueBackgroundWork() override;

213 214 215
  virtual Status EnableAutoCompaction(
      const std::vector<ColumnFamilyHandle*>& column_family_handles) override;

216
  using DB::SetOptions;
I
Igor Sugak 已提交
217 218 219
  Status SetOptions(
      ColumnFamilyHandle* column_family,
      const std::unordered_map<std::string, std::string>& options_map) override;
220

221 222 223
  virtual Status SetDBOptions(
      const std::unordered_map<std::string, std::string>& options_map) override;

224
  using DB::NumberLevels;
I
Igor Sugak 已提交
225
  virtual int NumberLevels(ColumnFamilyHandle* column_family) override;
226
  using DB::MaxMemCompactionLevel;
I
Igor Sugak 已提交
227
  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) override;
228
  using DB::Level0StopWriteTrigger;
I
Igor Sugak 已提交
229 230 231 232
  virtual int Level0StopWriteTrigger(
      ColumnFamilyHandle* column_family) override;
  virtual const std::string& GetName() const override;
  virtual Env* GetEnv() const override;
233
  using DB::GetOptions;
234
  virtual Options GetOptions(ColumnFamilyHandle* column_family) const override;
235
  using DB::GetDBOptions;
236
  virtual DBOptions GetDBOptions() const override;
237 238
  using DB::Flush;
  virtual Status Flush(const FlushOptions& options,
I
Igor Sugak 已提交
239
                       ColumnFamilyHandle* column_family) override;
Y
Yanqin Jin 已提交
240 241 242
  virtual Status Flush(
      const FlushOptions& options,
      const std::vector<ColumnFamilyHandle*>& column_families) override;
243
  virtual Status FlushWAL(bool sync) override;
L
liuyangming 已提交
244
  bool TEST_WALBufferIsEmpty(bool lock = true);
245
  virtual Status SyncWAL() override;
L
liuyangming 已提交
246 247
  virtual Status LockWAL() override;
  virtual Status UnlockWAL() override;
I
Igor Canadi 已提交
248

I
Igor Sugak 已提交
249
  virtual SequenceNumber GetLatestSequenceNumber() const override;
250 251
  // REQUIRES: joined the main write queue if two_write_queues is disabled, and
  // the second write queue otherwise.
252 253
  virtual void SetLastPublishedSequence(SequenceNumber seq);
  // Returns LastSequence in last_seq_same_as_publish_seq_
254 255 256
  // mode and LastAllocatedSequence otherwise. This is useful when visiblility
  // depends also on data written to the WAL but not to the memtable.
  SequenceNumber TEST_GetLastVisibleSequence() const;
I
Igor Canadi 已提交
257

258 259
  virtual bool SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) override;

Z
Zitan Chen 已提交
260 261 262
  virtual Status GetDbIdentity(std::string& identity) const override;

  virtual Status GetDbSessionId(std::string& session_id) const override;
Z
Zhongyi Xie 已提交
263 264 265 266 267 268 269 270 271
  ColumnFamilyHandle* DefaultColumnFamily() const override;

  ColumnFamilyHandle* PersistentStatsColumnFamily() const;

  virtual Status Close() override;

  Status GetStatsHistory(
      uint64_t start_time, uint64_t end_time,
      std::unique_ptr<StatsHistoryIterator>* stats_iterator) override;
I
Igor Canadi 已提交
272
#ifndef ROCKSDB_LITE
S
Siying Dong 已提交
273 274
  using DB::ResetStats;
  virtual Status ResetStats() override;
I
Igor Sugak 已提交
275 276
  virtual Status DisableFileDeletions() override;
  virtual Status EnableFileDeletions(bool force) override;
277
  virtual int IsFileDeletionsEnabled() const;
I
Igor Canadi 已提交
278
  // All the returned filenames start with "/"
279
  virtual Status GetLiveFiles(std::vector<std::string>&,
280
                              uint64_t* manifest_file_size,
I
Igor Sugak 已提交
281 282
                              bool flush_memtable = true) override;
  virtual Status GetSortedWalFiles(VectorLogPtr& files) override;
I
Igor Canadi 已提交
283

284
  virtual Status GetUpdatesSince(
285 286 287
      SequenceNumber seq_number, std::unique_ptr<TransactionLogIterator>* iter,
      const TransactionLogIterator::ReadOptions& read_options =
          TransactionLogIterator::ReadOptions()) override;
G
guokuankuan 已提交
288
  virtual void SetGuardSeqno(SequenceNumber guard_seqno) override;
I
Igor Sugak 已提交
289
  virtual Status DeleteFile(std::string name) override;
290
  Status DeleteFilesInRanges(ColumnFamilyHandle* column_family,
奏之章 已提交
291
                             const RangePtr* ranges, size_t n);
292

I
Igor Sugak 已提交
293 294
  virtual void GetLiveFilesMetaData(
      std::vector<LiveFileMetaData>* metadata) override;
295 296 297 298 299

  // Obtains the meta data of the specified column family of the DB.
  // Status::NotFound() will be returned if the current DB does not have
  // any column family match the specified name.
  // TODO(yhchiang): output parameter is placed in the end in this codebase.
赵明 已提交
300 301
  virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* column_family,
                                       ColumnFamilyMetaData* metadata) override;
302

303
  Status SuggestCompactRange(ColumnFamilyHandle* column_family,
S
Siying Dong 已提交
304
                             const Slice* begin, const Slice* end) override;
305

S
Siying Dong 已提交
306 307
  Status PromoteL0(ColumnFamilyHandle* column_family,
                   int target_level) override;
308

A
agiardullo 已提交
309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328
  // Similar to Write() but will call the callback once on the single write
  // thread to determine whether it is safe to perform the write.
  virtual Status WriteWithCallback(const WriteOptions& write_options,
                                   WriteBatch* my_batch,
                                   WriteCallback* callback);

  // Returns the sequence number that is guaranteed to be smaller than or equal
  // to the sequence number of any key that could be inserted into the current
  // memtables. It can then be assumed that any write with a larger(or equal)
  // sequence number will be present in this memtable or a later memtable.
  //
  // If the earliest sequence number could not be determined,
  // kMaxSequenceNumber will be returned.
  //
  // If include_history=true, will also search Memtables in MemTableList
  // History.
  SequenceNumber GetEarliestMemTableSequenceNumber(SuperVersion* sv,
                                                   bool include_history);

  // For a given key, check to see if there are any records for this key
329 330 331 332
  // in the memtables, including memtable history.  If cache_only is false,
  // SST files will also be checked.
  //
  // If a key is found, *found_record_for_key will be set to true and
333
  // *seq will be set to the stored sequence number for the latest
334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
  // operation on this key or kMaxSequenceNumber if unknown.
  // If no key is found, *found_record_for_key will be set to false.
  //
  // Note: If cache_only=false, it is possible for *seq to be set to 0 if
  // the sequence number has been cleared from the record.  If the caller is
  // holding an active db snapshot, we know the missing sequence must be less
  // than the snapshot's sequence number (sequence numbers are only cleared
  // when there are no earlier active snapshots).
  //
  // If NotFound is returned and found_record_for_key is set to false, then no
  // record for this key was found.  If the caller is holding an active db
  // snapshot, we know that no key could have existing after this snapshot
  // (since we do not compact keys that have an earlier snapshot).
  //
  // Returns OK or NotFound on success,
  // other status on unexpected error.
350
  // TODO(andrewkr): this API need to be aware of range deletion operations
351 352
  Status GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
                                 bool cache_only, SequenceNumber* seq,
Z
ZhaoMing 已提交
353
                                 bool* found_record_for_key);
A
agiardullo 已提交
354

355 356 357 358 359
  using DB::IngestExternalFile;
  virtual Status IngestExternalFile(
      ColumnFamilyHandle* column_family,
      const std::vector<std::string>& external_files,
      const IngestExternalFileOptions& ingestion_options) override;
360

A
Aaron G 已提交
361 362
  virtual Status VerifyChecksum() override;

363 364 365 366 367 368 369
  using DB::StartTrace;
  virtual Status StartTrace(
      const TraceOptions& options,
      std::unique_ptr<TraceWriter>&& trace_writer) override;

  using DB::EndTrace;
  virtual Status EndTrace() override;
370 371
  Status TraceIteratorSeek(const uint32_t& cf_id, const Slice& key);
  Status TraceIteratorSeekForPrev(const uint32_t& cf_id, const Slice& key);
I
Igor Canadi 已提交
372
#endif  // ROCKSDB_LITE
373

374 375 376 377 378 379
  // Similar to GetSnapshot(), but also lets the db know that this snapshot
  // will be used for transaction write-conflict checking.  The DB can then
  // make sure not to compact any keys that would prevent a write-conflict from
  // being detected.
  const Snapshot* GetSnapshotForWriteConflictBoundary();

I
Igor Canadi 已提交
380 381
  // checks if all live files exist on file system and that their file sizes
  // match to our in-memory records
Z
ZhaoMing 已提交
382
  virtual Status CheckConsistency(bool read_only);
I
Igor Canadi 已提交
383

奏之章 已提交
384 385 386 387
  Status RunManualCompaction(
      ColumnFamilyData* cfd, int input_level, int output_level,
      uint32_t output_path_id, uint32_t max_subcompactions, const Slice* begin,
      const Slice* end, const std::unordered_set<uint64_t>* files_being_compact,
Z
ZhaoMing 已提交
388
      bool exclusive, bool disallow_trivial_move = false);
389

390 391 392
  // Return an internal iterator over the current state of the database.
  // The keys of this iterator are internal keys (see format.h).
  // The returned iterator should be deleted when no longer needed.
S
sdong 已提交
393
  InternalIterator* NewInternalIterator(
394
      Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
Z
ZhaoMing 已提交
395
      ColumnFamilyHandle* column_family = nullptr,
Z
ZhaoMing 已提交
396
      SeparateHelper** separate_helper = nullptr);
397

S
Siying Dong 已提交
398 399 400
  // Non-owning accessor for the logs_with_prep_tracker_ member
  // (declared in db/logs_with_prep_tracker.h).
  LogsWithPrepTracker* logs_with_prep_tracker() {
    LogsWithPrepTracker* tracker = &logs_with_prep_tracker_;
    return tracker;
  }
W
wangyi.ywq 已提交
401 402 403 404 405 406 407 408
  // persist stats to column family "_persistent_stats"
  void PersistStats();

  // dump rocksdb.stats to LOG
  void DumpStats();

  //
  void ScheduleGCTTL();
S
Siying Dong 已提交
409

410
#ifndef NDEBUG
J
jorlow@chromium.org 已提交
411
  // Extra methods (for testing) that are not in the public DB interface
I
Igor Canadi 已提交
412
  // Implemented in db_impl_debug.cc
J
jorlow@chromium.org 已提交
413

414
  // Compact any files in the named level that overlap [*begin, *end]
415
  Status TEST_CompactRange(int level, const Slice* begin, const Slice* end,
416 417
                           ColumnFamilyHandle* column_family = nullptr,
                           bool disallow_trivial_move = false);
J
jorlow@chromium.org 已提交
418

419
  void TEST_SwitchWAL();
420

S
Siying Dong 已提交
421
  // Test-only accessor for unable_to_release_oldest_log_.
  bool TEST_UnableToReleaseOldestLog() { return unable_to_release_oldest_log_; }
422 423 424 425 426

  // Test-only: reports the getting_flushed flag of the first entry in
  // alive_log_files_.
  bool TEST_IsLogGettingFlushed() {
    auto& oldest_alive_log = *alive_log_files_.begin();
    return oldest_alive_log.getting_flushed;
  }

Y
Yi Wu 已提交
427 428
  Status TEST_SwitchMemtable(ColumnFamilyData* cfd = nullptr);

429
  // Force current memtable contents to be flushed.
430
  Status TEST_FlushMemTable(bool wait = true, bool allow_write_stall = false,
431
                            ColumnFamilyHandle* cfh = nullptr);
J
jorlow@chromium.org 已提交
432

433
  // Wait for memtable compaction
434
  Status TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family = nullptr);
435 436

  // Wait for any compaction
437 438 439
  // We add a bool parameter to wait for unscheduledCompactions_ == 0, but this
  // is only for the special test of CancelledCompactions
  Status TEST_WaitForCompact(bool waitUnscheduled = false);
440

441 442
  // Return the maximum overlapping data (in bytes) at next level for any
  // file at a level >= 1.
赵明 已提交
443 444
  int64_t TEST_MaxNextLevelOverlappingBytes(
      ColumnFamilyHandle* column_family = nullptr);
445

A
Abhishek Kona 已提交
446 447
  // Return the current manifest file no.
  uint64_t TEST_Current_Manifest_FileNo();
448

A
Andrew Kryczka 已提交
449 450 451
  // Returns the number that'll be assigned to the next file that's created.
  uint64_t TEST_Current_Next_FileNo();

452
  // get total level0 file size. Only for testing.
453
  uint64_t TEST_GetLevel0TotalSize();
454

455 456
  void TEST_GetFilesMetaData(ColumnFamilyHandle* column_family,
                             std::vector<std::vector<FileMetaData>>* metadata);
457

458 459 460 461 462 463 464 465 466 467
  void TEST_LockMutex();

  void TEST_UnlockMutex();

  // REQUIRES: mutex locked
  void* TEST_BeginWrite();

  // REQUIRES: mutex locked
  // pass the pointer that you got from TEST_BeginWrite()
  void TEST_EndWrite(void* w);
468

469
  uint64_t TEST_MaxTotalInMemoryState() const {
470 471
    return max_total_in_memory_state_;
  }
472

473 474
  size_t TEST_LogsToFreeSize();

475 476
  uint64_t TEST_LogfileNumber();

S
sdong 已提交
477 478
  // Test-only accessor for total_log_size_.
  uint64_t TEST_total_log_size() const { return total_log_size_; }

479 480 481 482
  // Returns column family name to ImmutableCFOptions map.
  Status TEST_GetAllImmutableCFOptions(
      std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map);

H
hyunwoo 已提交
483
  // Return the lastest MutableCFOptions of a column family
A
Aaron Gao 已提交
484
  Status TEST_GetLatestMutableCFOptions(ColumnFamilyHandle* column_family,
Y
yizhu.sun 已提交
485
                                        MutableCFOptions* mutable_cf_options);
A
Aaron Gao 已提交
486

487 488
  Cache* TEST_table_cache() { return table_cache_.get(); }

489
  WriteController& TEST_write_controler() { return write_controller_; }
490

491 492
  uint64_t TEST_FindMinLogContainingOutstandingPrep();
  uint64_t TEST_FindMinPrepLogReferencedByMemTable();
493 494
  size_t TEST_PreparedSectionCompletedSize();
  size_t TEST_LogsWithPrepSize();
495

496
  int TEST_BGCompactionsAllowed() const;
赵明 已提交
497
  int TEST_BGGarbageCollectionAllowed() const;
498
  int TEST_BGFlushesAllowed() const;
499
  size_t TEST_GetWalPreallocateBlockSize(uint64_t write_buffer_size) const;
W
wangyi.ywq 已提交
500
  void TEST_WaitForStatsDumpRun(std::function<void()> callback) const;
501 502
  bool TEST_IsPersistentStatsEnabled() const;
  size_t TEST_EstiamteStatsHistorySize() const;
503

504
#endif  // NDEBUG
I
Igor Canadi 已提交
505

506 507 508
  // Limits on how many background jobs of each kind may be scheduled at
  // once; produced by GetBGJobLimits() below. (Removed blame-scrape
  // artifact lines that were embedded inside the struct and made it
  // ill-formed.)
  struct BGJobLimits {
    int max_flushes;
    int max_compactions;
    int max_garbage_collections;
  };
  // Returns maximum background flushes and compactions allowed to be scheduled
  BGJobLimits GetBGJobLimits() const;
  // Need a static version that can be called during SanitizeOptions().
  static BGJobLimits GetBGJobLimits(int max_background_flushes,
                                    int max_background_compactions,
C
temp  
chenchanglong 已提交
516
                                    int max_background_garbage_collections,
517 518
                                    int max_background_jobs,
                                    bool parallelize_compactions);
Z
ZhaoMing 已提交
519
  int GetSubCompactionSlots(uint32_t max_subcompactions);
520

521 522
  // move logs pending closing from job_context to the DB queue and
  // schedule a purge
523 524
  // add superversion to the DB queue and schedule a purge
  void ScheduleBgFree(JobContext* job_context, SuperVersion* sv);
525

526 527
  uint64_t MinLogNumberToKeep();

528 529 530 531 532 533 534 535
  // Returns the lower bound file number for SSTs that won't be deleted, even if
  // they're obsolete. This lower bound is used internally to prevent newly
  // created flush/compaction output files from being deleted before they're
  // installed. This technique avoids the need for tracking the exact numbers of
  // files pending creation, although it prevents more files than necessary from
  // being deleted.
  uint64_t MinObsoleteSstNumberToKeep();

I
Igor Canadi 已提交
536
  // Returns the list of live files in 'live' and the list
K
kailiu 已提交
537
  // of all files in the filesystem in 'candidate_files'.
I
Igor Canadi 已提交
538
  // If force == false and the last call was less than
539
  // db_options_.delete_obsolete_files_period_micros microseconds ago,
I
Igor Canadi 已提交
540 541
  // it will not fill up the job_context
  void FindObsoleteFiles(JobContext* job_context, bool force,
I
Igor Canadi 已提交
542 543 544
                         bool no_full_scan = false);

  // Diffs the files listed in filenames and those that do not
545
  // belong to live files are possibly removed. Also, removes all the
I
Igor Canadi 已提交
546 547
  // files in sst_delete_files and log_delete_files.
  // It is not necessary to hold the mutex when invoking this method.
548 549
  // If FindObsoleteFiles() was run, we need to also run
  // PurgeObsoleteFiles(), even if disable_delete_obsolete_files_ is true
550
  void PurgeObsoleteFiles(JobContext& background_contet,
551 552 553
                          bool schedule_only = false);

  void SchedulePurge();
I
Igor Canadi 已提交
554

555 556
  // Read-only accessor for the snapshots_ list.
  const SnapshotList& snapshots() const { return snapshots_; }

557 558 559 560
  // Read-only accessor for the immutable_db_options_ member.
  const ImmutableDBOptions& immutable_db_options() const {
    const ImmutableDBOptions& opts = immutable_db_options_;
    return opts;
  }

561
  void CancelAllBackgroundWork(bool wait);
562

A
agiardullo 已提交
563 564 565 566 567 568 569 570 571 572 573
  // Find Super version and reference it. Based on options, it might return
  // the thread local cached one.
  // Call ReturnAndCleanupSuperVersion() when it is no longer needed.
  SuperVersion* GetAndRefSuperVersion(ColumnFamilyData* cfd);

  // Similar to the previous function but looks up based on a column family id.
  // nullptr will be returned if this column family no longer exists.
  // REQUIRED: this function should only be called on the write thread or if the
  // mutex is held.
  SuperVersion* GetAndRefSuperVersion(uint32_t column_family_id);

574 575 576
  // Un-reference the super version and clean it up if it is the last reference.
  void CleanupSuperVersion(SuperVersion* sv);

A
agiardullo 已提交
577 578 579 580 581 582 583 584 585 586 587 588 589 590
  // Un-reference the super version and return it to thread local cache if
  // needed. If it is the last reference of the super version. Clean it up
  // after un-referencing it.
  void ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd, SuperVersion* sv);

  // Similar to the previous function but looks up based on a column family id.
  // nullptr will be returned if this column family no longer exists.
  // REQUIRED: this function should only be called on the write thread.
  void ReturnAndCleanupSuperVersion(uint32_t colun_family_id, SuperVersion* sv);

  // REQUIRED: this function should only be called on the write thread or if the
  // mutex is held.  Return value only valid until next call to this function or
  // mutex is released.
  ColumnFamilyHandle* GetColumnFamilyHandle(uint32_t column_family_id);
A
agiardullo 已提交
591

A
Anirban Rahut 已提交
592
  // Same as above, should called without mutex held and not on write thread.
593 594
  std::unique_ptr<ColumnFamilyHandle> GetColumnFamilyHandleUnlocked(
      uint32_t column_family_id);
A
Anirban Rahut 已提交
595

596 597 598 599 600 601 602 603 604 605 606 607 608 609
  // Returns the number of currently running flushes.
  // REQUIREMENT: mutex_ must be held when calling this function.
  int num_running_flushes() {
    mutex_.AssertHeld();
    return num_running_flushes_;
  }

  // Returns the number of currently running compactions.
  // REQUIREMENT: mutex_ must be held when calling this function.
  int num_running_compactions() {
    mutex_.AssertHeld();
    return num_running_compactions_;
  }

610 611
  const WriteController& write_controller() { return write_controller_; }

612 613
  InternalIterator* NewInternalIterator(
      const ReadOptions&, ColumnFamilyData* cfd, SuperVersion* super_version,
Z
ZhaoMing 已提交
614
      Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
Z
ZhaoMing 已提交
615
      SeparateHelper** separate_helper = nullptr);
S
Siying Dong 已提交
616

617 618 619 620 621
  // Hollow transaction shell reconstructed during recovery. These are
  // passed to TransactionDB so that locks can be reacquired before writing
  // can resume. (Removed blame-scrape artifact lines that were interleaved
  // with the struct definition and made it ill-formed; code is otherwise
  // unchanged.)
  struct RecoveredTransaction {
    std::string name_;
    bool unprepared_;

    // Per-batch recovery state: the WAL that contained the batch, the batch
    // itself (owned by this struct), and its sub-batch count.
    struct BatchInfo {
      uint64_t log_number_;
      // TODO(lth): For unprepared, the memory usage here can be big for
      // unprepared transactions. This is only useful for rollbacks, and we
      // can in theory just keep keyset for that.
      WriteBatch* batch_;
      // Number of sub-batches. A new sub-batch is created if txn attempts to
      // insert a duplicate key,seq to memtable. This is currently used in
      // WritePreparedTxn/WriteUnpreparedTxn.
      size_t batch_cnt_;
    };

    // This maps the seq of the first key in the batch to BatchInfo, which
    // contains WriteBatch and other information relevant to the batch.
    //
    // For WriteUnprepared, batches_ can have size greater than 1, but for
    // other write policies, it must be of size 1.
    std::map<SequenceNumber, BatchInfo> batches_;

    explicit RecoveredTransaction(const uint64_t log, const std::string& name,
                                  WriteBatch* batch, SequenceNumber seq,
                                  size_t batch_cnt, bool unprepared)
        : name_(name), unprepared_(unprepared) {
      batches_[seq] = {log, batch, batch_cnt};
    }

    // Deletes the WriteBatch objects owned via batches_.
    ~RecoveredTransaction() {
      for (auto& it : batches_) {
        delete it.second.batch_;
      }
    }

    void AddBatch(SequenceNumber seq, uint64_t log_number, WriteBatch* batch,
                  size_t batch_cnt, bool unprepared) {
      assert(batches_.count(seq) == 0);
      batches_[seq] = {log_number, batch, batch_cnt};
      // Prior state must be unprepared, since the prepare batch must be the
      // last batch.
      assert(unprepared_);
      unprepared_ = unprepared;
    }
  };

667
  bool allow_2pc() const { return immutable_db_options_.allow_2pc; }
668

林源劲 已提交
669 670
  const std::string& bytedance_tags() const { return bytedance_tags_; }

赵明 已提交
671 672 673 674 675 676 677 678 679 680 681 682 683 684 685
  // Accessors for per-operation metric reporter handles (Seek/SeekForPrev/
  // Next/Prev). The aliases are reference types, so each accessor hands back
  // a reference to the DB-owned reporter rather than a copy.
  using QPSReporter = CountReporterHandle&;
  QPSReporter seek_qps_reporter() { return seek_qps_reporter_; }
  QPSReporter seekforprev_qps_reporter() { return seekforprev_qps_reporter_; }
  QPSReporter next_qps_reporter() { return next_qps_reporter_; }
  QPSReporter prev_qps_reporter() { return prev_qps_reporter_; }

  using LatencyReporter = HistReporterHandle&;
  LatencyReporter seek_latency_reporter() { return seek_latency_reporter_; }
  LatencyReporter seekforprev_latency_reporter() {
    return seekforprev_latency_reporter_;
  }
  LatencyReporter next_latency_reporter() { return next_latency_reporter_; }
  LatencyReporter prev_latency_reporter() { return prev_latency_reporter_; }

  using ThroughputReporter = CountReporterHandle&;
L
linyuanjin 已提交
686

R
Reid Horuff 已提交
687 688 689 690 691
  // Read-only view of the transactions reconstructed during recovery, keyed
  // by transaction name. Returned by const reference: the previous by-value
  // signature copied the whole map on every call, and the pointed-to
  // RecoveredTransaction objects are owned by this DBImpl, so callers must
  // not assume ownership of a copy anyway.
  const std::unordered_map<std::string, RecoveredTransaction*>&
  recovered_transactions() {
    return recovered_transactions_;
  }

692 693 694 695 696 697 698 699 700 701
  // Looks up a recovered transaction by name; returns nullptr when no
  // transaction with that name was reconstructed during recovery.
  RecoveredTransaction* GetRecoveredTransaction(const std::string& name) {
    auto found = recovered_transactions_.find(name);
    return found == recovered_transactions_.end() ? nullptr : found->second;
  }

  void InsertRecoveredTransaction(const uint64_t log, const std::string& name,
702
                                  WriteBatch* batch, SequenceNumber seq,
703 704 705 706 707 708 709 710 711 712 713 714 715
                                  size_t batch_cnt, bool unprepared_batch) {
    // For WriteUnpreparedTxn, InsertRecoveredTransaction is called multiple
    // times for every unprepared batch encountered during recovery.
    //
    // If the transaction is prepared, then the last call to
    // InsertRecoveredTransaction will have unprepared_batch = false.
    auto rtxn = recovered_transactions_.find(name);
    if (rtxn == recovered_transactions_.end()) {
      recovered_transactions_[name] = new RecoveredTransaction(
          log, name, batch, seq, batch_cnt, unprepared_batch);
    } else {
      rtxn->second->AddBatch(seq, log, batch, batch_cnt, unprepared_batch);
    }
S
Siying Dong 已提交
716
    logs_with_prep_tracker_.MarkLogAsContainingPrepSection(log);
717 718 719 720 721 722 723
  }

  void DeleteRecoveredTransaction(const std::string& name) {
    auto it = recovered_transactions_.find(name);
    assert(it != recovered_transactions_.end());
    auto* trx = it->second;
    recovered_transactions_.erase(it);
724 725 726 727
    for (const auto& info : trx->batches_) {
      logs_with_prep_tracker_.MarkLogAsHavingPrepSectionFlushed(
          info.second.log_number_);
    }
728 729 730
    delete trx;
  }

R
Reid Horuff 已提交
731 732 733 734 735 736 737 738
  // Destroys every recovered transaction and empties the registry.
  void DeleteAllRecoveredTransactions() {
    for (auto& name_and_txn : recovered_transactions_) {
      delete name_and_txn.second;
    }
    recovered_transactions_.clear();
  }

739 740 741
  // Appends a retired WAL writer to logs_to_free_queue_ so it can be freed
  // later (deferred deletion).
  void AddToLogsToFreeQueue(log::Writer* log_writer) {
    logs_to_free_queue_.push_back(log_writer);
  }
Y
Yi Wu 已提交
742

743 744 745 746
  // Appends a retired SuperVersion to superversion_to_free_queue_ so it can
  // be freed later (deferred deletion).
  void AddToSuperVersionToFreeQueue(SuperVersion* sv) {
    superversion_to_free_queue_.push_back(sv);
  }

Y
Yi Wu 已提交
747 748
  void SetSnapshotChecker(SnapshotChecker* snapshot_checker);

749 750 751
  // Not thread-safe.
  void SetRecoverableStatePreReleaseCallback(PreReleaseCallback* callback);

S
Siying Dong 已提交
752
  InstrumentedMutex* mutex() { return &mutex_; }
753

754 755
  Status NewDB();

756 757 758 759
  // This is to be used only by internal rocksdb classes.
  static Status Open(const DBOptions& db_options, const std::string& name,
                     const std::vector<ColumnFamilyDescriptor>& column_families,
                     std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
760
                     const bool seq_per_batch, const bool batch_per_txn);
761

762 763
  static Status CreateAndNewDirectory(Env* env, const std::string& dirname,
                                      std::unique_ptr<Directory>* directory);
764 765 766 767 768 769
  // find stats map from stats_history_ with smallest timestamp in
  // the range of [start_time, end_time)
  bool FindStatsByTime(uint64_t start_time, uint64_t end_time,
                       uint64_t* new_time,
                       std::map<std::string, uint64_t>* stats_map);

W
wangyi.ywq 已提交
770 771 772 773
  // Print information of all tombstones of all iterators to the std::string
  // This is only used by ldb. The output might be capped. Tombstones
  // printed out are not guaranteed to be in any order.
  Status TablesRangeTombstoneSummary(ColumnFamilyHandle* column_family,
                                     int max_entries_to_print,
                                     std::string* out_str);

#ifndef NDEBUG

  Status TEST_FlushMemTable(ColumnFamilyData* cfd,
                            const FlushOptions& flush_opts);

  // Flush (multiple) ColumnFamilyData without using ColumnFamilyHandle. This
  // is because in certain cases, we can flush column families, wait for the
  // flush to complete, but delete the column family handle before the wait
  // finishes. For example in CompactRange.
  Status TEST_AtomicFlushMemTables(const autovector<ColumnFamilyData*>& cfds,
                                   const FlushOptions& flush_opts);
  size_t TEST_EstimateInMemoryStatsHistorySize() const;

  VersionSet* TEST_GetVersionSet() const { return versions_.get(); }

#ifndef ROCKSDB_LITE
  StatsDumpTestScheduler* TEST_GetStatsDumpScheduler() const;
#endif  // !ROCKSDB_LITE

#endif  // NDEBUG
797
 protected:
H
heyongqiang 已提交
798 799
  Env* const env_;
  const std::string dbname_;
Z
Zitan Chen 已提交
800 801 802 803
  std::string db_id_;
  // db_session_id_ is an identifier that gets reset
  // every time the DB is opened
  std::string db_session_id_;
804
  std::unique_ptr<VersionSet> versions_;
805 806
  // Flag to check whether we allocated and own the info log file
  bool own_info_log_;
807
  const DBOptions initial_db_options_;
808 809
  const ImmutableDBOptions immutable_db_options_;
  MutableDBOptions mutable_db_options_;
L
Lei Jin 已提交
810
  Statistics* stats_;
811 812
  std::unordered_map<std::string, RecoveredTransaction*>
      recovered_transactions_;
813 814
  std::unique_ptr<Tracer> tracer_;
  InstrumentedMutex trace_mutex_;
H
heyongqiang 已提交
815

816
  // Except in DB::Open(), WriteOptionsFile can only be called when:
Y
Yi Wu 已提交
817 818 819 820
  // Persist options to options file.
  // If need_mutex_lock = false, the method will lock DB mutex.
  // If need_enter_write_thread = false, the method will enter write thread.
  Status WriteOptionsFile(bool need_mutex_lock, bool need_enter_write_thread);
821 822 823 824

  // The following two functions can only be called when:
  // 1. WriteThread::Writer::EnterUnbatched() is used.
  // 2. db_mutex is NOT held
825 826 827
  Status RenameTempFileToOptionsFile(const std::string& file_name);
  Status DeleteObsoleteOptionsFiles();

Z
ZhaoMing 已提交
828
  void NotifyOnFlushBegin(ColumnFamilyData* cfd,
Y
Yi Wu 已提交
829
                          const MutableCFOptions& mutable_cf_options,
Z
ZhaoMing 已提交
830
                          int job_id);
Y
Yi Wu 已提交
831

Z
ZhaoMing 已提交
832 833
  void NotifyOnFlushCompleted(ColumnFamilyData* cfd,
                              const std::vector<FileMetaData>& file_meta,
834
                              const MutableCFOptions& mutable_cf_options,
Z
ZhaoMing 已提交
835 836
                              int job_id,
                              const std::vector<TableProperties>& prop);
837

赵明 已提交
838 839 840
  void NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
                               const Status& st,
                               const CompactionJobStats& job_stats, int job_id);
P
Peter Pei 已提交
841

赵明 已提交
842 843
  void NotifyOnCompactionCompleted(ColumnFamilyData* cfd, Compaction* c,
                                   const Status& st,
844
                                   const CompactionJobStats& job_stats,
845
                                   int job_id);
W
Wanning Jiang 已提交
846
  void NotifyOnMemTableSealed(ColumnFamilyData* cfd,
847
                              const MemTableInfo& mem_table_info);
O
Ori Bernstein 已提交
848

849
#ifndef ROCKSDB_LITE
850 851
  void NotifyOnExternalFileIngested(
      ColumnFamilyData* cfd, const ExternalSstFileIngestionJob& ingestion_job);
852
#endif  // !ROCKSDB_LITE
853

Y
Yueh-Hsuan Chiang 已提交
854 855 856 857 858 859
  void NewThreadStatusCfInfo(ColumnFamilyData* cfd) const;

  void EraseThreadStatusCfInfo(ColumnFamilyData* cfd) const;

  void EraseThreadStatusDbInfo() const;

860
  // If disable_memtable is set the application logic must guarantee that the
M
Maysam Yabandeh 已提交
861 862 863 864 865 866 867 868 869 870 871 872 873 874
  // batch will still be skipped from memtable during the recovery. An excption
  // to this is seq_per_batch_ mode, in which since each batch already takes one
  // seq, it is ok for the batch to write to memtable during recovery as long as
  // it only takes one sequence number: i.e., no duplicate keys.
  // In WriteCommitted it is guarnateed since disable_memtable is used for
  // prepare batch which will be written to memtable later during the commit,
  // and in WritePrepared it is guaranteed since it will be used only for WAL
  // markers which will never be written to memtable. If the commit marker is
  // accompanied with CommitTimeWriteBatch that is not written to memtable as
  // long as it has no duplicate keys, it does not violate the one-seq-per-batch
  // policy.
  // batch_cnt is expected to be non-zero in seq_per_batch mode and
  // indicates the number of sub-patches. A sub-patch is a subset of the write
  // batch that does not have duplicate keys.
A
agiardullo 已提交
875
  Status WriteImpl(const WriteOptions& options, WriteBatch* updates,
876 877
                   WriteCallback* callback = nullptr,
                   uint64_t* log_used = nullptr, uint64_t log_ref = 0,
878
                   bool disable_memtable = false, uint64_t* seq_used = nullptr,
879
                   size_t batch_cnt = 0,
880
                   PreReleaseCallback* pre_release_callback = nullptr);
881

882 883 884
  Status PipelinedWriteImpl(const WriteOptions& options, WriteBatch* updates,
                            WriteCallback* callback = nullptr,
                            uint64_t* log_used = nullptr, uint64_t log_ref = 0,
885 886
                            bool disable_memtable = false,
                            uint64_t* seq_used = nullptr);
887

888 889 890
  // batch_cnt is expected to be non-zero in seq_per_batch mode and indicates
  // the number of sub-patches. A sub-patch is a subset of the write batch that
  // does not have duplicate keys.
891 892
  Status WriteImplWALOnly(const WriteOptions& options, WriteBatch* updates,
                          WriteCallback* callback = nullptr,
893
                          uint64_t* log_used = nullptr, uint64_t log_ref = 0,
894
                          uint64_t* seq_used = nullptr, size_t batch_cnt = 0,
895
                          PreReleaseCallback* pre_release_callback = nullptr);
896

897 898 899
  // write cached_recoverable_state_ to memtable if it is not empty
  // The writer must be the leader in write_thread_ and holding mutex_
  Status WriteRecoverableState();
A
agiardullo 已提交
900

901 902
  // Actual implementation of Close()
  Status CloseImpl();
Z
Zitan Chen 已提交
903 904 905 906 907 908 909 910
  // Recover the descriptor from persistent storage.  May do a significant
  // amount of work to recover recently logged updates.  Any changes to
  // be made to the descriptor are added to *edit.
  // recovered_seq is set to less than kMaxSequenceNumber if the log's tail is
  // skipped.
  virtual Status Recover(
      const std::vector<ColumnFamilyDescriptor>& column_families,
      bool read_only = false, bool error_if_log_file_exist = false,
W
wangyi.ywq 已提交
911
      bool error_if_data_exists_in_logs = false);
Z
Zitan Chen 已提交
912 913 914 915 916 917 918 919 920 921 922 923 924 925 926

  virtual bool OwnTablesAndLogs() const { return true; }

  // REQUIRES: db mutex held when calling this function, but the db mutex can
  // be released and re-acquired. Db mutex will be held when the function
  // returns.
  // After best-efforts recovery, there may be SST files in db/cf paths that are
  // not referenced in the MANIFEST. We delete these SST files. In the
  // meantime, we find out the largest file number present in the paths, and
  // bump up the version set's next_file_number_ to be 1 + largest_file_number.
  Status FinishBestEffortsRecovery();

  // SetDbSessionId() should be called in the constuctor DBImpl()
  // to ensure that db_session_id_ gets updated every time the DB is opened
  void SetDbSessionId();
J
jorlow@chromium.org 已提交
927 928
 private:
  friend class DB;
929
  friend class ErrorHandler;
930
  friend class InternalStats;
931
  friend class PessimisticTransaction;
932
  friend class TransactionBaseImpl;
933 934
  friend class WriteCommittedTxn;
  friend class WritePreparedTxn;
M
Maysam Yabandeh 已提交
935
  friend class WritePreparedTxnDB;
936
  friend class WriteBatchWithIndex;
937
  friend class WriteUnpreparedTxnDB;
938 939
  friend class WriteUnpreparedTxn;

I
Igor Canadi 已提交
940
#ifndef ROCKSDB_LITE
L
Lei Jin 已提交
941
  friend class ForwardIterator;
I
Igor Canadi 已提交
942
#endif
943
  friend struct SuperVersion;
L
Lei Jin 已提交
944
  friend class CompactedDBImpl;
945
  friend class DBTest_ConcurrentFlushWAL_Test;
946
  friend class DBTest_MixedSlowdownOptionsStop_Test;
Z
Zhongyi Xie 已提交
947 948 949
  friend class DBCompactionTest_CompactBottomLevelFilesWithDeletions_Test;
  friend class DBCompactionTest_CompactionDuringShutdown_Test;
  friend class StatsHistoryTest_PersistentStatsCreateColumnFamilies_Test;
A
agiardullo 已提交
950
#ifndef NDEBUG
Y
Yi Wu 已提交
951
  friend class DBTest2_ReadCallbackTest_Test;
952
  friend class WriteCallbackTest_WriteWithCallbackTest_Test;
A
agiardullo 已提交
953
  friend class XFTransactionWriteHandler;
954
  friend class WriteUnpreparedTransactionTest_RecoveryTest_Test;
A
agiardullo 已提交
955
#endif
956
  struct CompactionState;
957

958
  struct WriteContext {
959
    SuperVersionContext superversion_context;
960 961
    autovector<MemTable*> memtables_to_free_;

962 963 964
    explicit WriteContext(bool create_superversion = false)
        : superversion_context(create_superversion) {}

965
    ~WriteContext() {
966
      superversion_context.Clean();
967 968 969 970 971
      for (auto& m : memtables_to_free_) {
        delete m;
      }
    }
  };
J
jorlow@chromium.org 已提交
972

973
  struct PrepickedCompaction;
974 975
  struct PurgeFileInfo;

Z
Zhongyi Xie 已提交
976 977 978 979 980 981 982 983 984 985 986 987 988 989 990
  // Initialize the built-in column family for persistent stats. Depending on
  // whether on-disk persistent stats have been enabled before, it may either
  // create a new column family and column family handle or just a column family
  // handle.
  // Required: DB mutex held
  Status InitPersistStatsColumnFamily();

  // Persistent Stats column family has two format version key which are used
  // for compatibility check. Write format version if it's created for the
  // first time, read format version and check compatibility if recovering
  // from disk. This function requires DB mutex held at entrance but may
  // release and re-acquire DB mutex in the process.
  // Required: DB mutex held
  Status PersistentStatsProcessFormatVersion();

991 992
  Status ResumeImpl();

J
jorlow@chromium.org 已提交
993 994
  void MaybeIgnoreError(Status* s) const;

995 996
  const Status CreateArchivalDirectory();

Y
Yi Wu 已提交
997 998 999 1000 1001 1002
  Status CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
                                const std::string& cf_name,
                                ColumnFamilyHandle** handle);

  Status DropColumnFamilyImpl(ColumnFamilyHandle* column_family);

J
jorlow@chromium.org 已提交
1003 1004
  // Delete any unneeded files and stale in-memory entries.
  void DeleteObsoleteFiles();
1005
  // Delete obsolete files and log status and information of file deletion
S
Siying Dong 已提交
1006
  void DeleteObsoleteFileImpl(int job_id, const std::string& fname,
1007 1008
                              const std::string& path_to_sync, FileType type,
                              uint64_t number);
J
jorlow@chromium.org 已提交
1009

I
Igor Canadi 已提交
1010 1011
  // Background process needs to call
  //     auto x = CaptureCurrentFileNumberInPendingOutputs()
1012
  //     auto file_num = versions_->NewFileNumber();
I
Igor Canadi 已提交
1013 1014
  //     <do something>
  //     ReleaseFileNumberFromPendingOutputs(x)
1015 1016
  // This will protect any file with number `file_num` or greater from being
  // deleted while <do something> is running.
I
Igor Canadi 已提交
1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
  // -----------
  // This function will capture current file number and append it to
  // pending_outputs_. This will prevent any background process to delete any
  // file created after this point.
  std::list<uint64_t>::iterator CaptureCurrentFileNumberInPendingOutputs();
  // This function should be called with the result of
  // CaptureCurrentFileNumberInPendingOutputs(). It then marks that any file
  // created between the calls CaptureCurrentFileNumberInPendingOutputs() and
  // ReleaseFileNumberFromPendingOutputs() can now be deleted (if it's not live
  // and blocked by any other pending_outputs_ calls)
  void ReleaseFileNumberFromPendingOutputs(std::list<uint64_t>::iterator v);

1029 1030
  Status SyncClosedLogs(JobContext* job_context);

1031
  // Flush the in-memory write buffer to storage.  Switches to a new
1032 1033
  // log-file/memtable and writes a new descriptor iff successful. Then
  // installs a new super version for the column family.
I
Igor Canadi 已提交
1034 1035 1036
  Status FlushMemTableToOutputFile(ColumnFamilyData* cfd,
                                   const MutableCFOptions& mutable_cf_options,
                                   bool* madeProgress, JobContext* job_context,
1037
                                   SuperVersionContext* superversion_context,
I
Igor Canadi 已提交
1038
                                   LogBuffer* log_buffer);
J
jorlow@chromium.org 已提交
1039

1040 1041 1042
  // Argument required by background flush thread.
  struct BGFlushArg {
    BGFlushArg()
1043 1044
        : cfd_(nullptr), max_memtable_id_(0), superversion_context_(nullptr) {}
    BGFlushArg(ColumnFamilyData* cfd, uint64_t max_memtable_id,
1045 1046
               SuperVersionContext* superversion_context)
        : cfd_(cfd),
1047
          max_memtable_id_(max_memtable_id),
1048 1049 1050 1051 1052 1053
          superversion_context_(superversion_context) {}

    // Column family to flush.
    ColumnFamilyData* cfd_;
    // Maximum ID of memtable to flush. In this column family, memtables with
    // IDs smaller than this value must be flushed before this flush completes.
1054
    uint64_t max_memtable_id_;
1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066
    // Pointer to a SuperVersionContext object. After flush completes, RocksDB
    // installs a new superversion for the column family. This operation
    // requires a SuperVersionContext object (currently embedded in JobContext).
    SuperVersionContext* superversion_context_;
  };

  // Flush the memtables of (multiple) column families to multiple files on
  // persistent storage.
  Status FlushMemTablesToOutputFiles(
      const autovector<BGFlushArg>& bg_flush_args, bool* made_progress,
      JobContext* job_context, LogBuffer* log_buffer);

1067 1068 1069 1070
  Status AtomicFlushMemTablesToOutputFiles(
      const autovector<BGFlushArg>& bg_flush_args, bool* made_progress,
      JobContext* job_context, LogBuffer* log_buffer);

S
Stanislau Hlebik 已提交
1071 1072
  // REQUIRES: log_numbers are sorted in ascending order
  Status RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
1073
                         SequenceNumber* next_sequence, bool read_only);
J
jorlow@chromium.org 已提交
1074

1075
  // The following two methods are used to flush a memtable to
Y
Yueh-Hsuan Chiang 已提交
1076
  // storage. The first one is used at database RecoveryTime (when the
1077 1078 1079
  // database is opened) and is heavyweight because it holds the mutex
  // for the entire period. The second method WriteLevel0Table supports
  // concurrent flush memtables to storage.
1080 1081
  Status WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
                                     MemTable* mem, VersionEdit* edit);
S
sdong 已提交
1082

1083 1084 1085 1086 1087 1088
  // Restore alive_log_files_ and total_log_size_ after recovery.
  // It needs to run only when there's no flush during recovery
  // (e.g. avoid_flush_during_recovery=true). May also trigger flush
  // in case total_log_size > max_total_wal_size.
  Status RestoreAliveLogFiles(const std::vector<uint64_t>& log_numbers);

S
sdong 已提交
1089 1090
  // num_bytes: for slowdown case, delay time is calculated based on
  //            `num_bytes` going through.
M
Maysam Yabandeh 已提交
1091
  Status DelayWrite(uint64_t num_bytes, const WriteOptions& write_options);
1092

1093 1094 1095
  Status ThrottleLowPriWritesIfNeeded(const WriteOptions& write_options,
                                      WriteBatch* my_batch);

I
Igor Canadi 已提交
1096
  Status ScheduleFlushes(WriteContext* context);
1097

Z
ZhaoMing 已提交
1098 1099 1100 1101 1102 1103
  Status NewLogWriter(std::unique_ptr<log::Writer>* new_log,
                      uint64_t recycle_log_number, const DBOptions& db_options,
                      Env::WriteLifeTimeHint write_hint);

  void FillLogWriterPool();

1104
  Status SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context);
S
Stanislau Hlebik 已提交
1105

Y
Yanqin Jin 已提交
1106 1107
  void SelectColumnFamiliesForAtomicFlush(autovector<ColumnFamilyData*>* cfds);

H
heyongqiang 已提交
1108
  // Force current memtable contents to be flushed.
1109
  Status FlushMemTable(ColumnFamilyData* cfd, const FlushOptions& options,
1110
                       FlushReason flush_reason, bool writes_stopped = false);
H
heyongqiang 已提交
1111

Y
Yanqin Jin 已提交
1112 1113 1114 1115 1116
  Status AtomicFlushMemTables(
      const autovector<ColumnFamilyData*>& column_family_datas,
      const FlushOptions& options, FlushReason flush_reason,
      bool writes_stopped = false);

1117 1118 1119 1120
  // Wait until flushing this column family won't stall writes
  Status WaitUntilFlushWouldNotStallWrites(ColumnFamilyData* cfd,
                                           bool* flush_needed);

1121 1122 1123 1124
  // Wait for memtable flushed.
  // If flush_memtable_id is non-null, wait until the memtable with the ID
  // gets flush. Otherwise, wait until the column family don't have any
  // memtable pending flush.
Y
Yanqin Jin 已提交
1125 1126
  // resuming_from_bg_err indicates whether the caller is attempting to resume
  // from background error.
1127
  Status WaitForFlushMemTable(ColumnFamilyData* cfd,
Y
Yanqin Jin 已提交
1128 1129 1130 1131
                              const uint64_t* flush_memtable_id = nullptr,
                              bool resuming_from_bg_err = false) {
    return WaitForFlushMemTables({cfd}, {flush_memtable_id},
                                 resuming_from_bg_err);
1132 1133 1134 1135
  }
  // Wait for memtables to be flushed for multiple column families.
  Status WaitForFlushMemTables(
      const autovector<ColumnFamilyData*>& cfds,
Y
Yanqin Jin 已提交
1136 1137 1138 1139 1140
      const autovector<const uint64_t*>& flush_memtable_ids,
      bool resuming_from_bg_err);

  // REQUIRES: mutex locked and in write thread.
  void AssignAtomicFlushSeq(const autovector<ColumnFamilyData*>& cfds);
H
heyongqiang 已提交
1141

1142
  // REQUIRES: mutex locked
1143
  Status SwitchWAL(WriteContext* write_context);
1144 1145 1146 1147 1148

  // REQUIRES: mutex locked
  Status HandleWriteBufferFull(WriteContext* write_context);

  // REQUIRES: mutex locked
1149 1150
  Status PreprocessWrite(const WriteOptions& write_options, bool* need_log_sync,
                         WriteContext* write_context);
1151

1152
  WriteBatch* MergeBatch(const WriteThread::WriteGroup& write_group,
1153 1154
                         WriteBatch* tmp_batch, size_t* write_with_wal,
                         WriteBatch** to_be_cached_state);
1155 1156 1157 1158

  Status WriteToWAL(const WriteBatch& merged_batch, log::Writer* log_writer,
                    uint64_t* log_used, uint64_t* log_size);

1159
  Status WriteToWAL(const WriteThread::WriteGroup& write_group,
1160 1161 1162 1163 1164 1165
                    log::Writer* log_writer, uint64_t* log_used,
                    bool need_log_sync, bool need_log_dir_sync,
                    SequenceNumber sequence);

  Status ConcurrentWriteToWAL(const WriteThread::WriteGroup& write_group,
                              uint64_t* log_used, SequenceNumber* last_sequence,
1166
                              size_t seq_inc);
1167

1168
  // Used by WriteImpl to update bg_error_ if paranoid check is enabled.
1169
  void WriteStatusCheck(const Status& status);
1170 1171 1172

  // Used by WriteImpl to update bg_error_ in case of memtable insert error.
  void MemTableInsertStatusCheck(const Status& memtable_insert_status);
Y
Yi Wu 已提交
1173

I
Igor Canadi 已提交
1174
#ifndef ROCKSDB_LITE
1175

1176 1177 1178
  Status CompactFilesImpl(const CompactionOptions& compact_options,
                          ColumnFamilyData* cfd, Version* version,
                          const std::vector<std::string>& input_file_names,
1179
                          std::vector<std::string>* const output_file_names,
1180 1181 1182
                          const int output_level, int output_path_id,
                          JobContext* job_context, LogBuffer* log_buffer);

1183 1184 1185
  // Wait for current IngestExternalFile() calls to finish.
  // REQUIRES: mutex_ held
  void WaitForIngestFile();
1186

1187
#else
1188
  // IngestExternalFile is not supported in ROCKSDB_LITE so this function
1189
  // will be no-op
1190
  void WaitForIngestFile() {}
I
Igor Canadi 已提交
1191
#endif  // ROCKSDB_LITE
1192 1193 1194

  ColumnFamilyData* GetColumnFamilyDataByName(const std::string& cf_name);

1195
  void MaybeScheduleFlushOrCompaction();
1196 1197 1198 1199 1200 1201 1202 1203 1204

  // A flush request specifies the column families to flush as well as the
  // largest memtable id to persist for each column family. Once all the
  // memtables whose IDs are smaller than or equal to this per-column-family
  // specified value, this flush request is considered to have completed its
  // work of flushing this column family. After completing the work for all
  // column families in this request, this flush is considered complete.
  typedef std::vector<std::pair<ColumnFamilyData*, uint64_t>> FlushRequest;

Y
Yanqin Jin 已提交
1205 1206 1207
  void GenerateFlushRequest(const autovector<ColumnFamilyData*>& cfds,
                            FlushRequest* req);

1208 1209
  void SchedulePendingFlush(const FlushRequest& req, FlushReason flush_reason);

1210
  void SchedulePendingCompaction(ColumnFamilyData* cfd);
C
chenchanglong 已提交
1211
  void SchedulePendingGarbageCollection(ColumnFamilyData* cfd);
Z
ZhaoMing 已提交
1212 1213 1214
  void SchedulePendingPurge(const std::string& fname,
                            const std::string& dir_to_sync, FileType type,
                            uint64_t number, int job_id);
1215
  static void BGWorkCompaction(void* arg);
C
chenchanglong 已提交
1216
  static void BGWorkGarbageCollection(void* arg);
1217 1218 1219
  // Runs a pre-chosen universal compaction involving bottom level in a
  // separate, bottom-pri thread pool.
  static void BGWorkBottomCompaction(void* arg);
1220
  static void BGWorkFlush(void* db);
1221
  static void BGWorkPurge(void* arg);
1222
  static void UnscheduleCallback(void* arg);
1223 1224
  void BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
                                Env::Priority bg_thread_pri);
Z
ZhaoMing 已提交
1225
  void BackgroundCallGarbageCollection();
1226
  void BackgroundCallFlush();
1227
  void BackgroundCallPurge();
I
Igor Canadi 已提交
1228
  Status BackgroundCompaction(bool* madeProgress, JobContext* job_context,
1229 1230
                              LogBuffer* log_buffer,
                              PrepickedCompaction* prepicked_compaction);
Z
ZhaoMing 已提交
1231 1232 1233
  Status BackgroundGarbageCollection(bool* madeProgress,
                                     JobContext* job_context,
                                     LogBuffer* log_buffer);
I
Igor Canadi 已提交
1234
  Status BackgroundFlush(bool* madeProgress, JobContext* job_context,
1235
                         LogBuffer* log_buffer, FlushReason* reason);
J
jorlow@chromium.org 已提交
1236

1237 1238
  bool EnoughRoomForCompaction(ColumnFamilyData* cfd,
                               const std::vector<CompactionInputFiles>& inputs,
1239 1240
                               bool* sfm_bookkeeping, LogBuffer* log_buffer);

1241
  // Schedule background tasks
W
wangyi.ywq 已提交
1242
  void StartStatsDumpScheduler();
1243

1244 1245
  void PrintStatistics();

Z
Zhongyi Xie 已提交
1246
  size_t EstimateInMemoryStatsHistorySize() const;
1247

1248 1249
  // Return the minimum empty level that could hold the total data in the
  // input level. Return the input level, if such level could not be found.
1250
  int FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
赵明 已提交
1251 1252
                                   const MutableCFOptions& mutable_cf_options,
                                   int level);
1253

1254 1255 1256
  // Move the files in the input level to the target level.
  // If target_level < 0, automatically calculate the minimum level that could
  // hold the data set.
I
Igor Canadi 已提交
1257
  Status ReFitLevel(ColumnFamilyData* cfd, int level, int target_level = -1);
1258

1259 1260 1261
  // helper functions for adding and removing from flush & compaction queues
  void AddToCompactionQueue(ColumnFamilyData* cfd);
  ColumnFamilyData* PopFirstFromCompactionQueue();
1262 1263 1264 1265

  void AddToGarbageCollectionQueue(ColumnFamilyData* cfd);
  ColumnFamilyData* PopFirstFromGarbageCollectionQueue();

1266
  FlushRequest PopFirstFromFlushQueue();
1267

1268 1269 1270
  // helper function to call after some of the logs_ were synced
  void MarkLogsSynced(uint64_t up_to, bool synced_dir, const Status& status);

1271
  SnapshotImpl* GetSnapshotImpl(bool is_write_conflict_boundary);
1272

Z
ZhaoMing 已提交
1273
  uint64_t GetMaxWalSize() const;
1274 1275
  uint64_t GetMaxTotalWalSize() const;

1276 1277
  Directory* GetDataDir(ColumnFamilyData* cfd, size_t path_id) const;

1278
  Status CloseHelper();
1279

1280 1281
  void WaitForBackgroundWork();

J
jorlow@chromium.org 已提交
1282
  // table_cache_ provides its own synchronization
I
Igor Canadi 已提交
1283
  std::shared_ptr<Cache> table_cache_;
J
jorlow@chromium.org 已提交
1284

1285
  // Lock over the persistent DB state.  Non-nullptr iff successfully acquired.
J
jorlow@chromium.org 已提交
1286 1287
  FileLock* db_lock_;

1288 1289
  // In addition to mutex_, log_write_mutex_ protected writes to stats_history_
  InstrumentedMutex stats_history_mutex_;
1290
  // In addition to mutex_, log_write_mutex_ protected writes to logs_ and
1291
  // logfile_number_. With two_write_queues it also protects alive_log_files_,
1292 1293
  // and log_empty_. Refer to the definition of each variable below for more
  // details.
1294
  InstrumentedMutex log_write_mutex_;
J
jorlow@chromium.org 已提交
1295
  // State below is protected by mutex_
1296
  // With two_write_queues enabled, some of the variables that accessed during
1297 1298 1299
  // WriteToWAL need different synchronization: log_empty_, alive_log_files_,
  // logs_, logfile_number_. Refer to the definition of each variable below for
  // more description.
1300
  mutable InstrumentedMutex mutex_;
1301

I
Igor Canadi 已提交
1302
  std::atomic<bool> shutting_down_;
1303 1304
  // This condition variable is signaled on these conditions:
  // * whenever bg_compaction_scheduled_ goes down to 0
1305
  // * if AnyManualCompaction, whenever a compaction finishes, even if it hasn't
1306 1307
  // made any progress
  // * whenever a compaction made any progress
1308 1309 1310
  // * whenever bg_flush_scheduled_ or bg_purge_scheduled_ value decreases
  // (i.e. whenever a flush is done, even if it didn't make any progress)
  // * whenever there is an error in background purge, flush or compaction
1311
  // * whenever num_running_ingest_file_ goes to 0.
1312 1313
  // * whenever pending_purge_obsolete_files_ goes to 0.
  // * whenever disable_delete_obsolete_files_ goes to 0.
1314 1315
  // * whenever SetOptions successfully updates options.
  // * whenever a column family is dropped.
1316
  InstrumentedCondVar bg_cv_;
1317 1318 1319 1320
  // Writes are protected by locking both mutex_ and log_write_mutex_, and reads
  // must be under either mutex_ or log_write_mutex_. Since after ::Open,
  // logfile_number_ is currently updated only in write_thread_, it can be read
  // from the same write_thread_ without any locks.
1321
  uint64_t logfile_number_;
S
Sage Weil 已提交
1322
  std::deque<uint64_t>
1323
      log_recycle_files_;  // a list of log files that we can recycle
Z
ZhaoMing 已提交
1324 1325
  std::deque<std::unique_ptr<log::Writer>> log_writer_pool_;
  autovector<std::pair<ColumnFamilyData*, MemTableInfo>> memtable_info_queue_;
Z
ZhaoMing 已提交
1326 1327 1328 1329 1330 1331 1332
  enum LogWriterPoolFlags : uint8_t {
    kLogWriterPoolIdle = 0,
    kLogWriterPoolWorking = 1,
    kLogWriterPoolWaiting = 2,
    kLogWriterPoolError = 3,
  };
  uint8_t log_writer_pool_state_;
Z
ZhaoMing 已提交
1333
  bool memtable_info_queue_lock_;
1334
  bool log_dir_synced_;
1335
  // Without two_write_queues, read and writes to log_empty_ are protected by
1336 1337
  // mutex_. Since it is currently updated/read only in write_thread_, it can be
  // accessed from the same write_thread_ without any locks. With
1338
  // two_write_queues writes, where it can be updated in different threads,
1339 1340
  // read and writes are protected by log_write_mutex_ instead. This is to avoid
  // expesnive mutex_ lock during WAL write, which update log_empty_.
I
Igor Canadi 已提交
1341
  bool log_empty_;
1342
  ColumnFamilyHandleImpl* default_cf_handle_;
1343
  InternalStats* default_cf_internal_stats_;
1344
  std::unique_ptr<ColumnFamilyMemTablesImpl> column_family_memtables_;
I
Igor Canadi 已提交
1345
  // Bookkeeping for one live WAL file: the file number plus the number of
  // bytes appended to it so far.
  struct LogFileNumberSize {
    explicit LogFileNumberSize(uint64_t _number) : number(_number) {}
    // Account for `new_size` additional bytes appended to this log.
    void AddSize(uint64_t new_size) { size += new_size; }
    uint64_t number;
    // Total bytes written to this log so far.
    uint64_t size = 0;
    // Set once the memtables backed by this log are being flushed, after
    // which the log becomes eligible for release.
    bool getting_flushed = false;
  };
  struct LogWriterNumber {
1353 1354 1355 1356 1357 1358 1359 1360 1361
    // pass ownership of _writer
    LogWriterNumber(uint64_t _number, log::Writer* _writer)
        : number(_number), writer(_writer) {}

    log::Writer* ReleaseWriter() {
      auto* w = writer;
      writer = nullptr;
      return w;
    }
1362 1363
    Status ClearWriter() {
      Status s = writer->WriteBuffer();
1364 1365
      delete writer;
      writer = nullptr;
1366
      return s;
1367 1368
    }

1369
    uint64_t number;
1370
    // Visual Studio doesn't support deque's member to be noncopyable because
1371
    // of a std::unique_ptr as a member.
1372
    log::Writer* writer;  // own
1373 1374
    // true for some prefix of logs_
    bool getting_synced = false;
I
Igor Canadi 已提交
1375
  };
Z
Zhongyi Xie 已提交
1376 1377 1378

  ColumnFamilyHandleImpl* persist_stats_cf_handle_;
  bool persistent_stats_cfd_exists_ = true;
1379
  // Without two_write_queues, read and writes to alive_log_files_ are
1380 1381
  // protected by mutex_. However since back() is never popped, and push_back()
  // is done only from write_thread_, the same thread can access the item
1382
  // reffered by back() without mutex_. With two_write_queues_, writes
1383 1384
  // are protected by locking both mutex_ and log_write_mutex_, and reads must
  // be under either mutex_ or log_write_mutex_.
I
Igor Canadi 已提交
1385
  std::deque<LogFileNumberSize> alive_log_files_;
1386 1387
  // Log files that aren't fully synced, and the current log file.
  // Synchronization:
1388 1389 1390 1391 1392
  //  - push_back() is done from write_thread_ with locked mutex_ and
  //  log_write_mutex_
  //  - pop_front() is done from any thread with locked mutex_ and
  //  log_write_mutex_
  //  - reads are done with either locked mutex_ or log_write_mutex_
1393
  //  - back() and items with getting_synced=true are not popped,
1394 1395 1396 1397 1398
  //  - The same thread that sets getting_synced=true will reset it.
  //  - it follows that the object referred by back() can be safely read from
  //  the write_thread_ without using mutex
  //  - it follows that the items with getting_synced=true can be safely read
  //  from the same thread that has set getting_synced=true
1399 1400 1401
  std::deque<LogWriterNumber> logs_;
  // Signaled when getting_synced becomes false for some of the logs_.
  InstrumentedCondVar log_sync_cv_;
1402 1403 1404 1405 1406
  // This is the app-level state that is written to the WAL but will be used
  // only during recovery. Using this feature enables not writing the state to
  // memtable on normal writes and hence improving the throughput. Each new
  // write of the state will replace the previous state entirely even if the
  // keys in the two consecuitive states do not overlap.
1407
  // It is protected by log_write_mutex_ when two_write_queues_ is enabled.
1408 1409 1410
  // Otherwise only the heaad of write_thread_ can access it.
  WriteBatch cached_recoverable_state_;
  std::atomic<bool> cached_recoverable_state_empty_ = {true};
A
Aaron Gao 已提交
1411
  std::atomic<uint64_t> total_log_size_;
I
Igor Canadi 已提交
1412 1413 1414
  // only used for dynamically adjusting max_total_wal_size. it is a sum of
  // [write_buffer_size * max_write_buffer_number] over all column families
  uint64_t max_total_in_memory_state_;
1415 1416 1417
  // If true, we have only one (default) column family. We use this to optimize
  // some code-paths
  bool single_column_family_mode_;
1418 1419 1420
  // If this is non-empty, we need to delete these log files in background
  // threads. Protected by db mutex.
  autovector<log::Writer*> logs_to_free_;
I
Igor Canadi 已提交
1421

S
sdong 已提交
1422 1423
  bool is_snapshot_supported_;

1424 1425 1426 1427 1428 1429
  std::map<uint64_t, std::map<std::string, uint64_t>> stats_history_;

  std::map<std::string, uint64_t> stats_slice_;

  bool stats_slice_initialized_ = false;

1430 1431 1432 1433 1434 1435 1436
  // Class to maintain directory handles for all database paths other than the
  // main one: the db dir itself, the (optional) dedicated WAL dir, and any
  // extra data paths.
  class Directories {
   public:
    // Open directory handles for dbname, wal_dir and every entry of
    // data_paths using the given Env.
    Status SetDirectories(Env* env, const std::string& dbname,
                          const std::string& wal_dir,
                          const std::vector<DbPath>& data_paths);

    Directory* GetDataDir(size_t path_id) const;

    // Falls back to the db directory when no dedicated WAL dir was opened.
    Directory* GetWalDir() { return wal_dir_ ? wal_dir_.get() : db_dir_.get(); }

    Directory* GetDbDir() { return db_dir_.get(); }

   private:
    std::unique_ptr<Directory> db_dir_;
    std::vector<std::unique_ptr<Directory>> data_dirs_;
    std::unique_ptr<Directory> wal_dir_;
  };

  Directories directories_;
1455

1456
  WriteBufferManager* write_buffer_manager_;
1457

I
Igor Canadi 已提交
1458
  WriteThread write_thread_;
1459
  WriteBatch tmp_batch_;
1460 1461 1462
  // The write thread when the writers have no memtable write. This will be used
  // in 2PC to batch the prepares separately from the serial commit.
  WriteThread nonmem_write_thread_;
1463

1464
  WriteController write_controller_;
S
sdong 已提交
1465

1466
  std::unique_ptr<RateLimiter> low_pri_write_rate_limiter_;
1467

S
sdong 已提交
1468 1469
  // Size of the last batch group. In slowdown mode, next write needs to
  // sleep if it uses up the quota.
1470 1471
  // Note: This is to protect memtable and compaction. If the batch only writes
  // to the WAL its size need not to be included in this.
S
sdong 已提交
1472 1473
  uint64_t last_batch_group_size_;

I
Igor Canadi 已提交
1474
  FlushScheduler flush_scheduler_;
1475

J
jorlow@chromium.org 已提交
1476 1477
  SnapshotList snapshots_;

I
Igor Canadi 已提交
1478 1479 1480 1481 1482 1483 1484 1485 1486 1487
  // For each background job, pending_outputs_ keeps the current file number at
  // the time that background job started.
  // FindObsoleteFiles()/PurgeObsoleteFiles() never deletes any file that has
  // number bigger than any of the file number in pending_outputs_. Since file
  // numbers grow monotonically, this also means that pending_outputs_ is always
  // sorted. After a background job is done executing, its file number is
  // deleted from pending_outputs_, which allows PurgeObsoleteFiles() to clean
  // it up.
  // State is protected with db mutex.
  std::list<uint64_t> pending_outputs_;
J
jorlow@chromium.org 已提交
1488

1489 1490 1491 1492
  // PurgeFileInfo is a structure to hold information of files to be deleted in
  // purge_queue_
  struct PurgeFileInfo {
    std::string fname;
1493
    std::string dir_to_sync;
1494 1495 1496
    FileType type;
    uint64_t number;
    int job_id;
1497 1498 1499
    PurgeFileInfo(std::string fn, std::string d, FileType t, uint64_t num,
                  int jid)
        : fname(fn), dir_to_sync(d), type(t), number(num), job_id(jid) {}
1500 1501
  };

1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521
  // flush_queue_ and compaction_queue_ hold column families that we need to
  // flush and compact, respectively.
  // A column family is inserted into flush_queue_ when it satisfies condition
  // cfd->imm()->IsFlushPending()
  // A column family is inserted into compaction_queue_ when it satisfied
  // condition cfd->NeedsCompaction()
  // Column families in this list are all Ref()-erenced
  // TODO(icanadi) Provide some kind of ReferencedColumnFamily class that will
  // do RAII on ColumnFamilyData
  // Column families are in this queue when they need to be flushed or
  // compacted. Consumers of these queues are flush and compaction threads. When
  // column family is put on this queue, we increase unscheduled_flushes_ and
  // unscheduled_compactions_. When these variables are bigger than zero, that
  // means we need to schedule background threads for compaction and thread.
  // Once the background threads are scheduled, we decrease unscheduled_flushes_
  // and unscheduled_compactions_. That way we keep track of number of
  // compaction and flush threads we need to schedule. This scheduling is done
  // in MaybeScheduleFlushOrCompaction()
  // invariant(column family present in flush_queue_ <==>
  // ColumnFamilyData::pending_flush_ == true)
1522
  std::deque<FlushRequest> flush_queue_;
1523 1524 1525
  // invariant(column family present in compaction_queue_ <==>
  // ColumnFamilyData::pending_compaction_ == true)
  std::deque<ColumnFamilyData*> compaction_queue_;
C
chenchanglong 已提交
1526
  std::deque<ColumnFamilyData*> garbage_collection_queue_;
1527 1528 1529

  // A queue to store filenames of the files to be purged
  std::deque<PurgeFileInfo> purge_queue_;
1530

Z
ZhaoMing 已提交
1531
  // A pointer to the file numbers that have been assigned to certain
Z
ZhaoMing 已提交
1532
  // JobContext. Current implementation tracks SST, WAL & MANIFEST files.
Z
ZhaoMing 已提交
1533
  std::unordered_set<const std::vector<uint64_t>*> files_grabbed_for_purge_;
Z
ZhaoMing 已提交
1534 1535 1536

  // when doing the full scan, we need to know which elements removed from
  // `purge_queue_` and `files_grabbed_for_purge_`
Z
ZhaoMing 已提交
1537
  std::list<std::vector<uint64_t>*> candidate_file_listener_;
1538

1539 1540 1541
  // A queue to store superversions to delete
  std::deque<SuperVersion*> superversion_to_free_queue_;

1542 1543
  // A queue to store log writers to close
  std::deque<log::Writer*> logs_to_free_queue_;
1544

1545 1546
  int unscheduled_flushes_;
  int unscheduled_compactions_;
C
chenchanglong 已提交
1547
  int unscheduled_garbage_collections_;
1548

1549 1550 1551 1552
  // count how many background compactions are running or have been scheduled in
  // the BOTTOM pool
  int bg_bottom_compaction_scheduled_;

1553
  // count how many background compactions are running or have been scheduled
1554
  int bg_compaction_scheduled_;
J
jorlow@chromium.org 已提交
1555

赵明 已提交
1556 1557
  // count how many background garbage collections are running or have been
  // scheduled
C
temp  
chenchanglong 已提交
1558 1559
  int bg_garbage_collection_scheduled_;

1560 1561 1562
  // stores the number of compactions are currently running
  int num_running_compactions_;

C
temp  
chenchanglong 已提交
1563 1564 1565
  // stores the number of garbage collections are currently running
  int num_running_garbage_collections_;

1566 1567 1568
  // number of background memtable flush jobs, submitted to the HIGH pool
  int bg_flush_scheduled_;

1569 1570 1571
  // stores the number of flushes are currently running
  int num_running_flushes_;

1572 1573 1574
  // number of background obsolete file purge jobs, submitted to the HIGH pool
  int bg_purge_scheduled_;

H
hans@chromium.org 已提交
1575
  // Information for a manual compaction
1576
  struct ManualCompactionState {
I
Igor Canadi 已提交
1577
    ColumnFamilyData* cfd;
1578 1579
    int input_level;
    int output_level;
1580
    uint32_t output_path_id;
L
Lei Jin 已提交
1581
    Status status;
1582
    bool done;
Z
ZhaoMing 已提交
1583 1584 1585 1586 1587 1588 1589 1590 1591
    bool in_progress;            // compaction request being processed?
    bool incomplete;             // only part of requested range compacted
    bool exclusive;              // current behavior of only one manual
    bool disallow_trivial_move;  // Force actual compaction to run
    const InternalKey* begin;    // nullptr means beginning of key range
    const InternalKey* end;      // nullptr means end of key range
    InternalKey* manual_end;     // how far we are compacting
    InternalKey tmp_storage;     // Used to keep track of compaction progress
    InternalKey tmp_storage1;    // Used to keep track of compaction progress
1592 1593 1594
  };
  // A compaction that was already chosen before the background thread runs.
  struct PrepickedCompaction {
    // Ownership of `compaction` transfers to the background compaction job.
    PrepickedCompaction* _unused_tag_ = nullptr;  // (removed) -- see below
  };
1600
  std::deque<ManualCompactionState*> manual_compaction_dequeue_;
1601 1602

  struct CompactionArg {
1603
    // caller retains ownership of `db`.
1604
    DBImpl* db;
1605 1606
    // background compaction takes ownership of `prepicked_compaction`.
    PrepickedCompaction* prepicked_compaction;
H
hans@chromium.org 已提交
1607
  };
J
jorlow@chromium.org 已提交
1608

1609
  // shall we disable deletion of obsolete files
1610 1611 1612 1613 1614 1615
  // if 0 the deletion is enabled.
  // if non-zero, files will not be getting deleted
  // This enables two different threads to call
  // EnableFileDeletions() and DisableFileDeletions()
  // without any synchronization
  int disable_delete_obsolete_files_;
1616

1617 1618 1619 1620
  // Number of times FindObsoleteFiles has found deletable files and the
  // corresponding call to PurgeObsoleteFiles has not yet finished.
  int pending_purge_obsolete_files_;

1621 1622 1623
  // last time when DeleteObsoleteFiles with full scan was executed. Originaly
  // initialized with startup time.
  uint64_t delete_obsolete_files_last_run_;
1624

1625
  // last time stats were dumped to LOG
H
Haobo Xu 已提交
1626
  std::atomic<uint64_t> last_stats_dump_time_microsec_;
1627

1628 1629 1630 1631
  // Each flush or compaction gets its own job id. this counter makes sure
  // they're unique
  std::atomic<int> next_job_id_;

1632 1633 1634
  // A flag indicating whether the current rocksdb database has any
  // data that is not yet persisted into either WAL or SST file.
  // Used when disableWAL is true.
1635
  std::atomic<bool> has_unpersisted_data_;
1636

Z
ZhaoMing 已提交
1637 1638 1639
  // full scan running lock
  bool delete_obsolete_files_lock_;

1640 1641 1642 1643 1644 1645
  // if an attempt was made to flush all column families that
  // the oldest log depends on but uncommited data in the oldest
  // log prevents the log from being released.
  // We must attempt to free the dependent memtables again
  // at a later time after the transaction in the oldest
  // log is fully commited.
S
Siying Dong 已提交
1646
  bool unable_to_release_oldest_log_;
1647

H
heyongqiang 已提交
1648
  static const int KEEP_LOG_FILE_NUM = 1000;
D
Dmitri Smirnov 已提交
1649
  // MSVC version 1800 still does not have constexpr for ::max()
1650
  static const uint64_t kNoTimeOut = port::kMaxUint64;
D
Dmitri Smirnov 已提交
1651

H
heyongqiang 已提交
1652
  std::string db_absolute_path_;
H
heyongqiang 已提交
1653

1654
  // The options to access storage files
L
Lei Jin 已提交
1655
  const EnvOptions env_options_;
1656

1657 1658 1659
  // Additonal options for compaction and flush
  EnvOptions env_options_for_compaction_;

1660
  // Number of running IngestExternalFile() calls.
1661
  // REQUIRES: mutex held
1662
  int num_running_ingest_file_;
1663

I
Igor Canadi 已提交
1664 1665 1666 1667
#ifndef ROCKSDB_LITE
  WalManager wal_manager_;
#endif  // ROCKSDB_LITE

I
Igor Canadi 已提交
1668 1669 1670
  // Unified interface for logging events
  EventLogger event_logger_;

1671
  // A value of > 0 temporarily disables scheduling of background work
1672
  int bg_work_paused_;
1673

1674 1675 1676
  // A value of > 0 temporarily disables scheduling of background compaction
  int bg_compaction_paused_;

1677 1678 1679
  // Guard against multiple concurrent refitting
  bool refitting_level_;

1680 1681 1682
  // Indicate DB was opened successfully
  bool opened_successfully_;

S
Siying Dong 已提交
1683
  LogsWithPrepTracker logs_with_prep_tracker_;
1684

Y
Yi Wu 已提交
1685 1686 1687 1688
  // Callback for compaction to check if a key is visible to a snapshot.
  // REQUIRES: mutex held
  std::unique_ptr<SnapshotChecker> snapshot_checker_;

1689 1690 1691 1692
  // Callback for when the cached_recoverable_state_ is written to memtable
  // Only to be set during initialization
  std::unique_ptr<PreReleaseCallback> recoverable_state_pre_release_callback_;

J
jorlow@chromium.org 已提交
1693 1694 1695 1696
  // No copying allowed
  DBImpl(const DBImpl&);
  void operator=(const DBImpl&);

I
Igor Canadi 已提交
1697
  // Background threads call this function, which is just a wrapper around
I
Igor Canadi 已提交
1698
  // the InstallSuperVersion() function. Background threads carry
1699
  // sv_context which can have new_superversion already
I
Igor Canadi 已提交
1700
  // allocated.
1701 1702 1703
  // All ColumnFamily state changes go through this function. Here we analyze
  // the new state and we schedule background work if we detect that the new
  // state needs flush or compaction.
1704 1705
  void InstallSuperVersionAndScheduleWork(
      ColumnFamilyData* cfd, SuperVersionContext* sv_context,
Z
Zhongyi Xie 已提交
1706 1707
      const MutableCFOptions& mutable_cf_options,
      FlushReason flush_reason = FlushReason::kOthers);
I
Igor Canadi 已提交
1708

I
Igor Canadi 已提交
1709
#ifndef ROCKSDB_LITE
I
Igor Canadi 已提交
1710
  using DB::GetPropertiesOfAllTables;
赵明 已提交
1711 1712 1713
  virtual Status GetPropertiesOfAllTables(
      ColumnFamilyHandle* column_family,
      TablePropertiesCollection* props) override;
1714
  virtual Status GetPropertiesOfTablesInRange(
1715
      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
1716
      TablePropertiesCollection* props, bool include_blob = true) override;
1717

I
Igor Canadi 已提交
1718
#endif  // ROCKSDB_LITE
1719

1720
  bool GetIntPropertyInternal(ColumnFamilyData* cfd,
1721 1722
                              const DBPropertyInfo& property_info,
                              bool is_locked, uint64_t* value);
1723
  bool GetPropertyHandleOptionsStatistics(std::string* value);
1724 1725 1726

  bool HasPendingManualCompaction();
  bool HasExclusiveManualCompaction();
1727 1728 1729
  void AddManualCompaction(ManualCompactionState* m);
  void RemoveManualCompaction(ManualCompactionState* m);
  bool ShouldntRunManualCompaction(ManualCompactionState* m);
1730
  bool HaveManualCompaction(ColumnFamilyData* cfd);
1731
  bool MCOverlap(ManualCompactionState* m, ManualCompactionState* m1);
1732

1733
  size_t GetWalPreallocateBlockSize(uint64_t write_buffer_size) const;
赵明 已提交
1734
  // Write-lifetime hint handed to the Env for WAL files; WALs are expected to
  // be short-lived, hence WLTH_SHORT.
  Env::WriteLifeTimeHint CalculateWALWriteHint() { return Env::WLTH_SHORT; }
W
wangyi.ywq 已提交
1735 1736 1737 1738 1739 1740
#ifndef ROCKSDB_LITE
  // Scheduler to run DumpStats() and PersistStats(). Currently, it always use
  // a global instance from StatsDumpScheduler::Default(). Only in unittest, it
  // can be overrided by StatsDumpTestSchduler.
  StatsDumpScheduler* stats_dump_scheduler_;
#endif
1741

1742 1743
  // When set, we use a separate queue for writes that dont write to memtable.
  // In 2PC these are the writes at Prepare phase.
1744
  const bool two_write_queues_;
1745
  const bool manual_wal_flush_;
1746 1747 1748 1749 1750 1751
  // Increase the sequence number after writing each batch, whether memtable is
  // disabled for that or not. Otherwise the sequence number is increased after
  // writing each key into memtable. This implies that when disable_memtable is
  // set, the seq is not increased at all.
  //
  // Default: false
1752
  const bool seq_per_batch_;
1753 1754 1755 1756 1757 1758 1759
  // This determines during recovery whether we expect one writebatch per
  // recovered transaction, or potentially multiple writebatches per
  // transaction. For WriteUnprepared, this is set to false, since multiple
  // batches can exist per transaction.
  //
  // Default: true
  const bool batch_per_txn_;
1760 1761 1762
  // LastSequence also indicates last published sequence visibile to the
  // readers. Otherwise LastPublishedSequence should be used.
  const bool last_seq_same_as_publish_seq_;
1763 1764 1765
  // It indicates that a customized gc algorithm must be used for
  // flush/compaction and if it is not provided vis SnapshotChecker, we should
  // disable gc to be safe.
1766
  const bool use_custom_gc_;
1767 1768 1769 1770 1771 1772
  // Flag to indicate that the DB instance shutdown has been initiated. This
  // different from shutting_down_ atomic in that it is set at the beginning
  // of shutdown sequence, specifically in order to prevent any background
  // error recovery from going on in parallel. The latter, shutting_down_,
  // is set a little later during the shutdown after scheduling memtable
  // flushes
1773
  std::atomic<bool> shutdown_initiated_;
1774 1775 1776
  // Flag to indicate whether sst_file_manager object was allocated in
  // DB::Open() or passed to us
  bool own_sfm_;
1777 1778 1779 1780 1781 1782 1783

  // Clients must periodically call SetPreserveDeletesSequenceNumber()
  // to advance this seqnum. Default value is 0 which means ALL deletes are
  // preserved. Note that this has no effect if DBOptions.preserve_deletes
  // is set to false.
  std::atomic<SequenceNumber> preserve_deletes_seqnum_;
  const bool preserve_deletes_;
1784 1785 1786

  // Flag to check whether Close() has been called on this DB
  bool closed_;
1787 1788

  ErrorHandler error_handler_;
1789

1790 1791 1792 1793 1794 1795 1796 1797 1798 1799
  // Conditional variable to coordinate installation of atomic flush results.
  // With atomic flush, each bg thread installs the result of flushing multiple
  // column families, and different threads can flush different column
  // families. It's difficult to rely on one thread to perform batch
  // installation for all threads. This is different from the non-atomic flush
  // case.
  // atomic_flush_install_cv_ makes sure that threads install atomic flush
  // results sequentially. Flush results of memtables with lower IDs get
  // installed to MANIFEST first.
  InstrumentedCondVar atomic_flush_install_cv_;
林源劲 已提交
1800 1801

  std::string bytedance_tags_;
赵明 已提交
1802
  std::shared_ptr<MetricsReporterFactory> metrics_reporter_factory_;
L
linyuanjin 已提交
1803
  cheapis::ServerRunner console_runner_;
赵明 已提交
1804

L
linyuanjin 已提交
1805 1806 1807 1808 1809 1810 1811
  QPSReporter write_qps_reporter_;
  QPSReporter read_qps_reporter_;
  QPSReporter newiterator_qps_reporter_;
  QPSReporter seek_qps_reporter_;
  QPSReporter next_qps_reporter_;
  QPSReporter seekforprev_qps_reporter_;
  QPSReporter prev_qps_reporter_;
赵明 已提交
1812 1813 1814 1815 1816 1817 1818 1819 1820 1821

  LatencyReporter write_latency_reporter_;
  LatencyReporter read_latency_reporter_;
  LatencyReporter newiterator_latency_reporter_;
  LatencyReporter seek_latency_reporter_;
  LatencyReporter next_latency_reporter_;
  LatencyReporter seekforprev_latency_reporter_;
  LatencyReporter prev_latency_reporter_;

  ThroughputReporter write_throughput_reporter_;
J
jorlow@chromium.org 已提交
1822 1823
};

赵明 已提交
1824
extern Options SanitizeOptions(const std::string& db, const Options& src);
1825

1826
extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
S
Siying Dong 已提交
1827

S
Siying Dong 已提交
1828 1829 1830
extern CompressionType GetCompressionFlush(
    const ImmutableCFOptions& ioptions,
    const MutableCFOptions& mutable_cf_options);
1831

S
Siying Dong 已提交
1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850
// Return the earliest log file to keep after the memtable flush is
// finalized.
// `cfd_to_flush` is the column family whose memtable (specified in
// `memtables_to_flush`) will be flushed and thus will not depend on any WAL
// file.
// The function is only applicable to 2pc mode.
extern uint64_t PrecomputeMinLogNumberToKeep(
    VersionSet* vset, const ColumnFamilyData& cfd_to_flush,
    autovector<VersionEdit*> edit_list,
    const autovector<MemTable*>& memtables_to_flush,
    LogsWithPrepTracker* prep_tracker);

// `cfd_to_flush` is the column family whose memtable will be flushed and thus
// will not depend on any WAL file. nullptr means no memtable is being flushed.
// The function is only applicable to 2pc mode.
extern uint64_t FindMinPrepLogReferencedByMemTable(
    VersionSet* vset, const ColumnFamilyData* cfd_to_flush,
    const autovector<MemTable*>& memtables_to_flush);

M
miguelportilla 已提交
1851 1852 1853 1854 1855 1856 1857
// Fix user-supplied options to be reasonable: clamp *ptr into the closed
// range [minvalue, maxvalue]. Comparisons are carried out in type V, and the
// value is re-read between the two checks (so with inverted bounds,
// minvalue wins, exactly as before).
template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (maxvalue < static_cast<V>(*ptr)) {
    *ptr = maxvalue;
  }
  if (minvalue > static_cast<V>(*ptr)) {
    *ptr = minvalue;
  }
}

1858
}  // namespace TERARKDB_NAMESPACE