version_set.cc 212.9 KB
Newer Older
1
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
S
Siying Dong 已提交
2 3 4
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
5
//
J
jorlow@chromium.org 已提交
6 7 8 9 10
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "db/version_set.h"

#include <algorithm>
#include <array>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <list>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

#include "compaction/compaction.h"
#include "db/blob/blob_fetcher.h"
#include "db/blob/blob_file_cache.h"
#include "db/blob/blob_file_reader.h"
#include "db/blob/blob_index.h"
#include "db/internal_stats.h"
#include "db/log_reader.h"
#include "db/log_writer.h"
#include "db/memtable.h"
#include "db/merge_context.h"
#include "db/merge_helper.h"
#include "db/pinned_iterators_manager.h"
#include "db/table_cache.h"
#include "db/version_builder.h"
#include "db/version_edit_handler.h"
#include "file/filename.h"
#include "file/random_access_file_reader.h"
#include "file/read_write_util.h"
#include "file/writable_file_writer.h"
#include "monitoring/file_read_sample.h"
#include "monitoring/perf_context_imp.h"
#include "monitoring/persistent_stats_history.h"
#include "options/options_helper.h"
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/merging_iterator.h"
#include "table/meta_blocks.h"
#include "table/multiget_context.h"
#include "table/plain/plain_table_factory.h"
#include "table/table_reader.h"
#include "table/two_level_iterator.h"
#include "test_util/sync_point.h"
#include "util/cast_util.h"
#include "util/coding.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/user_comparator_wrapper.h"

65
namespace ROCKSDB_NAMESPACE {
J
jorlow@chromium.org 已提交
66

67 68
namespace {

69
// Find File in LevelFilesBrief data structure
70 71
// Within an index range defined by left and right
int FindFileInRange(const InternalKeyComparator& icmp,
72
    const LevelFilesBrief& file_level,
73 74 75
    const Slice& key,
    uint32_t left,
    uint32_t right) {
76 77 78 79 80 81
  auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool {
    return icmp.InternalKeyComparator::Compare(f.largest_key, k) < 0;
  };
  const auto &b = file_level.files;
  return static_cast<int>(std::lower_bound(b + left,
                                           b + right, key, cmp) - b);
82 83
}

84 85 86 87 88 89 90 91 92 93 94 95 96 97 98
Status OverlapWithIterator(const Comparator* ucmp,
    const Slice& smallest_user_key,
    const Slice& largest_user_key,
    InternalIterator* iter,
    bool* overlap) {
  InternalKey range_start(smallest_user_key, kMaxSequenceNumber,
                          kValueTypeForSeek);
  iter->Seek(range_start.Encode());
  if (!iter->status().ok()) {
    return iter->status();
  }

  *overlap = false;
  if (iter->Valid()) {
    ParsedInternalKey seek_result;
99 100
    Status s = ParseInternalKey(iter->key(), &seek_result,
                                false /* log_err_key */);  // TODO
101
    if (!s.ok()) return s;
102

103 104
    if (ucmp->CompareWithoutTimestamp(seek_result.user_key, largest_user_key) <=
        0) {
105 106 107 108 109 110 111
      *overlap = true;
    }
  }

  return iter->status();
}

112 113 114 115 116 117 118 119
// Class to help choose the next file to search for the particular key.
// Searches and returns files level by level.
// We can search level-by-level since entries never hop across
// levels. Therefore we are guaranteed that if we find data
// in a smaller level, later levels are irrelevant (unless we
// are MergeInProgress).
class FilePicker {
 public:
120 121 122 123 124
  FilePicker(std::vector<FileMetaData*>* files, const Slice& user_key,
             const Slice& ikey, autovector<LevelFilesBrief>* file_levels,
             unsigned int num_levels, FileIndexer* file_indexer,
             const Comparator* user_comparator,
             const InternalKeyComparator* internal_comparator)
125
      : num_levels_(num_levels),
126
        curr_level_(static_cast<unsigned int>(-1)),
127
        returned_file_level_(static_cast<unsigned int>(-1)),
128
        hit_file_level_(static_cast<unsigned int>(-1)),
129 130
        search_left_bound_(0),
        search_right_bound_(FileIndexer::kLevelMaxIndex),
131
#ifndef NDEBUG
132
        files_(files),
133
#endif
134
        level_files_brief_(file_levels),
135
        is_hit_file_last_in_level_(false),
136
        curr_file_level_(nullptr),
137 138 139 140 141
        user_key_(user_key),
        ikey_(ikey),
        file_indexer_(file_indexer),
        user_comparator_(user_comparator),
        internal_comparator_(internal_comparator) {
142 143 144
#ifdef NDEBUG
    (void)files;
#endif
145 146 147 148
    // Setup member variables to search first level.
    search_ended_ = !PrepareNextLevel();
    if (!search_ended_) {
      // Prefetch Level 0 table data to avoid cache miss if possible.
149 150
      for (unsigned int i = 0; i < (*level_files_brief_)[0].num_files; ++i) {
        auto* r = (*level_files_brief_)[0].files[i].fd.table_reader;
151 152 153 154 155 156 157
        if (r) {
          r->Prepare(ikey);
        }
      }
    }
  }

158
  int GetCurrentLevel() const { return curr_level_; }
159

160 161 162 163 164
  FdWithKeyRange* GetNextFile() {
    while (!search_ended_) {  // Loops over different levels.
      while (curr_index_in_curr_level_ < curr_file_level_->num_files) {
        // Loops over all files in current level.
        FdWithKeyRange* f = &curr_file_level_->files[curr_index_in_curr_level_];
165
        hit_file_level_ = curr_level_;
166 167
        is_hit_file_last_in_level_ =
            curr_index_in_curr_level_ == curr_file_level_->num_files - 1;
168 169 170 171
        int cmp_largest = -1;

        // Do key range filtering of files or/and fractional cascading if:
        // (1) not all the files are in level 0, or
赵星宇 已提交
172 173
        // (2) there are more than 3 current level files
        // If there are only 3 or less current level files in the system, we skip
174 175 176 177 178 179 180 181
        // the key range filtering. In this case, more likely, the system is
        // highly tuned to minimize number of tables queried by each query,
        // so it is unlikely that key range filtering is more efficient than
        // querying the files.
        if (num_levels_ > 1 || curr_file_level_->num_files > 3) {
          // Check if key is within a file's range. If search left bound and
          // right bound point to the same find, we are sure key falls in
          // range.
182 183 184 185 186 187 188
          assert(curr_level_ == 0 ||
                 curr_index_in_curr_level_ == start_index_in_curr_level_ ||
                 user_comparator_->CompareWithoutTimestamp(
                     user_key_, ExtractUserKey(f->smallest_key)) <= 0);

          int cmp_smallest = user_comparator_->CompareWithoutTimestamp(
              user_key_, ExtractUserKey(f->smallest_key));
189
          if (cmp_smallest >= 0) {
190 191
            cmp_largest = user_comparator_->CompareWithoutTimestamp(
                user_key_, ExtractUserKey(f->largest_key));
192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230
          }

          // Setup file search bound for the next level based on the
          // comparison results
          if (curr_level_ > 0) {
            file_indexer_->GetNextLevelIndex(curr_level_,
                                            curr_index_in_curr_level_,
                                            cmp_smallest, cmp_largest,
                                            &search_left_bound_,
                                            &search_right_bound_);
          }
          // Key falls out of current file's range
          if (cmp_smallest < 0 || cmp_largest > 0) {
            if (curr_level_ == 0) {
              ++curr_index_in_curr_level_;
              continue;
            } else {
              // Search next level.
              break;
            }
          }
        }
#ifndef NDEBUG
        // Sanity check to make sure that the files are correctly sorted
        if (prev_file_) {
          if (curr_level_ != 0) {
            int comp_sign = internal_comparator_->Compare(
                prev_file_->largest_key, f->smallest_key);
            assert(comp_sign < 0);
          } else {
            // level == 0, the current file cannot be newer than the previous
            // one. Use compressed data structure, has no attribute seqNo
            assert(curr_index_in_curr_level_ > 0);
            assert(!NewestFirstBySeqNo(files_[0][curr_index_in_curr_level_],
                  files_[0][curr_index_in_curr_level_-1]));
          }
        }
        prev_file_ = f;
#endif
231
        returned_file_level_ = curr_level_;
232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
        if (curr_level_ > 0 && cmp_largest < 0) {
          // No more files to search in this level.
          search_ended_ = !PrepareNextLevel();
        } else {
          ++curr_index_in_curr_level_;
        }
        return f;
      }
      // Start searching next level.
      search_ended_ = !PrepareNextLevel();
    }
    // Search ended.
    return nullptr;
  }

247 248 249 250
  // getter for current file level
  // for GET_HIT_L0, GET_HIT_L1 & GET_HIT_L2_AND_UP counts
  unsigned int GetHitFileLevel() { return hit_file_level_; }

251 252 253 254
  // Returns true if the most recent "hit file" (i.e., one returned by
  // GetNextFile()) is at the last index in its level.
  bool IsHitFileLastInLevel() { return is_hit_file_last_in_level_; }

255 256 257
 private:
  unsigned int num_levels_;
  unsigned int curr_level_;
258
  unsigned int returned_file_level_;
259
  unsigned int hit_file_level_;
260 261
  int32_t search_left_bound_;
  int32_t search_right_bound_;
262
#ifndef NDEBUG
263
  std::vector<FileMetaData*>* files_;
264
#endif
265
  autovector<LevelFilesBrief>* level_files_brief_;
266
  bool search_ended_;
267
  bool is_hit_file_last_in_level_;
268
  LevelFilesBrief* curr_file_level_;
269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284
  unsigned int curr_index_in_curr_level_;
  unsigned int start_index_in_curr_level_;
  Slice user_key_;
  Slice ikey_;
  FileIndexer* file_indexer_;
  const Comparator* user_comparator_;
  const InternalKeyComparator* internal_comparator_;
#ifndef NDEBUG
  FdWithKeyRange* prev_file_;
#endif

  // Setup local variables to search next level.
  // Returns false if there are no more levels to search.
  bool PrepareNextLevel() {
    curr_level_++;
    while (curr_level_ < num_levels_) {
285
      curr_file_level_ = &(*level_files_brief_)[curr_level_];
286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313
      if (curr_file_level_->num_files == 0) {
        // When current level is empty, the search bound generated from upper
        // level must be [0, -1] or [0, FileIndexer::kLevelMaxIndex] if it is
        // also empty.
        assert(search_left_bound_ == 0);
        assert(search_right_bound_ == -1 ||
               search_right_bound_ == FileIndexer::kLevelMaxIndex);
        // Since current level is empty, it will need to search all files in
        // the next level
        search_left_bound_ = 0;
        search_right_bound_ = FileIndexer::kLevelMaxIndex;
        curr_level_++;
        continue;
      }

      // Some files may overlap each other. We find
      // all files that overlap user_key and process them in order from
      // newest to oldest. In the context of merge-operator, this can occur at
      // any level. Otherwise, it only occurs at Level-0 (since Put/Deletes
      // are always compacted into a single entry).
      int32_t start_index;
      if (curr_level_ == 0) {
        // On Level-0, we read through all files to check for overlap.
        start_index = 0;
      } else {
        // On Level-n (n>=1), files are sorted. Binary search to find the
        // earliest file whose largest key >= ikey. Search left bound and
        // right bound are used to narrow the range.
314
        if (search_left_bound_ <= search_right_bound_) {
315
          if (search_right_bound_ == FileIndexer::kLevelMaxIndex) {
316 317
            search_right_bound_ =
                static_cast<int32_t>(curr_file_level_->num_files) - 1;
318
          }
319 320 321 322
          // `search_right_bound_` is an inclusive upper-bound, but since it was
          // determined based on user key, it is still possible the lookup key
          // falls to the right of `search_right_bound_`'s corresponding file.
          // So, pass a limit one higher, which allows us to detect this case.
323 324 325
          start_index =
              FindFileInRange(*internal_comparator_, *curr_file_level_, ikey_,
                              static_cast<uint32_t>(search_left_bound_),
326 327 328 329 330 331 332 333 334 335
                              static_cast<uint32_t>(search_right_bound_) + 1);
          if (start_index == search_right_bound_ + 1) {
            // `ikey_` comes after `search_right_bound_`. The lookup key does
            // not exist on this level, so let's skip this level and do a full
            // binary search on the next level.
            search_left_bound_ = 0;
            search_right_bound_ = FileIndexer::kLevelMaxIndex;
            curr_level_++;
            continue;
          }
336 337
        } else {
          // search_left_bound > search_right_bound, key does not exist in
C
clark.kang 已提交
338
          // this level. Since no comparison is done in this level, it will
339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356
          // need to search all files in the next level.
          search_left_bound_ = 0;
          search_right_bound_ = FileIndexer::kLevelMaxIndex;
          curr_level_++;
          continue;
        }
      }
      start_index_in_curr_level_ = start_index;
      curr_index_in_curr_level_ = start_index;
#ifndef NDEBUG
      prev_file_ = nullptr;
#endif
      return true;
    }
    // curr_level_ = num_levels_. So, no more levels to search.
    return false;
  }
};
357 358 359 360 361 362

class FilePickerMultiGet {
 private:
  struct FilePickerContext;

 public:
363
  FilePickerMultiGet(MultiGetRange* range,
364 365 366 367 368 369 370 371 372 373 374
                     autovector<LevelFilesBrief>* file_levels,
                     unsigned int num_levels, FileIndexer* file_indexer,
                     const Comparator* user_comparator,
                     const InternalKeyComparator* internal_comparator)
      : num_levels_(num_levels),
        curr_level_(static_cast<unsigned int>(-1)),
        returned_file_level_(static_cast<unsigned int>(-1)),
        hit_file_level_(static_cast<unsigned int>(-1)),
        range_(range),
        batch_iter_(range->begin()),
        batch_iter_prev_(range->begin()),
375
        upper_key_(range->begin()),
376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
        maybe_repeat_key_(false),
        current_level_range_(*range, range->begin(), range->end()),
        current_file_range_(*range, range->begin(), range->end()),
        level_files_brief_(file_levels),
        is_hit_file_last_in_level_(false),
        curr_file_level_(nullptr),
        file_indexer_(file_indexer),
        user_comparator_(user_comparator),
        internal_comparator_(internal_comparator) {
    for (auto iter = range_->begin(); iter != range_->end(); ++iter) {
      fp_ctx_array_[iter.index()] =
          FilePickerContext(0, FileIndexer::kLevelMaxIndex);
    }

    // Setup member variables to search first level.
    search_ended_ = !PrepareNextLevel();
    if (!search_ended_) {
      // REVISIT
      // Prefetch Level 0 table data to avoid cache miss if possible.
      // As of now, only PlainTableReader and CuckooTableReader do any
      // prefetching. This may not be necessary anymore once we implement
      // batching in those table readers
      for (unsigned int i = 0; i < (*level_files_brief_)[0].num_files; ++i) {
        auto* r = (*level_files_brief_)[0].files[i].fd.table_reader;
        if (r) {
          for (auto iter = range_->begin(); iter != range_->end(); ++iter) {
            r->Prepare(iter->ikey);
          }
        }
      }
    }
  }

  int GetCurrentLevel() const { return curr_level_; }

  // Iterates through files in the current level until it finds a file that
412
  // contains at least one key from the MultiGet batch
413 414 415 416 417 418 419 420
  bool GetNextFileInLevelWithKeys(MultiGetRange* next_file_range,
                                  size_t* file_index, FdWithKeyRange** fd,
                                  bool* is_last_key_in_file) {
    size_t curr_file_index = *file_index;
    FdWithKeyRange* f = nullptr;
    bool file_hit = false;
    int cmp_largest = -1;
    if (curr_file_index >= curr_file_level_->num_files) {
A
anand76 已提交
421 422 423 424 425 426 427 428 429 430 431 432
      // In the unlikely case the next key is a duplicate of the current key,
      // and the current key is the last in the level and the internal key
      // was not found, we need to skip lookup for the remaining keys and
      // reset the search bounds
      if (batch_iter_ != current_level_range_.end()) {
        ++batch_iter_;
        for (; batch_iter_ != current_level_range_.end(); ++batch_iter_) {
          struct FilePickerContext& fp_ctx = fp_ctx_array_[batch_iter_.index()];
          fp_ctx.search_left_bound = 0;
          fp_ctx.search_right_bound = FileIndexer::kLevelMaxIndex;
        }
      }
433 434 435 436 437 438 439 440 441 442 443
      return false;
    }
    // Loops over keys in the MultiGet batch until it finds a file with
    // atleast one of the keys. Then it keeps moving forward until the
    // last key in the batch that falls in that file
    while (batch_iter_ != current_level_range_.end() &&
           (fp_ctx_array_[batch_iter_.index()].curr_index_in_curr_level ==
                curr_file_index ||
            !file_hit)) {
      struct FilePickerContext& fp_ctx = fp_ctx_array_[batch_iter_.index()];
      f = &curr_file_level_->files[fp_ctx.curr_index_in_curr_level];
444
      Slice& user_key = batch_iter_->ukey_without_ts;
445 446 447 448 449 450 451 452 453 454 455 456 457

      // Do key range filtering of files or/and fractional cascading if:
      // (1) not all the files are in level 0, or
      // (2) there are more than 3 current level files
      // If there are only 3 or less current level files in the system, we
      // skip the key range filtering. In this case, more likely, the system
      // is highly tuned to minimize number of tables queried by each query,
      // so it is unlikely that key range filtering is more efficient than
      // querying the files.
      if (num_levels_ > 1 || curr_file_level_->num_files > 3) {
        // Check if key is within a file's range. If search left bound and
        // right bound point to the same find, we are sure key falls in
        // range.
458 459 460
        int cmp_smallest = user_comparator_->CompareWithoutTimestamp(
            user_key, false, ExtractUserKey(f->smallest_key), true);

461 462 463
        assert(curr_level_ == 0 ||
               fp_ctx.curr_index_in_curr_level ==
                   fp_ctx.start_index_in_curr_level ||
464
               cmp_smallest <= 0);
465 466

        if (cmp_smallest >= 0) {
467 468
          cmp_largest = user_comparator_->CompareWithoutTimestamp(
              user_key, false, ExtractUserKey(f->largest_key), true);
469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491
        } else {
          cmp_largest = -1;
        }

        // Setup file search bound for the next level based on the
        // comparison results
        if (curr_level_ > 0) {
          file_indexer_->GetNextLevelIndex(
              curr_level_, fp_ctx.curr_index_in_curr_level, cmp_smallest,
              cmp_largest, &fp_ctx.search_left_bound,
              &fp_ctx.search_right_bound);
        }
        // Key falls out of current file's range
        if (cmp_smallest < 0 || cmp_largest > 0) {
          next_file_range->SkipKey(batch_iter_);
        } else {
          file_hit = true;
        }
      } else {
        file_hit = true;
      }
      if (cmp_largest == 0) {
        // cmp_largest is 0, which means the next key will not be in this
492 493 494 495 496 497 498 499 500
        // file, so stop looking further. However, its possible there are
        // duplicates in the batch, so find the upper bound for the batch
        // in this file (upper_key_) by skipping past the duplicates. We
        // leave batch_iter_ as is since we may have to pick up from there
        // for the next file, if this file has a merge value rather than
        // final value
        upper_key_ = batch_iter_;
        ++upper_key_;
        while (upper_key_ != current_level_range_.end() &&
501 502 503
               user_comparator_->CompareWithoutTimestamp(
                   batch_iter_->ukey_without_ts, false,
                   upper_key_->ukey_without_ts, false) == 0) {
504 505
          ++upper_key_;
        }
506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524
        break;
      } else {
        if (curr_level_ == 0) {
          // We need to look through all files in level 0
          ++fp_ctx.curr_index_in_curr_level;
        }
        ++batch_iter_;
      }
      if (!file_hit) {
        curr_file_index =
            (batch_iter_ != current_level_range_.end())
                ? fp_ctx_array_[batch_iter_.index()].curr_index_in_curr_level
                : curr_file_level_->num_files;
      }
    }

    *fd = f;
    *file_index = curr_file_index;
    *is_last_key_in_file = cmp_largest == 0;
525 526 527 528 529 530
    if (!*is_last_key_in_file) {
      // If the largest key in the batch overlapping the file is not the
      // largest key in the file, upper_ley_ would not have been updated so
      // update it here
      upper_key_ = batch_iter_;
    }
531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547
    return file_hit;
  }

  FdWithKeyRange* GetNextFile() {
    while (!search_ended_) {
      // Start searching next level.
      if (batch_iter_ == current_level_range_.end()) {
        search_ended_ = !PrepareNextLevel();
        continue;
      } else {
        if (maybe_repeat_key_) {
          maybe_repeat_key_ = false;
          // Check if we found the final value for the last key in the
          // previous lookup range. If we did, then there's no need to look
          // any further for that key, so advance batch_iter_. Else, keep
          // batch_iter_ positioned on that key so we look it up again in
          // the next file
A
anand76 已提交
548 549 550 551
          // For L0, always advance the key because we will look in the next
          // file regardless for all keys not found yet
          if (current_level_range_.CheckKeyDone(batch_iter_) ||
              curr_level_ == 0) {
552
            batch_iter_ = upper_key_;
553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574
          }
        }
        // batch_iter_prev_ will become the start key for the next file
        // lookup
        batch_iter_prev_ = batch_iter_;
      }

      MultiGetRange next_file_range(current_level_range_, batch_iter_prev_,
                                    current_level_range_.end());
      size_t curr_file_index =
          (batch_iter_ != current_level_range_.end())
              ? fp_ctx_array_[batch_iter_.index()].curr_index_in_curr_level
              : curr_file_level_->num_files;
      FdWithKeyRange* f;
      bool is_last_key_in_file;
      if (!GetNextFileInLevelWithKeys(&next_file_range, &curr_file_index, &f,
                                      &is_last_key_in_file)) {
        search_ended_ = !PrepareNextLevel();
      } else {
        if (is_last_key_in_file) {
          // Since cmp_largest is 0, batch_iter_ still points to the last key
          // that falls in this file, instead of the next one. Increment
575 576 577 578 579 580
          // the file index for all keys between batch_iter_ and upper_key_
          auto tmp_iter = batch_iter_;
          while (tmp_iter != upper_key_) {
            ++(fp_ctx_array_[tmp_iter.index()].curr_index_in_curr_level);
            ++tmp_iter;
          }
581 582 583 584
          maybe_repeat_key_ = true;
        }
        // Set the range for this file
        current_file_range_ =
585
            MultiGetRange(next_file_range, batch_iter_prev_, upper_key_);
586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620
        returned_file_level_ = curr_level_;
        hit_file_level_ = curr_level_;
        is_hit_file_last_in_level_ =
            curr_file_index == curr_file_level_->num_files - 1;
        return f;
      }
    }

    // Search ended
    return nullptr;
  }

  // getter for current file level
  // for GET_HIT_L0, GET_HIT_L1 & GET_HIT_L2_AND_UP counts
  unsigned int GetHitFileLevel() { return hit_file_level_; }

  // Returns true if the most recent "hit file" (i.e., one returned by
  // GetNextFile()) is at the last index in its level.
  bool IsHitFileLastInLevel() { return is_hit_file_last_in_level_; }

  const MultiGetRange& CurrentFileRange() { return current_file_range_; }

 private:
  unsigned int num_levels_;
  unsigned int curr_level_;
  unsigned int returned_file_level_;
  unsigned int hit_file_level_;

  struct FilePickerContext {
    int32_t search_left_bound;
    int32_t search_right_bound;
    unsigned int curr_index_in_curr_level;
    unsigned int start_index_in_curr_level;

    FilePickerContext(int32_t left, int32_t right)
A
anand76 已提交
621 622
        : search_left_bound(left), search_right_bound(right),
          curr_index_in_curr_level(0), start_index_in_curr_level(0) {}
623 624 625 626 627 628 629 630 631 632 633 634 635 636

    FilePickerContext() = default;
  };
  std::array<FilePickerContext, MultiGetContext::MAX_BATCH_SIZE> fp_ctx_array_;
  MultiGetRange* range_;
  // Iterator to iterate through the keys in a MultiGet batch, that gets reset
  // at the beginning of each level. Each call to GetNextFile() will position
  // batch_iter_ at or right after the last key that was found in the returned
  // SST file
  MultiGetRange::Iterator batch_iter_;
  // An iterator that records the previous position of batch_iter_, i.e last
  // key found in the previous SST file, in order to serve as the start of
  // the batch key range for the next SST file
  MultiGetRange::Iterator batch_iter_prev_;
637
  MultiGetRange::Iterator upper_key_;
638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656
  bool maybe_repeat_key_;
  MultiGetRange current_level_range_;
  MultiGetRange current_file_range_;
  autovector<LevelFilesBrief>* level_files_brief_;
  bool search_ended_;
  bool is_hit_file_last_in_level_;
  LevelFilesBrief* curr_file_level_;
  FileIndexer* file_indexer_;
  const Comparator* user_comparator_;
  const InternalKeyComparator* internal_comparator_;

  // Setup local variables to search next level.
  // Returns false if there are no more levels to search.
  bool PrepareNextLevel() {
    if (curr_level_ == 0) {
      MultiGetRange::Iterator mget_iter = current_level_range_.begin();
      if (fp_ctx_array_[mget_iter.index()].curr_index_in_curr_level <
          curr_file_level_->num_files) {
        batch_iter_prev_ = current_level_range_.begin();
657
        upper_key_ = batch_iter_ = current_level_range_.begin();
658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684
        return true;
      }
    }

    curr_level_++;
    // Reset key range to saved value
    while (curr_level_ < num_levels_) {
      bool level_contains_keys = false;
      curr_file_level_ = &(*level_files_brief_)[curr_level_];
      if (curr_file_level_->num_files == 0) {
        // When current level is empty, the search bound generated from upper
        // level must be [0, -1] or [0, FileIndexer::kLevelMaxIndex] if it is
        // also empty.

        for (auto mget_iter = current_level_range_.begin();
             mget_iter != current_level_range_.end(); ++mget_iter) {
          struct FilePickerContext& fp_ctx = fp_ctx_array_[mget_iter.index()];

          assert(fp_ctx.search_left_bound == 0);
          assert(fp_ctx.search_right_bound == -1 ||
                 fp_ctx.search_right_bound == FileIndexer::kLevelMaxIndex);
          // Since current level is empty, it will need to search all files in
          // the next level
          fp_ctx.search_left_bound = 0;
          fp_ctx.search_right_bound = FileIndexer::kLevelMaxIndex;
        }
        // Skip all subsequent empty levels
685 686 687 688 689
        do {
          ++curr_level_;
        } while ((curr_level_ < num_levels_) &&
                 (*level_files_brief_)[curr_level_].num_files == 0);
        continue;
690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751
      }

      // Some files may overlap each other. We find
      // all files that overlap user_key and process them in order from
      // newest to oldest. In the context of merge-operator, this can occur at
      // any level. Otherwise, it only occurs at Level-0 (since Put/Deletes
      // are always compacted into a single entry).
      int32_t start_index = -1;
      current_level_range_ =
          MultiGetRange(*range_, range_->begin(), range_->end());
      for (auto mget_iter = current_level_range_.begin();
           mget_iter != current_level_range_.end(); ++mget_iter) {
        struct FilePickerContext& fp_ctx = fp_ctx_array_[mget_iter.index()];
        if (curr_level_ == 0) {
          // On Level-0, we read through all files to check for overlap.
          start_index = 0;
          level_contains_keys = true;
        } else {
          // On Level-n (n>=1), files are sorted. Binary search to find the
          // earliest file whose largest key >= ikey. Search left bound and
          // right bound are used to narrow the range.
          if (fp_ctx.search_left_bound <= fp_ctx.search_right_bound) {
            if (fp_ctx.search_right_bound == FileIndexer::kLevelMaxIndex) {
              fp_ctx.search_right_bound =
                  static_cast<int32_t>(curr_file_level_->num_files) - 1;
            }
            // `search_right_bound_` is an inclusive upper-bound, but since it
            // was determined based on user key, it is still possible the lookup
            // key falls to the right of `search_right_bound_`'s corresponding
            // file. So, pass a limit one higher, which allows us to detect this
            // case.
            Slice& ikey = mget_iter->ikey;
            start_index = FindFileInRange(
                *internal_comparator_, *curr_file_level_, ikey,
                static_cast<uint32_t>(fp_ctx.search_left_bound),
                static_cast<uint32_t>(fp_ctx.search_right_bound) + 1);
            if (start_index == fp_ctx.search_right_bound + 1) {
              // `ikey_` comes after `search_right_bound_`. The lookup key does
              // not exist on this level, so let's skip this level and do a full
              // binary search on the next level.
              fp_ctx.search_left_bound = 0;
              fp_ctx.search_right_bound = FileIndexer::kLevelMaxIndex;
              current_level_range_.SkipKey(mget_iter);
              continue;
            } else {
              level_contains_keys = true;
            }
          } else {
            // search_left_bound > search_right_bound, key does not exist in
            // this level. Since no comparison is done in this level, it will
            // need to search all files in the next level.
            fp_ctx.search_left_bound = 0;
            fp_ctx.search_right_bound = FileIndexer::kLevelMaxIndex;
            current_level_range_.SkipKey(mget_iter);
            continue;
          }
        }
        fp_ctx.start_index_in_curr_level = start_index;
        fp_ctx.curr_index_in_curr_level = start_index;
      }
      if (level_contains_keys) {
        batch_iter_prev_ = current_level_range_.begin();
752
        upper_key_ = batch_iter_ = current_level_range_.begin();
753 754 755 756 757 758 759 760
        return true;
      }
      curr_level_++;
    }
    // curr_level_ = num_levels_. So, no more levels to search.
    return false;
  }
};
761 762
}  // anonymous namespace

S
sdong 已提交
763 764
VersionStorageInfo::~VersionStorageInfo() { delete[] files_; }

J
jorlow@chromium.org 已提交
765 766
Version::~Version() {
  assert(refs_ == 0);
767 768 769 770 771 772

  // Remove from linked list
  prev_->next_ = next_;
  next_->prev_ = prev_;

  // Drop references to files
S
sdong 已提交
773 774 775
  for (int level = 0; level < storage_info_.num_levels_; level++) {
    for (size_t i = 0; i < storage_info_.files_[level].size(); i++) {
      FileMetaData* f = storage_info_.files_[level][i];
776
      assert(f->refs > 0);
J
jorlow@chromium.org 已提交
777 778
      f->refs--;
      if (f->refs <= 0) {
779 780 781 782 783
        assert(cfd_ != nullptr);
        uint32_t path_id = f->fd.GetPathId();
        assert(path_id < cfd_->ioptions()->cf_paths.size());
        vset_->obsolete_files_.push_back(
            ObsoleteFileInfo(f, cfd_->ioptions()->cf_paths[path_id].path));
J
jorlow@chromium.org 已提交
784 785 786 787 788
      }
    }
  }
}

789
// Binary-searches `file_level` (files sorted and non-overlapping) for the
// first file whose largest key is >= `key`; returns num_files if none.
int FindFile(const InternalKeyComparator& icmp,
             const LevelFilesBrief& file_level,
             const Slice& key) {
  return FindFileInRange(icmp, file_level, key, 0,
                         static_cast<uint32_t>(file_level.num_files));
}

796
// Builds a compact LevelFilesBrief for one level: an arena-allocated array
// of FdWithKeyRange entries whose smallest/largest key slices point into
// arena memory copied here (so lookups don't chase FileMetaData pointers).
// The arena owns all allocations; nothing here is individually freed.
void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
        const std::vector<FileMetaData*>& files,
        Arena* arena) {
  assert(file_level);
  assert(arena);

  size_t num = files.size();
  file_level->num_files = num;
  char* mem = arena->AllocateAligned(num * sizeof(FdWithKeyRange));
  file_level->files = new (mem)FdWithKeyRange[num];

  for (size_t i = 0; i < num; i++) {
    Slice smallest_key = files[i]->smallest.Encode();
    Slice largest_key = files[i]->largest.Encode();

    // Copy key slice to sequential memory
    size_t smallest_size = smallest_key.size();
    size_t largest_size = largest_key.size();
    mem = arena->AllocateAligned(smallest_size + largest_size);
    memcpy(mem, smallest_key.data(), smallest_size);
    memcpy(mem + smallest_size, largest_key.data(), largest_size);

    // The slices below reference the arena copy made above, not the
    // original FileMetaData keys.
    FdWithKeyRange& f = file_level->files[i];
    f.fd = files[i]->fd;
    f.file_metadata = files[i];
    f.smallest_key = Slice(mem, smallest_size);
    f.largest_key = Slice(mem + smallest_size, largest_size);
  }
}

G
Gabor Cselle 已提交
826
static bool AfterFile(const Comparator* ucmp,
827
                      const Slice* user_key, const FdWithKeyRange* f) {
A
Abhishek Kona 已提交
828 829
  // nullptr user_key occurs before all keys and is therefore never after *f
  return (user_key != nullptr &&
830 831
          ucmp->CompareWithoutTimestamp(*user_key,
                                        ExtractUserKey(f->largest_key)) > 0);
G
Gabor Cselle 已提交
832 833 834
}

static bool BeforeFile(const Comparator* ucmp,
835
                       const Slice* user_key, const FdWithKeyRange* f) {
A
Abhishek Kona 已提交
836 837
  // nullptr user_key occurs after all keys and is therefore never before *f
  return (user_key != nullptr &&
838 839
          ucmp->CompareWithoutTimestamp(*user_key,
                                        ExtractUserKey(f->smallest_key)) < 0);
G
Gabor Cselle 已提交
840 841
}

842 843
// Returns true if any file in `file_level` overlaps the user-key range
// [*smallest_user_key, *largest_user_key]. A nullptr bound means the range
// is open on that side. For non-L0 levels (disjoint_sorted_files == true)
// a binary search is used; otherwise every file is checked.
bool SomeFileOverlapsRange(
    const InternalKeyComparator& icmp,
    bool disjoint_sorted_files,
    const LevelFilesBrief& file_level,
    const Slice* smallest_user_key,
    const Slice* largest_user_key) {
  const Comparator* ucmp = icmp.user_comparator();
  if (!disjoint_sorted_files) {
    // Need to check against all files
    for (size_t i = 0; i < file_level.num_files; i++) {
      const FdWithKeyRange* f = &(file_level.files[i]);
      if (AfterFile(ucmp, smallest_user_key, f) ||
          BeforeFile(ucmp, largest_user_key, f)) {
        // No overlap
      } else {
        return true;  // Overlap
      }
    }
    return false;
  }

  // Binary search over file list
  uint32_t index = 0;
  if (smallest_user_key != nullptr) {
    // Find the leftmost possible internal key for smallest_user_key
    InternalKey small;
    small.SetMinPossibleForUserKey(*smallest_user_key);
    index = FindFile(icmp, file_level, small.Encode());
  }

  if (index >= file_level.num_files) {
    // beginning of range is after all files, so no overlap.
    return false;
  }

  // The candidate file ends at or after smallest_user_key; overlap exists
  // unless the whole range ends before this file starts.
  return !BeforeFile(ucmp, largest_user_key, &file_level.files[index]);
}

880
namespace {
881

882
// Iterator over all keys of one LSM level: lazily opens a table iterator
// for one file at a time (via TableCache) and advances across file
// boundaries, skipping files that yield no entries.
class LevelIterator final : public InternalIterator {
 public:
  // @param read_options Must outlive this iterator.
  LevelIterator(TableCache* table_cache, const ReadOptions& read_options,
                const FileOptions& file_options,
                const InternalKeyComparator& icomparator,
                const LevelFilesBrief* flevel,
                const SliceTransform* prefix_extractor, bool should_sample,
                HistogramImpl* file_read_hist, TableReaderCaller caller,
                bool skip_filters, int level, RangeDelAggregator* range_del_agg,
                const std::vector<AtomicCompactionUnitBoundary>*
                    compaction_boundaries = nullptr,
                bool allow_unprepared_value = false)
      : table_cache_(table_cache),
        read_options_(read_options),
        file_options_(file_options),
        icomparator_(icomparator),
        user_comparator_(icomparator.user_comparator()),
        flevel_(flevel),
        prefix_extractor_(prefix_extractor),
        file_read_hist_(file_read_hist),
        should_sample_(should_sample),
        caller_(caller),
        skip_filters_(skip_filters),
        allow_unprepared_value_(allow_unprepared_value),
        // Start past the end: no file iterator is open until a seek.
        file_index_(flevel_->num_files),
        level_(level),
        range_del_agg_(range_del_agg),
        pinned_iters_mgr_(nullptr),
        compaction_boundaries_(compaction_boundaries) {
    // Empty level is not supported.
    assert(flevel_ != nullptr && flevel_->num_files > 0);
  }

  ~LevelIterator() override { delete file_iter_.Set(nullptr); }

  void Seek(const Slice& target) override;
  void SeekForPrev(const Slice& target) override;
  void SeekToFirst() override;
  void SeekToLast() override;
  void Next() final override;
  bool NextAndGetResult(IterateResult* result) override;
  void Prev() override;

  bool Valid() const override { return file_iter_.Valid(); }
  Slice key() const override {
    assert(Valid());
    return file_iter_.key();
  }

  Slice value() const override {
    assert(Valid());
    return file_iter_.value();
  }

  // A null file iterator (no file open) reports OK rather than an error.
  Status status() const override {
    return file_iter_.iter() ? file_iter_.status() : Status::OK();
  }

  bool PrepareValue() override {
    return file_iter_.PrepareValue();
  }

  inline bool MayBeOutOfLowerBound() override {
    assert(Valid());
    return may_be_out_of_lower_bound_ && file_iter_.MayBeOutOfLowerBound();
  }

  inline IterBoundCheck UpperBoundCheckResult() override {
    if (Valid()) {
      return file_iter_.UpperBoundCheckResult();
    } else {
      return IterBoundCheck::kUnknown;
    }
  }

  // Propagates the pinning manager to the currently open file iterator,
  // and remembers it for iterators opened later.
  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) override {
    pinned_iters_mgr_ = pinned_iters_mgr;
    if (file_iter_.iter()) {
      file_iter_.SetPinnedItersMgr(pinned_iters_mgr);
    }
  }

  bool IsKeyPinned() const override {
    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
           file_iter_.iter() && file_iter_.IsKeyPinned();
  }

  bool IsValuePinned() const override {
    return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() &&
           file_iter_.iter() && file_iter_.IsValuePinned();
  }

 private:
  // Return true if at least one invalid file is seen and skipped.
  bool SkipEmptyFileForward();
  void SkipEmptyFileBackward();
  void SetFileIterator(InternalIterator* iter);
  void InitFileIterator(size_t new_file_index);

  const Slice& file_smallest_key(size_t file_index) {
    assert(file_index < flevel_->num_files);
    return flevel_->files[file_index].smallest_key;
  }

  bool KeyReachedUpperBound(const Slice& internal_key) {
    return read_options_.iterate_upper_bound != nullptr &&
           user_comparator_.CompareWithoutTimestamp(
               ExtractUserKey(internal_key), /*a_has_ts=*/true,
               *read_options_.iterate_upper_bound, /*b_has_ts=*/false) >= 0;
  }

  // Opens (through the table cache) an iterator over the file at
  // file_index_, optionally clamped to compaction atomic-unit boundaries.
  InternalIterator* NewFileIterator() {
    assert(file_index_ < flevel_->num_files);
    auto file_meta = flevel_->files[file_index_];
    if (should_sample_) {
      sample_file_read_inc(file_meta.file_metadata);
    }

    const InternalKey* smallest_compaction_key = nullptr;
    const InternalKey* largest_compaction_key = nullptr;
    if (compaction_boundaries_ != nullptr) {
      smallest_compaction_key = (*compaction_boundaries_)[file_index_].smallest;
      largest_compaction_key = (*compaction_boundaries_)[file_index_].largest;
    }
    CheckMayBeOutOfLowerBound();
    return table_cache_->NewIterator(
        read_options_, file_options_, icomparator_, *file_meta.file_metadata,
        range_del_agg_, prefix_extractor_,
        nullptr /* don't need reference to table */, file_read_hist_, caller_,
        /*arena=*/nullptr, skip_filters_, level_,
        /*max_file_size_for_l0_meta_pin=*/0, smallest_compaction_key,
        largest_compaction_key, allow_unprepared_value_);
  }

  // Check if current file being fully within iterate_lower_bound.
  //
  // Note MyRocks may update iterate bounds between seek. To workaround it,
  // we need to check and update may_be_out_of_lower_bound_ accordingly.
  void CheckMayBeOutOfLowerBound() {
    if (read_options_.iterate_lower_bound != nullptr &&
        file_index_ < flevel_->num_files) {
      may_be_out_of_lower_bound_ =
          user_comparator_.CompareWithoutTimestamp(
              ExtractUserKey(file_smallest_key(file_index_)), /*a_has_ts=*/true,
              *read_options_.iterate_lower_bound, /*b_has_ts=*/false) < 0;
    }
  }

  TableCache* table_cache_;
  const ReadOptions& read_options_;
  const FileOptions& file_options_;
  const InternalKeyComparator& icomparator_;
  const UserComparatorWrapper user_comparator_;
  const LevelFilesBrief* flevel_;
  // NOTE(review): current_value_ is not referenced anywhere in this class's
  // visible code — looks like a leftover; candidate for removal (verify).
  mutable FileDescriptor current_value_;
  // `prefix_extractor_` may be non-null even for total order seek. Checking
  // this variable is not the right way to identify whether prefix iterator
  // is used.
  const SliceTransform* prefix_extractor_;

  HistogramImpl* file_read_hist_;
  bool should_sample_;
  TableReaderCaller caller_;
  bool skip_filters_;
  bool allow_unprepared_value_;
  bool may_be_out_of_lower_bound_ = true;
  size_t file_index_;
  int level_;
  RangeDelAggregator* range_del_agg_;
  IteratorWrapper file_iter_;  // May be nullptr
  PinnedIteratorsManager* pinned_iters_mgr_;

  // To be propagated to RangeDelAggregator in order to safely truncate range
  // tombstones.
  const std::vector<AtomicCompactionUnitBoundary>* compaction_boundaries_;
};
T
Tyler Harter 已提交
1059

1060
// Positions the iterator at the first entry >= target. Reuses the already
// open file iterator when the target still falls inside the current file's
// key range; otherwise binary-searches for the right file. In prefix-seek
// mode the result may be invalidated if it has left the target's prefix.
void LevelIterator::Seek(const Slice& target) {
  // Check whether the seek key fall under the same file
  bool need_to_reseek = true;
  if (file_iter_.iter() != nullptr && file_index_ < flevel_->num_files) {
    const FdWithKeyRange& cur_file = flevel_->files[file_index_];
    if (icomparator_.InternalKeyComparator::Compare(
            target, cur_file.largest_key) <= 0 &&
        icomparator_.InternalKeyComparator::Compare(
            target, cur_file.smallest_key) >= 0) {
      need_to_reseek = false;
      assert(static_cast<size_t>(FindFile(icomparator_, *flevel_, target)) ==
             file_index_);
    }
  }
  if (need_to_reseek) {
    TEST_SYNC_POINT("LevelIterator::Seek:BeforeFindFile");
    size_t new_file_index = FindFile(icomparator_, *flevel_, target);
    InitFileIterator(new_file_index);
  }

  if (file_iter_.iter() != nullptr) {
    file_iter_.Seek(target);
  }
  if (SkipEmptyFileForward() && prefix_extractor_ != nullptr &&
      !read_options_.total_order_seek && !read_options_.auto_prefix_mode &&
      file_iter_.iter() != nullptr && file_iter_.Valid()) {
    // We've skipped the file we initially positioned to. In the prefix
    // seek case, it is likely that the file is skipped because of
    // prefix bloom or hash, where more keys are skipped. We then check
    // the current key and invalidate the iterator if the prefix is
    // already passed.
    // When doing prefix iterator seek, when keys for one prefix have
    // been exhausted, it can jump to any key that is larger. Here we are
    // enforcing a stricter contract than that, in order to make it easier for
    // higher layers (merging and DB iterator) to reason the correctness:
    // 1. Within the prefix, the result should be accurate.
    // 2. If keys for the prefix is exhausted, it is either positioned to the
    //    next key after the prefix, or make the iterator invalid.
    // A side benefit will be that it invalidates the iterator earlier so that
    // the upper level merging iterator can merge fewer child iterators.
    size_t ts_sz = user_comparator_.timestamp_size();
    Slice target_user_key_without_ts =
        ExtractUserKeyAndStripTimestamp(target, ts_sz);
    Slice file_user_key_without_ts =
        ExtractUserKeyAndStripTimestamp(file_iter_.key(), ts_sz);
    if (prefix_extractor_->InDomain(target_user_key_without_ts) &&
        (!prefix_extractor_->InDomain(file_user_key_without_ts) ||
         user_comparator_.CompareWithoutTimestamp(
             prefix_extractor_->Transform(target_user_key_without_ts), false,
             prefix_extractor_->Transform(file_user_key_without_ts),
             false) != 0)) {
      SetFileIterator(nullptr);
    }
  }
  CheckMayBeOutOfLowerBound();
}

void LevelIterator::SeekForPrev(const Slice& target) {
  size_t new_file_index = FindFile(icomparator_, *flevel_, target);
  if (new_file_index >= flevel_->num_files) {
    new_file_index = flevel_->num_files - 1;
  }

  InitFileIterator(new_file_index);
  if (file_iter_.iter() != nullptr) {
    file_iter_.SeekForPrev(target);
    SkipEmptyFileBackward();
  }
1128
  CheckMayBeOutOfLowerBound();
1129 1130 1131 1132 1133 1134 1135 1136
}

// Positions the iterator at the first entry of the level (first non-empty
// file), or invalidates it if every file is empty.
void LevelIterator::SeekToFirst() {
  InitFileIterator(0);
  if (file_iter_.iter() != nullptr) {
    file_iter_.SeekToFirst();
  }
  SkipEmptyFileForward();
  CheckMayBeOutOfLowerBound();
}

// Positions the iterator at the last entry of the level (last non-empty
// file), or invalidates it if every file is empty.
void LevelIterator::SeekToLast() {
  InitFileIterator(flevel_->num_files - 1);
  if (file_iter_.iter() != nullptr) {
    file_iter_.SeekToLast();
  }
  SkipEmptyFileBackward();
  CheckMayBeOutOfLowerBound();
}

1149 1150 1151 1152 1153
// Advances to the next entry, crossing into the next file when the current
// file iterator is exhausted.
void LevelIterator::Next() {
  assert(Valid());
  file_iter_.Next();
  SkipEmptyFileForward();
}
1154

1155
// Combined Next() + result fetch. The fast path lets the file iterator fill
// `result` directly; only when that iterator is exhausted do we pay for the
// file-skipping logic and refill `result` ourselves.
bool LevelIterator::NextAndGetResult(IterateResult* result) {
  assert(Valid());
  bool is_valid = file_iter_.NextAndGetResult(result);
  if (!is_valid) {
    SkipEmptyFileForward();
    is_valid = Valid();
    if (is_valid) {
      result->key = key();
      result->bound_check_result = file_iter_.UpperBoundCheckResult();
      // Ideally, we should return the real file_iter_.value_prepared but the
      // information is not here. It would cause an extra PrepareValue()
      // for the first key of a file.
      result->value_prepared = !allow_unprepared_value_;
    }
  }
  return is_valid;
}

// Steps back one entry, crossing into the previous file when the current
// file iterator is exhausted.
void LevelIterator::Prev() {
  assert(Valid());
  file_iter_.Prev();
  SkipEmptyFileBackward();
}

1179 1180
// Advances past files that produced no entry (no iterator open, or iterator
// exhausted without error and not stopped by the upper bound), seeking to
// the first key of each subsequent file. Stops early if the next file's
// smallest key already reaches iterate_upper_bound. Returns true if at
// least one such file was skipped.
bool LevelIterator::SkipEmptyFileForward() {
  bool seen_empty_file = false;
  while (file_iter_.iter() == nullptr ||
         (!file_iter_.Valid() && file_iter_.status().ok() &&
          file_iter_.iter()->UpperBoundCheckResult() !=
              IterBoundCheck::kOutOfBound)) {
    seen_empty_file = true;
    // Move to next file
    if (file_index_ >= flevel_->num_files - 1) {
      // Already at the last file
      SetFileIterator(nullptr);
      break;
    }
    if (KeyReachedUpperBound(file_smallest_key(file_index_ + 1))) {
      SetFileIterator(nullptr);
      break;
    }
    InitFileIterator(file_index_ + 1);
    if (file_iter_.iter() != nullptr) {
      file_iter_.SeekToFirst();
    }
  }
  return seen_empty_file;
}

// Mirror of SkipEmptyFileForward for reverse iteration: walks to earlier
// files while the current one yields no entry, seeking to the last key of
// each. Invalidates the iterator when the first file is exhausted.
void LevelIterator::SkipEmptyFileBackward() {
  while (file_iter_.iter() == nullptr ||
         (!file_iter_.Valid() && file_iter_.status().ok())) {
    // Move to previous file
    if (file_index_ == 0) {
      // Already the first file
      SetFileIterator(nullptr);
      return;
    }
    InitFileIterator(file_index_ - 1);
    if (file_iter_.iter() != nullptr) {
      file_iter_.SeekToLast();
    }
  }
}

// Installs `iter` as the current file iterator. The replaced iterator is
// either handed to the pinning manager (so pinned keys/values it backs stay
// alive until ReleasePinnedData) or deleted immediately.
void LevelIterator::SetFileIterator(InternalIterator* iter) {
  if (pinned_iters_mgr_ && iter) {
    iter->SetPinnedItersMgr(pinned_iters_mgr_);
  }

  InternalIterator* old_iter = file_iter_.Set(iter);
  if (pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled()) {
    pinned_iters_mgr_->PinIterator(old_iter);
  } else {
    delete old_iter;
  }
}

// Points file_iter_ at the table iterator for `new_file_index`. An index
// past the end of the level clears the current iterator.
void LevelIterator::InitFileIterator(size_t new_file_index) {
  if (new_file_index >= flevel_->num_files) {
    file_index_ = new_file_index;
    SetFileIterator(nullptr);
    return;
  }
  // If the file iterator shows incomplete, we try it again if users seek
  // to the same file, as this time we may go to a different data block
  // which is cached in block cache.
  const bool reuse_current_iter = file_iter_.iter() != nullptr &&
                                  !file_iter_.status().IsIncomplete() &&
                                  new_file_index == file_index_;
  if (!reuse_current_iter) {
    file_index_ = new_file_index;
    SetFileIterator(NewFileIterator());
  }
}
1254
}  // anonymous namespace
1255

1256 1257
// Fetches table properties for `file_meta`, first from the table cache
// (no I/O); on cache miss (Incomplete) falls back to reading the properties
// block directly from the SST file. `fname` may override the derived file
// path. Any error other than Incomplete from the cache is returned as-is.
Status Version::GetTableProperties(std::shared_ptr<const TableProperties>* tp,
                                   const FileMetaData* file_meta,
                                   const std::string* fname) const {
  auto table_cache = cfd_->table_cache();
  auto ioptions = cfd_->ioptions();
  Status s = table_cache->GetTableProperties(
      file_options_, cfd_->internal_comparator(), file_meta->fd, tp,
      mutable_cf_options_.prefix_extractor.get(), true /* no io */);
  if (s.ok()) {
    return s;
  }

  // We only ignore error type `Incomplete` since it's by design that we
  // disallow table when it's not in table cache.
  if (!s.IsIncomplete()) {
    return s;
  }

  // 2. Table is not present in table cache, we'll read the table properties
  // directly from the properties block in the file.
  std::unique_ptr<FSRandomAccessFile> file;
  std::string file_name;
  if (fname != nullptr) {
    file_name = *fname;
  } else {
    file_name =
      TableFileName(ioptions->cf_paths, file_meta->fd.GetNumber(),
                    file_meta->fd.GetPathId());
  }
  s = ioptions->fs->NewRandomAccessFile(file_name, file_options_, &file,
                                        nullptr);
  if (!s.ok()) {
    return s;
  }

  TableProperties* raw_table_properties;
  // By setting the magic number to kInvalidTableMagicNumber, we can by
  // pass the magic number check in the footer.
  std::unique_ptr<RandomAccessFileReader> file_reader(
      new RandomAccessFileReader(
          std::move(file), file_name, nullptr /* env */, io_tracer_,
          nullptr /* stats */, 0 /* hist_type */, nullptr /* file_read_hist */,
          nullptr /* rate_limiter */, ioptions->listeners));
  s = ReadTableProperties(
      file_reader.get(), file_meta->fd.GetFileSize(),
      Footer::kInvalidTableMagicNumber /* table's magic number */, *ioptions,
      &raw_table_properties, false /* compression_type_missing */);
  if (!s.ok()) {
    return s;
  }
  RecordTick(ioptions->stats, NUMBER_DIRECT_LOAD_TABLE_PROPERTIES);

  // Ownership of raw_table_properties transfers to the shared_ptr.
  *tp = std::shared_ptr<const TableProperties>(raw_table_properties);
  return s;
}

// Collects table properties for every SST file in every level of this
// version. Propagates the first failure encountered.
Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props) {
  const int num_levels = storage_info_.num_levels_;
  for (int level = 0; level < num_levels; ++level) {
    Status level_status = GetPropertiesOfAllTables(props, level);
    if (!level_status.ok()) {
      return level_status;
    }
  }
  return Status::OK();
}

1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377
// Writes a human-readable dump of range tombstones, file by file across all
// levels, into *out_str, emitting at most `max_entries_to_print` tombstone
// lines in total. Appends a truncation note when the budget is exhausted.
Status Version::TablesRangeTombstoneSummary(int max_entries_to_print,
                                            std::string* out_str) {
  if (max_entries_to_print <= 0) {
    return Status::OK();
  }
  int num_entries_left = max_entries_to_print;

  std::stringstream ss;

  for (int level = 0; level < storage_info_.num_levels_; level++) {
    for (const auto& file_meta : storage_info_.files_[level]) {
      auto fname =
          TableFileName(cfd_->ioptions()->cf_paths, file_meta->fd.GetNumber(),
                        file_meta->fd.GetPathId());

      ss << "=== file : " << fname << " ===\n";

      TableCache* table_cache = cfd_->table_cache();
      std::unique_ptr<FragmentedRangeTombstoneIterator> tombstone_iter;

      Status s = table_cache->GetRangeTombstoneIterator(
          ReadOptions(), cfd_->internal_comparator(), *file_meta,
          &tombstone_iter);
      if (!s.ok()) {
        return s;
      }
      if (tombstone_iter) {
        tombstone_iter->SeekToFirst();

        while (tombstone_iter->Valid() && num_entries_left > 0) {
          ss << "start: " << tombstone_iter->start_key().ToString(true)
             << " end: " << tombstone_iter->end_key().ToString(true)
             << " seq: " << tombstone_iter->seq() << '\n';
          tombstone_iter->Next();
          num_entries_left--;
        }
        // Budget exhausted: break out of the file loop; the outer check
        // below then breaks out of the level loop as well.
        if (num_entries_left <= 0) {
          break;
        }
      }
    }
    if (num_entries_left <= 0) {
      break;
    }
  }
  assert(num_entries_left >= 0);
  if (num_entries_left <= 0) {
    ss << "(results may not be complete)\n";
  }

  *out_str = ss.str();
  return Status::OK();
}

1378 1379 1380 1381
// Collects table properties for every SST file in one level, keyed by file
// name. Stops at the first file whose properties cannot be obtained.
Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props,
                                         int level) {
  for (const auto& meta : storage_info_.files_[level]) {
    auto file_name =
        TableFileName(cfd_->ioptions()->cf_paths, meta->fd.GetNumber(),
                      meta->fd.GetPathId());
    // Prefer cached properties; GetTableProperties falls back to reading
    // the properties block from the file itself on a cache miss.
    std::shared_ptr<const TableProperties> properties;
    Status s = GetTableProperties(&properties, meta, &file_name);
    if (!s.ok()) {
      return s;
    }
    props->insert({file_name, properties});
  }
  return Status::OK();
}

1398
// Collects table properties for every SST file overlapping any of the `n`
// user-key ranges, across all non-empty levels. Files already present in
// *props (by name) are not fetched twice.
Status Version::GetPropertiesOfTablesInRange(
    const Range* range, std::size_t n, TablePropertiesCollection* props) const {
  for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {
    for (decltype(n) i = 0; i < n; i++) {
      // Convert user_key into a corresponding internal key.
      InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek);
      InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek);
      std::vector<FileMetaData*> files;
      storage_info_.GetOverlappingInputs(level, &k1, &k2, &files, -1, nullptr,
                                         false);
      for (const auto& file_meta : files) {
        auto fname =
            TableFileName(cfd_->ioptions()->cf_paths,
                          file_meta->fd.GetNumber(), file_meta->fd.GetPathId());
        if (props->count(fname) == 0) {
          // 1. If the table is already present in table cache, load table
          // properties from there.
          std::shared_ptr<const TableProperties> table_properties;
          Status s = GetTableProperties(&table_properties, file_meta, &fname);
          if (s.ok()) {
            props->insert({fname, table_properties});
          } else {
            return s;
          }
        }
      }
    }
  }

  return Status::OK();
}

1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450
// Aggregates table properties across files: all levels when `level` < 0,
// otherwise only the given level. On success *tp owns the summed result.
Status Version::GetAggregatedTableProperties(
    std::shared_ptr<const TableProperties>* tp, int level) {
  TablePropertiesCollection all_props;
  Status s = (level < 0) ? GetPropertiesOfAllTables(&all_props)
                         : GetPropertiesOfAllTables(&all_props, level);
  if (!s.ok()) {
    return s;
  }

  auto* aggregated = new TableProperties();
  for (const auto& item : all_props) {
    aggregated->Add(*item.second);
  }
  tp->reset(aggregated);
  return Status::OK();
}

1451 1452
// Sums the memory used by the table readers of every file in this version,
// as reported by the table cache.
size_t Version::GetMemoryUsageByTableReaders() {
  size_t usage = 0;
  TableCache* const table_cache = cfd_->table_cache();
  for (auto& brief : storage_info_.level_files_brief_) {
    for (size_t idx = 0; idx < brief.num_files; ++idx) {
      usage += table_cache->GetMemoryUsageByTableReader(
          file_options_, cfd_->internal_comparator(), brief.files[idx].fd,
          mutable_cf_options_.prefix_extractor.get());
    }
  }
  return usage;
}

1463 1464 1465 1466 1467 1468 1469 1470 1471
// Fills *cf_meta with per-level SST file metadata and blob file metadata
// for this column family, resetting any previous contents first.
void Version::GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta) {
  assert(cf_meta);
  assert(cfd_);

  cf_meta->name = cfd_->GetName();
  cf_meta->size = 0;
  cf_meta->file_count = 0;
  cf_meta->levels.clear();

  cf_meta->blob_file_size = 0;
  cf_meta->blob_file_count = 0;
  cf_meta->blob_files.clear();

  auto* ioptions = cfd_->ioptions();
  auto* vstorage = storage_info();

  for (int level = 0; level < cfd_->NumberLevels(); level++) {
    uint64_t level_size = 0;
    cf_meta->file_count += vstorage->LevelFiles(level).size();
    std::vector<SstFileMetaData> files;
    for (const auto& file : vstorage->LevelFiles(level)) {
      uint32_t path_id = file->fd.GetPathId();
      std::string file_path;
      if (path_id < ioptions->cf_paths.size()) {
        file_path = ioptions->cf_paths[path_id].path;
      } else {
        // Path id out of range: fall back to the last configured cf_path.
        assert(!ioptions->cf_paths.empty());
        file_path = ioptions->cf_paths.back().path;
      }
      const uint64_t file_number = file->fd.GetNumber();
      files.emplace_back(
          MakeTableFileName("", file_number), file_number, file_path,
          static_cast<size_t>(file->fd.GetFileSize()), file->fd.smallest_seqno,
          file->fd.largest_seqno, file->smallest.user_key().ToString(),
          file->largest.user_key().ToString(),
          file->stats.num_reads_sampled.load(std::memory_order_relaxed),
          file->being_compacted, file->temperature,
          file->oldest_blob_file_number, file->TryGetOldestAncesterTime(),
          file->TryGetFileCreationTime(), file->file_checksum,
          file->file_checksum_func_name);
      // These two fields are not part of the SstFileMetaData constructor.
      files.back().num_entries = file->num_entries;
      files.back().num_deletions = file->num_deletions;
      level_size += file->fd.GetFileSize();
    }
    cf_meta->levels.emplace_back(
        level, level_size, std::move(files));
    cf_meta->size += level_size;
  }
  for (const auto& iter : vstorage->GetBlobFiles()) {
    const auto meta = iter.second.get();
    cf_meta->blob_files.emplace_back(
        meta->GetBlobFileNumber(), BlobFileName("", meta->GetBlobFileNumber()),
        ioptions->cf_paths.front().path, meta->GetBlobFileSize(),
        meta->GetTotalBlobCount(), meta->GetTotalBlobBytes(),
        meta->GetGarbageBlobCount(), meta->GetGarbageBlobBytes(),
        meta->GetChecksumMethod(), meta->GetChecksumValue());
    cf_meta->blob_file_count++;
    cf_meta->blob_file_size += meta->GetBlobFileSize();
  }
}

1524 1525 1526 1527 1528 1529 1530 1531 1532
// Returns the total on-disk size, in bytes, of all SST files across every
// level of this version.
uint64_t Version::GetSstFilesSize() {
  uint64_t total_bytes = 0;
  for (int level = 0; level < storage_info_.num_levels_; ++level) {
    for (const auto& meta : storage_info_.LevelFiles(level)) {
      total_bytes += meta->fd.GetFileSize();
    }
  }
  return total_bytes;
}
1533

1534 1535 1536 1537 1538
// Stores into *creation_time the minimum file creation time over all files
// in non-empty levels. If any file's creation time is unknown, reports 0
// (meaning "unknown") and returns immediately.
void Version::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
  uint64_t oldest_time = port::kMaxUint64;
  for (int level = 0; level < storage_info_.num_non_empty_levels_; level++) {
    for (FileMetaData* meta : storage_info_.LevelFiles(level)) {
      assert(meta->fd.table_reader != nullptr);
      uint64_t file_creation_time = meta->TryGetFileCreationTime();
      if (file_creation_time == kUnknownFileCreationTime) {
        *creation_time = 0;
        return;
      }
      if (file_creation_time < oldest_time) {
        oldest_time = file_creation_time;
      }
    }
  }
  *creation_time = oldest_time;
}

S
sdong 已提交
1552
// Estimates the number of live (non-deleted) keys from sampled per-file
// statistics, scaling the sampled estimate up by file count when only a
// subset of files has been sampled. Returns 0 when there are no samples or
// deletions outnumber non-deletions.
uint64_t VersionStorageInfo::GetEstimatedActiveKeys() const {
  // Estimation will be inaccurate when:
  // (1) there exist merge keys
  // (2) keys are directly overwritten
  // (3) deletion on non-existing keys
  // (4) low number of samples
  if (current_num_samples_ == 0) {
    return 0;
  }

  if (current_num_non_deletions_ <= current_num_deletions_) {
    return 0;
  }

  uint64_t est = current_num_non_deletions_ - current_num_deletions_;

  uint64_t file_count = 0;
  for (int level = 0; level < num_levels_; ++level) {
    file_count += files_[level].size();
  }

  if (current_num_samples_ < file_count) {
    // casting to avoid overflowing
    return
      static_cast<uint64_t>(
        (est * static_cast<double>(file_count) / current_num_samples_)
      );
  } else {
    return est;
  }
}

1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598
// Returns the ratio of uncompressed data bytes (raw key + raw value sizes
// from table properties) to on-disk file bytes for all files at `level`.
// Returns -1.0 when the level contains no file bytes, so callers can
// distinguish "no data" from a real ratio.
double VersionStorageInfo::GetEstimatedCompressionRatioAtLevel(
    int level) const {
  assert(level < num_levels_);
  uint64_t sum_file_size_bytes = 0;
  uint64_t sum_data_size_bytes = 0;
  for (auto* file_meta : files_[level]) {
    sum_file_size_bytes += file_meta->fd.GetFileSize();
    sum_data_size_bytes += file_meta->raw_key_size + file_meta->raw_value_size;
  }
  if (sum_file_size_bytes == 0) {
    // Avoid division by zero; no files (or zero-sized files) at this level.
    return -1.0;
  }
  return static_cast<double>(sum_data_size_bytes) / sum_file_size_bytes;
}

1599
void Version::AddIterators(const ReadOptions& read_options,
1600
                           const FileOptions& soptions,
A
Andrew Kryczka 已提交
1601
                           MergeIteratorBuilder* merge_iter_builder,
1602 1603
                           RangeDelAggregator* range_del_agg,
                           bool allow_unprepared_value) {
S
sdong 已提交
1604
  assert(storage_info_.finalized_);
1605

1606
  for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {
A
Andrew Kryczka 已提交
1607
    AddIteratorsForLevel(read_options, soptions, merge_iter_builder, level,
1608
                         range_del_agg, allow_unprepared_value);
1609 1610 1611 1612
  }
}

void Version::AddIteratorsForLevel(const ReadOptions& read_options,
1613
                                   const FileOptions& soptions,
1614
                                   MergeIteratorBuilder* merge_iter_builder,
A
Andrew Kryczka 已提交
1615
                                   int level,
1616 1617
                                   RangeDelAggregator* range_del_agg,
                                   bool allow_unprepared_value) {
1618 1619 1620 1621 1622 1623
  assert(storage_info_.finalized_);
  if (level >= storage_info_.num_non_empty_levels()) {
    // This is an empty level
    return;
  } else if (storage_info_.LevelFilesBrief(level).num_files == 0) {
    // No files in this level
S
sdong 已提交
1624 1625 1626
    return;
  }

1627 1628
  bool should_sample = should_sample_file_read();

1629
  auto* arena = merge_iter_builder->GetArena();
1630 1631 1632 1633 1634
  if (level == 0) {
    // Merge all level zero files together since they may overlap
    for (size_t i = 0; i < storage_info_.LevelFilesBrief(0).num_files; i++) {
      const auto& file = storage_info_.LevelFilesBrief(0).files[i];
      merge_iter_builder->AddIterator(cfd_->table_cache()->NewIterator(
1635 1636 1637 1638 1639
          read_options, soptions, cfd_->internal_comparator(),
          *file.file_metadata, range_del_agg,
          mutable_cf_options_.prefix_extractor.get(), nullptr,
          cfd_->internal_stats()->GetFileReadHist(0),
          TableReaderCaller::kUserIterator, arena,
1640
          /*skip_filters=*/false, /*level=*/0, max_file_size_for_l0_meta_pin_,
1641
          /*smallest_compaction_key=*/nullptr,
1642
          /*largest_compaction_key=*/nullptr, allow_unprepared_value));
1643
    }
1644 1645 1646 1647 1648 1649 1650 1651 1652
    if (should_sample) {
      // Count ones for every L0 files. This is done per iterator creation
      // rather than Seek(), while files in other levels are recored per seek.
      // If users execute one range query per iterator, there may be some
      // discrepancy here.
      for (FileMetaData* meta : storage_info_.LevelFiles(0)) {
        sample_file_read_inc(meta);
      }
    }
1653
  } else if (storage_info_.LevelFilesBrief(level).num_files > 0) {
1654 1655 1656
    // For levels > 0, we can use a concatenating iterator that sequentially
    // walks through the non-overlapping files in the level, opening them
    // lazily.
1657 1658 1659
    auto* mem = arena->AllocateAligned(sizeof(LevelIterator));
    merge_iter_builder->AddIterator(new (mem) LevelIterator(
        cfd_->table_cache(), read_options, soptions,
1660
        cfd_->internal_comparator(), &storage_info_.LevelFilesBrief(level),
1661
        mutable_cf_options_.prefix_extractor.get(), should_sample_file_read(),
1662
        cfd_->internal_stats()->GetFileReadHist(level),
1663
        TableReaderCaller::kUserIterator, IsFilterSkipped(level), level,
1664 1665
        range_del_agg,
        /*compaction_boundaries=*/nullptr, allow_unprepared_value));
1666 1667 1668
  }
}

1669
// Checks whether any key (point or range tombstone) in `level` of this
// version overlaps the user-key range [smallest_user_key, largest_user_key],
// writing the answer to *overlap. L0 files are checked individually since
// they may overlap each other; other levels use a LevelIterator.
Status Version::OverlapWithLevelIterator(const ReadOptions& read_options,
                                         const FileOptions& file_options,
                                         const Slice& smallest_user_key,
                                         const Slice& largest_user_key,
                                         int level, bool* overlap) {
  assert(storage_info_.finalized_);

  auto icmp = cfd_->internal_comparator();
  auto ucmp = icmp.user_comparator();

  Arena arena;
  Status status;
  ReadRangeDelAggregator range_del_agg(&icmp,
                                       kMaxSequenceNumber /* upper_bound */);

  *overlap = false;

  if (level == 0) {
    for (size_t i = 0; i < storage_info_.LevelFilesBrief(0).num_files; i++) {
      const auto file = &storage_info_.LevelFilesBrief(0).files[i];
      if (AfterFile(ucmp, &smallest_user_key, file) ||
          BeforeFile(ucmp, &largest_user_key, file)) {
        // File range is entirely outside the query range; skip it.
        continue;
      }
      ScopedArenaIterator iter(cfd_->table_cache()->NewIterator(
          read_options, file_options, cfd_->internal_comparator(),
          *file->file_metadata, &range_del_agg,
          mutable_cf_options_.prefix_extractor.get(), nullptr,
          cfd_->internal_stats()->GetFileReadHist(0),
          TableReaderCaller::kUserIterator, &arena,
          /*skip_filters=*/false, /*level=*/0, max_file_size_for_l0_meta_pin_,
          /*smallest_compaction_key=*/nullptr,
          /*largest_compaction_key=*/nullptr,
          /*allow_unprepared_value=*/false));
      status = OverlapWithIterator(
          ucmp, smallest_user_key, largest_user_key, iter.get(), overlap);
      if (!status.ok() || *overlap) {
        break;
      }
    }
  } else if (storage_info_.LevelFilesBrief(level).num_files > 0) {
    auto mem = arena.AllocateAligned(sizeof(LevelIterator));
    ScopedArenaIterator iter(new (mem) LevelIterator(
        cfd_->table_cache(), read_options, file_options,
        cfd_->internal_comparator(), &storage_info_.LevelFilesBrief(level),
        mutable_cf_options_.prefix_extractor.get(), should_sample_file_read(),
        cfd_->internal_stats()->GetFileReadHist(level),
        TableReaderCaller::kUserIterator, IsFilterSkipped(level), level,
        &range_del_agg));
    status = OverlapWithIterator(
        ucmp, smallest_user_key, largest_user_key, iter.get(), overlap);
  }

  // Even if no point key overlaps, a range tombstone may.
  if (status.ok() && *overlap == false &&
      range_del_agg.IsRangeOverlapped(smallest_user_key, largest_user_key)) {
    *overlap = true;
  }
  return status;
}

S
sdong 已提交
1729 1730
// Constructs the per-version file-storage metadata. When ref_vstorage (the
// previous version's storage) is provided, the running accumulated/current
// statistics are carried over so estimates remain continuous across versions.
VersionStorageInfo::VersionStorageInfo(
    const InternalKeyComparator* internal_comparator,
    const Comparator* user_comparator, int levels,
    CompactionStyle compaction_style, VersionStorageInfo* ref_vstorage,
    bool _force_consistency_checks)
    : internal_comparator_(internal_comparator),
      user_comparator_(user_comparator),
      // cfd is nullptr if Version is dummy
      num_levels_(levels),
      num_non_empty_levels_(0),
      file_indexer_(user_comparator),
      compaction_style_(compaction_style),
      files_(new std::vector<FileMetaData*>[num_levels_]),
      base_level_(num_levels_ == 1 ? -1 : 1),
      level_multiplier_(0.0),
      files_by_compaction_pri_(num_levels_),
      level0_non_overlapping_(false),
      next_file_to_compact_by_size_(num_levels_),
      compaction_score_(num_levels_),
      compaction_level_(num_levels_),
      l0_delay_trigger_count_(0),
      accumulated_file_size_(0),
      accumulated_raw_key_size_(0),
      accumulated_raw_value_size_(0),
      accumulated_num_non_deletions_(0),
      accumulated_num_deletions_(0),
      current_num_non_deletions_(0),
      current_num_deletions_(0),
      current_num_samples_(0),
      estimated_compaction_needed_bytes_(0),
      finalized_(false),
      force_consistency_checks_(_force_consistency_checks) {
  if (ref_vstorage != nullptr) {
    accumulated_file_size_ = ref_vstorage->accumulated_file_size_;
    accumulated_raw_key_size_ = ref_vstorage->accumulated_raw_key_size_;
    accumulated_raw_value_size_ = ref_vstorage->accumulated_raw_value_size_;
    accumulated_num_non_deletions_ =
        ref_vstorage->accumulated_num_non_deletions_;
    accumulated_num_deletions_ = ref_vstorage->accumulated_num_deletions_;
    current_num_non_deletions_ = ref_vstorage->current_num_non_deletions_;
    current_num_deletions_ = ref_vstorage->current_num_deletions_;
    current_num_samples_ = ref_vstorage->current_num_samples_;
    oldest_snapshot_seqnum_ = ref_vstorage->oldest_snapshot_seqnum_;
  }
}
1774

I
Igor Canadi 已提交
1775
// Constructs a Version for the given column family. column_family_data may
// be nullptr for a dummy Version, in which case all cfd-derived members are
// initialized to null/empty defaults. next_/prev_ start self-linked (the
// Version is not yet in any version list) and refs_ starts at 0.
Version::Version(ColumnFamilyData* column_family_data, VersionSet* vset,
                 const FileOptions& file_opt,
                 const MutableCFOptions mutable_cf_options,
                 const std::shared_ptr<IOTracer>& io_tracer,
                 uint64_t version_number)
    : env_(vset->env_),
      clock_(vset->clock_),
      cfd_(column_family_data),
      info_log_((cfd_ == nullptr) ? nullptr : cfd_->ioptions()->logger),
      db_statistics_((cfd_ == nullptr) ? nullptr : cfd_->ioptions()->stats),
      table_cache_((cfd_ == nullptr) ? nullptr : cfd_->table_cache()),
      blob_file_cache_(cfd_ ? cfd_->blob_file_cache() : nullptr),
      merge_operator_(
          (cfd_ == nullptr) ? nullptr : cfd_->ioptions()->merge_operator.get()),
      storage_info_(
          (cfd_ == nullptr) ? nullptr : &cfd_->internal_comparator(),
          (cfd_ == nullptr) ? nullptr : cfd_->user_comparator(),
          cfd_ == nullptr ? 0 : cfd_->NumberLevels(),
          cfd_ == nullptr ? kCompactionStyleLevel
                          : cfd_->ioptions()->compaction_style,
          (cfd_ == nullptr || cfd_->current() == nullptr)
              ? nullptr
              : cfd_->current()->storage_info(),
          cfd_ == nullptr ? false : cfd_->ioptions()->force_consistency_checks),
      vset_(vset),
      next_(this),
      prev_(this),
      refs_(0),
      file_options_(file_opt),
      mutable_cf_options_(mutable_cf_options),
      max_file_size_for_l0_meta_pin_(
          MaxFileSizeForL0MetaPin(mutable_cf_options_)),
      version_number_(version_number),
      io_tracer_(io_tracer) {}
S
sdong 已提交
1809

1810
// Reads the blob value for user_key referenced by the serialized blob index
// in blob_index_slice. Decodes the index, then delegates to the BlobIndex
// overload. Fails with Incomplete when disk I/O is disallowed
// (kBlockCacheTier), since blobs are never served from the block cache.
Status Version::GetBlob(const ReadOptions& read_options, const Slice& user_key,
                        const Slice& blob_index_slice, PinnableSlice* value,
                        uint64_t* bytes_read) const {
  if (read_options.read_tier == kBlockCacheTier) {
    return Status::Incomplete("Cannot read blob: no disk I/O allowed");
  }

  BlobIndex blob_index;

  {
    Status s = blob_index.DecodeFrom(blob_index_slice);
    if (!s.ok()) {
      return s;
    }
  }

  return GetBlob(read_options, user_key, blob_index, value, bytes_read);
}

// Reads the blob value for user_key from the blob file identified by the
// already-decoded blob_index, via the blob file cache. Only plain (non-TTL,
// non-inlined) blob references are supported here; anything else is treated
// as corruption.
Status Version::GetBlob(const ReadOptions& read_options, const Slice& user_key,
                        const BlobIndex& blob_index, PinnableSlice* value,
                        uint64_t* bytes_read) const {
  assert(value);

  if (blob_index.HasTTL() || blob_index.IsInlined()) {
    return Status::Corruption("Unexpected TTL/inlined blob index");
  }

  const auto& blob_files = storage_info_.GetBlobFiles();

  const uint64_t blob_file_number = blob_index.file_number();

  // The referenced blob file must belong to this version.
  const auto it = blob_files.find(blob_file_number);
  if (it == blob_files.end()) {
    return Status::Corruption("Invalid blob file number");
  }

  CacheHandleGuard<BlobFileReader> blob_file_reader;

  {
    assert(blob_file_cache_);
    const Status s = blob_file_cache_->GetBlobFileReader(blob_file_number,
                                                         &blob_file_reader);
    if (!s.ok()) {
      return s;
    }
  }

  assert(blob_file_reader.GetValue());
  const Status s = blob_file_reader.GetValue()->GetBlob(
      read_options, user_key, blob_index.offset(), blob_index.size(),
      blob_index.compression(), value, bytes_read);

  return s;
}

1866
void Version::Get(const ReadOptions& read_options, const LookupKey& k,
H
Huisheng Liu 已提交
1867
                  PinnableSlice* value, std::string* timestamp, Status* status,
A
Andrew Kryczka 已提交
1868
                  MergeContext* merge_context,
1869
                  SequenceNumber* max_covering_tombstone_seq, bool* value_found,
Y
Yi Wu 已提交
1870
                  bool* key_exists, SequenceNumber* seq, ReadCallback* callback,
1871
                  bool* is_blob, bool do_merge) {
1872 1873
  Slice ikey = k.internal_key();
  Slice user_key = k.user_key();
1874 1875

  assert(status->ok() || status->IsMergeInProgress());
1876

1877 1878 1879 1880 1881
  if (key_exists != nullptr) {
    // will falsify below if not found
    *key_exists = true;
  }

1882
  PinnedIteratorsManager pinned_iters_mgr;
1883 1884 1885 1886 1887
  uint64_t tracing_get_id = BlockCacheTraceHelper::kReservedGetId;
  if (vset_ && vset_->block_cache_tracer_ &&
      vset_->block_cache_tracer_->is_tracing_enabled()) {
    tracing_get_id = vset_->block_cache_tracer_->NextGetId();
  }
1888 1889 1890 1891 1892 1893

  // Note: the old StackableDB-based BlobDB passes in
  // GetImplOptions::is_blob_index; for the integrated BlobDB implementation, we
  // need to provide it here.
  bool is_blob_index = false;
  bool* const is_blob_to_use = is_blob ? is_blob : &is_blob_index;
1894
  BlobFetcher blob_fetcher(this, read_options);
1895

S
sdong 已提交
1896
  GetContext get_context(
S
sdong 已提交
1897
      user_comparator(), merge_operator_, info_log_, db_statistics_,
S
sdong 已提交
1898
      status->ok() ? GetContext::kNotFound : GetContext::kMerge, user_key,
H
Huisheng Liu 已提交
1899
      do_merge ? value : nullptr, do_merge ? timestamp : nullptr, value_found,
1900
      merge_context, do_merge, max_covering_tombstone_seq, clock_, seq,
1901
      merge_operator_ ? &pinned_iters_mgr : nullptr, callback, is_blob_to_use,
1902
      tracing_get_id, &blob_fetcher);
1903 1904 1905 1906 1907

  // Pin blocks that we read to hold merge operands
  if (merge_operator_) {
    pinned_iters_mgr.StartPinning();
  }
1908

S
sdong 已提交
1909 1910 1911 1912
  FilePicker fp(
      storage_info_.files_, user_key, ikey, &storage_info_.level_files_brief_,
      storage_info_.num_non_empty_levels_, &storage_info_.file_indexer_,
      user_comparator(), internal_comparator());
1913
  FdWithKeyRange* f = fp.GetNextFile();
1914

1915
  while (f != nullptr) {
1916
    if (*max_covering_tombstone_seq > 0) {
1917 1918 1919
      // The remaining files we look at will only contain covered keys, so we
      // stop here.
      break;
1920
    }
1921 1922 1923
    if (get_context.sample()) {
      sample_file_read_inc(f->file_metadata);
    }
1924

1925 1926 1927
    bool timer_enabled =
        GetPerfLevel() >= PerfLevel::kEnableTimeExceptForMutex &&
        get_perf_context()->per_level_perf_context_enabled;
1928
    StopWatchNano timer(clock_, timer_enabled /* auto_start */);
1929
    *status = table_cache_->Get(
1930 1931
        read_options, *internal_comparator(), *f->file_metadata, ikey,
        &get_context, mutable_cf_options_.prefix_extractor.get(),
1932
        cfd_->internal_stats()->GetFileReadHist(fp.GetHitFileLevel()),
1933
        IsFilterSkipped(static_cast<int>(fp.GetHitFileLevel()),
1934
                        fp.IsHitFileLastInLevel()),
1935
        fp.GetHitFileLevel(), max_file_size_for_l0_meta_pin_);
1936
    // TODO: examine the behavior for corrupted key
1937 1938
    if (timer_enabled) {
      PERF_COUNTER_BY_LEVEL_ADD(get_from_table_nanos, timer.ElapsedNanos(),
1939
                                fp.GetHitFileLevel());
1940
    }
1941 1942
    if (!status->ok()) {
      return;
1943
    }
1944

1945 1946
    // report the counters before returning
    if (get_context.State() != GetContext::kNotFound &&
1947 1948 1949
        get_context.State() != GetContext::kMerge &&
        db_statistics_ != nullptr) {
      get_context.ReportCounters();
1950
    }
1951 1952 1953 1954
    switch (get_context.State()) {
      case GetContext::kNotFound:
        // Keep searching in other files
        break;
1955
      case GetContext::kMerge:
1956
        // TODO: update per-level perfcontext user_key_return_count for kMerge
1957
        break;
1958
      case GetContext::kFound:
1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969
        if (fp.GetHitFileLevel() == 0) {
          RecordTick(db_statistics_, GET_HIT_L0);
        } else if (fp.GetHitFileLevel() == 1) {
          RecordTick(db_statistics_, GET_HIT_L1);
        } else if (fp.GetHitFileLevel() >= 2) {
          RecordTick(db_statistics_, GET_HIT_L2_AND_UP);
        }

        PERF_COUNTER_BY_LEVEL_ADD(user_key_return_count, 1,
                                  fp.GetHitFileLevel());

1970 1971
        if (is_blob_index) {
          if (do_merge && value) {
1972 1973 1974 1975
            constexpr uint64_t* bytes_read = nullptr;

            *status =
                GetBlob(read_options, user_key, *value, value, bytes_read);
1976 1977 1978 1979 1980 1981 1982 1983 1984
            if (!status->ok()) {
              if (status->IsIncomplete()) {
                get_context.MarkKeyMayExist();
              }
              return;
            }
          }
        }

1985
        return;
1986 1987 1988
      case GetContext::kDeleted:
        // Use empty error message for speed
        *status = Status::NotFound();
1989
        return;
1990
      case GetContext::kCorrupt:
1991 1992
        *status = Status::Corruption("corrupted key for ", user_key);
        return;
1993
      case GetContext::kUnexpectedBlobIndex:
Y
Yi Wu 已提交
1994 1995 1996
        ROCKS_LOG_ERROR(info_log_, "Encounter unexpected blob index.");
        *status = Status::NotSupported(
            "Encounter unexpected blob index. Please open DB with "
1997
            "ROCKSDB_NAMESPACE::blob_db::BlobDB instead.");
Y
Yi Wu 已提交
1998
        return;
1999
    }
2000
    f = fp.GetNextFile();
2001
  }
2002 2003
  if (db_statistics_ != nullptr) {
    get_context.ReportCounters();
2004
  }
2005
  if (GetContext::kMerge == get_context.State()) {
2006 2007 2008 2009
    if (!do_merge) {
      *status = Status::OK();
      return;
    }
2010 2011 2012 2013 2014
    if (!merge_operator_) {
      *status =  Status::InvalidArgument(
          "merge_operator is not properly initialized.");
      return;
    }
2015 2016
    // merge_operands are in saver and we hit the beginning of the key history
    // do a final merge of nullptr and operands;
M
Maysam Yabandeh 已提交
2017 2018 2019
    std::string* str_value = value != nullptr ? value->GetSelf() : nullptr;
    *status = MergeHelper::TimedFullMerge(
        merge_operator_, user_key, nullptr, merge_context->GetOperands(),
2020
        str_value, info_log_, db_statistics_, clock_,
2021
        nullptr /* result_operand */, true);
M
Maysam Yabandeh 已提交
2022 2023 2024
    if (LIKELY(value != nullptr)) {
      value->PinSelf();
    }
2025
  } else {
2026 2027 2028
    if (key_exists != nullptr) {
      *key_exists = false;
    }
2029
    *status = Status::NotFound(); // Use an empty error message for speed
2030
  }
2031 2032
}

2033
void Version::MultiGet(const ReadOptions& read_options, MultiGetRange* range,
2034
                       ReadCallback* callback) {
2035 2036 2037 2038 2039 2040
  PinnedIteratorsManager pinned_iters_mgr;

  // Pin blocks that we read to hold merge operands
  if (merge_operator_) {
    pinned_iters_mgr.StartPinning();
  }
2041
  uint64_t tracing_mget_id = BlockCacheTraceHelper::kReservedGetId;
2042

2043 2044 2045 2046
  if (vset_ && vset_->block_cache_tracer_ &&
      vset_->block_cache_tracer_->is_tracing_enabled()) {
    tracing_mget_id = vset_->block_cache_tracer_->NextGetId();
  }
2047 2048 2049 2050
  // Even though we know the batch size won't be > MAX_BATCH_SIZE,
  // use autovector in order to avoid unnecessary construction of GetContext
  // objects, which is expensive
  autovector<GetContext, 16> get_ctx;
2051
  BlobFetcher blob_fetcher(this, read_options);
2052 2053 2054 2055
  for (auto iter = range->begin(); iter != range->end(); ++iter) {
    assert(iter->s->ok() || iter->s->IsMergeInProgress());
    get_ctx.emplace_back(
        user_comparator(), merge_operator_, info_log_, db_statistics_,
2056 2057
        iter->s->ok() ? GetContext::kNotFound : GetContext::kMerge,
        iter->ukey_with_ts, iter->value, iter->timestamp, nullptr,
2058 2059
        &(iter->merge_context), true, &iter->max_covering_tombstone_seq, clock_,
        nullptr, merge_operator_ ? &pinned_iters_mgr : nullptr, callback,
2060
        &iter->is_blob_index, tracing_mget_id, &blob_fetcher);
2061 2062 2063 2064 2065
    // MergeInProgress status, if set, has been transferred to the get_context
    // state, so we set status to ok here. From now on, the iter status will
    // be used for IO errors, and get_context state will be used for any
    // key level errors
    *(iter->s) = Status::OK();
2066 2067 2068 2069 2070
  }
  int get_ctx_index = 0;
  for (auto iter = range->begin(); iter != range->end();
       ++iter, get_ctx_index++) {
    iter->get_context = &(get_ctx[get_ctx_index]);
2071 2072 2073 2074
  }

  MultiGetRange file_picker_range(*range, range->begin(), range->end());
  FilePickerMultiGet fp(
2075
      &file_picker_range,
2076 2077 2078
      &storage_info_.level_files_brief_, storage_info_.num_non_empty_levels_,
      &storage_info_.file_indexer_, user_comparator(), internal_comparator());
  FdWithKeyRange* f = fp.GetNextFile();
2079
  Status s;
A
Akanksha Mahajan 已提交
2080 2081 2082 2083
  uint64_t num_index_read = 0;
  uint64_t num_filter_read = 0;
  uint64_t num_data_read = 0;
  uint64_t num_sst_read = 0;
2084 2085 2086 2087 2088 2089

  while (f != nullptr) {
    MultiGetRange file_range = fp.CurrentFileRange();
    bool timer_enabled =
        GetPerfLevel() >= PerfLevel::kEnableTimeExceptForMutex &&
        get_perf_context()->per_level_perf_context_enabled;
2090
    StopWatchNano timer(clock_, timer_enabled /* auto_start */);
2091
    s = table_cache_->MultiGet(
2092 2093 2094 2095 2096
        read_options, *internal_comparator(), *f->file_metadata, &file_range,
        mutable_cf_options_.prefix_extractor.get(),
        cfd_->internal_stats()->GetFileReadHist(fp.GetHitFileLevel()),
        IsFilterSkipped(static_cast<int>(fp.GetHitFileLevel()),
                        fp.IsHitFileLastInLevel()),
2097
        fp.GetHitFileLevel());
2098 2099 2100
    // TODO: examine the behavior for corrupted key
    if (timer_enabled) {
      PERF_COUNTER_BY_LEVEL_ADD(get_from_table_nanos, timer.ElapsedNanos(),
2101
                                fp.GetHitFileLevel());
2102 2103 2104 2105 2106 2107 2108 2109 2110 2111
    }
    if (!s.ok()) {
      // TODO: Set status for individual keys appropriately
      for (auto iter = file_range.begin(); iter != file_range.end(); ++iter) {
        *iter->s = s;
        file_range.MarkKeyDone(iter);
      }
      return;
    }
    uint64_t batch_size = 0;
2112 2113
    for (auto iter = file_range.begin(); s.ok() && iter != file_range.end();
         ++iter) {
2114 2115
      GetContext& get_context = *iter->get_context;
      Status* status = iter->s;
2116 2117 2118 2119 2120 2121 2122 2123 2124
      // The Status in the KeyContext takes precedence over GetContext state
      // Status may be an error if there were any IO errors in the table
      // reader. We never expect Status to be NotFound(), as that is
      // determined by get_context
      assert(!status->IsNotFound());
      if (!status->ok()) {
        file_range.MarkKeyDone(iter);
        continue;
      }
2125 2126 2127 2128 2129

      if (get_context.sample()) {
        sample_file_read_inc(f->file_metadata);
      }
      batch_size++;
A
Akanksha Mahajan 已提交
2130 2131 2132 2133 2134
      num_index_read += get_context.get_context_stats_.num_index_read;
      num_filter_read += get_context.get_context_stats_.num_filter_read;
      num_data_read += get_context.get_context_stats_.num_data_read;
      num_sst_read += get_context.get_context_stats_.num_sst_read;

2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161
      // report the counters before returning
      if (get_context.State() != GetContext::kNotFound &&
          get_context.State() != GetContext::kMerge &&
          db_statistics_ != nullptr) {
        get_context.ReportCounters();
      } else {
        if (iter->max_covering_tombstone_seq > 0) {
          // The remaining files we look at will only contain covered keys, so
          // we stop here for this key
          file_picker_range.SkipKey(iter);
        }
      }
      switch (get_context.State()) {
        case GetContext::kNotFound:
          // Keep searching in other files
          break;
        case GetContext::kMerge:
          // TODO: update per-level perfcontext user_key_return_count for kMerge
          break;
        case GetContext::kFound:
          if (fp.GetHitFileLevel() == 0) {
            RecordTick(db_statistics_, GET_HIT_L0);
          } else if (fp.GetHitFileLevel() == 1) {
            RecordTick(db_statistics_, GET_HIT_L1);
          } else if (fp.GetHitFileLevel() >= 2) {
            RecordTick(db_statistics_, GET_HIT_L2_AND_UP);
          }
2162

2163 2164
          PERF_COUNTER_BY_LEVEL_ADD(user_key_return_count, 1,
                                    fp.GetHitFileLevel());
2165

2166
          file_range.MarkKeyDone(iter);
2167 2168 2169

          if (iter->is_blob_index) {
            if (iter->value) {
2170 2171
              constexpr uint64_t* bytes_read = nullptr;

2172
              *status = GetBlob(read_options, iter->ukey_with_ts, *iter->value,
2173
                                iter->value, bytes_read);
2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184
              if (!status->ok()) {
                if (status->IsIncomplete()) {
                  get_context.MarkKeyMayExist();
                }

                continue;
              }
            }
          }

          file_range.AddValueSize(iter->value->size());
2185 2186 2187 2188
          if (file_range.GetValueSize() > read_options.value_size_soft_limit) {
            s = Status::Aborted();
            break;
          }
2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199
          continue;
        case GetContext::kDeleted:
          // Use empty error message for speed
          *status = Status::NotFound();
          file_range.MarkKeyDone(iter);
          continue;
        case GetContext::kCorrupt:
          *status =
              Status::Corruption("corrupted key for ", iter->lkey->user_key());
          file_range.MarkKeyDone(iter);
          continue;
2200
        case GetContext::kUnexpectedBlobIndex:
2201 2202 2203
          ROCKS_LOG_ERROR(info_log_, "Encounter unexpected blob index.");
          *status = Status::NotSupported(
              "Encounter unexpected blob index. Please open DB with "
2204
              "ROCKSDB_NAMESPACE::blob_db::BlobDB instead.");
2205 2206 2207 2208
          file_range.MarkKeyDone(iter);
          continue;
      }
    }
A
Akanksha Mahajan 已提交
2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225

    // Report MultiGet stats per level.
    if (fp.IsHitFileLastInLevel()) {
      // Dump the stats if this is the last file of this level and reset for
      // next level.
      RecordInHistogram(db_statistics_,
                        NUM_INDEX_AND_FILTER_BLOCKS_READ_PER_LEVEL,
                        num_index_read + num_filter_read);
      RecordInHistogram(db_statistics_, NUM_DATA_BLOCKS_READ_PER_LEVEL,
                        num_data_read);
      RecordInHistogram(db_statistics_, NUM_SST_READ_PER_LEVEL, num_sst_read);
      num_filter_read = 0;
      num_index_read = 0;
      num_data_read = 0;
      num_sst_read = 0;
    }

2226
    RecordInHistogram(db_statistics_, SST_BATCH_SIZE, batch_size);
2227
    if (!s.ok() || file_picker_range.empty()) {
2228 2229 2230 2231 2232 2233
      break;
    }
    f = fp.GetNextFile();
  }

  // Process any left over keys
2234
  for (auto iter = range->begin(); s.ok() && iter != range->end(); ++iter) {
2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254
    GetContext& get_context = *iter->get_context;
    Status* status = iter->s;
    Slice user_key = iter->lkey->user_key();

    if (db_statistics_ != nullptr) {
      get_context.ReportCounters();
    }
    if (GetContext::kMerge == get_context.State()) {
      if (!merge_operator_) {
        *status = Status::InvalidArgument(
            "merge_operator is not properly initialized.");
        range->MarkKeyDone(iter);
        continue;
      }
      // merge_operands are in saver and we hit the beginning of the key history
      // do a final merge of nullptr and operands;
      std::string* str_value =
          iter->value != nullptr ? iter->value->GetSelf() : nullptr;
      *status = MergeHelper::TimedFullMerge(
          merge_operator_, user_key, nullptr, iter->merge_context.GetOperands(),
2255
          str_value, info_log_, db_statistics_, clock_,
2256 2257 2258
          nullptr /* result_operand */, true);
      if (LIKELY(iter->value != nullptr)) {
        iter->value->PinSelf();
2259 2260 2261 2262 2263 2264
        range->AddValueSize(iter->value->size());
        range->MarkKeyDone(iter);
        if (range->GetValueSize() > read_options.value_size_soft_limit) {
          s = Status::Aborted();
          break;
        }
2265 2266 2267 2268 2269 2270
      }
    } else {
      range->MarkKeyDone(iter);
      *status = Status::NotFound();  // Use an empty error message for speed
    }
  }
2271 2272 2273 2274 2275

  for (auto iter = range->begin(); iter != range->end(); ++iter) {
    range->MarkKeyDone(iter);
    *(iter->s) = s;
  }
2276 2277
}

2278
bool Version::IsFilterSkipped(int level, bool is_file_last_in_level) {
2279 2280
  // Reaching the bottom level implies misses at all upper levels, so we'll
  // skip checking the filters when we predict a hit.
2281 2282
  return cfd_->ioptions()->optimize_filters_for_hits &&
         (level > 0 || is_file_last_in_level) &&
2283 2284 2285
         level == storage_info_.num_non_empty_levels() - 1;
}

S
sdong 已提交
2286
void VersionStorageInfo::GenerateLevelFilesBrief() {
2287
  level_files_brief_.resize(num_non_empty_levels_);
2288
  for (int level = 0; level < num_non_empty_levels_; level++) {
2289 2290
    DoGenerateLevelFilesBrief(
        &level_files_brief_[level], files_[level], &arena_);
2291 2292 2293
  }
}

// Precomputes all metadata derived from the file list before this Version is
// used: accumulated stats, level size targets, per-level sort orders, and the
// flattened file summaries.
// NOTE(review): the call order below matters — e.g. GenerateLevelFilesBrief()
// sizes its output by num_non_empty_levels_, which UpdateNumNonEmptyLevels()
// computes first.
void Version::PrepareApply(
    const MutableCFOptions& mutable_cf_options,
    bool update_stats) {
  TEST_SYNC_POINT_CALLBACK(
      "Version::PrepareApply:forced_check",
      reinterpret_cast<void*>(&storage_info_.force_consistency_checks_));
  // Optionally sample table properties so compensated sizes are accurate.
  UpdateAccumulatedStats(update_stats);
  storage_info_.UpdateNumNonEmptyLevels();
  storage_info_.CalculateBaseBytes(*cfd_->ioptions(), mutable_cf_options);
  storage_info_.UpdateFilesByCompactionPri(cfd_->ioptions()->compaction_pri);
  storage_info_.GenerateFileIndexer();
  storage_info_.GenerateLevelFilesBrief();
  storage_info_.GenerateLevel0NonOverlapping();
  storage_info_.GenerateBottommostFiles();
}

2310
bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) {
2311 2312
  if (file_meta->init_stats_from_file ||
      file_meta->compensated_file_size > 0) {
2313 2314 2315 2316
    return false;
  }
  std::shared_ptr<const TableProperties> tp;
  Status s = GetTableProperties(&tp, file_meta);
2317
  file_meta->init_stats_from_file = true;
2318
  if (!s.ok()) {
2319 2320 2321 2322
    ROCKS_LOG_ERROR(vset_->db_options_->info_log,
                    "Unable to load table properties for file %" PRIu64
                    " --- %s\n",
                    file_meta->fd.GetNumber(), s.ToString().c_str());
2323 2324 2325 2326
    return false;
  }
  if (tp.get() == nullptr) return false;
  file_meta->num_entries = tp->num_entries;
2327
  file_meta->num_deletions = tp->num_deletions;
2328 2329 2330 2331 2332 2333
  file_meta->raw_value_size = tp->raw_value_size;
  file_meta->raw_key_size = tp->raw_key_size;

  return true;
}

S
sdong 已提交
2334
void VersionStorageInfo::UpdateAccumulatedStats(FileMetaData* file_meta) {
2335 2336 2337
  TEST_SYNC_POINT_CALLBACK("VersionStorageInfo::UpdateAccumulatedStats",
                           nullptr);

2338 2339 2340 2341 2342 2343 2344
  assert(file_meta->init_stats_from_file);
  accumulated_file_size_ += file_meta->fd.GetFileSize();
  accumulated_raw_key_size_ += file_meta->raw_key_size;
  accumulated_raw_value_size_ += file_meta->raw_value_size;
  accumulated_num_non_deletions_ +=
      file_meta->num_entries - file_meta->num_deletions;
  accumulated_num_deletions_ += file_meta->num_deletions;
2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358

  current_num_non_deletions_ +=
      file_meta->num_entries - file_meta->num_deletions;
  current_num_deletions_ += file_meta->num_deletions;
  current_num_samples_++;
}

// Undo a file's contribution to the current-version counters. Only files
// whose stats were loaded from disk ever contributed, so others are no-ops.
void VersionStorageInfo::RemoveCurrentStats(FileMetaData* file_meta) {
  if (!file_meta->init_stats_from_file) {
    return;
  }
  current_num_non_deletions_ -=
      file_meta->num_entries - file_meta->num_deletions;
  current_num_deletions_ -= file_meta->num_deletions;
  current_num_samples_--;
}

// Samples table properties for files whose stats have not yet been loaded
// from disk (capped at kMaxInitCount files per call to bound I/O) and then
// recomputes compensated file sizes for the whole version.
void Version::UpdateAccumulatedStats(bool update_stats) {
  if (update_stats) {
    // maximum number of table properties loaded from files.
    const int kMaxInitCount = 20;
    int init_count = 0;
    // here only the first kMaxInitCount files which haven't been
    // initialized from file will be updated with num_deletions.
    // The motivation here is to cap the maximum I/O per Version creation.
    // The reason for choosing files from lower-level instead of higher-level
    // is that such design is able to propagate the initialization from
    // lower-level to higher-level:  When the num_deletions of lower-level
    // files are updated, it will make the lower-level files have accurate
    // compensated_file_size, making lower-level to higher-level compaction
    // will be triggered, which creates higher-level files whose num_deletions
    // will be updated here.
    for (int level = 0;
         level < storage_info_.num_levels_ && init_count < kMaxInitCount;
         ++level) {
      for (auto* file_meta : storage_info_.files_[level]) {
        if (MaybeInitializeFileMetaData(file_meta)) {
          // each FileMeta will be initialized only once.
          storage_info_.UpdateAccumulatedStats(file_meta);
          // when option "max_open_files" is -1, all the file metadata has
          // already been read, so MaybeInitializeFileMetaData() won't incur
          // any I/O cost. "max_open_files=-1" means that the table cache passed
          // to the VersionSet and then to the ColumnFamilySet has a size of
          // TableCache::kInfiniteCapacity
          if (vset_->GetColumnFamilySet()->get_table_cache()->GetCapacity() ==
              TableCache::kInfiniteCapacity) {
            continue;
          }
          // This break only exits the current level's file loop; the outer
          // loop's init_count condition then stops the remaining levels.
          if (++init_count >= kMaxInitCount) {
            break;
          }
        }
      }
    }
    // In case all sampled-files contain only deletion entries, then we
    // load the table-property of a file in higher-level to initialize
    // that value.
    for (int level = storage_info_.num_levels_ - 1;
         storage_info_.accumulated_raw_value_size_ == 0 && level >= 0;
         --level) {
      for (int i = static_cast<int>(storage_info_.files_[level].size()) - 1;
           storage_info_.accumulated_raw_value_size_ == 0 && i >= 0; --i) {
        if (MaybeInitializeFileMetaData(storage_info_.files_[level][i])) {
          storage_info_.UpdateAccumulatedStats(storage_info_.files_[level][i]);
        }
      }
    }
  }

  storage_info_.ComputeCompensatedSizes();
}

void VersionStorageInfo::ComputeCompensatedSizes() {
  static const int kDeletionWeightOnCompaction = 2;
2418 2419 2420 2421 2422
  uint64_t average_value_size = GetAverageValueSize();

  // compute the compensated size
  for (int level = 0; level < num_levels_; level++) {
    for (auto* file_meta : files_[level]) {
2423
      // Here we only compute compensated_file_size for those file_meta
I
Igor Canadi 已提交
2424 2425 2426
      // which compensated_file_size is uninitialized (== 0). This is true only
      // for files that have been created right now and no other thread has
      // access to them. That's why we can safely mutate compensated_file_size.
2427
      if (file_meta->compensated_file_size == 0) {
2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438
        file_meta->compensated_file_size = file_meta->fd.GetFileSize();
        // Here we only boost the size of deletion entries of a file only
        // when the number of deletion entries is greater than the number of
        // non-deletion entries in the file.  The motivation here is that in
        // a stable workload, the number of deletion entries should be roughly
        // equal to the number of non-deletion entries.  If we compensate the
        // size of deletion entries in a stable workload, the deletion
        // compensation logic might introduce unwanted effet which changes the
        // shape of LSM tree.
        if (file_meta->num_deletions * 2 >= file_meta->num_entries) {
          file_meta->compensated_file_size +=
2439 2440
              (file_meta->num_deletions * 2 - file_meta->num_entries) *
              average_value_size * kDeletionWeightOnCompaction;
2441
        }
2442
      }
2443 2444 2445 2446
    }
  }
}

S
sdong 已提交
2447 2448
int VersionStorageInfo::MaxInputLevel() const {
  if (compaction_style_ == kCompactionStyleLevel) {
2449
    return num_levels() - 2;
S
sdong 已提交
2450 2451 2452 2453
  }
  return 0;
}

2454 2455 2456 2457 2458 2459 2460 2461
int VersionStorageInfo::MaxOutputLevel(bool allow_ingest_behind) const {
  if (allow_ingest_behind) {
    assert(num_levels() > 1);
    return num_levels() - 2;
  }
  return num_levels() - 1;
}

// Estimates the total bytes compaction must move before the tree reaches a
// steady state, by cascading each level's overflow down to the next level.
void VersionStorageInfo::EstimateCompactionBytesNeeded(
    const MutableCFOptions& mutable_cf_options) {
  // Only implemented for level-based compaction
  if (compaction_style_ != kCompactionStyleLevel) {
    estimated_compaction_needed_bytes_ = 0;
    return;
  }

  // Start from Level 0, if level 0 qualifies compaction to level 1,
  // we estimate the size of compaction.
  // Then we move on to the next level and see whether it qualifies compaction
  // to the next level. The size of the level is estimated as the actual size
  // on the level plus the input bytes from the previous level if there is any.
  // If it exceeds, take the exceeded bytes as compaction input and add the size
  // of the compaction size to total size.
  // We keep doing it to Level 2, 3, etc, until the last level and return the
  // accumulated bytes.

  uint64_t bytes_compact_to_next_level = 0;
  uint64_t level_size = 0;
  for (auto* f : files_[0]) {
    level_size += f->fd.GetFileSize();
  }
  // Level 0: triggered either by file count or by total size.
  bool level0_compact_triggered = false;
  if (static_cast<int>(files_[0].size()) >=
          mutable_cf_options.level0_file_num_compaction_trigger ||
      level_size >= mutable_cf_options.max_bytes_for_level_base) {
    level0_compact_triggered = true;
    estimated_compaction_needed_bytes_ = level_size;
    bytes_compact_to_next_level = level_size;
  } else {
    estimated_compaction_needed_bytes_ = 0;
  }

  // Level 1 and up.
  uint64_t bytes_next_level = 0;
  for (int level = base_level(); level <= MaxInputLevel(); level++) {
    level_size = 0;
    if (bytes_next_level > 0) {
#ifndef NDEBUG
      uint64_t level_size2 = 0;
      for (auto* f : files_[level]) {
        level_size2 += f->fd.GetFileSize();
      }
      assert(level_size2 == bytes_next_level);
#endif
      // Reuse the size computed while estimating the previous level's
      // fan-out instead of rescanning the level's file list.
      level_size = bytes_next_level;
      bytes_next_level = 0;
    } else {
      for (auto* f : files_[level]) {
        level_size += f->fd.GetFileSize();
      }
    }
    if (level == base_level() && level0_compact_triggered) {
      // Add base level size to compaction if level0 compaction triggered.
      estimated_compaction_needed_bytes_ += level_size;
    }
    // Add size added by previous compaction
    level_size += bytes_compact_to_next_level;
    bytes_compact_to_next_level = 0;
    uint64_t level_target = MaxBytesForLevel(level);
    if (level_size > level_target) {
      bytes_compact_to_next_level = level_size - level_target;
      // Estimate the actual compaction fan-out ratio as size ratio between
      // the two levels.

      assert(bytes_next_level == 0);
      if (level + 1 < num_levels_) {
        for (auto* f : files_[level + 1]) {
          bytes_next_level += f->fd.GetFileSize();
        }
      }
      if (bytes_next_level > 0) {
        assert(level_size > 0);
        // Overflow bytes are multiplied by (next_level/level ratio + 1) to
        // account for both the bytes read from this level and those merged
        // from the next level during the compaction.
        estimated_compaction_needed_bytes_ += static_cast<uint64_t>(
            static_cast<double>(bytes_compact_to_next_level) *
            (static_cast<double>(bytes_next_level) /
                 static_cast<double>(level_size) +
             1));
      }
    }
  }
}

S
Sagar Vemuri 已提交
2547
namespace {
2548
uint32_t GetExpiredTtlFilesCount(const ImmutableOptions& ioptions,
2549
                                 const MutableCFOptions& mutable_cf_options,
S
Sagar Vemuri 已提交
2550 2551 2552 2553
                                 const std::vector<FileMetaData*>& files) {
  uint32_t ttl_expired_files_count = 0;

  int64_t _current_time;
2554
  auto status = ioptions.clock->GetCurrentTime(&_current_time);
S
Sagar Vemuri 已提交
2555 2556
  if (status.ok()) {
    const uint64_t current_time = static_cast<uint64_t>(_current_time);
2557 2558 2559 2560 2561
    for (FileMetaData* f : files) {
      if (!f->being_compacted) {
        uint64_t oldest_ancester_time = f->TryGetOldestAncesterTime();
        if (oldest_ancester_time != 0 &&
            oldest_ancester_time < (current_time - mutable_cf_options.ttl)) {
S
Sagar Vemuri 已提交
2562 2563 2564 2565 2566 2567 2568 2569 2570
          ttl_expired_files_count++;
        }
      }
    }
  }
  return ttl_expired_files_count;
}
}  // anonymous namespace

// Assigns a compaction score to every candidate input level (score >= 1
// means the level needs compaction), sorts (level, score) pairs so the
// highest score comes first, then refreshes the marked-file lists and the
// compaction-debt estimate.
void VersionStorageInfo::ComputeCompactionScore(
    const ImmutableOptions& immutable_options,
    const MutableCFOptions& mutable_cf_options) {
  for (int level = 0; level <= MaxInputLevel(); level++) {
    double score;
    if (level == 0) {
      // We treat level-0 specially by bounding the number of files
      // instead of number of bytes for two reasons:
      //
      // (1) With larger write-buffer sizes, it is nice not to do too
      // many level-0 compactions.
      //
      // (2) The files in level-0 are merged on every read and
      // therefore we wish to avoid too many files when the individual
      // file size is small (perhaps because of a small write-buffer
      // setting, or very high compression ratios, or lots of
      // overwrites/deletions).
      int num_sorted_runs = 0;
      uint64_t total_size = 0;
      for (auto* f : files_[level]) {
        if (!f->being_compacted) {
          total_size += f->compensated_file_size;
          num_sorted_runs++;
        }
      }
      if (compaction_style_ == kCompactionStyleUniversal) {
        // For universal compaction, we use level0 score to indicate
        // compaction score for the whole DB. Adding other levels as if
        // they are L0 files.
        for (int i = 1; i < num_levels(); i++) {
          // Its possible that a subset of the files in a level may be in a
          // compaction, due to delete triggered compaction or trivial move.
          // In that case, the below check may not catch a level being
          // compacted as it only checks the first file. The worst that can
          // happen is a scheduled compaction thread will find nothing to do.
          if (!files_[i].empty() && !files_[i][0]->being_compacted) {
            num_sorted_runs++;
          }
        }
      }

      if (compaction_style_ == kCompactionStyleFIFO) {
        // FIFO scores by total size against the configured cap; optionally
        // also by file count and by TTL-expired file count.
        score = static_cast<double>(total_size) /
                mutable_cf_options.compaction_options_fifo.max_table_files_size;
        if (mutable_cf_options.compaction_options_fifo.allow_compaction) {
          score = std::max(
              static_cast<double>(num_sorted_runs) /
                  mutable_cf_options.level0_file_num_compaction_trigger,
              score);
        }
        if (mutable_cf_options.ttl > 0) {
          score = std::max(
              static_cast<double>(GetExpiredTtlFilesCount(
                  immutable_options, mutable_cf_options, files_[level])),
              score);
        }

      } else {
        score = static_cast<double>(num_sorted_runs) /
                mutable_cf_options.level0_file_num_compaction_trigger;
        if (compaction_style_ == kCompactionStyleLevel && num_levels() > 1) {
          // Level-based involves L0->L0 compactions that can lead to oversized
          // L0 files. Take into account size as well to avoid later giant
          // compactions to the base level.
          uint64_t l0_target_size = mutable_cf_options.max_bytes_for_level_base;
          if (immutable_options.level_compaction_dynamic_level_bytes &&
              level_multiplier_ != 0.0) {
            // Prevent L0 to Lbase fanout from growing larger than
            // `level_multiplier_`. This prevents us from getting stuck picking
            // L0 forever even when it is hurting write-amp. That could happen
            // in dynamic level compaction's write-burst mode where the base
            // level's target size can grow to be enormous.
            l0_target_size =
                std::max(l0_target_size,
                         static_cast<uint64_t>(level_max_bytes_[base_level_] /
                                               level_multiplier_));
          }
          score =
              std::max(score, static_cast<double>(total_size) / l0_target_size);
        }
      }
    } else {
      // Compute the ratio of current size to size limit.
      uint64_t level_bytes_no_compacting = 0;
      for (auto f : files_[level]) {
        if (!f->being_compacted) {
          level_bytes_no_compacting += f->compensated_file_size;
        }
      }
      score = static_cast<double>(level_bytes_no_compacting) /
              MaxBytesForLevel(level);
    }
    compaction_level_[level] = level;
    compaction_score_[level] = score;
  }

  // sort all the levels based on their score. Higher scores get listed
  // first. Use bubble sort because the number of entries are small.
  for (int i = 0; i < num_levels() - 2; i++) {
    for (int j = i + 1; j < num_levels() - 1; j++) {
      if (compaction_score_[i] < compaction_score_[j]) {
        double score = compaction_score_[i];
        int level = compaction_level_[i];
        compaction_score_[i] = compaction_score_[j];
        compaction_level_[i] = compaction_level_[j];
        compaction_score_[j] = score;
        compaction_level_[j] = level;
      }
    }
  }
  // Refresh the derived marked-file lists and the compaction-debt estimate.
  ComputeFilesMarkedForCompaction();
  ComputeBottommostFilesMarkedForCompaction();
  if (mutable_cf_options.ttl > 0) {
    ComputeExpiredTtlFiles(immutable_options, mutable_cf_options.ttl);
  }
  if (mutable_cf_options.periodic_compaction_seconds > 0) {
    ComputeFilesMarkedForPeriodicCompaction(
        immutable_options, mutable_cf_options.periodic_compaction_seconds);
  }
  EstimateCompactionBytesNeeded(mutable_cf_options);
}

void VersionStorageInfo::ComputeFilesMarkedForCompaction() {
  files_marked_for_compaction_.clear();
2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707
  int last_qualify_level = 0;

  // Do not include files from the last level with data
  // If table properties collector suggests a file on the last level,
  // we should not move it to a new level.
  for (int level = num_levels() - 1; level >= 1; level--) {
    if (!files_[level].empty()) {
      last_qualify_level = level - 1;
      break;
    }
  }

  for (int level = 0; level <= last_qualify_level; level++) {
2708 2709 2710 2711 2712 2713
    for (auto* f : files_[level]) {
      if (!f->being_compacted && f->marked_for_compaction) {
        files_marked_for_compaction_.emplace_back(level, f);
      }
    }
  }
2714 2715
}

S
Sagar Vemuri 已提交
2716
void VersionStorageInfo::ComputeExpiredTtlFiles(
2717
    const ImmutableOptions& ioptions, const uint64_t ttl) {
2718
  assert(ttl > 0);
S
Sagar Vemuri 已提交
2719 2720 2721 2722

  expired_ttl_files_.clear();

  int64_t _current_time;
2723
  auto status = ioptions.clock->GetCurrentTime(&_current_time);
S
Sagar Vemuri 已提交
2724 2725 2726 2727 2728 2729
  if (!status.ok()) {
    return;
  }
  const uint64_t current_time = static_cast<uint64_t>(_current_time);

  for (int level = 0; level < num_levels() - 1; level++) {
2730 2731 2732 2733 2734
    for (FileMetaData* f : files_[level]) {
      if (!f->being_compacted) {
        uint64_t oldest_ancester_time = f->TryGetOldestAncesterTime();
        if (oldest_ancester_time > 0 &&
            oldest_ancester_time < (current_time - ttl)) {
S
Sagar Vemuri 已提交
2735 2736 2737 2738 2739 2740 2741
          expired_ttl_files_.emplace_back(level, f);
        }
      }
    }
  }
}

// Collects files old enough to be picked up by periodic compaction into
// files_marked_for_periodic_compaction_. Files already being compacted are
// skipped; a file whose age cannot be determined is ignored.
void VersionStorageInfo::ComputeFilesMarkedForPeriodicCompaction(
    const ImmutableOptions& ioptions,
    const uint64_t periodic_compaction_seconds) {
  assert(periodic_compaction_seconds > 0);

  files_marked_for_periodic_compaction_.clear();

  int64_t temp_current_time;
  auto status = ioptions.clock->GetCurrentTime(&temp_current_time);
  if (!status.ok()) {
    // Without a clock reading we cannot age files; leave the list empty.
    return;
  }
  const uint64_t current_time = static_cast<uint64_t>(temp_current_time);

  // If periodic_compaction_seconds is larger than current time, periodic
  // compaction can't possibly be triggered.
  if (periodic_compaction_seconds > current_time) {
    return;
  }

  const uint64_t allowed_time_limit =
      current_time - periodic_compaction_seconds;

  for (int level = 0; level < num_levels(); level++) {
    for (auto f : files_[level]) {
      if (!f->being_compacted) {
        // Compute a file's modification time in the following order:
        // 1. Use file_creation_time table property if it is > 0.
        // 2. Use creation_time table property if it is > 0.
        // 3. Use file's mtime metadata if the above two table properties are 0.
        // Don't consider the file at all if the modification time cannot be
        // correctly determined based on the above conditions.
        uint64_t file_modification_time = f->TryGetFileCreationTime();
        if (file_modification_time == kUnknownFileCreationTime) {
          file_modification_time = f->TryGetOldestAncesterTime();
        }
        if (file_modification_time == kUnknownOldestAncesterTime) {
          // Fall back to the file system's mtime for this table file.
          auto file_path = TableFileName(ioptions.cf_paths, f->fd.GetNumber(),
                                         f->fd.GetPathId());
          status = ioptions.env->GetFileModificationTime(
              file_path, &file_modification_time);
          if (!status.ok()) {
            ROCKS_LOG_WARN(ioptions.logger,
                           "Can't get file modification time: %s: %s",
                           file_path.c_str(), status.ToString().c_str());
            continue;
          }
        }
        if (file_modification_time > 0 &&
            file_modification_time < allowed_time_limit) {
          files_marked_for_periodic_compaction_.emplace_back(level, f);
        }
      }
    }
  }
}

2799
namespace {
2800 2801 2802

// used to sort files by size
struct Fsize {
2803
  size_t index;
2804 2805 2806
  FileMetaData* file;
};

2807
// Comparator that is used to sort files based on their size
2808
// In normal mode: descending size
2809
bool CompareCompensatedSizeDescending(const Fsize& first, const Fsize& second) {
2810 2811
  return (first.file->compensated_file_size >
      second.file->compensated_file_size);
2812
}
K
kailiu 已提交
2813
} // anonymous namespace
2814

2815 2816 2817 2818
void VersionStorageInfo::AddFile(int level, FileMetaData* f) {
  auto& level_files = files_[level];
  level_files.push_back(f);

S
sdong 已提交
2819
  f->refs++;
2820 2821 2822 2823 2824

  const uint64_t file_number = f->fd.GetNumber();

  assert(file_locations_.find(file_number) == file_locations_.end());
  file_locations_.emplace(file_number,
2825
                          FileLocation(level, level_files.size() - 1));
S
sdong 已提交
2826 2827
}

2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840
void VersionStorageInfo::AddBlobFile(
    std::shared_ptr<BlobFileMetaData> blob_file_meta) {
  assert(blob_file_meta);

  const uint64_t blob_file_number = blob_file_meta->GetBlobFileNumber();

  auto it = blob_files_.lower_bound(blob_file_number);
  assert(it == blob_files_.end() || it->first != blob_file_number);

  blob_files_.insert(
      it, BlobFiles::value_type(blob_file_number, std::move(blob_file_meta)));
}

// Version::PrepareApply() need to be called before calling the function, or
// following functions called:
// 1. UpdateNumNonEmptyLevels();
// 2. CalculateBaseBytes();
// 3. UpdateFilesByCompactionPri();
// 4. GenerateFileIndexer();
// 5. GenerateLevelFilesBrief();
// 6. GenerateLevel0NonOverlapping();
// 7. GenerateBottommostFiles();
void VersionStorageInfo::SetFinalized() {
  finalized_ = true;
#ifndef NDEBUG
  // Everything below is a debug-only consistency check of the level shape.
  if (compaction_style_ != kCompactionStyleLevel) {
    // Not level based compaction.
    return;
  }
  assert(base_level_ < 0 || num_levels() == 1 ||
         (base_level_ >= 1 && base_level_ < num_levels()));
  // Verify all levels newer than base_level are empty except L0
  for (int level = 1; level < base_level(); level++) {
    assert(NumLevelBytes(level) == 0);
  }
  // Size targets must be non-decreasing across populated levels.
  uint64_t max_bytes_prev_level = 0;
  for (int level = base_level(); level < num_levels() - 1; level++) {
    if (LevelFiles(level).size() == 0) {
      continue;
    }
    assert(MaxBytesForLevel(level) >= max_bytes_prev_level);
    max_bytes_prev_level = MaxBytesForLevel(level);
  }
  // NOTE(review): despite its name, this counter is incremented for
  // NON-empty levels above L0, and it is computed but never asserted on
  // in this function.
  int num_empty_non_l0_level = 0;
  for (int level = 0; level < num_levels(); level++) {
    assert(LevelFiles(level).size() == 0 ||
           LevelFiles(level).size() == LevelFilesBrief(level).num_files);
    if (level > 0 && NumLevelBytes(level) > 0) {
      num_empty_non_l0_level++;
    }
    if (LevelFiles(level).size() > 0) {
      assert(level < num_non_empty_levels());
    }
  }
  assert(compaction_level_.size() > 0);
  assert(compaction_level_.size() == compaction_score_.size());
#endif
}

S
sdong 已提交
2887
void VersionStorageInfo::UpdateNumNonEmptyLevels() {
2888 2889 2890 2891 2892 2893 2894 2895 2896 2897
  num_non_empty_levels_ = num_levels_;
  for (int i = num_levels_ - 1; i >= 0; i--) {
    if (files_[i].size() != 0) {
      return;
    } else {
      num_non_empty_levels_ = i;
    }
  }
}

namespace {
// Sort `temp` based on ratio of overlapping size over file size: for each
// file, the bytes it overlaps in `next_level_files` are scaled by 1024 and
// divided by its compensated size; files with smaller ratios sort first.
// The single forward sweep over `next_level_files` relies on both file
// lists being ordered by key.
void SortFileByOverlappingRatio(
    const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& files,
    const std::vector<FileMetaData*>& next_level_files,
    std::vector<Fsize>* temp) {
  std::unordered_map<uint64_t, uint64_t> file_to_order;
  auto next_level_it = next_level_files.begin();

  for (auto& file : files) {
    uint64_t overlapping_bytes = 0;
    // Skip files in next level that is smaller than current file
    while (next_level_it != next_level_files.end() &&
           icmp.Compare((*next_level_it)->largest, file->smallest) < 0) {
      next_level_it++;
    }

    // Accumulate next-level files whose ranges intersect the current file.
    while (next_level_it != next_level_files.end() &&
           icmp.Compare((*next_level_it)->smallest, file->largest) < 0) {
      overlapping_bytes += (*next_level_it)->fd.file_size;

      if (icmp.Compare((*next_level_it)->largest, file->largest) > 0) {
        // next level file cross large boundary of current file. Stay on it,
        // since it may also overlap the next file in `files`.
        break;
      }
      next_level_it++;
    }

    assert(file->compensated_file_size != 0);
    file_to_order[file->fd.GetNumber()] =
        overlapping_bytes * 1024u / file->compensated_file_size;
  }

  std::sort(temp->begin(), temp->end(),
            [&](const Fsize& f1, const Fsize& f2) -> bool {
              return file_to_order[f1.file->fd.GetNumber()] <
                     file_to_order[f2.file->fd.GetNumber()];
            });
}
}  // namespace

// Rebuilds files_by_compaction_pri_ for every level except the last: a list
// of indexes into files_[level], ordered from the most to the least
// attractive compaction input under the configured `compaction_pri`.
void VersionStorageInfo::UpdateFilesByCompactionPri(
    CompactionPri compaction_pri) {
  if (compaction_style_ == kCompactionStyleNone ||
      compaction_style_ == kCompactionStyleFIFO ||
      compaction_style_ == kCompactionStyleUniversal) {
    // don't need this
    return;
  }
  // No need to sort the highest level because it is never compacted.
  for (int level = 0; level < num_levels() - 1; level++) {
    const std::vector<FileMetaData*>& files = files_[level];
    auto& files_by_compaction_pri = files_by_compaction_pri_[level];
    assert(files_by_compaction_pri.size() == 0);

    // populate a temp vector for sorting based on size
    std::vector<Fsize> temp(files.size());
    for (size_t i = 0; i < files.size(); i++) {
      temp[i].index = i;
      temp[i].file = files[i];
    }

    // sort the top number_of_files_to_sort_ based on file size
    size_t num = VersionStorageInfo::kNumberFilesToSort;
    if (num > temp.size()) {
      num = temp.size();
    }
    switch (compaction_pri) {
      case kByCompensatedSize:
        // Largest compensated size first; only the top `num` entries need
        // to be fully ordered.
        std::partial_sort(temp.begin(), temp.begin() + num, temp.end(),
                          CompareCompensatedSizeDescending);
        break;
      case kOldestLargestSeqFirst:
        // Files whose newest entry is oldest come first.
        std::sort(temp.begin(), temp.end(),
                  [](const Fsize& f1, const Fsize& f2) -> bool {
                    return f1.file->fd.largest_seqno <
                           f2.file->fd.largest_seqno;
                  });
        break;
      case kOldestSmallestSeqFirst:
        // Files whose oldest entry is oldest come first.
        std::sort(temp.begin(), temp.end(),
                  [](const Fsize& f1, const Fsize& f2) -> bool {
                    return f1.file->fd.smallest_seqno <
                           f2.file->fd.smallest_seqno;
                  });
        break;
      case kMinOverlappingRatio:
        // Least overlap with the next level, relative to own size, first.
        SortFileByOverlappingRatio(*internal_comparator_, files_[level],
                                   files_[level + 1], &temp);
        break;
      default:
        assert(false);
    }
    assert(temp.size() == files.size());

    // initialize files_by_compaction_pri_
    for (size_t i = 0; i < temp.size(); i++) {
      files_by_compaction_pri.push_back(static_cast<int>(temp[i].index));
    }
    next_file_to_compact_by_size_[level] = 0;
    assert(files_[level].size() == files_by_compaction_pri_[level].size());
  }
}

3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012
void VersionStorageInfo::GenerateLevel0NonOverlapping() {
  assert(!finalized_);
  level0_non_overlapping_ = true;
  if (level_files_brief_.size() == 0) {
    return;
  }

  // A copy of L0 files sorted by smallest key
  std::vector<FdWithKeyRange> level0_sorted_file(
      level_files_brief_[0].files,
      level_files_brief_[0].files + level_files_brief_[0].num_files);
3013 3014 3015 3016 3017
  std::sort(level0_sorted_file.begin(), level0_sorted_file.end(),
            [this](const FdWithKeyRange& f1, const FdWithKeyRange& f2) -> bool {
              return (internal_comparator_->Compare(f1.smallest_key,
                                                    f2.smallest_key) < 0);
            });
3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028

  for (size_t i = 1; i < level0_sorted_file.size(); ++i) {
    FdWithKeyRange& f = level0_sorted_file[i];
    FdWithKeyRange& prev = level0_sorted_file[i - 1];
    if (internal_comparator_->Compare(prev.largest_key, f.smallest_key) >= 0) {
      level0_non_overlapping_ = false;
      break;
    }
  }
}

3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041
void VersionStorageInfo::GenerateBottommostFiles() {
  assert(!finalized_);
  assert(bottommost_files_.empty());
  for (size_t level = 0; level < level_files_brief_.size(); ++level) {
    for (size_t file_idx = 0; file_idx < level_files_brief_[level].num_files;
         ++file_idx) {
      const FdWithKeyRange& f = level_files_brief_[level].files[file_idx];
      int l0_file_idx;
      if (level == 0) {
        l0_file_idx = static_cast<int>(file_idx);
      } else {
        l0_file_idx = -1;
      }
3042 3043 3044
      Slice smallest_user_key = ExtractUserKey(f.smallest_key);
      Slice largest_user_key = ExtractUserKey(f.largest_key);
      if (!RangeMightExistAfterSortedRun(smallest_user_key, largest_user_key,
3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066
                                         static_cast<int>(level),
                                         l0_file_idx)) {
        bottommost_files_.emplace_back(static_cast<int>(level),
                                       f.file_metadata);
      }
    }
  }
}

// Records the sequence number of the oldest live snapshot. If it advanced
// past the cached mark threshold, the set of bottommost files eligible for
// compaction may have grown, so recompute it.
void VersionStorageInfo::UpdateOldestSnapshot(SequenceNumber seqnum) {
  // The oldest snapshot seqnum only moves forward.
  assert(seqnum >= oldest_snapshot_seqnum_);
  oldest_snapshot_seqnum_ = seqnum;
  if (oldest_snapshot_seqnum_ > bottommost_files_mark_threshold_) {
    // Some bottommost file's largest_seqno may now be below the oldest
    // snapshot, making it compactable; refresh the marked set.
    ComputeBottommostFilesMarkedForCompaction();
  }
}

void VersionStorageInfo::ComputeBottommostFilesMarkedForCompaction() {
  bottommost_files_marked_for_compaction_.clear();
  bottommost_files_mark_threshold_ = kMaxSequenceNumber;
  for (auto& level_and_file : bottommost_files_) {
    if (!level_and_file.second->being_compacted &&
3067
        level_and_file.second->fd.largest_seqno != 0) {
3068 3069 3070
      // largest_seqno might be nonzero due to containing the final key in an
      // earlier compaction, whose seqnum we didn't zero out. Multiple deletions
      // ensures the file really contains deleted or overwritten keys.
3071
      if (level_and_file.second->fd.largest_seqno < oldest_snapshot_seqnum_) {
3072 3073 3074 3075
        bottommost_files_marked_for_compaction_.push_back(level_and_file);
      } else {
        bottommost_files_mark_threshold_ =
            std::min(bottommost_files_mark_threshold_,
3076
                     level_and_file.second->fd.largest_seqno);
3077 3078 3079 3080 3081
      }
    }
  }
}

J
jorlow@chromium.org 已提交
3082 3083 3084 3085
// Increments this Version's reference count. Every Ref() must be paired with
// a later Unref().
void Version::Ref() {
  ++refs_;
}

3086
bool Version::Unref() {
J
jorlow@chromium.org 已提交
3087 3088 3089
  assert(refs_ >= 1);
  --refs_;
  if (refs_ == 0) {
3090
    delete this;
3091
    return true;
J
jorlow@chromium.org 已提交
3092
  }
3093
  return false;
J
jorlow@chromium.org 已提交
3094 3095
}

S
sdong 已提交
3096 3097 3098
bool VersionStorageInfo::OverlapInLevel(int level,
                                        const Slice* smallest_user_key,
                                        const Slice* largest_user_key) {
3099 3100 3101 3102
  if (level >= num_non_empty_levels_) {
    // empty level, no overlap
    return false;
  }
S
sdong 已提交
3103
  return SomeFileOverlapsRange(*internal_comparator_, (level > 0),
3104
                               level_files_brief_[level], smallest_user_key,
3105
                               largest_user_key);
G
Gabor Cselle 已提交
3106 3107 3108
}

// Store in "*inputs" all files in "level" that overlap [begin,end]
A
Abhishek Kona 已提交
3109
// If hint_index is specified, then it points to a file in the
3110 3111
// overlapping range.
// The file_index returns a pointer to any file in an overlapping range.
S
sdong 已提交
3112 3113
void VersionStorageInfo::GetOverlappingInputs(
    int level, const InternalKey* begin, const InternalKey* end,
3114
    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index,
3115
    bool expand_range, InternalKey** next_smallest) const {
3116 3117 3118 3119 3120
  if (level >= num_non_empty_levels_) {
    // this level is empty, no overlapping inputs
    return;
  }

G
Gabor Cselle 已提交
3121
  inputs->clear();
3122 3123 3124
  if (file_index) {
    *file_index = -1;
  }
S
sdong 已提交
3125
  const Comparator* user_cmp = user_comparator_;
3126
  if (level > 0) {
3127 3128
    GetOverlappingInputsRangeBinarySearch(level, begin, end, inputs, hint_index,
                                          file_index, false, next_smallest);
3129 3130
    return;
  }
A
Aaron Gao 已提交
3131

3132 3133 3134 3135 3136 3137
  if (next_smallest) {
    // next_smallest key only makes sense for non-level 0, where files are
    // non-overlapping
    *next_smallest = nullptr;
  }

3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158
  Slice user_begin, user_end;
  if (begin != nullptr) {
    user_begin = begin->user_key();
  }
  if (end != nullptr) {
    user_end = end->user_key();
  }

  // index stores the file index need to check.
  std::list<size_t> index;
  for (size_t i = 0; i < level_files_brief_[level].num_files; i++) {
    index.emplace_back(i);
  }

  while (!index.empty()) {
    bool found_overlapping_file = false;
    auto iter = index.begin();
    while (iter != index.end()) {
      FdWithKeyRange* f = &(level_files_brief_[level].files[*iter]);
      const Slice file_start = ExtractUserKey(f->smallest_key);
      const Slice file_limit = ExtractUserKey(f->largest_key);
3159 3160
      if (begin != nullptr &&
          user_cmp->CompareWithoutTimestamp(file_limit, user_begin) < 0) {
3161 3162
        // "f" is completely before specified range; skip it
        iter++;
3163
      } else if (end != nullptr &&
3164
                 user_cmp->CompareWithoutTimestamp(file_start, user_end) > 0) {
3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178
        // "f" is completely after specified range; skip it
        iter++;
      } else {
        // if overlap
        inputs->emplace_back(files_[level][*iter]);
        found_overlapping_file = true;
        // record the first file index.
        if (file_index && *file_index == -1) {
          *file_index = static_cast<int>(*iter);
        }
        // the related file is overlap, erase to avoid checking again.
        iter = index.erase(iter);
        if (expand_range) {
          if (begin != nullptr &&
3179
              user_cmp->CompareWithoutTimestamp(file_start, user_begin) < 0) {
3180 3181
            user_begin = file_start;
          }
3182 3183
          if (end != nullptr &&
              user_cmp->CompareWithoutTimestamp(file_limit, user_end) > 0) {
3184 3185
            user_end = file_limit;
          }
H
Hans Wennborg 已提交
3186 3187
        }
      }
G
Gabor Cselle 已提交
3188
    }
3189 3190 3191 3192
    // if all the files left are not overlap, break
    if (!found_overlapping_file) {
      break;
    }
G
Gabor Cselle 已提交
3193
  }
3194 3195
}

A
Aaron Gao 已提交
3196 3197 3198 3199 3200 3201 3202 3203 3204
// Store in "*inputs" files in "level" that within range [begin,end]
// Guarantee a "clean cut" boundary between the files in inputs
// and the surrounding files and the maximum number of files.
// This will ensure that no parts of a key are lost during compaction.
// If hint_index is specified, then it points to a file in the range.
// The file_index returns a pointer to any file in an overlapping range.
void VersionStorageInfo::GetCleanInputsWithinInterval(
    int level, const InternalKey* begin, const InternalKey* end,
    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index) const {
  inputs->clear();
  if (file_index) {
    *file_index = -1;
  }
  if (level >= num_non_empty_levels_ || level == 0 ||
      level_files_brief_[level].num_files == 0) {
    // this level is empty, no inputs within range
    // also don't support clean input interval within L0
    return;
  }

  // Delegate to the binary-search helper in "within interval" mode, which
  // shrinks the result to a clean-cut subrange of [begin, end].
  GetOverlappingInputsRangeBinarySearch(level, begin, end, inputs,
                                        hint_index, file_index,
                                        true /* within_interval */);
}

3221 3222 3223 3224
// Store in "*inputs" all files in "level" that overlap [begin,end]
// Employ binary search to find at least one file that overlaps the
// specified range. From that file, iterate backwards and
// forwards to find all overlapping files.
// if within_interval is set, then only store the maximum clean inputs
// within range [begin, end]. "clean" means there is a boundary
// between the files in "*inputs" and the surrounding files.
// Requires level > 0 (files must be sorted and disjoint).
// If next_smallest is non-null, *next_smallest receives the smallest key of
// the first file past the selected range (or nullptr if none).
void VersionStorageInfo::GetOverlappingInputsRangeBinarySearch(
    int level, const InternalKey* begin, const InternalKey* end,
    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index,
    bool within_interval, InternalKey** next_smallest) const {
  assert(level > 0);

  auto user_cmp = user_comparator_;
  const FdWithKeyRange* files = level_files_brief_[level].files;
  const int num_files = static_cast<int>(level_files_brief_[level].num_files);

  // Use binary search to find the lower bound (first candidate file) and the
  // upper bound (one past the last candidate file).
  int start_index = 0;
  int end_index = num_files;

  if (begin != nullptr) {
    // if within_interval is true, with file_key would find
    // not overlapping ranges in std::lower_bound.
    auto cmp = [&user_cmp, &within_interval](const FdWithKeyRange& f,
                                             const InternalKey* k) {
      auto& file_key = within_interval ? f.file_metadata->smallest
                                       : f.file_metadata->largest;
      return sstableKeyCompare(user_cmp, file_key, *k) < 0;
    };

    // hint_index (when given) bounds the search to files before the hint.
    start_index = static_cast<int>(
        std::lower_bound(files,
                         files + (hint_index == -1 ? num_files : hint_index),
                         begin, cmp) -
        files);

    if (start_index > 0 && within_interval) {
      // Skip forward past any files whose user-key range is glued to the
      // previous file's (same boundary user key), to keep the cut "clean".
      bool is_overlapping = true;
      while (is_overlapping && start_index < num_files) {
        auto& pre_limit = files[start_index - 1].file_metadata->largest;
        auto& cur_start = files[start_index].file_metadata->smallest;
        is_overlapping = sstableKeyCompare(user_cmp, pre_limit, cur_start) == 0;
        start_index += is_overlapping;
      }
    }
  }

  if (end != nullptr) {
    // if within_interval is true, with file_key would find
    // not overlapping ranges in std::upper_bound.
    auto cmp = [&user_cmp, &within_interval](const InternalKey* k,
                                             const FdWithKeyRange& f) {
      auto& file_key = within_interval ? f.file_metadata->largest
                                       : f.file_metadata->smallest;
      return sstableKeyCompare(user_cmp, *k, file_key) < 0;
    };

    end_index = static_cast<int>(
        std::upper_bound(files + start_index, files + num_files, end, cmp) -
        files);

    if (end_index < num_files && within_interval) {
      // Symmetrically, shrink backward while the last selected file shares a
      // boundary user key with its successor.
      bool is_overlapping = true;
      while (is_overlapping && end_index > start_index) {
        auto& next_start = files[end_index].file_metadata->smallest;
        auto& cur_limit = files[end_index - 1].file_metadata->largest;
        is_overlapping =
            sstableKeyCompare(user_cmp, cur_limit, next_start) == 0;
        end_index -= is_overlapping;
      }
    }
  }

  assert(start_index <= end_index);

  // If there were no overlapping files, return immediately.
  if (start_index == end_index) {
    if (next_smallest) {
      *next_smallest = nullptr;
    }
    return;
  }

  assert(start_index < end_index);

  // returns the index where an overlap is found
  if (file_index) {
    *file_index = start_index;
  }

  // insert overlapping files into vector
  for (int i = start_index; i < end_index; i++) {
    inputs->push_back(files_[level][i]);
  }

  if (next_smallest != nullptr) {
    // Provide the next key outside the range covered by inputs
    if (end_index < static_cast<int>(files_[level].size())) {
      **next_smallest = files_[level][end_index]->smallest;
    } else {
      *next_smallest = nullptr;
    }
  }
}
A
Abhishek Kona 已提交
3327

S
sdong 已提交
3328
uint64_t VersionStorageInfo::NumLevelBytes(int level) const {
3329
  assert(level >= 0);
3330
  assert(level < num_levels());
3331 3332 3333
  return TotalFileSize(files_[level]);
}

S
sdong 已提交
3334 3335
// Renders a one-line, human-readable summary of per-level file counts (and,
// for leveled compaction, base-level sizing info) into the caller-provided
// scratch buffer, returning a pointer to it. Output is truncated silently if
// the buffer is too small.
const char* VersionStorageInfo::LevelSummary(
    LevelSummaryStorage* scratch) const {
  int written = 0;
  if (compaction_style_ == kCompactionStyleLevel && num_levels() > 1) {
    assert(base_level_ < static_cast<int>(level_max_bytes_.size()));
    if (level_multiplier_ != 0.0) {
      written = snprintf(
          scratch->buffer, sizeof(scratch->buffer),
          "base level %d level multiplier %.2f max bytes base %" PRIu64 " ",
          base_level_, level_multiplier_, level_max_bytes_[base_level_]);
    }
  }
  written += snprintf(scratch->buffer + written,
                      sizeof(scratch->buffer) - written, "files[");
  for (int level = 0; level < num_levels(); level++) {
    int avail = sizeof(scratch->buffer) - written;
    int n = snprintf(scratch->buffer + written, avail, "%d ",
                     static_cast<int>(files_[level].size()));
    if (n < 0 || n >= avail) {
      // Buffer exhausted or encoding error; stop appending.
      break;
    }
    written += n;
  }
  if (written > 0) {
    // overwrite the trailing space
    --written;
  }
  written += snprintf(scratch->buffer + written,
                      sizeof(scratch->buffer) - written, "] max score %.2f",
                      compaction_score_[0]);

  if (!files_marked_for_compaction_.empty()) {
    snprintf(scratch->buffer + written, sizeof(scratch->buffer) - written,
             " (%" ROCKSDB_PRIszt " files need compaction)",
             files_marked_for_compaction_.size());
  }

  return scratch->buffer;
}

S
sdong 已提交
3370 3371
// Renders a one-line summary of every file on `level` — file number, smallest
// seqno, human-readable size, and being-compacted flag — into the scratch
// buffer and returns it. Output is truncated silently if the buffer fills up.
const char* VersionStorageInfo::LevelFileSummary(FileSummaryStorage* scratch,
                                                 int level) const {
  int written =
      snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
  for (const auto& meta : files_[level]) {
    int avail = sizeof(scratch->buffer) - written;
    char human_size[16];
    AppendHumanBytes(meta->fd.GetFileSize(), human_size, sizeof(human_size));
    int n = snprintf(scratch->buffer + written, avail,
                     "#%" PRIu64 "(seq=%" PRIu64 ",sz=%s,%d) ",
                     meta->fd.GetNumber(), meta->fd.smallest_seqno, human_size,
                     static_cast<int>(meta->being_compacted));
    if (n < 0 || n >= avail) {
      // Buffer exhausted or encoding error; stop appending.
      break;
    }
    written += n;
  }
  // overwrite the trailing space (only if at least one file was printed)
  if (files_[level].size() && written > 0) {
    --written;
  }
  snprintf(scratch->buffer + written, sizeof(scratch->buffer) - written, "]");
  return scratch->buffer;
}

3393
uint64_t VersionStorageInfo::MaxNextLevelOverlappingBytes() {
3394 3395
  uint64_t result = 0;
  std::vector<FileMetaData*> overlaps;
3396
  for (int level = 1; level < num_levels() - 1; level++) {
3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407
    for (const auto& f : files_[level]) {
      GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps);
      const uint64_t sum = TotalFileSize(overlaps);
      if (sum > result) {
        result = sum;
      }
    }
  }
  return result;
}

3408 3409 3410 3411 3412 3413 3414 3415
// Returns the target size in bytes for `level`, as previously computed by
// CalculateBaseBytes().
uint64_t VersionStorageInfo::MaxBytesForLevel(int level) const {
  // Note: the result for level zero is not really used since we set
  // the level-0 compaction threshold based on number of files.
  assert(level >= 0);
  assert(level < static_cast<int>(level_max_bytes_.size()));
  return level_max_bytes_[level];
}

3416
// Computes the per-level target sizes (level_max_bytes_), the base level
// (base_level_), the effective level multiplier (level_multiplier_), and the
// L0 delay-trigger count, using either the static sizing scheme or the
// dynamic scheme selected by level_compaction_dynamic_level_bytes.
void VersionStorageInfo::CalculateBaseBytes(const ImmutableOptions& ioptions,
                                            const MutableCFOptions& options) {
  // Special logic to set number of sorted runs.
  // It is to match the previous behavior when all files are in L0.
  int num_l0_count = static_cast<int>(files_[0].size());
  if (compaction_style_ == kCompactionStyleUniversal) {
    // For universal compaction, we use level0 score to indicate
    // compaction score for the whole DB. Adding other levels as if
    // they are L0 files.
    for (int i = 1; i < num_levels(); i++) {
      if (!files_[i].empty()) {
        num_l0_count++;
      }
    }
  }
  set_l0_delay_trigger_count(num_l0_count);

  level_max_bytes_.resize(ioptions.num_levels);
  if (!ioptions.level_compaction_dynamic_level_bytes) {
    base_level_ = (ioptions.compaction_style == kCompactionStyleLevel) ? 1 : -1;

    // Calculate for static bytes base case: each level is
    // max_bytes_for_level_multiplier (plus per-level additional multiplier)
    // times the previous one, starting from max_bytes_for_level_base.
    for (int i = 0; i < ioptions.num_levels; ++i) {
      if (i == 0 && ioptions.compaction_style == kCompactionStyleUniversal) {
        level_max_bytes_[i] = options.max_bytes_for_level_base;
      } else if (i > 1) {
        level_max_bytes_[i] = MultiplyCheckOverflow(
            MultiplyCheckOverflow(level_max_bytes_[i - 1],
                                  options.max_bytes_for_level_multiplier),
            options.MaxBytesMultiplerAdditional(i - 1));
      } else {
        level_max_bytes_[i] = options.max_bytes_for_level_base;
      }
    }
  } else {
    uint64_t max_level_size = 0;

    int first_non_empty_level = -1;
    // Find size of non-L0 level of most data.
    // Cannot use the size of the last level because it can be empty or less
    // than previous levels after compaction.
    for (int i = 1; i < num_levels_; i++) {
      uint64_t total_size = 0;
      for (const auto& f : files_[i]) {
        total_size += f->fd.GetFileSize();
      }
      if (total_size > 0 && first_non_empty_level == -1) {
        first_non_empty_level = i;
      }
      if (total_size > max_level_size) {
        max_level_size = total_size;
      }
    }

    // Prefill every level's max bytes to disallow compaction from there.
    for (int i = 0; i < num_levels_; i++) {
      level_max_bytes_[i] = std::numeric_limits<uint64_t>::max();
    }

    if (max_level_size == 0) {
      // No data for L1 and up. L0 compacts to last level directly.
      // No compaction from L1+ needs to be scheduled.
      base_level_ = num_levels_ - 1;
    } else {
      uint64_t l0_size = 0;
      for (const auto& f : files_[0]) {
        l0_size += f->fd.GetFileSize();
      }

      uint64_t base_bytes_max =
          std::max(options.max_bytes_for_level_base, l0_size);
      uint64_t base_bytes_min = static_cast<uint64_t>(
          base_bytes_max / options.max_bytes_for_level_multiplier);

      // Try whether we can make last level's target size to be max_level_size
      uint64_t cur_level_size = max_level_size;
      for (int i = num_levels_ - 2; i >= first_non_empty_level; i--) {
        // Round up after dividing
        cur_level_size = static_cast<uint64_t>(
            cur_level_size / options.max_bytes_for_level_multiplier);
      }

      // Calculate base level and its size.
      uint64_t base_level_size;
      if (cur_level_size <= base_bytes_min) {
        // Case 1. If we make target size of last level to be max_level_size,
        // target size of the first non-empty level would be smaller than
        // base_bytes_min. We set it be base_bytes_min.
        base_level_size = base_bytes_min + 1U;
        base_level_ = first_non_empty_level;
        ROCKS_LOG_INFO(ioptions.logger,
                       "More existing levels in DB than needed. "
                       "max_bytes_for_level_multiplier may not be guaranteed.");
      } else {
        // Find base level (where L0 data is compacted to).
        base_level_ = first_non_empty_level;
        while (base_level_ > 1 && cur_level_size > base_bytes_max) {
          --base_level_;
          cur_level_size = static_cast<uint64_t>(
              cur_level_size / options.max_bytes_for_level_multiplier);
        }
        if (cur_level_size > base_bytes_max) {
          // Even L1 will be too large
          assert(base_level_ == 1);
          base_level_size = base_bytes_max;
        } else {
          base_level_size = cur_level_size;
        }
      }

      level_multiplier_ = options.max_bytes_for_level_multiplier;
      assert(base_level_size > 0);
      if (l0_size > base_level_size &&
          (l0_size > options.max_bytes_for_level_base ||
           static_cast<int>(files_[0].size() / 2) >=
               options.level0_file_num_compaction_trigger)) {
        // We adjust the base level according to actual L0 size, and adjust
        // the level multiplier accordingly, when:
        //   1. the L0 size is larger than level size base, or
        //   2. number of L0 files reaches twice the L0->L1 compaction trigger
        // We don't do this otherwise to keep the LSM-tree structure stable
        // unless the L0 compaction is backlogged.
        base_level_size = l0_size;
        if (base_level_ == num_levels_ - 1) {
          level_multiplier_ = 1.0;
        } else {
          // Geometric multiplier that spreads [base_level_size,
          // max_level_size] evenly over the remaining levels.
          level_multiplier_ = std::pow(
              static_cast<double>(max_level_size) /
                  static_cast<double>(base_level_size),
              1.0 / static_cast<double>(num_levels_ - base_level_ - 1));
        }
      }

      uint64_t level_size = base_level_size;
      for (int i = base_level_; i < num_levels_; i++) {
        if (i > base_level_) {
          level_size = MultiplyCheckOverflow(level_size, level_multiplier_);
        }
        // Don't set any level below base_bytes_max. Otherwise, the LSM can
        // assume an hourglass shape where L1+ sizes are smaller than L0. This
        // causes compaction scoring, which depends on level sizes, to favor L1+
        // at the expense of L0, which may fill up and stall.
        level_max_bytes_[i] = std::max(level_size, base_bytes_max);
      }
    }
  }
}

A
Andres Notzli 已提交
3564
// Returns an estimate of the number of live (non-obsolete) data bytes.
uint64_t VersionStorageInfo::EstimateLiveDataSize() const {
  // Estimate the live data size by adding up the size of a maximal set of
  // sst files with no range overlap in same or higher level. The less
  // compacted, the more optimistic (smaller) this estimate is. Also,
  // for multiple sorted runs within a level, file order will matter.
  uint64_t size = 0;

  // Orders internal keys via the version's internal comparator.
  auto ikey_lt = [this](InternalKey* x, InternalKey* y) {
    return internal_comparator_->Compare(*x, *y) < 0;
  };
  // (Ordered) map of largest keys in files being included in size estimate
  std::map<InternalKey*, FileMetaData*, decltype(ikey_lt)> ranges(ikey_lt);

  // Walk from the bottommost level up so the most-compacted data is counted
  // first and newer overlapping files are excluded.
  for (int l = num_levels_ - 1; l >= 0; l--) {
    bool found_end = false;
    for (auto file : files_[l]) {
      // Find the first file already included with largest key is larger than
      // the smallest key of `file`. If that file does not overlap with the
      // current file, none of the files in the map does. If there is
      // no potential overlap, we can safely insert the rest of this level
      // (if the level is not 0) into the map without checking again because
      // the elements in the level are sorted and non-overlapping.
      auto lb = (found_end && l != 0) ?
        ranges.end() : ranges.lower_bound(&file->smallest);
      found_end = (lb == ranges.end());
      if (found_end || internal_comparator_->Compare(
            file->largest, (*lb).second->smallest) < 0) {
          ranges.emplace_hint(lb, &file->largest, file);
          size += file->fd.file_size;
      }
    }
  }
  return size;
}

3599
// Returns true if data in the user-key range [smallest_user_key,
// largest_user_key] might exist in a sorted run older than the one
// identified by (`last_level`, `last_l0_idx`); returns false only when that
// run is provably bottommost for the range.
bool VersionStorageInfo::RangeMightExistAfterSortedRun(
    const Slice& smallest_user_key, const Slice& largest_user_key,
    int last_level, int last_l0_idx) {
  assert((last_l0_idx != -1) == (last_level == 0));
  // TODO(ajkr): this preserves earlier behavior where we considered an L0 file
  // bottommost only if it's the oldest L0 file and there are no files on older
  // levels. It'd be better to consider it bottommost if there's no overlap in
  // older levels/files.
  if (last_level == 0 &&
      last_l0_idx != static_cast<int>(LevelFiles(0).size() - 1)) {
    return true;
  }

  // Checks whether there are files living beyond the `last_level`. If lower
  // levels have files, it checks for overlap between [`smallest_key`,
  // `largest_key`] and those files. Bottomlevel optimizations can be made if
  // there are no files in lower levels or if there is no overlap with the files
  // in the lower levels.
  for (int level = last_level + 1; level < num_levels(); level++) {
    // The range is not in the bottommost level if there are files in lower
    // levels when the `last_level` is 0 or if there are files in lower levels
    // which overlap with [`smallest_key`, `largest_key`].
    if (files_[level].size() > 0 &&
        (last_level == 0 ||
         OverlapInLevel(level, &smallest_user_key, &largest_user_key))) {
      return true;
    }
  }
  return false;
}
A
Andres Notzli 已提交
3629

3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640
// Appends the numbers of every table file (all levels) and every blob file
// referenced by this Version to the two output vectors.
void Version::AddLiveFiles(std::vector<uint64_t>* live_table_files,
                           std::vector<uint64_t>* live_blob_files) const {
  assert(live_table_files);
  assert(live_blob_files);

  for (int level = 0; level < storage_info_.num_levels(); ++level) {
    for (const auto& meta : storage_info_.LevelFiles(level)) {
      assert(meta);
      live_table_files->emplace_back(meta->fd.GetNumber());
    }
  }

  for (const auto& pair : storage_info_.GetBlobFiles()) {
    const auto& meta = pair.second;
    assert(meta);
    live_blob_files->emplace_back(meta->GetBlobFileNumber());
  }
}

3653
std::string Version::DebugString(bool hex, bool print_stats) const {
J
jorlow@chromium.org 已提交
3654
  std::string r;
S
sdong 已提交
3655
  for (int level = 0; level < storage_info_.num_levels_; level++) {
3656 3657
    // E.g.,
    //   --- level 1 ---
3658 3659
    //   17:123[1 .. 124]['a' .. 'd']
    //   20:43[124 .. 128]['e' .. 'g']
3660 3661
    //
    // if print_stats=true:
3662
    //   17:123[1 .. 124]['a' .. 'd'](4096)
3663
    r.append("--- level ");
J
jorlow@chromium.org 已提交
3664
    AppendNumberTo(&r, level);
3665 3666
    r.append(" --- version# ");
    AppendNumberTo(&r, version_number_);
3667
    r.append(" ---\n");
S
sdong 已提交
3668
    const std::vector<FileMetaData*>& files = storage_info_.files_[level];
D
dgrogan@chromium.org 已提交
3669
    for (size_t i = 0; i < files.size(); i++) {
J
jorlow@chromium.org 已提交
3670
      r.push_back(' ');
3671
      AppendNumberTo(&r, files[i]->fd.GetNumber());
J
jorlow@chromium.org 已提交
3672
      r.push_back(':');
3673
      AppendNumberTo(&r, files[i]->fd.GetFileSize());
G
Gabor Cselle 已提交
3674
      r.append("[");
3675 3676 3677 3678 3679
      AppendNumberTo(&r, files[i]->fd.smallest_seqno);
      r.append(" .. ");
      AppendNumberTo(&r, files[i]->fd.largest_seqno);
      r.append("]");
      r.append("[");
Z
Zheng Shao 已提交
3680
      r.append(files[i]->smallest.DebugString(hex));
G
Gabor Cselle 已提交
3681
      r.append(" .. ");
Z
Zheng Shao 已提交
3682
      r.append(files[i]->largest.DebugString(hex));
3683
      r.append("]");
3684 3685 3686 3687
      if (files[i]->oldest_blob_file_number != kInvalidBlobFileNumber) {
        r.append(" blob_file:");
        AppendNumberTo(&r, files[i]->oldest_blob_file_number);
      }
3688 3689 3690 3691 3692 3693 3694
      if (print_stats) {
        r.append("(");
        r.append(ToString(
            files[i]->stats.num_reads_sampled.load(std::memory_order_relaxed)));
        r.append(")");
      }
      r.append("\n");
J
jorlow@chromium.org 已提交
3695 3696
    }
  }
3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711

  const auto& blob_files = storage_info_.GetBlobFiles();
  if (!blob_files.empty()) {
    r.append("--- blob files --- version# ");
    AppendNumberTo(&r, version_number_);
    r.append(" ---\n");
    for (const auto& pair : blob_files) {
      const auto& blob_file_meta = pair.second;
      assert(blob_file_meta);

      r.append(blob_file_meta->DebugString());
      r.push_back('\n');
    }
  }

J
jorlow@chromium.org 已提交
3712 3713 3714
  return r;
}

3715 3716 3717 3718
// this is used to batch writes to the manifest file
struct VersionSet::ManifestWriter {
  Status status;            // Result of the manifest write for this writer.
  bool done;                // True once this writer's request was processed.
  InstrumentedCondVar cv;   // Used to wait until `done` becomes true.
  ColumnFamilyData* cfd;    // Column family the edits apply to.
  const MutableCFOptions mutable_cf_options;
  const autovector<VersionEdit*>& edit_list;  // Edits to log; owned by caller.
  // Callback invoked with the write status — NOTE(review): invocation site is
  // outside this view; confirm semantics against LogAndApply.
  const std::function<void(const Status&)> manifest_write_callback;

  explicit ManifestWriter(
      InstrumentedMutex* mu, ColumnFamilyData* _cfd,
      const MutableCFOptions& cf_options, const autovector<VersionEdit*>& e,
      const std::function<void(const Status&)>& manifest_wcb)
      : done(false),
        cv(mu),
        cfd(_cfd),
        mutable_cf_options(cf_options),
        edit_list(e),
        manifest_write_callback(manifest_wcb) {}
  // The status may legitimately go unexamined; mark it as intentionally
  // unchecked so status-checking builds don't assert.
  ~ManifestWriter() { status.PermitUncheckedError(); }

  // Returns true iff every edit in this batch only manipulates WAL metadata.
  bool IsAllWalEdits() const {
    bool all_wal_edits = true;
    for (const auto& e : edit_list) {
      if (!e->IsWalManipulation()) {
        all_wal_edits = false;
        break;
      }
    }
    return all_wal_edits;
  }
};

3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764
// Buffers `edit` when it belongs to an atomic group of version edits.
// Returns Status::OK() when the edit is accepted (callers should check
// IsFull() afterwards to see whether the group is complete), and
// Status::Corruption() when the group bookkeeping is inconsistent.
Status AtomicGroupReadBuffer::AddEdit(VersionEdit* edit) {
  assert(edit);
  if (edit->is_in_atomic_group_) {
    TEST_SYNC_POINT("AtomicGroupReadBuffer::AddEdit:AtomicGroup");
    if (replay_buffer_.empty()) {
      // First edit of a group: remaining_entries_ plus this edit gives the
      // total group size; size the buffer accordingly.
      replay_buffer_.resize(edit->remaining_entries_ + 1);
      TEST_SYNC_POINT_CALLBACK(
          "AtomicGroupReadBuffer::AddEdit:FirstInAtomicGroup", edit);
    }
    read_edits_in_atomic_group_++;
    // Every member must agree on the group size: edits read so far plus the
    // countdown carried by this edit must equal the declared size.
    if (read_edits_in_atomic_group_ + edit->remaining_entries_ !=
        static_cast<uint32_t>(replay_buffer_.size())) {
      TEST_SYNC_POINT_CALLBACK(
          "AtomicGroupReadBuffer::AddEdit:IncorrectAtomicGroupSize", edit);
      return Status::Corruption("corrupted atomic group");
    }
    replay_buffer_[read_edits_in_atomic_group_ - 1] = *edit;
    if (read_edits_in_atomic_group_ == replay_buffer_.size()) {
      TEST_SYNC_POINT_CALLBACK(
          "AtomicGroupReadBuffer::AddEdit:LastInAtomicGroup", edit);
      return Status::OK();
    }
    return Status::OK();
  }

  // A normal edit. It must not arrive in the middle of an atomic group.
  if (!replay_buffer().empty()) {
    TEST_SYNC_POINT_CALLBACK(
        "AtomicGroupReadBuffer::AddEdit:AtomicGroupMixedWithNormalEdits", edit);
    return Status::Corruption("corrupted atomic group");
  }
  return Status::OK();
}

// True when every edit of the pending atomic group has been buffered.
bool AtomicGroupReadBuffer::IsFull() const {
  return replay_buffer_.size() == read_edits_in_atomic_group_;
}

// True when no atomic group is currently being accumulated.
bool AtomicGroupReadBuffer::IsEmpty() const {
  return replay_buffer_.empty();
}

// Drops any buffered group state so the next edit starts fresh.
void AtomicGroupReadBuffer::Clear() {
  replay_buffer_.clear();
  read_edits_in_atomic_group_ = 0;
}

3794
// Constructs a VersionSet over `dbname`. The ColumnFamilySet is created up
// front; most counters start at their "not yet recovered" defaults and are
// filled in by Recover().
VersionSet::VersionSet(const std::string& dbname,
                       const ImmutableDBOptions* _db_options,
                       const FileOptions& storage_options, Cache* table_cache,
                       WriteBufferManager* write_buffer_manager,
                       WriteController* write_controller,
                       BlockCacheTracer* const block_cache_tracer,
                       const std::shared_ptr<IOTracer>& io_tracer,
                       const std::string& db_session_id)
    : column_family_set_(
          new ColumnFamilySet(dbname, _db_options, storage_options, table_cache,
                              write_buffer_manager, write_controller,
                              block_cache_tracer, io_tracer, db_session_id)),
      table_cache_(table_cache),
      env_(_db_options->env),
      fs_(_db_options->fs, io_tracer),
      clock_(_db_options->clock),
      dbname_(dbname),
      db_options_(_db_options),
      next_file_number_(2),  // File number 1 is reserved for the MANIFEST.
      manifest_file_number_(0),  // Filled by Recover()
      options_file_number_(0),
      pending_manifest_file_number_(0),
      last_sequence_(0),
      last_allocated_sequence_(0),
      last_published_sequence_(0),
      prev_log_number_(0),
      current_version_number_(0),
      manifest_file_size_(0),
      file_options_(storage_options),
      block_cache_tracer_(block_cache_tracer),
      io_tracer_(io_tracer),
      db_session_id_(db_session_id) {}
J
jorlow@chromium.org 已提交
3826 3827

VersionSet::~VersionSet() {
  // we need to delete column_family_set_ because its destructor depends on
  // VersionSet
  column_family_set_.reset();

  // Release table-cache handles still pinned by obsolete files and free
  // their metadata; nothing else owns them at this point.
  for (auto& obsolete : obsolete_files_) {
    if (obsolete.metadata->table_reader_handle) {
      table_cache_->Release(obsolete.metadata->table_reader_handle);
      TableCache::Evict(table_cache_, obsolete.metadata->fd.GetNumber());
    }
    obsolete.DeleteMetadata();
  }
  obsolete_files_.clear();
  // The aggregated IO status may never have been examined; silence the check.
  io_status_.PermitUncheckedError();
}

3842 3843 3844 3845
void VersionSet::Reset() {
  if (column_family_set_) {
    WriteBufferManager* wbm = column_family_set_->write_buffer_manager();
    WriteController* wc = column_family_set_->write_controller();
3846 3847 3848
    column_family_set_.reset(new ColumnFamilySet(
        dbname_, db_options_, file_options_, table_cache_, wbm, wc,
        block_cache_tracer_, io_tracer_, db_session_id_));
3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865
  }
  db_id_.clear();
  next_file_number_.store(2);
  min_log_number_to_keep_2pc_.store(0);
  manifest_file_number_ = 0;
  options_file_number_ = 0;
  pending_manifest_file_number_ = 0;
  last_sequence_.store(0);
  last_allocated_sequence_.store(0);
  last_published_sequence_.store(0);
  prev_log_number_ = 0;
  descriptor_log_.reset();
  current_version_number_ = 0;
  manifest_writers_.clear();
  manifest_file_size_ = 0;
  obsolete_files_.clear();
  obsolete_manifests_.clear();
3866
  wals_.Reset();
3867 3868
}

3869 3870
// Installs `v` as the current Version of `column_family_data` and links it
// into the column family's circular version list. Requires the DB mutex.
void VersionSet::AppendVersion(ColumnFamilyData* column_family_data,
                               Version* v) {
  // compute new compaction score
  v->storage_info()->ComputeCompactionScore(
      *column_family_data->ioptions(),
      *column_family_data->GetLatestMutableCFOptions());

  // Mark v finalized
  v->storage_info_.SetFinalized();

  // Swap in "v" as the current version, dropping our reference to the old one.
  assert(v->refs_ == 0);
  Version* old_current = column_family_data->current();
  assert(v != old_current);
  if (old_current != nullptr) {
    assert(old_current->refs_ > 0);
    old_current->Unref();
  }
  column_family_data->SetCurrent(v);
  v->Ref();

  // Splice v in just before the dummy head (i.e. at the list tail).
  v->prev_ = column_family_data->dummy_versions()->prev_;
  v->next_ = column_family_data->dummy_versions();
  v->prev_->next_ = v;
  v->next_->prev_ = v;
}

3897 3898
// Commits the queued manifest writers at the head of manifest_writers_:
// group-commits their edits, writes them to the MANIFEST (rolling to a new
// MANIFEST + CURRENT when needed), installs the resulting Versions, and wakes
// every waiting writer with the final status. Called with the DB mutex held;
// the mutex is released around the actual file I/O.
Status VersionSet::ProcessManifestWrites(
    std::deque<ManifestWriter>& writers, InstrumentedMutex* mu,
    FSDirectory* db_directory, bool new_descriptor_log,
    const ColumnFamilyOptions* new_cf_options) {
  mu->AssertHeld();
  assert(!writers.empty());
  ManifestWriter& first_writer = writers.front();
  ManifestWriter* last_writer = &first_writer;

  assert(!manifest_writers_.empty());
  assert(manifest_writers_.front() == &first_writer);

  autovector<VersionEdit*> batch_edits;
  autovector<Version*> versions;
  autovector<const MutableCFOptions*> mutable_cf_options_ptrs;
  std::vector<std::unique_ptr<BaseReferencedVersionBuilder>> builder_guards;

  if (first_writer.edit_list.front()->IsColumnFamilyManipulation()) {
    // No group commits for column family add or drop
    LogAndApplyCFHelper(first_writer.edit_list.front());
    batch_edits.push_back(first_writer.edit_list.front());
  } else {
    auto it = manifest_writers_.cbegin();
    size_t group_start = std::numeric_limits<size_t>::max();
    while (it != manifest_writers_.cend()) {
      if ((*it)->edit_list.front()->IsColumnFamilyManipulation()) {
        // no group commits for column family add or drop
        break;
      }
      last_writer = *(it++);
      assert(last_writer != nullptr);
      assert(last_writer->cfd != nullptr);
      if (last_writer->cfd->IsDropped()) {
        // If we detect a dropped CF at this point, and the corresponding
        // version edits belong to an atomic group, then we need to find out
        // the preceding version edits in the same atomic group, and update
        // their `remaining_entries_` member variable because we are NOT going
        // to write the version edits' of dropped CF to the MANIFEST. If we
        // don't update, then Recover can report corrupted atomic group because
        // the `remaining_entries_` do not match.
        if (!batch_edits.empty()) {
          if (batch_edits.back()->is_in_atomic_group_ &&
              batch_edits.back()->remaining_entries_ > 0) {
            assert(group_start < batch_edits.size());
            const auto& edit_list = last_writer->edit_list;
            size_t k = 0;
            while (k < edit_list.size()) {
              if (!edit_list[k]->is_in_atomic_group_) {
                break;
              } else if (edit_list[k]->remaining_entries_ == 0) {
                ++k;
                break;
              }
              ++k;
            }
            for (auto i = group_start; i < batch_edits.size(); ++i) {
              assert(static_cast<uint32_t>(k) <=
                     batch_edits.back()->remaining_entries_);
              batch_edits[i]->remaining_entries_ -= static_cast<uint32_t>(k);
            }
          }
        }
        continue;
      }
      // We do a linear search on versions because versions is small.
      // TODO(yanqin) maybe consider unordered_map
      Version* version = nullptr;
      VersionBuilder* builder = nullptr;
      for (int i = 0; i != static_cast<int>(versions.size()); ++i) {
        uint32_t cf_id = last_writer->cfd->GetID();
        if (versions[i]->cfd()->GetID() == cf_id) {
          version = versions[i];
          assert(!builder_guards.empty() &&
                 builder_guards.size() == versions.size());
          builder = builder_guards[i]->version_builder();
          TEST_SYNC_POINT_CALLBACK(
              "VersionSet::ProcessManifestWrites:SameColumnFamily", &cf_id);
          break;
        }
      }
      if (version == nullptr) {
        // WAL manipulations do not need to be applied to versions.
        if (!last_writer->IsAllWalEdits()) {
          version = new Version(last_writer->cfd, this, file_options_,
                                last_writer->mutable_cf_options, io_tracer_,
                                current_version_number_++);
          versions.push_back(version);
          mutable_cf_options_ptrs.push_back(&last_writer->mutable_cf_options);
          builder_guards.emplace_back(
              new BaseReferencedVersionBuilder(last_writer->cfd));
          builder = builder_guards.back()->version_builder();
        }
        assert(last_writer->IsAllWalEdits() || builder);
        assert(last_writer->IsAllWalEdits() || version);
        TEST_SYNC_POINT_CALLBACK("VersionSet::ProcessManifestWrites:NewVersion",
                                 version);
      }
      for (const auto& e : last_writer->edit_list) {
        // Track where the current atomic group starts inside batch_edits so
        // the dropped-CF fixup above can renumber it.
        if (e->is_in_atomic_group_) {
          if (batch_edits.empty() || !batch_edits.back()->is_in_atomic_group_ ||
              (batch_edits.back()->is_in_atomic_group_ &&
               batch_edits.back()->remaining_entries_ == 0)) {
            group_start = batch_edits.size();
          }
        } else if (group_start != std::numeric_limits<size_t>::max()) {
          group_start = std::numeric_limits<size_t>::max();
        }
        Status s = LogAndApplyHelper(last_writer->cfd, builder, e, mu);
        if (!s.ok()) {
          // free up the allocated memory
          for (auto v : versions) {
            delete v;
          }
          return s;
        }
        batch_edits.push_back(e);
      }
    }
    for (int i = 0; i < static_cast<int>(versions.size()); ++i) {
      assert(!builder_guards.empty() &&
             builder_guards.size() == versions.size());
      auto* builder = builder_guards[i]->version_builder();
      Status s = builder->SaveTo(versions[i]->storage_info());
      if (!s.ok()) {
        // free up the allocated memory
        for (auto v : versions) {
          delete v;
        }
        return s;
      }
    }
  }

#ifndef NDEBUG
  // Verify that version edits of atomic groups have correct
  // remaining_entries_.
  size_t k = 0;
  while (k < batch_edits.size()) {
    while (k < batch_edits.size() && !batch_edits[k]->is_in_atomic_group_) {
      ++k;
    }
    if (k == batch_edits.size()) {
      break;
    }
    size_t i = k;
    while (i < batch_edits.size()) {
      if (!batch_edits[i]->is_in_atomic_group_) {
        break;
      }
      assert(i - k + batch_edits[i]->remaining_entries_ ==
             batch_edits[k]->remaining_entries_);
      if (batch_edits[i]->remaining_entries_ == 0) {
        ++i;
        break;
      }
      ++i;
    }
    assert(batch_edits[i - 1]->is_in_atomic_group_);
    assert(0 == batch_edits[i - 1]->remaining_entries_);
    std::vector<VersionEdit*> tmp;
    for (size_t j = k; j != i; ++j) {
      tmp.emplace_back(batch_edits[j]);
    }
    TEST_SYNC_POINT_CALLBACK(
        "VersionSet::ProcessManifestWrites:CheckOneAtomicGroup", &tmp);
    k = i;
  }
#endif  // NDEBUG

  assert(pending_manifest_file_number_ == 0);
  if (!descriptor_log_ ||
      manifest_file_size_ > db_options_->max_manifest_file_size) {
    TEST_SYNC_POINT("VersionSet::ProcessManifestWrites:BeforeNewManifest");
    new_descriptor_log = true;
  } else {
    pending_manifest_file_number_ = manifest_file_number_;
  }

  // Local cached copy of state variable(s). WriteCurrentStateToManifest()
  // reads its content after releasing db mutex to avoid race with
  // SwitchMemtable().
  std::unordered_map<uint32_t, MutableCFState> curr_state;
  VersionEdit wal_additions;
  if (new_descriptor_log) {
    pending_manifest_file_number_ = NewFileNumber();
    batch_edits.back()->SetNextFile(next_file_number_.load());

    // if we are writing out new snapshot make sure to persist max column
    // family.
    if (column_family_set_->GetMaxColumnFamily() > 0) {
      first_writer.edit_list.front()->SetMaxColumnFamily(
          column_family_set_->GetMaxColumnFamily());
    }
    for (const auto* cfd : *column_family_set_) {
      assert(curr_state.find(cfd->GetID()) == curr_state.end());
      curr_state.emplace(std::make_pair(
          cfd->GetID(),
          MutableCFState(cfd->GetLogNumber(), cfd->GetFullHistoryTsLow())));
    }

    for (const auto& wal : wals_.GetWals()) {
      wal_additions.AddWal(wal.first, wal.second);
    }
  }

  uint64_t new_manifest_file_size = 0;
  Status s;
  IOStatus io_s;
  IOStatus manifest_io_status;
  {
    FileOptions opt_file_opts = fs_->OptimizeForManifestWrite(file_options_);
    mu->Unlock();
    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifestStart");
    TEST_SYNC_POINT_CALLBACK("VersionSet::LogAndApply:WriteManifest", nullptr);
    if (!first_writer.edit_list.front()->IsColumnFamilyManipulation()) {
      for (int i = 0; i < static_cast<int>(versions.size()); ++i) {
        assert(!builder_guards.empty() &&
               builder_guards.size() == versions.size());
        assert(!mutable_cf_options_ptrs.empty() &&
               builder_guards.size() == versions.size());
        ColumnFamilyData* cfd = versions[i]->cfd_;
        s = builder_guards[i]->version_builder()->LoadTableHandlers(
            cfd->internal_stats(), 1 /* max_threads */,
            true /* prefetch_index_and_filter_in_cache */,
            false /* is_initial_load */,
            mutable_cf_options_ptrs[i]->prefix_extractor.get(),
            MaxFileSizeForL0MetaPin(*mutable_cf_options_ptrs[i]));
        if (!s.ok()) {
          if (db_options_->paranoid_checks) {
            break;
          }
          s = Status::OK();
        }
      }
    }

    if (s.ok() && new_descriptor_log) {
      // This is fine because everything inside of this block is serialized --
      // only one thread can be here at the same time
      // create new manifest file
      ROCKS_LOG_INFO(db_options_->info_log, "Creating manifest %" PRIu64 "\n",
                     pending_manifest_file_number_);
      std::string descriptor_fname =
          DescriptorFileName(dbname_, pending_manifest_file_number_);
      std::unique_ptr<FSWritableFile> descriptor_file;
      io_s = NewWritableFile(fs_.get(), descriptor_fname, &descriptor_file,
                             opt_file_opts);
      if (io_s.ok()) {
        descriptor_file->SetPreallocationBlockSize(
            db_options_->manifest_preallocation_size);
        FileTypeSet tmp_set = db_options_->checksum_handoff_file_types;
        std::unique_ptr<WritableFileWriter> file_writer(new WritableFileWriter(
            std::move(descriptor_file), descriptor_fname, opt_file_opts, clock_,
            io_tracer_, nullptr, db_options_->listeners, nullptr,
            tmp_set.Contains(FileType::kDescriptorFile),
            tmp_set.Contains(FileType::kDescriptorFile)));
        descriptor_log_.reset(
            new log::Writer(std::move(file_writer), 0, false));
        s = WriteCurrentStateToManifest(curr_state, wal_additions,
                                        descriptor_log_.get(), io_s);
      } else {
        manifest_io_status = io_s;
        s = io_s;
      }
    }

    if (s.ok()) {
      if (!first_writer.edit_list.front()->IsColumnFamilyManipulation()) {
        for (int i = 0; i < static_cast<int>(versions.size()); ++i) {
          versions[i]->PrepareApply(*mutable_cf_options_ptrs[i], true);
        }
      }

      // Write new records to MANIFEST log
#ifndef NDEBUG
      size_t idx = 0;
#endif
      for (auto& e : batch_edits) {
        std::string record;
        if (!e->EncodeTo(&record)) {
          s = Status::Corruption("Unable to encode VersionEdit:" +
                                 e->DebugString(true));
          break;
        }
        TEST_KILL_RANDOM_WITH_WEIGHT("VersionSet::LogAndApply:BeforeAddRecord",
                                     REDUCE_ODDS2);
#ifndef NDEBUG
        if (batch_edits.size() > 1 && batch_edits.size() - 1 == idx) {
          TEST_SYNC_POINT_CALLBACK(
              "VersionSet::ProcessManifestWrites:BeforeWriteLastVersionEdit:0",
              nullptr);
          TEST_SYNC_POINT(
              "VersionSet::ProcessManifestWrites:BeforeWriteLastVersionEdit:1");
        }
        ++idx;
#endif /* !NDEBUG */
        io_s = descriptor_log_->AddRecord(record);
        if (!io_s.ok()) {
          s = io_s;
          manifest_io_status = io_s;
          break;
        }
      }
      if (s.ok()) {
        io_s = SyncManifest(db_options_, descriptor_log_->file());
        manifest_io_status = io_s;
        TEST_SYNC_POINT_CALLBACK(
            "VersionSet::ProcessManifestWrites:AfterSyncManifest", &io_s);
      }
      if (!io_s.ok()) {
        s = io_s;
        ROCKS_LOG_ERROR(db_options_->info_log, "MANIFEST write %s\n",
                        s.ToString().c_str());
      }
    }

    // If we just created a new descriptor file, install it by writing a
    // new CURRENT file that points to it.
    if (s.ok()) {
      assert(manifest_io_status.ok());
    }
    if (s.ok() && new_descriptor_log) {
      io_s = SetCurrentFile(fs_.get(), dbname_, pending_manifest_file_number_,
                            db_directory);
      if (!io_s.ok()) {
        s = io_s;
      }
      TEST_SYNC_POINT("VersionSet::ProcessManifestWrites:AfterNewManifest");
    }

    if (s.ok()) {
      // find offset in manifest file where this version is stored.
      new_manifest_file_size = descriptor_log_->file()->GetFileSize();
    }

    if (first_writer.edit_list.front()->is_column_family_drop_) {
      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:0");
      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:1");
      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:2");
    }

    LogFlush(db_options_->info_log);
    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifestDone");
    mu->Lock();
  }

  if (s.ok()) {
    // Apply WAL edits, DB mutex must be held.
    for (auto& e : batch_edits) {
      if (e->IsWalAddition()) {
        s = wals_.AddWals(e->GetWalAdditions());
      } else if (e->IsWalDeletion()) {
        s = wals_.DeleteWalsBefore(e->GetWalDeletion().GetLogNumber());
      }
      if (!s.ok()) {
        break;
      }
    }
  }

  // Record the first IO error into io_status_ (never overwrite an earlier
  // sticky error with a later success).
  if (!io_s.ok()) {
    if (io_status_.ok()) {
      io_status_ = io_s;
    }
  } else if (!io_status_.ok()) {
    io_status_ = io_s;
  }

  // Append the old manifest file to the obsolete_manifest_ list to be deleted
  // by PurgeObsoleteFiles later.
  if (s.ok() && new_descriptor_log) {
    obsolete_manifests_.emplace_back(
        DescriptorFileName("", manifest_file_number_));
  }

  // Install the new versions
  if (s.ok()) {
    if (first_writer.edit_list.front()->is_column_family_add_) {
      assert(batch_edits.size() == 1);
      assert(new_cf_options != nullptr);
      CreateColumnFamily(*new_cf_options, first_writer.edit_list.front());
    } else if (first_writer.edit_list.front()->is_column_family_drop_) {
      assert(batch_edits.size() == 1);
      first_writer.cfd->SetDropped();
      first_writer.cfd->UnrefAndTryDelete();
    } else {
      // Each version in versions corresponds to a column family.
      // For each column family, update its log number indicating that logs
      // with number smaller than this should be ignored.
      uint64_t last_min_log_number_to_keep = 0;
      for (const auto& e : batch_edits) {
        ColumnFamilyData* cfd = nullptr;
        if (!e->IsColumnFamilyManipulation()) {
          cfd = column_family_set_->GetColumnFamily(e->column_family_);
          // e would not have been added to batch_edits if its corresponding
          // column family is dropped.
          assert(cfd);
        }
        if (cfd) {
          if (e->has_log_number_ && e->log_number_ > cfd->GetLogNumber()) {
            cfd->SetLogNumber(e->log_number_);
          }
          if (e->HasFullHistoryTsLow()) {
            cfd->SetFullHistoryTsLow(e->GetFullHistoryTsLow());
          }
        }
        if (e->has_min_log_number_to_keep_) {
          last_min_log_number_to_keep =
              std::max(last_min_log_number_to_keep, e->min_log_number_to_keep_);
        }
      }

      if (last_min_log_number_to_keep != 0) {
        // Should only be set in 2PC mode.
        MarkMinLogNumberToKeep2PC(last_min_log_number_to_keep);
      }

      for (int i = 0; i < static_cast<int>(versions.size()); ++i) {
        ColumnFamilyData* cfd = versions[i]->cfd_;
        AppendVersion(cfd, versions[i]);
      }
    }
    manifest_file_number_ = pending_manifest_file_number_;
    manifest_file_size_ = new_manifest_file_size;
    prev_log_number_ = first_writer.edit_list.front()->prev_log_number_;
  } else {
    std::string version_edits;
    for (auto& e : batch_edits) {
      version_edits += ("\n" + e->DebugString(true));
    }
    ROCKS_LOG_ERROR(db_options_->info_log,
                    "Error in committing version edit to MANIFEST: %s",
                    version_edits.c_str());
    for (auto v : versions) {
      delete v;
    }
    if (manifest_io_status.ok()) {
      manifest_file_number_ = pending_manifest_file_number_;
      manifest_file_size_ = new_manifest_file_size;
    }
    // If manifest append failed for whatever reason, the file could be
    // corrupted. So we need to force the next version update to start a
    // new manifest file.
    descriptor_log_.reset();
    // If manifest operations failed, then we know the CURRENT file still
    // points to the original MANIFEST. Therefore, we can safely delete the
    // new MANIFEST.
    // If manifest operations succeeded, and we are here, then it is possible
    // that renaming tmp file to CURRENT failed.
    //
    // On local POSIX-compliant FS, the CURRENT must point to the original
    // MANIFEST. We can delete the new MANIFEST for simplicity, but we can also
    // keep it. Future recovery will ignore this MANIFEST. It's also ok for the
    // process not to crash and continue using the db. Any future LogAndApply()
    // call will switch to a new MANIFEST and update CURRENT, still ignoring
    // this one.
    //
    // On non-local FS, it is
    // possible that the rename operation succeeded on the server (remote)
    // side, but the client somehow returns a non-ok status to RocksDB. Note
    // that this does not violate atomicity. Should we delete the new MANIFEST
    // successfully, a subsequent recovery attempt will likely see the CURRENT
    // pointing to the new MANIFEST, thus fail. We will not be able to open the
    // DB again. Therefore, if manifest operations succeed, we should keep the
    // the new MANIFEST. If the process proceeds, any future LogAndApply() call
    // will switch to a new MANIFEST and update CURRENT. If user tries to
    // re-open the DB,
    // a) CURRENT points to the new MANIFEST, and the new MANIFEST is present.
    // b) CURRENT points to the original MANIFEST, and the original MANIFEST
    //    also exists.
    if (new_descriptor_log && !manifest_io_status.ok()) {
      ROCKS_LOG_INFO(db_options_->info_log,
                     "Deleting manifest %" PRIu64 " current manifest %" PRIu64
                     "\n",
                     pending_manifest_file_number_, manifest_file_number_);
      Status manifest_del_status = env_->DeleteFile(
          DescriptorFileName(dbname_, pending_manifest_file_number_));
      if (!manifest_del_status.ok()) {
        ROCKS_LOG_WARN(db_options_->info_log,
                       "Failed to delete manifest %" PRIu64 ": %s",
                       pending_manifest_file_number_,
                       manifest_del_status.ToString().c_str());
      }
    }
  }

  pending_manifest_file_number_ = 0;

  // wake up all the waiting writers
  while (true) {
    ManifestWriter* ready = manifest_writers_.front();
    manifest_writers_.pop_front();
    bool need_signal = true;
    for (const auto& w : writers) {
      if (&w == ready) {
        // This writer is one of ours; its owner is the thread running this
        // commit, so no signal is needed.
        need_signal = false;
        break;
      }
    }
    ready->status = s;
    ready->done = true;
    if (ready->manifest_write_callback) {
      (ready->manifest_write_callback)(s);
    }
    if (need_signal) {
      ready->cv.Signal();
    }
    if (ready == last_writer) {
      break;
    }
  }
  if (!manifest_writers_.empty()) {
    // Hand off to the next batch's leader.
    manifest_writers_.front()->cv.Signal();
  }
  return s;
}

4414
// 'datas' is grammatically incorrect. We still use this notation to indicate
4415 4416
// that this variable represents a collection of column_family_data.
Status VersionSet::LogAndApply(
4417 4418 4419
    const autovector<ColumnFamilyData*>& column_family_datas,
    const autovector<const MutableCFOptions*>& mutable_cf_options_list,
    const autovector<autovector<VersionEdit*>>& edit_lists,
4420
    InstrumentedMutex* mu, FSDirectory* db_directory, bool new_descriptor_log,
4421 4422
    const ColumnFamilyOptions* new_cf_options,
    const std::vector<std::function<void(const Status&)>>& manifest_wcbs) {
4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451
  mu->AssertHeld();
  int num_edits = 0;
  for (const auto& elist : edit_lists) {
    num_edits += static_cast<int>(elist.size());
  }
  if (num_edits == 0) {
    return Status::OK();
  } else if (num_edits > 1) {
#ifndef NDEBUG
    for (const auto& edit_list : edit_lists) {
      for (const auto& edit : edit_list) {
        assert(!edit->IsColumnFamilyManipulation());
      }
    }
#endif /* ! NDEBUG */
  }

  int num_cfds = static_cast<int>(column_family_datas.size());
  if (num_cfds == 1 && column_family_datas[0] == nullptr) {
    assert(edit_lists.size() == 1 && edit_lists[0].size() == 1);
    assert(edit_lists[0][0]->is_column_family_add_);
    assert(new_cf_options != nullptr);
  }
  std::deque<ManifestWriter> writers;
  if (num_cfds > 0) {
    assert(static_cast<size_t>(num_cfds) == mutable_cf_options_list.size());
    assert(static_cast<size_t>(num_cfds) == edit_lists.size());
  }
  for (int i = 0; i < num_cfds; ++i) {
4452 4453
    const auto wcb =
        manifest_wcbs.empty() ? [](const Status&) {} : manifest_wcbs[i];
4454
    writers.emplace_back(mu, column_family_datas[i],
4455
                         *mutable_cf_options_list[i], edit_lists[i], wcb);
4456 4457 4458 4459
    manifest_writers_.push_back(&writers[i]);
  }
  assert(!writers.empty());
  ManifestWriter& first_writer = writers.front();
4460 4461
  TEST_SYNC_POINT_CALLBACK("VersionSet::LogAndApply:BeforeWriterWaiting",
                           nullptr);
4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472
  while (!first_writer.done && &first_writer != manifest_writers_.front()) {
    first_writer.cv.Wait();
  }
  if (first_writer.done) {
    // All non-CF-manipulation operations can be grouped together and committed
    // to MANIFEST. They should all have finished. The status code is stored in
    // the first manifest writer.
#ifndef NDEBUG
    for (const auto& writer : writers) {
      assert(writer.done);
    }
4473
    TEST_SYNC_POINT_CALLBACK("VersionSet::LogAndApply:WakeUpAndDone", mu);
4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492
#endif /* !NDEBUG */
    return first_writer.status;
  }

  int num_undropped_cfds = 0;
  for (auto cfd : column_family_datas) {
    // if cfd == nullptr, it is a column family add.
    if (cfd == nullptr || !cfd->IsDropped()) {
      ++num_undropped_cfds;
    }
  }
  if (0 == num_undropped_cfds) {
    for (int i = 0; i != num_cfds; ++i) {
      manifest_writers_.pop_front();
    }
    // Notify new head of manifest write queue.
    if (!manifest_writers_.empty()) {
      manifest_writers_.front()->cv.Signal();
    }
4493
    return Status::ColumnFamilyDropped();
4494 4495 4496 4497 4498 4499
  }

  return ProcessManifestWrites(writers, mu, db_directory, new_descriptor_log,
                               new_cf_options);
}

I
Igor Canadi 已提交
4500 4501
// Populate a column-family-manipulation edit (CF add/drop) with the
// VersionSet-derived fields it needs before being written to the MANIFEST.
void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) {
  assert(edit->IsColumnFamilyManipulation());
  edit->SetNextFile(next_file_number_.load());
  // The log might have data that is not visible to the memtable and hence has
  // not updated last_sequence_ yet. It is also possible that the log is
  // expecting some new data that is not written yet. Since LastSequence is an
  // upper bound on the sequence, it is ok to record
  // last_allocated_sequence_ as the last sequence.
  edit->SetLastSequence(db_options_->two_write_queues ? last_allocated_sequence_
                                                      : last_sequence_);
  if (edit->is_column_family_drop_) {
    // When dropping a column family we must persist the max column family ID
    // so that a dropped ID is never reused after recovery.
    edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
  }
}

4517 4518 4519
// Fill in the VersionSet-derived fields of a regular (non-CF-manipulation)
// edit and apply it to the column family's version builder.
// `builder` may be nullptr only for WAL edits, which are not applied to
// versions; in that case Status::OK() is returned.
Status VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd,
                                     VersionBuilder* builder, VersionEdit* edit,
                                     InstrumentedMutex* mu) {
#ifdef NDEBUG
  (void)cfd;  // only consulted by the assertions below
#endif
  mu->AssertHeld();
  assert(!edit->IsColumnFamilyManipulation());

  if (edit->has_log_number_) {
    assert(edit->log_number_ >= cfd->GetLogNumber());
    assert(edit->log_number_ < next_file_number_.load());
  }

  if (!edit->has_prev_log_number_) {
    edit->SetPrevLogNumber(prev_log_number_);
  }
  edit->SetNextFile(next_file_number_.load());
  // The log might have data that is not visible to the memtable and hence has
  // not updated last_sequence_ yet. It is also possible that the log is
  // expecting some new data that is not written yet. Since LastSequence is an
  // upper bound on the sequence, it is ok to record
  // last_allocated_sequence_ as the last sequence.
  edit->SetLastSequence(db_options_->two_write_queues ? last_allocated_sequence_
                                                      : last_sequence_);

  // The builder can be nullptr only if edit is WAL manipulation,
  // because WAL edits do not need to be applied to versions,
  // we return Status::OK() in this case.
  assert(builder || edit->IsWalManipulation());
  return builder ? builder->Apply(edit) : Status::OK();
}

4550 4551
// Resolve the path and file number of the active MANIFEST by reading the
// CURRENT file in `dbname`. On success, `*manifest_path` holds the full path
// and `*manifest_file_number` its parsed descriptor number.
Status VersionSet::GetCurrentManifestPath(const std::string& dbname,
                                          FileSystem* fs,
                                          std::string* manifest_path,
                                          uint64_t* manifest_file_number) {
  assert(fs != nullptr);
  assert(manifest_path != nullptr);
  assert(manifest_file_number != nullptr);

  // CURRENT contains the MANIFEST file name terminated by '\n'.
  std::string fname;
  Status s = ReadFileToString(fs, CurrentFileName(dbname), &fname);
  if (!s.ok()) {
    return s;
  }
  if (fname.empty() || fname.back() != '\n') {
    return Status::Corruption("CURRENT file does not end with newline");
  }
  // Strip the trailing '\n' before parsing the file name.
  fname.resize(fname.size() - 1);
  FileType type;
  const bool parse_ok = ParseFileName(fname, manifest_file_number, &type);
  if (!parse_ok || type != kDescriptorFile) {
    return Status::Corruption("CURRENT file corrupted");
  }
  *manifest_path = dbname;
  if (dbname.back() != '/') {
    manifest_path->push_back('/');
  }
  manifest_path->append(fname);
  return Status::OK();
}

I
Igor Canadi 已提交
4581
// Recover the VersionSet state by replaying the MANIFEST pointed to by the
// CURRENT file. On success, logs a summary and per-CF log numbers.
Status VersionSet::Recover(
    const std::vector<ColumnFamilyDescriptor>& column_families, bool read_only,
    std::string* db_id) {
  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string manifest_path;
  Status s = GetCurrentManifestPath(dbname_, fs_.get(), &manifest_path,
                                    &manifest_file_number_);
  if (!s.ok()) {
    return s;
  }

  ROCKS_LOG_INFO(db_options_->info_log, "Recovering from manifest file: %s\n",
                 manifest_path.c_str());

  // Open the manifest for sequential reading.
  std::unique_ptr<SequentialFileReader> manifest_file_reader;
  {
    std::unique_ptr<FSSequentialFile> manifest_file;
    s = fs_->NewSequentialFile(manifest_path,
                               fs_->OptimizeForManifestRead(file_options_),
                               &manifest_file, nullptr);
    if (!s.ok()) {
      return s;
    }
    manifest_file_reader.reset(
        new SequentialFileReader(std::move(manifest_file), manifest_path,
                                 db_options_->log_readahead_size, io_tracer_));
  }
  uint64_t current_manifest_file_size = 0;
  uint64_t log_number = 0;
  {
    // Replay all version edits through a VersionEditHandler.
    VersionSet::LogReporter reporter;
    Status log_read_status;
    reporter.status = &log_read_status;
    log::Reader reader(nullptr, std::move(manifest_file_reader), &reporter,
                       true /* checksum */, 0 /* log_number */);
    VersionEditHandler handler(read_only, column_families,
                               const_cast<VersionSet*>(this),
                               /*track_missing_files=*/false,
                               /*no_error_if_files_missing=*/false, io_tracer_);
    handler.Iterate(reader, &log_read_status);
    s = handler.status();
    if (s.ok()) {
      log_number = handler.GetVersionEditParams().log_number_;
      current_manifest_file_size = reader.GetReadOffset();
      assert(current_manifest_file_size != 0);
      handler.GetDbId(db_id);
    }
  }

  if (s.ok()) {
    manifest_file_size_ = current_manifest_file_size;
    ROCKS_LOG_INFO(
        db_options_->info_log,
        "Recovered from manifest file:%s succeeded,"
        "manifest_file_number is %" PRIu64 ", next_file_number is %" PRIu64
        ", last_sequence is %" PRIu64 ", log_number is %" PRIu64
        ",prev_log_number is %" PRIu64 ",max_column_family is %" PRIu32
        ",min_log_number_to_keep is %" PRIu64 "\n",
        manifest_path.c_str(), manifest_file_number_, next_file_number_.load(),
        last_sequence_.load(), log_number, prev_log_number_,
        column_family_set_->GetMaxColumnFamily(), min_log_number_to_keep_2pc());

    for (auto cfd : *column_family_set_) {
      if (cfd->IsDropped()) {
        continue;
      }
      ROCKS_LOG_INFO(db_options_->info_log,
                     "Column family [%s] (ID %" PRIu32
                     "), log number is %" PRIu64 "\n",
                     cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
    }
  }

  return s;
}

4657 4658 4659
namespace {
class ManifestPicker {
 public:
4660 4661
  explicit ManifestPicker(const std::string& dbname,
                          const std::vector<std::string>& files_in_dbname);
4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672
  // REQUIRES Valid() == true
  std::string GetNextManifest(uint64_t* file_number, std::string* file_name);
  bool Valid() const { return manifest_file_iter_ != manifest_files_.end(); }

 private:
  const std::string& dbname_;
  // MANIFEST file names(s)
  std::vector<std::string> manifest_files_;
  std::vector<std::string>::const_iterator manifest_file_iter_;
};

4673 4674 4675 4676 4677 4678
ManifestPicker::ManifestPicker(const std::string& dbname,
                               const std::vector<std::string>& files_in_dbname)
    : dbname_(dbname) {
  // populate manifest files
  assert(!files_in_dbname.empty());
  for (const auto& fname : files_in_dbname) {
4679 4680 4681 4682 4683 4684 4685
    uint64_t file_num = 0;
    FileType file_type;
    bool parse_ok = ParseFileName(fname, &file_num, &file_type);
    if (parse_ok && file_type == kDescriptorFile) {
      manifest_files_.push_back(fname);
    }
  }
4686
  // seek to first manifest
4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737
  std::sort(manifest_files_.begin(), manifest_files_.end(),
            [](const std::string& lhs, const std::string& rhs) {
              uint64_t num1 = 0;
              uint64_t num2 = 0;
              FileType type1;
              FileType type2;
              bool parse_ok1 = ParseFileName(lhs, &num1, &type1);
              bool parse_ok2 = ParseFileName(rhs, &num2, &type2);
#ifndef NDEBUG
              assert(parse_ok1);
              assert(parse_ok2);
#else
              (void)parse_ok1;
              (void)parse_ok2;
#endif
              return num1 > num2;
            });
  manifest_file_iter_ = manifest_files_.begin();
}

std::string ManifestPicker::GetNextManifest(uint64_t* number,
                                            std::string* file_name) {
  assert(Valid());
  std::string ret;
  if (manifest_file_iter_ != manifest_files_.end()) {
    ret.assign(dbname_);
    if (ret.back() != kFilePathSeparator) {
      ret.push_back(kFilePathSeparator);
    }
    ret.append(*manifest_file_iter_);
    if (number) {
      FileType type;
      bool parse = ParseFileName(*manifest_file_iter_, number, &type);
      assert(type == kDescriptorFile);
#ifndef NDEBUG
      assert(parse);
#else
      (void)parse;
#endif
    }
    if (file_name) {
      *file_name = *manifest_file_iter_;
    }
    ++manifest_file_iter_;
  }
  return ret;
}
}  // namespace

// Attempt recovery from each MANIFEST in the db directory, newest first,
// until one succeeds or all have been tried. Resets in-memory state between
// attempts.
Status VersionSet::TryRecover(
    const std::vector<ColumnFamilyDescriptor>& column_families, bool read_only,
    const std::vector<std::string>& files_in_dbname, std::string* db_id,
    bool* has_missing_table_file) {
  ManifestPicker manifest_picker(dbname_, files_in_dbname);
  if (!manifest_picker.Valid()) {
    return Status::Corruption("Cannot locate MANIFEST file in " + dbname_);
  }
  Status s;
  std::string manifest_path =
      manifest_picker.GetNextManifest(&manifest_file_number_, nullptr);
  while (!manifest_path.empty()) {
    s = TryRecoverFromOneManifest(manifest_path, column_families, read_only,
                                  db_id, has_missing_table_file);
    if (s.ok() || !manifest_picker.Valid()) {
      break;
    }
    // This attempt failed: wipe partially-recovered state and fall back to
    // the next (older) manifest.
    Reset();
    manifest_path =
        manifest_picker.GetNextManifest(&manifest_file_number_, nullptr);
  }
  return s;
}

// Attempt a point-in-time recovery from a single MANIFEST file. Sets
// `*has_missing_table_file` if the recovered version references table files
// that are absent.
Status VersionSet::TryRecoverFromOneManifest(
    const std::string& manifest_path,
    const std::vector<ColumnFamilyDescriptor>& column_families, bool read_only,
    std::string* db_id, bool* has_missing_table_file) {
  ROCKS_LOG_INFO(db_options_->info_log, "Trying to recover from manifest: %s\n",
                 manifest_path.c_str());
  std::unique_ptr<SequentialFileReader> manifest_file_reader;
  Status s;
  {
    std::unique_ptr<FSSequentialFile> manifest_file;
    s = fs_->NewSequentialFile(manifest_path,
                               fs_->OptimizeForManifestRead(file_options_),
                               &manifest_file, nullptr);
    if (!s.ok()) {
      return s;
    }
    manifest_file_reader.reset(
        new SequentialFileReader(std::move(manifest_file), manifest_path,
                                 db_options_->log_readahead_size, io_tracer_));
  }

  assert(s.ok());
  VersionSet::LogReporter reporter;
  reporter.status = &s;
  log::Reader reader(nullptr, std::move(manifest_file_reader), &reporter,
                     /*checksum=*/true, /*log_num=*/0);
  // The point-in-time handler stops at the last consistent state instead of
  // failing on a truncated/corrupt tail.
  VersionEditHandlerPointInTime handler_pit(
      read_only, column_families, const_cast<VersionSet*>(this), io_tracer_);

  handler_pit.Iterate(reader, &s);

  handler_pit.GetDbId(db_id);

  assert(nullptr != has_missing_table_file);
  *has_missing_table_file = handler_pit.HasMissingFiles();

  return handler_pit.status();
}

I
Igor Canadi 已提交
4799
// Static helper: list the names of all column families recorded in the
// current MANIFEST of `dbname`, without opening the DB.
Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
                                      const std::string& dbname,
                                      FileSystem* fs) {
  // these are just for performance reasons, not correctness,
  // so we're fine using the defaults
  FileOptions soptions;

  // Read "CURRENT" file, which contains a pointer to the current manifest file
  std::string manifest_path;
  uint64_t manifest_file_number;
  Status s =
      GetCurrentManifestPath(dbname, fs, &manifest_path, &manifest_file_number);
  if (!s.ok()) {
    return s;
  }

  std::unique_ptr<SequentialFileReader> file_reader;
  {
    std::unique_ptr<FSSequentialFile> file;
    s = fs->NewSequentialFile(manifest_path, soptions, &file, nullptr);
    if (!s.ok()) {
      return s;
    }
    file_reader.reset(new SequentialFileReader(std::move(file), manifest_path,
                                               nullptr /*IOTracer*/));
  }

  VersionSet::LogReporter reporter;
  reporter.status = &s;
  log::Reader reader(nullptr, std::move(file_reader), &reporter,
                     true /* checksum */, 0 /* log_number */);

  // Replay the manifest, collecting only column family names.
  ListColumnFamiliesHandler handler;
  handler.Iterate(reader, &s);

  assert(column_families);
  column_families->clear();
  if (handler.status().ok()) {
    for (const auto& iter : handler.GetColumnFamilyNames()) {
      column_families->push_back(iter.second);
    }
  }

  return handler.status();
}
4843

I
Igor Canadi 已提交
4844
#ifndef ROCKSDB_LITE
4845 4846
// Offline utility: rewrite the DB metadata so that it uses `new_levels`
// levels instead of the current (larger) number. All files in levels
// >= new_levels - 1 must reside on a single level, which is moved to become
// the new last level.
Status VersionSet::ReduceNumberOfLevels(const std::string& dbname,
                                        const Options* options,
                                        const FileOptions& file_options,
                                        int new_levels) {
  if (new_levels <= 1) {
    return Status::InvalidArgument(
        "Number of levels needs to be bigger than 1");
  }

  ImmutableDBOptions db_options(*options);
  ColumnFamilyOptions cf_options(*options);
  std::shared_ptr<Cache> tc(NewLRUCache(options->max_open_files - 10,
                                        options->table_cache_numshardbits));
  WriteController wc(options->delayed_write_rate);
  WriteBufferManager wb(options->db_write_buffer_size);
  VersionSet versions(dbname, &db_options, file_options, tc.get(), &wb, &wc,
                      nullptr /*BlockCacheTracer*/, nullptr /*IOTracer*/,
                      /*db_session_id*/ "");
  Status status;

  // Recover with just the default column family.
  std::vector<ColumnFamilyDescriptor> dummy;
  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
                                          ColumnFamilyOptions(*options));
  dummy.push_back(dummy_descriptor);
  status = versions.Recover(dummy);
  if (!status.ok()) {
    return status;
  }

  Version* current_version =
      versions.GetColumnFamilySet()->GetDefault()->current();
  auto* vstorage = current_version->storage_info();
  int current_levels = vstorage->num_levels();

  if (current_levels <= new_levels) {
    return Status::OK();
  }

  // Make sure there are file only on one level from
  // (new_levels-1) to (current_levels-1)
  int first_nonempty_level = -1;
  int first_nonempty_level_filenum = 0;
  for (int i = new_levels - 1; i < current_levels; i++) {
    int file_num = vstorage->NumLevelFiles(i);
    if (file_num != 0) {
      if (first_nonempty_level < 0) {
        first_nonempty_level = i;
        first_nonempty_level_filenum = file_num;
      } else {
        char msg[255];
        snprintf(msg, sizeof(msg),
                 "Found at least two levels containing files: "
                 "[%d:%d],[%d:%d].\n",
                 first_nonempty_level, first_nonempty_level_filenum, i,
                 file_num);
        return Status::InvalidArgument(msg);
      }
    }
  }

  // we need to allocate an array with the old number of levels size to
  // avoid SIGSEGV in WriteCurrentStatetoManifest()
  // however, all levels bigger or equal to new_levels will be empty
  std::vector<FileMetaData*>* new_files_list =
      new std::vector<FileMetaData*>[current_levels];
  for (int i = 0; i < new_levels - 1; i++) {
    new_files_list[i] = vstorage->LevelFiles(i);
  }

  if (first_nonempty_level > 0) {
    // Move the single non-empty high level down to become the new last level
    // and fix up the file-number -> location index accordingly.
    auto& new_last_level = new_files_list[new_levels - 1];

    new_last_level = vstorage->LevelFiles(first_nonempty_level);

    for (size_t i = 0; i < new_last_level.size(); ++i) {
      const FileMetaData* const meta = new_last_level[i];
      assert(meta);

      const uint64_t file_number = meta->fd.GetNumber();

      vstorage->file_locations_[file_number] =
          VersionStorageInfo::FileLocation(new_levels - 1, i);
    }
  }

  // Ownership of new_files_list transfers to vstorage.
  delete[] vstorage->files_;
  vstorage->files_ = new_files_list;
  vstorage->num_levels_ = new_levels;

  MutableCFOptions mutable_cf_options(*options);
  VersionEdit ve;
  InstrumentedMutex dummy_mutex;
  InstrumentedMutexLock l(&dummy_mutex);
  return versions.LogAndApply(versions.GetColumnFamilySet()->GetDefault(),
                              mutable_cf_options, &ve, &dummy_mutex, nullptr,
                              true);
}

4943
// Get the checksum information including the checksum and checksum function
4944
// name of all SST and blob files in VersionSet. Store the information in
4945 4946 4947 4948 4949
// FileChecksumList which contains a map from file number to its checksum info.
// If DB is not running, make sure call VersionSet::Recover() to load the file
// metadata from Manifest to VersionSet before calling this function.
Status VersionSet::GetLiveFilesChecksumInfo(FileChecksumList* checksum_list) {
  if (checksum_list == nullptr) {
    return Status::InvalidArgument("checksum_list is nullptr");
  }
  // Clean the previously stored checksum information if any.
  checksum_list->reset();

  for (auto cfd : *column_family_set_) {
    if (cfd->IsDropped() || !cfd->initialized()) {
      continue;
    }

    /* SST files */
    for (int level = 0; level < cfd->NumberLevels(); level++) {
      for (const auto& file :
           cfd->current()->storage_info()->LevelFiles(level)) {
        Status s = checksum_list->InsertOneFileChecksum(
            file->fd.GetNumber(), file->file_checksum,
            file->file_checksum_func_name);
        if (!s.ok()) {
          return s;
        }
      }
    }

    /* Blob files */
    const auto& blob_files = cfd->current()->storage_info()->GetBlobFiles();
    for (const auto& pair : blob_files) {
      const uint64_t blob_file_number = pair.first;
      const auto& meta = pair.second;

      assert(meta);
      assert(blob_file_number == meta->GetBlobFileNumber());

      // Blob files without a recorded checksum are reported with the
      // "unknown" placeholder values.
      std::string checksum_value = meta->GetChecksumValue();
      std::string checksum_method = meta->GetChecksumMethod();
      assert(checksum_value.empty() == checksum_method.empty());
      if (meta->GetChecksumMethod().empty()) {
        checksum_value = kUnknownFileChecksum;
        checksum_method = kUnknownFileChecksumFuncName;
      }

      Status s = checksum_list->InsertOneFileChecksum(
          blob_file_number, checksum_value, checksum_method);
      if (!s.ok()) {
        return s;
      }
    }
  }

  return Status::OK();
}

I
Igor Canadi 已提交
5002
// Dump the contents of the manifest file `dscname` in a human-readable form
// (optionally verbose / hex / json), used by ldb-style tooling.
Status VersionSet::DumpManifest(Options& options, std::string& dscname,
                                bool verbose, bool hex, bool json) {
  // Open the specified manifest file.
  std::unique_ptr<SequentialFileReader> file_reader;
  Status s;
  {
    std::unique_ptr<FSSequentialFile> file;
    const std::shared_ptr<FileSystem>& fs = options.env->GetFileSystem();
    s = fs->NewSequentialFile(dscname,
                              fs->OptimizeForManifestRead(file_options_),
                              &file, nullptr);
    if (!s.ok()) {
      return s;
    }
    file_reader.reset(new SequentialFileReader(
        std::move(file), dscname, db_options_->log_readahead_size, io_tracer_));
  }

  std::vector<ColumnFamilyDescriptor> column_families(
      1, ColumnFamilyDescriptor(kDefaultColumnFamilyName, options));
  DumpManifestHandler handler(column_families, this, io_tracer_, verbose, hex,
                              json);
  {
    VersionSet::LogReporter reporter;
    reporter.status = &s;
    log::Reader reader(nullptr, std::move(file_reader), &reporter,
                       true /* checksum */, 0 /* log_number */);
    handler.Iterate(reader, &s);
  }

  return handler.status();
}
I
Igor Canadi 已提交
5035
#endif  // ROCKSDB_LITE
5036

A
Andrew Kryczka 已提交
5037 5038 5039
void VersionSet::MarkFileNumberUsed(uint64_t number) {
  // only called during recovery and repair which are single threaded, so this
  // works because there can't be concurrent calls
5040 5041
  if (next_file_number_.load(std::memory_order_relaxed) <= number) {
    next_file_number_.store(number + 1, std::memory_order_relaxed);
5042 5043
  }
}
S
Siying Dong 已提交
5044 5045 5046 5047 5048 5049 5050 5051
// Called only either from ::LogAndApply which is protected by mutex or during
// recovery which is single-threaded.
void VersionSet::MarkMinLogNumberToKeep2PC(uint64_t number) {
  // Monotonically advance; relaxed ordering suffices because callers are
  // serialized (see above).
  if (min_log_number_to_keep_2pc_.load(std::memory_order_relaxed) < number) {
    min_log_number_to_keep_2pc_.store(number, std::memory_order_relaxed);
  }
}

5052 5053
// Write a snapshot of the current VersionSet state (db id, WALs, column
// families and their files) as a sequence of VersionEdit records into a
// freshly created MANIFEST. On I/O failure, `io_s` carries the error.
Status VersionSet::WriteCurrentStateToManifest(
    const std::unordered_map<uint32_t, MutableCFState>& curr_state,
    const VersionEdit& wal_additions, log::Writer* log, IOStatus& io_s) {
  // TODO: Break up into multiple records to reduce memory usage on recovery?

  // WARNING: This method doesn't hold a mutex!!

  // This is done without DB mutex lock held, but only within single-threaded
  // LogAndApply. Column family manipulations can only happen within LogAndApply
  // (the same single thread), so we're safe to iterate.

  assert(io_s.ok());

  // Optionally persist the DB id first.
  if (db_options_->write_dbid_to_manifest) {
    VersionEdit edit_for_db_id;
    assert(!db_id_.empty());
    edit_for_db_id.SetDBId(db_id_);
    std::string db_id_record;
    if (!edit_for_db_id.EncodeTo(&db_id_record)) {
      return Status::Corruption("Unable to Encode VersionEdit:" +
                                edit_for_db_id.DebugString(true));
    }
    io_s = log->AddRecord(db_id_record);
    if (!io_s.ok()) {
      return io_s;
    }
  }

  // Save WALs.
  if (!wal_additions.GetWalAdditions().empty()) {
    TEST_SYNC_POINT_CALLBACK("VersionSet::WriteCurrentStateToManifest:SaveWal",
                             const_cast<VersionEdit*>(&wal_additions));
    std::string record;
    if (!wal_additions.EncodeTo(&record)) {
      return Status::Corruption("Unable to Encode VersionEdit: " +
                                wal_additions.DebugString(true));
    }
    io_s = log->AddRecord(record);
    if (!io_s.ok()) {
      return io_s;
    }
  }

  for (auto cfd : *column_family_set_) {
    assert(cfd);

    if (cfd->IsDropped()) {
      continue;
    }
    assert(cfd->initialized());

    {
      // Store column family info
      VersionEdit edit;
      if (cfd->GetID() != 0) {
        // default column family is always there,
        // no need to explicitly write it
        edit.AddColumnFamily(cfd->GetName());
        edit.SetColumnFamily(cfd->GetID());
      }
      edit.SetComparatorName(
          cfd->internal_comparator().user_comparator()->Name());
      std::string record;
      if (!edit.EncodeTo(&record)) {
        return Status::Corruption(
            "Unable to Encode VersionEdit:" + edit.DebugString(true));
      }
      io_s = log->AddRecord(record);
      if (!io_s.ok()) {
        return io_s;
      }
    }

    {
      // Save files
      VersionEdit edit;
      edit.SetColumnFamily(cfd->GetID());

      assert(cfd->current());
      assert(cfd->current()->storage_info());

      // All live SST files of this column family.
      for (int level = 0; level < cfd->NumberLevels(); level++) {
        for (const auto& f :
             cfd->current()->storage_info()->LevelFiles(level)) {
          edit.AddFile(level, f->fd.GetNumber(), f->fd.GetPathId(),
                       f->fd.GetFileSize(), f->smallest, f->largest,
                       f->fd.smallest_seqno, f->fd.largest_seqno,
                       f->marked_for_compaction, f->oldest_blob_file_number,
                       f->oldest_ancester_time, f->file_creation_time,
                       f->file_checksum, f->file_checksum_func_name);
        }
      }

      // All live blob files, including their garbage stats if any.
      const auto& blob_files = cfd->current()->storage_info()->GetBlobFiles();
      for (const auto& pair : blob_files) {
        const uint64_t blob_file_number = pair.first;
        const auto& meta = pair.second;

        assert(meta);
        assert(blob_file_number == meta->GetBlobFileNumber());

        edit.AddBlobFile(blob_file_number, meta->GetTotalBlobCount(),
                         meta->GetTotalBlobBytes(), meta->GetChecksumMethod(),
                         meta->GetChecksumValue());
        if (meta->GetGarbageBlobCount() > 0) {
          edit.AddBlobFileGarbage(blob_file_number, meta->GetGarbageBlobCount(),
                                  meta->GetGarbageBlobBytes());
        }
      }

      const auto iter = curr_state.find(cfd->GetID());
      assert(iter != curr_state.end());
      uint64_t log_number = iter->second.log_number;
      edit.SetLogNumber(log_number);

      if (cfd->GetID() == 0) {
        // min_log_number_to_keep is for the whole db, not for a specific
        // column family, so it only needs to be written once. Since the
        // default CF can never be dropped, record it with the default CF.
        uint64_t min_log = min_log_number_to_keep_2pc();
        if (min_log != 0) {
          edit.SetMinLogNumberToKeep(min_log);
        }
      }

      const std::string& full_history_ts_low = iter->second.full_history_ts_low;
      if (!full_history_ts_low.empty()) {
        edit.SetFullHistoryTsLow(full_history_ts_low);
      }

      std::string record;
      if (!edit.EncodeTo(&record)) {
        return Status::Corruption(
            "Unable to Encode VersionEdit:" + edit.DebugString(true));
      }
      io_s = log->AddRecord(record);
      if (!io_s.ok()) {
        return io_s;
      }
    }
  }
  return Status::OK();
}

5193 5194 5195 5196 5197 5198
// TODO(aekmekji): in CompactionJob::GenSubcompactionBoundaries(), this
// function is called repeatedly with consecutive pairs of slices. For example
// if the slice list is [a, b, c, d] this function is called with arguments
// (a,b) then (b,c) then (c,d). Knowing this, an optimization is possible where
// we avoid doing binary search for the keys b and c twice and instead somehow
// maintain state of where they first appear in the files.
5199 5200
uint64_t VersionSet::ApproximateSize(const SizeApproximationOptions& options,
                                     Version* v, const Slice& start,
5201
                                     const Slice& end, int start_level,
5202
                                     int end_level, TableReaderCaller caller) {
5203 5204
  const auto& icmp = v->cfd_->internal_comparator();

5205
  // pre-condition
5206
  assert(icmp.Compare(start, end) <= 0);
5207

5208
  uint64_t total_full_size = 0;
S
sdong 已提交
5209
  const auto* vstorage = v->storage_info();
5210 5211 5212
  const int num_non_empty_levels = vstorage->num_non_empty_levels();
  end_level = (end_level == -1) ? num_non_empty_levels
                                : std::min(end_level, num_non_empty_levels);
5213

5214 5215
  assert(start_level <= end_level);

5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235
  // Outline of the optimization that uses options.files_size_error_margin.
  // When approximating the files total size that is used to store a keys range,
  // we first sum up the sizes of the files that fully fall into the range.
  // Then we sum up the sizes of all the files that may intersect with the range
  // (this includes all files in L0 as well). Then, if total_intersecting_size
  // is smaller than total_full_size * options.files_size_error_margin - we can
  // infer that the intersecting files have a sufficiently negligible
  // contribution to the total size, and we can approximate the storage required
  // for the keys in range as just half of the intersecting_files_size.
  // E.g., if the value of files_size_error_margin is 0.1, then the error of the
  // approximation is limited to only ~10% of the total size of files that fully
  // fall into the keys range. In such case, this helps to avoid a costly
  // process of binary searching the intersecting files that is required only
  // for a more precise calculation of the total size.

  autovector<FdWithKeyRange*, 32> first_files;
  autovector<FdWithKeyRange*, 16> last_files;

  // scan all the levels
  for (int level = start_level; level < end_level; ++level) {
5236
    const LevelFilesBrief& files_brief = vstorage->LevelFilesBrief(level);
5237
    if (files_brief.num_files == 0) {
5238 5239 5240 5241
      // empty level, skip exploration
      continue;
    }

5242 5243 5244 5245 5246 5247 5248
    if (level == 0) {
      // level 0 files are not in sorted order, we need to iterate through
      // the list to compute the total bytes that require scanning,
      // so handle the case explicitly (similarly to first_files case)
      for (size_t i = 0; i < files_brief.num_files; i++) {
        first_files.push_back(&files_brief.files[i]);
      }
5249 5250 5251 5252 5253 5254
      continue;
    }

    assert(level > 0);
    assert(files_brief.num_files > 0);

5255 5256 5257 5258 5259
    // identify the file position for start key
    const int idx_start =
        FindFileInRange(icmp, files_brief, start, 0,
                        static_cast<uint32_t>(files_brief.num_files - 1));
    assert(static_cast<size_t>(idx_start) < files_brief.num_files);
5260

5261 5262 5263 5264 5265 5266 5267 5268 5269
    // identify the file position for end key
    int idx_end = idx_start;
    if (icmp.Compare(files_brief.files[idx_end].largest_key, end) < 0) {
      idx_end =
          FindFileInRange(icmp, files_brief, end, idx_start,
                          static_cast<uint32_t>(files_brief.num_files - 1));
    }
    assert(idx_end >= idx_start &&
           static_cast<size_t>(idx_end) < files_brief.num_files);
5270

5271 5272 5273 5274 5275 5276 5277 5278
    // scan all files from the starting index to the ending index
    // (inferred from the sorted order)

    // first scan all the intermediate full files (excluding first and last)
    for (int i = idx_start + 1; i < idx_end; ++i) {
      uint64_t file_size = files_brief.files[i].fd.GetFileSize();
      // The entire file falls into the range, so we can just take its size.
      assert(file_size ==
5279
             ApproximateSize(v, files_brief.files[i], start, end, caller));
5280 5281 5282 5283 5284 5285 5286 5287 5288
      total_full_size += file_size;
    }

    // save the first and the last files (which may be the same file), so we
    // can scan them later.
    first_files.push_back(&files_brief.files[idx_start]);
    if (idx_start != idx_end) {
      // we need to estimate size for both files, only if they are different
      last_files.push_back(&files_brief.files[idx_end]);
J
jorlow@chromium.org 已提交
5289 5290
    }
  }
5291

5292 5293 5294 5295 5296 5297 5298 5299
  // The sum of all file sizes that intersect the [start, end] keys range.
  uint64_t total_intersecting_size = 0;
  for (const auto* file_ptr : first_files) {
    total_intersecting_size += file_ptr->fd.GetFileSize();
  }
  for (const auto* file_ptr : last_files) {
    total_intersecting_size += file_ptr->fd.GetFileSize();
  }
5300

5301 5302 5303 5304 5305 5306 5307 5308 5309 5310
  // Now scan all the first & last files at each level, and estimate their size.
  // If the total_intersecting_size is less than X% of the total_full_size - we
  // want to approximate the result in order to avoid the costly binary search
  // inside ApproximateSize. We use half of file size as an approximation below.

  const double margin = options.files_size_error_margin;
  if (margin > 0 && total_intersecting_size <
                        static_cast<uint64_t>(total_full_size * margin)) {
    total_full_size += total_intersecting_size / 2;
  } else {
5311 5312
    // Estimate for all the first files (might also be last files), at each
    // level
5313
    for (const auto file_ptr : first_files) {
5314
      total_full_size += ApproximateSize(v, *file_ptr, start, end, caller);
5315 5316 5317 5318
    }

    // Estimate for all the last files, at each level
    for (const auto file_ptr : last_files) {
5319 5320 5321
      // We could use ApproximateSize here, but calling ApproximateOffsetOf
      // directly is just more efficient.
      total_full_size += ApproximateOffsetOf(v, *file_ptr, end, caller);
5322
    }
5323
  }
5324 5325

  return total_full_size;
5326 5327
}

5328 5329 5330
uint64_t VersionSet::ApproximateOffsetOf(Version* v, const FdWithKeyRange& f,
                                         const Slice& key,
                                         TableReaderCaller caller) {
5331 5332
  // pre-condition
  assert(v);
5333
  const auto& icmp = v->cfd_->internal_comparator();
5334 5335

  uint64_t result = 0;
5336
  if (icmp.Compare(f.largest_key, key) <= 0) {
5337 5338
    // Entire file is before "key", so just add the file size
    result = f.fd.GetFileSize();
5339
  } else if (icmp.Compare(f.smallest_key, key) > 0) {
5340 5341 5342 5343 5344
    // Entire file is after "key", so ignore
    result = 0;
  } else {
    // "key" falls in the range for this table.  Add the
    // approximate offset of "key" within the table.
5345 5346 5347
    TableCache* table_cache = v->cfd_->table_cache();
    if (table_cache != nullptr) {
      result = table_cache->ApproximateOffsetOf(
5348
          key, f.file_metadata->fd, caller, icmp,
5349 5350
          v->GetMutableCFOptions().prefix_extractor.get());
    }
5351
  }
J
jorlow@chromium.org 已提交
5352 5353 5354
  return result;
}

5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391
// Approximates how many bytes of file `f` store keys from [start, end].
// Returns 0 when the file does not overlap the range; otherwise delegates
// to ApproximateOffsetOf() or the table cache as appropriate.
uint64_t VersionSet::ApproximateSize(Version* v, const FdWithKeyRange& f,
                                     const Slice& start, const Slice& end,
                                     TableReaderCaller caller) {
  // pre-condition
  assert(v);
  const auto& icmp = v->cfd_->internal_comparator();
  assert(icmp.Compare(start, end) <= 0);

  const bool file_entirely_before = icmp.Compare(f.largest_key, start) <= 0;
  const bool file_entirely_after = icmp.Compare(f.smallest_key, end) > 0;
  if (file_entirely_before || file_entirely_after) {
    // The file and the [start, end] keys range do not overlap at all.
    return 0;
  }

  if (icmp.Compare(f.smallest_key, start) >= 0) {
    // Start of the range is before the file start - approximate by end offset
    return ApproximateOffsetOf(v, f, end, caller);
  }

  if (icmp.Compare(f.largest_key, end) < 0) {
    // End of the range is after the file end - approximate by subtracting
    // start offset from the file size
    const uint64_t start_offset = ApproximateOffsetOf(v, f, start, caller);
    assert(f.fd.GetFileSize() >= start_offset);
    return f.fd.GetFileSize() - start_offset;
  }

  // The interval falls entirely in the range for this file.
  TableCache* const table_cache = v->cfd_->table_cache();
  if (table_cache == nullptr) {
    return 0;
  }
  return table_cache->ApproximateSize(
      start, end, f.file_metadata->fd, caller, icmp,
      v->GetMutableCFOptions().prefix_extractor.get());
}

5392 5393 5394 5395 5396
void VersionSet::AddLiveFiles(std::vector<uint64_t>* live_table_files,
                              std::vector<uint64_t>* live_blob_files) const {
  assert(live_table_files);
  assert(live_blob_files);

5397
  // pre-calculate space requirement
5398 5399 5400 5401
  size_t total_table_files = 0;
  size_t total_blob_files = 0;

  assert(column_family_set_);
I
Igor Canadi 已提交
5402
  for (auto cfd : *column_family_set_) {
5403 5404
    assert(cfd);

5405 5406 5407
    if (!cfd->initialized()) {
      continue;
    }
5408 5409 5410 5411

    Version* const dummy_versions = cfd->dummy_versions();
    assert(dummy_versions);

5412
    for (Version* v = dummy_versions->next_; v != dummy_versions;
I
Igor Canadi 已提交
5413
         v = v->next_) {
5414 5415
      assert(v);

S
sdong 已提交
5416
      const auto* vstorage = v->storage_info();
5417 5418 5419 5420
      assert(vstorage);

      for (int level = 0; level < vstorage->num_levels(); ++level) {
        total_table_files += vstorage->LevelFiles(level).size();
5421
      }
5422 5423

      total_blob_files += vstorage->GetBlobFiles().size();
5424 5425 5426 5427
    }
  }

  // just one time extension to the right size
5428 5429
  live_table_files->reserve(live_table_files->size() + total_table_files);
  live_blob_files->reserve(live_blob_files->size() + total_blob_files);
5430

5431
  assert(column_family_set_);
I
Igor Canadi 已提交
5432
  for (auto cfd : *column_family_set_) {
5433
    assert(cfd);
5434 5435 5436
    if (!cfd->initialized()) {
      continue;
    }
5437

5438 5439
    auto* current = cfd->current();
    bool found_current = false;
5440 5441 5442 5443

    Version* const dummy_versions = cfd->dummy_versions();
    assert(dummy_versions);

5444
    for (Version* v = dummy_versions->next_; v != dummy_versions;
I
Igor Canadi 已提交
5445
         v = v->next_) {
5446
      v->AddLiveFiles(live_table_files, live_blob_files);
5447 5448
      if (v == current) {
        found_current = true;
J
jorlow@chromium.org 已提交
5449 5450
      }
    }
5451

5452 5453 5454
    if (!found_current && current != nullptr) {
      // Should never happen unless it is a bug.
      assert(false);
5455
      current->AddLiveFiles(live_table_files, live_blob_files);
5456
    }
J
jorlow@chromium.org 已提交
5457 5458 5459
  }
}

5460
InternalIterator* VersionSet::MakeInputIterator(
5461 5462
    const ReadOptions& read_options, const Compaction* c,
    RangeDelAggregator* range_del_agg,
5463
    const FileOptions& file_options_compactions) {
L
Lei Jin 已提交
5464
  auto cfd = c->column_family_data();
J
jorlow@chromium.org 已提交
5465 5466 5467
  // Level-0 files have to be merged together.  For other levels,
  // we will make a concatenating iterator per level.
  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
5468 5469 5470
  const size_t space = (c->level() == 0 ? c->input_levels(0)->num_files +
                                              c->num_input_levels() - 1
                                        : c->num_input_levels());
S
sdong 已提交
5471
  InternalIterator** list = new InternalIterator* [space];
5472 5473
  size_t num = 0;
  for (size_t which = 0; which < c->num_input_levels(); which++) {
F
Feng Zhu 已提交
5474
    if (c->input_levels(which)->num_files != 0) {
5475
      if (c->level(which) == 0) {
5476
        const LevelFilesBrief* flevel = c->input_levels(which);
F
Feng Zhu 已提交
5477
        for (size_t i = 0; i < flevel->num_files; i++) {
L
Lei Jin 已提交
5478
          list[num++] = cfd->table_cache()->NewIterator(
5479
              read_options, file_options_compactions,
5480 5481
              cfd->internal_comparator(), *flevel->files[i].file_metadata,
              range_del_agg, c->mutable_cf_options()->prefix_extractor.get(),
5482 5483 5484
              /*table_reader_ptr=*/nullptr,
              /*file_read_hist=*/nullptr, TableReaderCaller::kCompaction,
              /*arena=*/nullptr,
5485 5486 5487
              /*skip_filters=*/false,
              /*level=*/static_cast<int>(c->level(which)),
              MaxFileSizeForL0MetaPin(*c->mutable_cf_options()),
5488
              /*smallest_compaction_key=*/nullptr,
5489 5490
              /*largest_compaction_key=*/nullptr,
              /*allow_unprepared_value=*/false);
J
jorlow@chromium.org 已提交
5491 5492 5493
        }
      } else {
        // Create concatenating iterator for the files from this level
5494
        list[num++] = new LevelIterator(
5495
            cfd->table_cache(), read_options, file_options_compactions,
5496
            cfd->internal_comparator(), c->input_levels(which),
5497
            c->mutable_cf_options()->prefix_extractor.get(),
5498 5499 5500
            /*should_sample=*/false,
            /*no per level latency histogram=*/nullptr,
            TableReaderCaller::kCompaction, /*skip_filters=*/false,
5501
            /*level=*/static_cast<int>(c->level(which)), range_del_agg,
5502
            c->boundaries(which));
J
jorlow@chromium.org 已提交
5503 5504 5505 5506
      }
    }
  }
  assert(num <= space);
S
sdong 已提交
5507
  InternalIterator* result =
5508 5509
      NewMergingIterator(&c->column_family_data()->internal_comparator(), list,
                         static_cast<int>(num));
J
jorlow@chromium.org 已提交
5510 5511 5512 5513
  delete[] list;
  return result;
}

5514
Status VersionSet::GetMetadataForFile(uint64_t number, int* filelevel,
5515
                                      FileMetaData** meta,
5516 5517
                                      ColumnFamilyData** cfd) {
  for (auto cfd_iter : *column_family_set_) {
5518 5519 5520
    if (!cfd_iter->initialized()) {
      continue;
    }
5521
    Version* version = cfd_iter->current();
S
sdong 已提交
5522
    const auto* vstorage = version->storage_info();
5523
    for (int level = 0; level < vstorage->num_levels(); level++) {
S
sdong 已提交
5524
      for (const auto& file : vstorage->LevelFiles(level)) {
5525
        if (file->fd.GetNumber() == number) {
5526
          *meta = file;
5527
          *filelevel = level;
5528
          *cfd = cfd_iter;
5529 5530
          return Status::OK();
        }
5531 5532 5533 5534 5535 5536
      }
    }
  }
  return Status::NotFound("File not present in any level");
}

5537
void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
I
Igor Canadi 已提交
5538
  for (auto cfd : *column_family_set_) {
5539
    if (cfd->IsDropped() || !cfd->initialized()) {
5540 5541
      continue;
    }
I
Igor Canadi 已提交
5542
    for (int level = 0; level < cfd->NumberLevels(); level++) {
S
sdong 已提交
5543 5544
      for (const auto& file :
           cfd->current()->storage_info()->LevelFiles(level)) {
5545
        LiveFileMetaData filemetadata;
5546
        filemetadata.column_family_name = cfd->GetName();
5547
        uint32_t path_id = file->fd.GetPathId();
5548 5549
        if (path_id < cfd->ioptions()->cf_paths.size()) {
          filemetadata.db_path = cfd->ioptions()->cf_paths[path_id].path;
5550
        } else {
5551 5552
          assert(!cfd->ioptions()->cf_paths.empty());
          filemetadata.db_path = cfd->ioptions()->cf_paths.back().path;
5553
        }
5554 5555 5556
        const uint64_t file_number = file->fd.GetNumber();
        filemetadata.name = MakeTableFileName("", file_number);
        filemetadata.file_number = file_number;
5557
        filemetadata.level = level;
5558
        filemetadata.size = static_cast<size_t>(file->fd.GetFileSize());
I
Igor Canadi 已提交
5559 5560
        filemetadata.smallestkey = file->smallest.user_key().ToString();
        filemetadata.largestkey = file->largest.user_key().ToString();
5561 5562
        filemetadata.smallest_seqno = file->fd.smallest_seqno;
        filemetadata.largest_seqno = file->fd.largest_seqno;
5563 5564 5565
        filemetadata.num_reads_sampled = file->stats.num_reads_sampled.load(
            std::memory_order_relaxed);
        filemetadata.being_compacted = file->being_compacted;
5566 5567
        filemetadata.num_entries = file->num_entries;
        filemetadata.num_deletions = file->num_deletions;
5568
        filemetadata.oldest_blob_file_number = file->oldest_blob_file_number;
5569 5570
        filemetadata.file_checksum = file->file_checksum;
        filemetadata.file_checksum_func_name = file->file_checksum_func_name;
5571 5572 5573
        filemetadata.temperature = file->temperature;
        filemetadata.oldest_ancester_time = file->TryGetOldestAncesterTime();
        filemetadata.file_creation_time = file->TryGetFileCreationTime();
5574 5575
        metadata->push_back(filemetadata);
      }
5576 5577 5578 5579
    }
  }
}

5580
void VersionSet::GetObsoleteFiles(std::vector<ObsoleteFileInfo>* files,
5581
                                  std::vector<ObsoleteBlobFileInfo>* blob_files,
5582
                                  std::vector<std::string>* manifest_filenames,
I
Igor Canadi 已提交
5583
                                  uint64_t min_pending_output) {
5584 5585 5586 5587 5588
  assert(files);
  assert(blob_files);
  assert(manifest_filenames);
  assert(files->empty());
  assert(blob_files->empty());
5589
  assert(manifest_filenames->empty());
5590

5591 5592 5593
  std::vector<ObsoleteFileInfo> pending_files;
  for (auto& f : obsolete_files_) {
    if (f.metadata->fd.GetNumber() < min_pending_output) {
5594
      files->emplace_back(std::move(f));
I
Igor Canadi 已提交
5595
    } else {
5596
      pending_files.emplace_back(std::move(f));
I
Igor Canadi 已提交
5597 5598 5599
    }
  }
  obsolete_files_.swap(pending_files);
5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611

  std::vector<ObsoleteBlobFileInfo> pending_blob_files;
  for (auto& blob_file : obsolete_blob_files_) {
    if (blob_file.GetBlobFileNumber() < min_pending_output) {
      blob_files->emplace_back(std::move(blob_file));
    } else {
      pending_blob_files.emplace_back(std::move(blob_file));
    }
  }
  obsolete_blob_files_.swap(pending_blob_files);

  obsolete_manifests_.swap(*manifest_filenames);
I
Igor Canadi 已提交
5612 5613
}

5614
ColumnFamilyData* VersionSet::CreateColumnFamily(
5615
    const ColumnFamilyOptions& cf_options, const VersionEdit* edit) {
5616 5617
  assert(edit->is_column_family_add_);

5618 5619
  MutableCFOptions dummy_cf_options;
  Version* dummy_versions =
5620
      new Version(nullptr, this, file_options_, dummy_cf_options, io_tracer_);
5621 5622 5623
  // Ref() dummy version once so that later we can call Unref() to delete it
  // by avoiding calling "delete" explicitly (~Version is private)
  dummy_versions->Ref();
I
Igor Canadi 已提交
5624
  auto new_cfd = column_family_set_->CreateColumnFamily(
5625 5626
      edit->column_family_name_, edit->column_family_, dummy_versions,
      cf_options);
I
Igor Canadi 已提交
5627

5628
  Version* v = new Version(new_cfd, this, file_options_,
5629
                           *new_cfd->GetLatestMutableCFOptions(), io_tracer_,
5630
                           current_version_number_++);
5631

5632 5633 5634
  // Fill level target base information.
  v->storage_info()->CalculateBaseBytes(*new_cfd->ioptions(),
                                        *new_cfd->GetLatestMutableCFOptions());
5635
  AppendVersion(new_cfd, v);
5636 5637
  // GetLatestMutableCFOptions() is safe here without mutex since the
  // cfd is not available to client
A
agiardullo 已提交
5638 5639
  new_cfd->CreateNewMemtable(*new_cfd->GetLatestMutableCFOptions(),
                             LastSequence());
I
Igor Canadi 已提交
5640
  new_cfd->SetLogNumber(edit->log_number_);
5641 5642 5643
  return new_cfd;
}

5644 5645 5646 5647 5648 5649 5650 5651
// Counts the live Versions hanging off the circular doubly-linked list
// anchored at the dummy head `dummy_versions` (the head itself is excluded).
uint64_t VersionSet::GetNumLiveVersions(Version* dummy_versions) {
  uint64_t num_versions = 0;
  Version* cursor = dummy_versions->next_;
  while (cursor != dummy_versions) {
    ++num_versions;
    cursor = cursor->next_;
  }
  return num_versions;
}

5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669
uint64_t VersionSet::GetTotalSstFilesSize(Version* dummy_versions) {
  std::unordered_set<uint64_t> unique_files;
  uint64_t total_files_size = 0;
  for (Version* v = dummy_versions->next_; v != dummy_versions; v = v->next_) {
    VersionStorageInfo* storage_info = v->storage_info();
    for (int level = 0; level < storage_info->num_levels_; level++) {
      for (const auto& file_meta : storage_info->LevelFiles(level)) {
        if (unique_files.find(file_meta->fd.packed_number_and_path_id) ==
            unique_files.end()) {
          unique_files.insert(file_meta->fd.packed_number_and_path_id);
          total_files_size += file_meta->fd.GetFileSize();
        }
      }
    }
  }
  return total_files_size;
}

5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681
// Verifies that the on-disk size of `fpath` matches the size recorded in
// `meta`; returns Corruption on mismatch, or the filesystem error if the
// size could not be read.
Status VersionSet::VerifyFileMetadata(const std::string& fpath,
                                      const FileMetaData& meta) const {
  uint64_t size_on_disk = 0;
  const Status s = fs_->GetFileSize(fpath, IOOptions(), &size_on_disk, nullptr);
  if (!s.ok()) {
    return s;
  }
  if (size_on_disk != meta.fd.GetFileSize()) {
    return Status::Corruption("File size mismatch: " + fpath);
  }
  return Status::OK();
}

5682 5683 5684 5685 5686
ReactiveVersionSet::ReactiveVersionSet(
    const std::string& dbname, const ImmutableDBOptions* _db_options,
    const FileOptions& _file_options, Cache* table_cache,
    WriteBufferManager* write_buffer_manager, WriteController* write_controller,
    const std::shared_ptr<IOTracer>& io_tracer)
5687
    : VersionSet(dbname, _db_options, _file_options, table_cache,
5688
                 write_buffer_manager, write_controller,
5689 5690
                 /*block_cache_tracer=*/nullptr, io_tracer,
                 /*db_session_id*/ "") {}
5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704

ReactiveVersionSet::~ReactiveVersionSet() = default;

// Opens the current MANIFEST and replays it through a ManifestTailer to
// reconstruct the column-family state. The reader/reporter/status objects
// are handed back to the caller so subsequent ReadAndApply() calls can
// continue tailing the same file.
Status ReactiveVersionSet::Recover(
    const std::vector<ColumnFamilyDescriptor>& column_families,
    std::unique_ptr<log::FragmentBufferedReader>* manifest_reader,
    std::unique_ptr<log::Reader::Reporter>* manifest_reporter,
    std::unique_ptr<Status>* manifest_reader_status) {
  assert(manifest_reader != nullptr);
  assert(manifest_reporter != nullptr);
  assert(manifest_reader_status != nullptr);

  manifest_reader_status->reset(new Status());
  manifest_reporter->reset(new LogReporter());
  static_cast_with_check<LogReporter>(manifest_reporter->get())->status =
      manifest_reader_status->get();
  Status s = MaybeSwitchManifest(manifest_reporter->get(), manifest_reader);
  if (!s.ok()) {
    // Bail out early: on failure the reader may not have been created, and
    // the assert below is compiled out in release builds.
    return s;
  }
  log::Reader* reader = manifest_reader->get();
  assert(reader);

  manifest_tailer_.reset(new ManifestTailer(
      column_families, const_cast<ReactiveVersionSet*>(this), io_tracer_));

  manifest_tailer_->Iterate(*reader, manifest_reader_status->get());

  return manifest_tailer_->status();
}

// Tails new records from the MANIFEST (switching to a newer MANIFEST file
// first if one has appeared) and applies them. On success, reports the set
// of column families whose state changed. Must be called with `mu` held.
Status ReactiveVersionSet::ReadAndApply(
    InstrumentedMutex* mu,
    std::unique_ptr<log::FragmentBufferedReader>* manifest_reader,
    Status* manifest_read_status,
    std::unordered_set<ColumnFamilyData*>* cfds_changed) {
  assert(manifest_reader != nullptr);
  assert(cfds_changed != nullptr);
  mu->AssertHeld();

  Status s;
  log::Reader* reader = manifest_reader->get();
  assert(reader);
  s = MaybeSwitchManifest(reader->GetReporter(), manifest_reader);
  if (!s.ok()) {
    return s;
  }
  manifest_tailer_->Iterate(*(manifest_reader->get()), manifest_read_status);
  s = manifest_tailer_->status();
  if (s.ok()) {
    *cfds_changed = std::move(manifest_tailer_->GetUpdatedColumnFamilies());
  }

  return s;
}

// If the CURRENT file now points at a different MANIFEST than the one being
// read (or no reader exists yet), opens the new MANIFEST and replaces
// *manifest_reader. Loops on PathNotFound because the primary may swap
// MANIFEST files concurrently between our CURRENT read and the open.
Status ReactiveVersionSet::MaybeSwitchManifest(
    log::Reader::Reporter* reporter,
    std::unique_ptr<log::FragmentBufferedReader>* manifest_reader) {
  assert(manifest_reader != nullptr);
  Status s;
  do {
    std::string manifest_path;
    s = GetCurrentManifestPath(dbname_, fs_.get(), &manifest_path,
                               &manifest_file_number_);
    std::unique_ptr<FSSequentialFile> manifest_file;
    if (s.ok()) {
      if (nullptr == manifest_reader->get() ||
          manifest_reader->get()->file()->file_name() != manifest_path) {
        TEST_SYNC_POINT(
            "ReactiveVersionSet::MaybeSwitchManifest:"
            "AfterGetCurrentManifestPath:0");
        TEST_SYNC_POINT(
            "ReactiveVersionSet::MaybeSwitchManifest:"
            "AfterGetCurrentManifestPath:1");
        s = fs_->NewSequentialFile(manifest_path,
                                   fs_->OptimizeForManifestRead(file_options_),
                                   &manifest_file, nullptr);
      } else {
        // No need to switch manifest.
        break;
      }
    }
    std::unique_ptr<SequentialFileReader> manifest_file_reader;
    if (s.ok()) {
      manifest_file_reader.reset(new SequentialFileReader(
          std::move(manifest_file), manifest_path,
          db_options_->log_readahead_size, io_tracer_));
      manifest_reader->reset(new log::FragmentBufferedReader(
          nullptr, std::move(manifest_file_reader), reporter,
          true /* checksum */, 0 /* log_number */));
      ROCKS_LOG_INFO(db_options_->info_log, "Switched to new manifest: %s\n",
                     manifest_path.c_str());
      if (manifest_tailer_) {
        manifest_tailer_->PrepareToReadNewManifest();
      }
    }
  } while (s.IsPathNotFound());
  return s;
}

#ifndef NDEBUG
// Test-only accessor: exposes how many edits of the pending atomic group the
// manifest tailer's read buffer has accumulated so far.
uint64_t ReactiveVersionSet::TEST_read_edits_in_atomic_group() const {
  assert(manifest_tailer_);
  return manifest_tailer_->GetReadBuffer().TEST_read_edits_in_atomic_group();
}
#endif  // !NDEBUG

// Returns a mutable reference to the tailer's buffered VersionEdits that are
// awaiting replay.
std::vector<VersionEdit>& ReactiveVersionSet::replay_buffer() {
  assert(manifest_tailer_);
  auto& read_buffer = manifest_tailer_->GetReadBuffer();
  return read_buffer.replay_buffer();
}

}  // namespace ROCKSDB_NAMESPACE