//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/block_based_table_reader.h"

#include <algorithm>
#include <array>
#include <limits>
#include <string>
#include <utility>
#include <vector>

#include "cache/sharded_cache.h"

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"
#include "file/file_prefetch_buffer.h"
#include "file/file_util.h"
#include "file/random_access_file_reader.h"
#include "monitoring/perf_context_imp.h"
#include "options/options_helper.h"
#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/table_properties.h"
#include "table/block_based/binary_search_index_reader.h"
#include "table/block_based/block.h"
#include "table/block_based/block_based_filter_block.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_based_table_iterator.h"
#include "table/block_based/block_prefix_index.h"
#include "table/block_based/filter_block.h"
#include "table/block_based/full_filter_block.h"
#include "table/block_based/hash_index_reader.h"
#include "table/block_based/partitioned_filter_block.h"
#include "table/block_based/partitioned_index_reader.h"
#include "table/block_fetcher.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/multiget_context.h"
#include "table/persistent_cache_helper.h"
#include "table/sst_file_writer_collectors.h"
#include "table/two_level_iterator.h"

#include "test_util/testharness.h"
#include "port/lang.h"
#include "test_util/sync_point.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace ROCKSDB_NAMESPACE {

extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

typedef BlockBasedTable::IndexReader IndexReader;

// Based on experiments, a 256 KB readahead size provides the best performance
// for auto readahead. Experiment data is in PR #3282.
const size_t BlockBasedTable::kMaxAutoReadaheadSize = 256 * 1024;

BlockBasedTable::~BlockBasedTable() {
  delete rep_;
}

std::atomic<uint64_t> BlockBasedTable::next_cache_key_id_(0);

template <typename TBlocklike>
class BlocklikeTraits;

template <>
class BlocklikeTraits<BlockContents> {
 public:
  static BlockContents* Create(BlockContents&& contents,
                               size_t /* read_amp_bytes_per_bit */,
                               Statistics* /* statistics */,
                               bool /* using_zstd */,
                               const FilterPolicy* /* filter_policy */) {
    return new BlockContents(std::move(contents));
  }

  static uint32_t GetNumRestarts(const BlockContents& /* contents */) {
    return 0;
  }
};

template <>
class BlocklikeTraits<ParsedFullFilterBlock> {
 public:
  static ParsedFullFilterBlock* Create(BlockContents&& contents,
                                       size_t /* read_amp_bytes_per_bit */,
                                       Statistics* /* statistics */,
                                       bool /* using_zstd */,
                                       const FilterPolicy* filter_policy) {
    return new ParsedFullFilterBlock(filter_policy, std::move(contents));
  }

  static uint32_t GetNumRestarts(const ParsedFullFilterBlock& /* block */) {
    return 0;
  }
};

template <>
class BlocklikeTraits<Block> {
 public:
  static Block* Create(BlockContents&& contents, size_t read_amp_bytes_per_bit,
                       Statistics* statistics, bool /* using_zstd */,
                       const FilterPolicy* /* filter_policy */) {
    return new Block(std::move(contents), read_amp_bytes_per_bit, statistics);
  }

  static uint32_t GetNumRestarts(const Block& block) {
    return block.NumRestarts();
  }
};

template <>
class BlocklikeTraits<UncompressionDict> {
 public:
  static UncompressionDict* Create(BlockContents&& contents,
                                   size_t /* read_amp_bytes_per_bit */,
                                   Statistics* /* statistics */,
                                   bool using_zstd,
                                   const FilterPolicy* /* filter_policy */) {
    return new UncompressionDict(contents.data, std::move(contents.allocation),
                                 using_zstd);
  }

  static uint32_t GetNumRestarts(const UncompressionDict& /* dict */) {
    return 0;
  }
};
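
// Illustrative note: BlocklikeTraits gives the templated read/cache helpers
// below a uniform, non-virtual way to construct the right in-memory type from
// raw BlockContents. A sketch of a caller-side instantiation (hypothetical
// variables, for orientation only):
//
//   std::unique_ptr<Block> block(BlocklikeTraits<Block>::Create(
//       std::move(contents), /* read_amp_bytes_per_bit */ 0, statistics,
//       /* using_zstd */ false, /* filter_policy */ nullptr));
//
// Only the Block specialization uses read_amp_bytes_per_bit and statistics;
// the other specializations ignore the parameters they mark unused.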

namespace {
// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result.
// @param uncompression_dict Data for presetting the compression library's
//    dictionary.
template <typename TBlocklike>
Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<TBlocklike>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed, BlockType block_type,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options, size_t read_amp_bytes_per_bit,
    MemoryAllocator* memory_allocator, bool for_compaction, bool using_zstd,
    const FilterPolicy* filter_policy) {
  assert(result);

  BlockContents contents;
  BlockFetcher block_fetcher(
      file, prefetch_buffer, footer, options, handle, &contents, ioptions,
      do_uncompress, maybe_compressed, block_type, uncompression_dict,
      cache_options, memory_allocator, nullptr, for_compaction);
  Status s = block_fetcher.ReadBlockContents();
  if (s.ok()) {
    result->reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(contents), read_amp_bytes_per_bit, ioptions.statistics,
        using_zstd, filter_policy));
  }

  return s;
}
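
// A sketch of a typical instantiation (hypothetical variables, mirroring the
// call in ReadMetaIndexBlock() below): reading an uncompressed block with no
// read-amp tracking looks like
//
//   std::unique_ptr<Block> block;
//   Status s = ReadBlockFromFile(
//       file, prefetch_buffer, footer, ReadOptions(), handle, &block,
//       ioptions, true /* do_uncompress */, true /* maybe_compressed */,
//       BlockType::kMetaIndex, UncompressionDict::GetEmptyDict(),
//       cache_options, 0 /* read_amp_bytes_per_bit */, memory_allocator,
//       false /* for_compaction */, false /* using_zstd */,
//       nullptr /* filter_policy */);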

// Delete the entry residing in the cache.
template <class Entry>
void DeleteCachedEntry(const Slice& /*key*/, void* value) {
  auto entry = reinterpret_cast<Entry*>(value);
  delete entry;
}

// Release the cached entry and decrement its ref count.
// Do not force erase.
void ReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, false /* force_erase */);
}

// For a hash-based index, return true if prefix_extractor and
// prefix_extractor_block mismatch, false otherwise. This flag will be used
// as total_order_seek via NewIndexIterator.
bool PrefixExtractorChanged(const TableProperties* table_properties,
                            const SliceTransform* prefix_extractor) {
  // BlockBasedTableOptions::kHashSearch requires prefix_extractor to be set.
  // Turn off hash index if prefix_extractor is not set; if prefix_extractor
  // is set but prefix_extractor_block is not set, also disable hash index.
  if (prefix_extractor == nullptr || table_properties == nullptr ||
      table_properties->prefix_extractor_name.empty()) {
    return true;
  }

  // prefix_extractor and prefix_extractor_block are both non-empty
  if (table_properties->prefix_extractor_name.compare(
          prefix_extractor->Name()) != 0) {
    return true;
  } else {
    return false;
  }
}
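
// Example of the intended use (a sketch based on the comment above): if the
// table was built with extractor "rocksdb.FixedPrefix.8" but the current
// column family uses a different transform (or none), this returns true and
// NewIndexIterator() falls back to total-order seek instead of the hash index.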

CacheAllocationPtr CopyBufferToHeap(MemoryAllocator* allocator, Slice& buf) {
  CacheAllocationPtr heap_buf;
  heap_buf = AllocateBlock(buf.size(), allocator);
  memcpy(heap_buf.get(), buf.data(), buf.size());
  return heap_buf;
}
}  // namespace

void BlockBasedTable::UpdateCacheHitMetrics(BlockType block_type,
                                            GetContext* get_context,
                                            size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  PERF_COUNTER_ADD(block_cache_hit_count, 1);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_hit;
    get_context->get_context_stats_.num_cache_bytes_read += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_HIT);
    RecordTick(statistics, BLOCK_CACHE_BYTES_READ, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_HIT);
      }
      break;

    case BlockType::kCompressionDictionary:
      // TODO: introduce perf counter for compression dictionary hit count
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_HIT);
      }
      break;

    case BlockType::kIndex:
      PERF_COUNTER_ADD(block_cache_index_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_HIT);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_HIT);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheMissMetrics(BlockType block_type,
                                             GetContext* get_context) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce aggregate (not per-level) block cache miss count
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_miss;
  } else {
    RecordTick(statistics, BLOCK_CACHE_MISS);
  }

  // TODO: introduce perf counters for misses per block type
  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_MISS);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_MISS);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_MISS);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_MISS);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheInsertionMetrics(BlockType block_type,
                                                  GetContext* get_context,
                                                  size_t usage,
                                                  bool redundant) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce perf counters for block cache insertions
  if (get_context) {
    ++get_context->get_context_stats_.num_cache_add;
    if (redundant) {
      ++get_context->get_context_stats_.num_cache_add_redundant;
    }
    get_context->get_context_stats_.num_cache_bytes_write += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_ADD);
    if (redundant) {
      RecordTick(statistics, BLOCK_CACHE_ADD_REDUNDANT);
    }
    RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_add;
        if (redundant) {
          ++get_context->get_context_stats_.num_cache_filter_add_redundant;
        }
        get_context->get_context_stats_.num_cache_filter_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_FILTER_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_add;
        if (redundant) {
          ++get_context->get_context_stats_
                .num_cache_compression_dict_add_redundant;
        }
        get_context->get_context_stats_
            .num_cache_compression_dict_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
                   usage);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_add;
        if (redundant) {
          ++get_context->get_context_stats_.num_cache_index_add_redundant;
        }
        get_context->get_context_stats_.num_cache_index_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_INDEX_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usage);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_add;
        if (redundant) {
          ++get_context->get_context_stats_.num_cache_data_add_redundant;
        }
        get_context->get_context_stats_.num_cache_data_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_DATA_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, usage);
      }
      break;
  }
}

Cache::Handle* BlockBasedTable::GetEntryFromCache(
    Cache* block_cache, const Slice& key, BlockType block_type,
    GetContext* get_context) const {
  auto cache_handle = block_cache->Lookup(key, rep_->ioptions.statistics);

  if (cache_handle != nullptr) {
    UpdateCacheHitMetrics(block_type, get_context,
                          block_cache->GetUsage(cache_handle));
  } else {
    UpdateCacheMissMetrics(block_type, get_context);
  }

  return cache_handle;
}

// Helper function to set up the cache key's prefix for the Table.
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  rep->compressed_cache_key_prefix_size = 0;
  if (rep->table_options.block_cache != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file->file(),
                        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
  }
  if (rep->table_options.persistent_cache != nullptr) {
    GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(),
                        &rep->persistent_cache_key_prefix[0],
                        &rep->persistent_cache_key_prefix_size);
  }
  if (rep->table_options.block_cache_compressed != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
                        rep->file->file(), &rep->compressed_cache_key_prefix[0],
                        &rep->compressed_cache_key_prefix_size);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, FSRandomAccessFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (cc != nullptr && *size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, FSWritableFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (cc != nullptr && *size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}
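
// Note on the fallback above: GetUniqueId() leaves *size == 0 when the file
// system cannot produce a stable id, in which case the prefix is a
// varint64-encoded number from Cache::NewId(). Either way, the prefix is what
// keeps block cache keys from different files distinct.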

namespace {
// Return true if table_properties has `user_prop_name` with a `true` value,
// or if it doesn't contain this property (for backward compatibility).
bool IsFeatureSupported(const TableProperties& table_properties,
                        const std::string& user_prop_name, Logger* info_log) {
  auto& props = table_properties.user_collected_properties;
  auto pos = props.find(user_prop_name);
  // Older version doesn't have this value set. Skip this check.
  if (pos != props.end()) {
    if (pos->second == kPropFalse) {
      return false;
    } else if (pos->second != kPropTrue) {
      ROCKS_LOG_WARN(info_log, "Property %s has invalid value %s",
                     user_prop_name.c_str(), pos->second.c_str());
    }
  }
  return true;
}

// Caller has to ensure seqno is not nullptr.
Status GetGlobalSequenceNumber(const TableProperties& table_properties,
                               SequenceNumber largest_seqno,
                               SequenceNumber* seqno) {
  const auto& props = table_properties.user_collected_properties;
  const auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
  const auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);

  *seqno = kDisableGlobalSequenceNumber;
  if (version_pos == props.end()) {
    if (seqno_pos != props.end()) {
      std::array<char, 200> msg_buf;
      // This is not an external sst file, global_seqno is not supported.
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "A non-external sst file has a global seqno property with value %s",
          seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  uint32_t version = DecodeFixed32(version_pos->second.c_str());
  if (version < 2) {
    if (seqno_pos != props.end() || version != 1) {
      std::array<char, 200> msg_buf;
      // This is a v1 external sst file, global_seqno is not supported.
      snprintf(msg_buf.data(), msg_buf.max_size(),
               "An external sst file with version %u has a global seqno "
               "property with value %s",
               version, seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  // Since we have a plan to deprecate global_seqno, we do not return failure
  // if seqno_pos == props.end(). We rely on version_pos to detect whether the
  // SST is external.
  SequenceNumber global_seqno(0);
  if (seqno_pos != props.end()) {
    global_seqno = DecodeFixed64(seqno_pos->second.c_str());
  }
  // SstTableReader opens the table reader with kMaxSequenceNumber as
  // largest_seqno to denote that it is unknown.
  if (largest_seqno < kMaxSequenceNumber) {
    if (global_seqno == 0) {
      global_seqno = largest_seqno;
    }
    if (global_seqno != largest_seqno) {
      std::array<char, 200> msg_buf;
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "An external sst file with version %u has a global seqno property "
          "with value %s, while the largest seqno in the file is %llu",
          version, seqno_pos->second.c_str(),
          static_cast<unsigned long long>(largest_seqno));
      return Status::Corruption(msg_buf.data());
    }
  }
  *seqno = global_seqno;

  if (global_seqno > kMaxSequenceNumber) {
    std::array<char, 200> msg_buf;
    snprintf(msg_buf.data(), msg_buf.max_size(),
             "An external sst file with version %u has a global seqno "
             "property with value %llu, which is greater than "
             "kMaxSequenceNumber",
             version, static_cast<unsigned long long>(global_seqno));
    return Status::Corruption(msg_buf.data());
  }

  return Status::OK();
}
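
// In short: no version property means the file is not external, so any
// global_seqno property is corruption; version 1 does not support
// global_seqno; version >= 2 uses the global_seqno property, cross-checked
// against largest_seqno when the latter is known.
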
}  // namespace

Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
                                   size_t cache_key_prefix_size,
                                   const BlockHandle& handle, char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}
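
// Resulting key layout (derived from the code above):
//
//   [file-unique prefix, up to kMaxCacheKeyPrefixSize bytes][varint64 offset]
//
// Blocks of the same file share the prefix and differ only in the encoded
// block offset, so each key identifies exactly one block of one file.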

Status BlockBasedTable::Open(
    const ImmutableCFOptions& ioptions, const EnvOptions& env_options,
    const BlockBasedTableOptions& table_options,
    const InternalKeyComparator& internal_comparator,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    std::unique_ptr<TableReader>* table_reader,
    const SliceTransform* prefix_extractor,
    const bool prefetch_index_and_filter_in_cache, const bool skip_filters,
    const int level, const bool immortal_table,
    const SequenceNumber largest_seqno, const bool force_direct_prefetch,
    TailPrefetchStats* tail_prefetch_stats,
    BlockCacheTracer* const block_cache_tracer) {
  table_reader->reset();

  Status s;
  Footer footer;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;

  // prefetch both index and filters, down to all partitions
  const bool prefetch_all = prefetch_index_and_filter_in_cache || level == 0;
  const bool preload_all = !table_options.cache_index_and_filter_blocks;

  if (!ioptions.allow_mmap_reads) {
    s = PrefetchTail(file.get(), file_size, force_direct_prefetch,
                     tail_prefetch_stats, prefetch_all, preload_all,
                     &prefetch_buffer);
  } else {
    // Should not prefetch for mmap mode.
    prefetch_buffer.reset(new FilePrefetchBuffer(
        nullptr, 0, 0, false /* enable */, true /* track_min_offset */));
  }

  // Read in the following order:
  //    1. Footer
  //    2. [metaindex block]
  //    3. [meta block: properties]
  //    4. [meta block: range deletion tombstone]
  //    5. [meta block: compression dictionary]
  //    6. [meta block: index]
  //    7. [meta block: filter]
  s = ReadFooterFromFile(file.get(), prefetch_buffer.get(), file_size, &footer,
                         kBlockBasedTableMagicNumber);
  if (!s.ok()) {
    return s;
  }
  if (!BlockBasedTableSupportedVersion(footer.version())) {
    return Status::Corruption(
        "Unknown Footer version. Maybe this file was created with newer "
        "version of RocksDB?");
  }

  // We've successfully read the footer. We are ready to serve requests.
  // Better not to mutate rep_ after creation, e.g. internal_prefix_transform's
  // raw pointer is used to create HashIndexReader, whose reset may access a
  // dangling pointer.
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
                                      internal_comparator, skip_filters, level,
                                      immortal_table);
  rep->file = std::move(file);
  rep->footer = footer;
  rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
  // We need to wrap data with internal_prefix_transform to make sure it can
  // handle prefix correctly.
  if (prefix_extractor != nullptr) {
    rep->internal_prefix_transform.reset(
        new InternalKeySliceTransform(prefix_extractor));
  }
  SetupCacheKeyPrefix(rep);
  std::unique_ptr<BlockBasedTable> new_table(
      new BlockBasedTable(rep, block_cache_tracer));

  // page cache options
  rep->persistent_cache_options =
      PersistentCacheOptions(rep->table_options.persistent_cache,
                             std::string(rep->persistent_cache_key_prefix,
                                         rep->persistent_cache_key_prefix_size),
                             rep->ioptions.statistics);

  // Meta-blocks are not dictionary compressed. Explicitly set the dictionary
  // handle to null, otherwise it may be seen as uninitialized during the below
  // meta-block reads.
  rep->compression_dict_handle = BlockHandle::NullBlockHandle();

  // Read metaindex
  std::unique_ptr<Block> metaindex;
  std::unique_ptr<InternalIterator> metaindex_iter;
  s = new_table->ReadMetaIndexBlock(prefetch_buffer.get(), &metaindex,
                                    &metaindex_iter);
  if (!s.ok()) {
    return s;
  }

  // Populates table_properties and some fields that depend on it,
  // such as index_type.
  s = new_table->ReadPropertiesBlock(prefetch_buffer.get(),
                                     metaindex_iter.get(), largest_seqno);
  if (!s.ok()) {
    return s;
  }
  s = new_table->ReadRangeDelBlock(prefetch_buffer.get(), metaindex_iter.get(),
                                   internal_comparator, &lookup_context);
  if (!s.ok()) {
    return s;
  }
  s = new_table->PrefetchIndexAndFilterBlocks(
      prefetch_buffer.get(), metaindex_iter.get(), new_table.get(),
      prefetch_all, table_options, level, &lookup_context);

  if (s.ok()) {
    // Update tail prefetch stats
    assert(prefetch_buffer.get() != nullptr);
    if (tail_prefetch_stats != nullptr) {
      assert(prefetch_buffer->min_offset_read() < file_size);
      tail_prefetch_stats->RecordEffectiveSize(
          static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
    }

    *table_reader = std::move(new_table);
  }

  return s;
}

Status BlockBasedTable::PrefetchTail(
    RandomAccessFileReader* file, uint64_t file_size,
    bool force_direct_prefetch, TailPrefetchStats* tail_prefetch_stats,
    const bool prefetch_all, const bool preload_all,
    std::unique_ptr<FilePrefetchBuffer>* prefetch_buffer) {
  size_t tail_prefetch_size = 0;
  if (tail_prefetch_stats != nullptr) {
    // Multiple threads may get a 0 (no history) when running in parallel,
    // but it will get cleared after the first of them finishes.
    tail_prefetch_size = tail_prefetch_stats->GetSuggestedPrefetchSize();
  }
  if (tail_prefetch_size == 0) {
    // Before reading the footer, readahead backwards to prefetch data. Do more
    // readahead if we're going to read index/filter.
    // TODO: This may incorrectly select small readahead in case partitioned
    // index/filter is enabled and top-level partition pinning is enabled.
    // That's because we need to issue readahead before we read the properties,
    // at which point we don't yet know the index type.
    tail_prefetch_size = prefetch_all || preload_all ? 512 * 1024 : 4 * 1024;
  }
  size_t prefetch_off;
  size_t prefetch_len;
  if (file_size < tail_prefetch_size) {
    prefetch_off = 0;
    prefetch_len = static_cast<size_t>(file_size);
  } else {
    prefetch_off = static_cast<size_t>(file_size - tail_prefetch_size);
    prefetch_len = tail_prefetch_size;
  }
  TEST_SYNC_POINT_CALLBACK("BlockBasedTable::Open::TailPrefetchLen",
                           &tail_prefetch_size);
  Status s;
  // TODO should not have this special logic in the future.
  if (!file->use_direct_io() && !force_direct_prefetch) {
    prefetch_buffer->reset(new FilePrefetchBuffer(
        nullptr, 0, 0, false /* enable */, true /* track_min_offset */));
    s = file->Prefetch(prefetch_off, prefetch_len);
  } else {
    prefetch_buffer->reset(new FilePrefetchBuffer(
        nullptr, 0, 0, true /* enable */, true /* track_min_offset */));
    s = (*prefetch_buffer)->Prefetch(file, prefetch_off, prefetch_len);
  }

  return s;
}
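
// Worked example with the defaults above: with no history and no request to
// read index/filter up front, only 4 KB of the tail is prefetched; when
// prefetch_all or preload_all holds, 512 KB is used instead. Later opens
// refine this via TailPrefetchStats::RecordEffectiveSize() in Open().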

Status BlockBasedTable::TryReadPropertiesWithGlobalSeqno(
    FilePrefetchBuffer* prefetch_buffer, const Slice& handle_value,
    TableProperties** table_properties) {
  assert(table_properties != nullptr);
  // If this is an external SST file ingested with write_global_seqno set to
  // true, then we expect the checksum mismatch because checksum was written
  // by SstFileWriter, but its global seqno in the properties block may have
  // been changed during ingestion. In this case, we read the properties
  // block, copy it to a memory buffer, change the global seqno to its
  // original value, i.e. 0, and verify the checksum again.
  BlockHandle props_block_handle;
  CacheAllocationPtr tmp_buf;
  Status s = ReadProperties(handle_value, rep_->file.get(), prefetch_buffer,
                            rep_->footer, rep_->ioptions, table_properties,
                            false /* verify_checksum */, &props_block_handle,
                            &tmp_buf, false /* compression_type_missing */,
                            nullptr /* memory_allocator */);
  if (s.ok() && tmp_buf) {
    const auto seqno_pos_iter =
        (*table_properties)
            ->properties_offsets.find(
                ExternalSstFilePropertyNames::kGlobalSeqno);
    size_t block_size = static_cast<size_t>(props_block_handle.size());
    if (seqno_pos_iter != (*table_properties)->properties_offsets.end()) {
      uint64_t global_seqno_offset = seqno_pos_iter->second;
      EncodeFixed64(
          tmp_buf.get() + global_seqno_offset - props_block_handle.offset(), 0);
    }
    uint32_t value = DecodeFixed32(tmp_buf.get() + block_size + 1);
    s = ROCKSDB_NAMESPACE::VerifyChecksum(rep_->footer.checksum(),
                                          tmp_buf.get(), block_size + 1, value);
  }
  return s;
}

Status BlockBasedTable::ReadPropertiesBlock(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    const SequenceNumber largest_seqno) {
  bool found_properties_block = true;
  Status s;
  s = SeekToPropertiesBlock(meta_iter, &found_properties_block);

  if (!s.ok()) {
    ROCKS_LOG_WARN(rep_->ioptions.info_log,
                   "Error when seeking to properties block from file: %s",
                   s.ToString().c_str());
  } else if (found_properties_block) {
    s = meta_iter->status();
    TableProperties* table_properties = nullptr;
    if (s.ok()) {
      s = ReadProperties(
          meta_iter->value(), rep_->file.get(), prefetch_buffer, rep_->footer,
          rep_->ioptions, &table_properties, true /* verify_checksum */,
          nullptr /* ret_block_handle */, nullptr /* ret_block_contents */,
          false /* compression_type_missing */, nullptr /* memory_allocator */);
    }
    IGNORE_STATUS_IF_ERROR(s);

    if (s.IsCorruption()) {
      s = TryReadPropertiesWithGlobalSeqno(prefetch_buffer, meta_iter->value(),
                                           &table_properties);
      IGNORE_STATUS_IF_ERROR(s);
    }
    std::unique_ptr<TableProperties> props_guard;
    if (table_properties != nullptr) {
      props_guard.reset(table_properties);
    }

    if (!s.ok()) {
      ROCKS_LOG_WARN(rep_->ioptions.info_log,
                     "Encountered error while reading data from properties "
                     "block %s",
                     s.ToString().c_str());
    } else {
      assert(table_properties != nullptr);
      rep_->table_properties.reset(props_guard.release());
      rep_->blocks_maybe_compressed =
          rep_->table_properties->compression_name !=
          CompressionTypeToString(kNoCompression);
      rep_->blocks_definitely_zstd_compressed =
          (rep_->table_properties->compression_name ==
               CompressionTypeToString(kZSTD) ||
           rep_->table_properties->compression_name ==
               CompressionTypeToString(kZSTDNotFinalCompression));
    }
  } else {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Cannot find Properties block from file.");
  }
#ifndef ROCKSDB_LITE
  if (rep_->table_properties) {
    ParseSliceTransform(rep_->table_properties->prefix_extractor_name,
                        &(rep_->table_prefix_extractor));
  }
#endif  // ROCKSDB_LITE

  // Read the table properties, if provided.
  if (rep_->table_properties) {
    rep_->whole_key_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kWholeKeyFiltering,
                           rep_->ioptions.info_log);
    rep_->prefix_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kPrefixFiltering,
                           rep_->ioptions.info_log);

    rep_->index_key_includes_seq =
        rep_->table_properties->index_key_is_user_key == 0;
    rep_->index_value_is_full =
        rep_->table_properties->index_value_is_delta_encoded == 0;

    // Update index_type with the true type.
    // If table properties don't contain index type, we assume that the table
    // is in very old format and has kBinarySearch index type.
    auto& props = rep_->table_properties->user_collected_properties;
    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
    if (pos != props.end()) {
      rep_->index_type = static_cast<BlockBasedTableOptions::IndexType>(
          DecodeFixed32(pos->second.c_str()));
    }

    rep_->index_has_first_key =
        rep_->index_type == BlockBasedTableOptions::kBinarySearchWithFirstKey;

    s = GetGlobalSequenceNumber(*(rep_->table_properties), largest_seqno,
                                &(rep_->global_seqno));
    if (!s.ok()) {
      ROCKS_LOG_ERROR(rep_->ioptions.info_log, "%s", s.ToString().c_str());
    }
  }
  return s;
}

Status BlockBasedTable::ReadRangeDelBlock(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    const InternalKeyComparator& internal_comparator,
    BlockCacheLookupContext* lookup_context) {
  Status s;
  bool found_range_del_block;
  BlockHandle range_del_handle;
  s = SeekToRangeDelBlock(meta_iter, &found_range_del_block, &range_del_handle);
  if (!s.ok()) {
    ROCKS_LOG_WARN(
        rep_->ioptions.info_log,
        "Error when seeking to range delete tombstones block from file: %s",
        s.ToString().c_str());
  } else if (found_range_del_block && !range_del_handle.IsNull()) {
    ReadOptions read_options;
    std::unique_ptr<InternalIterator> iter(NewDataBlockIterator<DataBlockIter>(
        read_options, range_del_handle,
        /*input_iter=*/nullptr, BlockType::kRangeDeletion,
        /*get_context=*/nullptr, lookup_context, Status(), prefetch_buffer));
    assert(iter != nullptr);
    s = iter->status();
    if (!s.ok()) {
      ROCKS_LOG_WARN(
          rep_->ioptions.info_log,
          "Encountered error while reading data from range del block %s",
          s.ToString().c_str());
      IGNORE_STATUS_IF_ERROR(s);
    } else {
      rep_->fragmented_range_dels =
          std::make_shared<FragmentedRangeTombstoneList>(std::move(iter),
                                                         internal_comparator);
    }
  }
  return s;
}

Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    BlockBasedTable* new_table, bool prefetch_all,
    const BlockBasedTableOptions& table_options, const int level,
    BlockCacheLookupContext* lookup_context) {
  Status s;

  // Find filter handle and filter type
  if (rep_->filter_policy) {
    for (auto filter_type :
         {Rep::FilterType::kFullFilter, Rep::FilterType::kPartitionedFilter,
          Rep::FilterType::kBlockFilter}) {
      std::string prefix;
      switch (filter_type) {
        case Rep::FilterType::kFullFilter:
          prefix = kFullFilterBlockPrefix;
          break;
        case Rep::FilterType::kPartitionedFilter:
          prefix = kPartitionedFilterBlockPrefix;
          break;
        case Rep::FilterType::kBlockFilter:
          prefix = kFilterBlockPrefix;
          break;
        default:
          assert(0);
      }
      std::string filter_block_key = prefix;
      filter_block_key.append(rep_->filter_policy->Name());
      if (FindMetaBlock(meta_iter, filter_block_key, &rep_->filter_handle)
              .ok()) {
        rep_->filter_type = filter_type;
        break;
      }
    }
  }

  // Find compression dictionary handle
  bool found_compression_dict = false;
  s = SeekToCompressionDictBlock(meta_iter, &found_compression_dict,
                                 &rep_->compression_dict_handle);
  if (!s.ok()) {
    return s;
  }

  BlockBasedTableOptions::IndexType index_type = rep_->index_type;

  const bool use_cache = table_options.cache_index_and_filter_blocks;

  // pin both index and filters, down to all partitions
  const bool pin_all =
      rep_->table_options.pin_l0_filter_and_index_blocks_in_cache && level == 0;

  // prefetch the first level of index
  const bool prefetch_index =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
  // pin the first level of index
  const bool pin_index =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);

  std::unique_ptr<IndexReader> index_reader;
  s = new_table->CreateIndexReader(prefetch_buffer, meta_iter, use_cache,
                                   prefetch_index, pin_index, lookup_context,
                                   &index_reader);
  if (!s.ok()) {
    return s;
  }

  rep_->index_reader = std::move(index_reader);

  // The partitions of a partitioned index are always stored in the cache.
  // They hence follow the configuration for pin and prefetch regardless of
  // the value of cache_index_and_filter_blocks.
  if (prefetch_all) {
    rep_->index_reader->CacheDependencies(pin_all);
  }

  // prefetch the first level of filter
  const bool prefetch_filter =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       rep_->filter_type == Rep::FilterType::kPartitionedFilter);
  // Partition filters cannot be enabled without partitioned indexes
  assert(!prefetch_filter || prefetch_index);
  // pin the first level of filter
  const bool pin_filter =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  rep_->filter_type == Rep::FilterType::kPartitionedFilter);

  if (rep_->filter_policy) {
    auto filter = new_table->CreateFilterBlockReader(
        prefetch_buffer, use_cache, prefetch_filter, pin_filter,
        lookup_context);
    if (filter) {
      // Refer to the comment above about partitioned indexes always being
      // cached
      if (prefetch_all) {
        filter->CacheDependencies(pin_all);
      }

      rep_->filter = std::move(filter);
    }
  }

  if (!rep_->compression_dict_handle.IsNull()) {
    std::unique_ptr<UncompressionDictReader> uncompression_dict_reader;
    s = UncompressionDictReader::Create(this, prefetch_buffer, use_cache,
                                        prefetch_all, pin_all, lookup_context,
                                        &uncompression_dict_reader);
    if (!s.ok()) {
      return s;
    }

    rep_->uncompression_dict_reader = std::move(uncompression_dict_reader);
  }

  assert(s.ok());
  return s;
}
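
// Pin/prefetch rules in brief (a restatement of the logic above, not new
// policy): prefetch_all prefetches index and filter down to all partitions;
// otherwise only the top level of a two-level index or partitioned filter is
// prefetched, and only when pin_top_level_index_and_filter is set. Pinning
// follows the same shape, driven by pin_l0_filter_and_index_blocks_in_cache
// at level 0.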

void BlockBasedTable::SetupForCompaction() {
  switch (rep_->ioptions.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->file()->Hint(FSRandomAccessFile::kNormal);
      break;
    case Options::SEQUENTIAL:
      rep_->file->file()->Hint(FSRandomAccessFile::kSequential);
      break;
    case Options::WILLNEED:
      rep_->file->file()->Hint(FSRandomAccessFile::kWillNeed);
      break;
    default:
      assert(false);
  }
}

std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
    const {
  return rep_->table_properties;
}

size_t BlockBasedTable::ApproximateMemoryUsage() const {
  size_t usage = 0;
  if (rep_->filter) {
    usage += rep_->filter->ApproximateMemoryUsage();
  }
  if (rep_->index_reader) {
    usage += rep_->index_reader->ApproximateMemoryUsage();
  }
  if (rep_->uncompression_dict_reader) {
    usage += rep_->uncompression_dict_reader->ApproximateMemoryUsage();
  }
  return usage;
}

// Load the metaindex block from the file. On success, return the loaded
// metaindex block and its iterator.
Status BlockBasedTable::ReadMetaIndexBlock(
    FilePrefetchBuffer* prefetch_buffer,
    std::unique_ptr<Block>* metaindex_block,
    std::unique_ptr<InternalIterator>* iter) {
  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
  // it is an empty block.
  std::unique_ptr<Block> metaindex;
  Status s = ReadBlockFromFile(
      rep_->file.get(), prefetch_buffer, rep_->footer, ReadOptions(),
      rep_->footer.metaindex_handle(), &metaindex, rep_->ioptions,
      true /* decompress */, true /*maybe_compressed*/, BlockType::kMetaIndex,
      UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options,
      0 /* read_amp_bytes_per_bit */, GetMemoryAllocator(rep_->table_options),
      false /* for_compaction */, rep_->blocks_definitely_zstd_compressed,
      nullptr /* filter_policy */);

  if (!s.ok()) {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Encountered error while reading data from metaindex"
                    " block %s",
                    s.ToString().c_str());
    return s;
  }

  *metaindex_block = std::move(metaindex);
  // meta block uses bytewise comparator.
  iter->reset(metaindex_block->get()->NewDataIterator(
      BytewiseComparator(), BytewiseComparator(),
      kDisableGlobalSequenceNumber));
  return Status::OK();
}

template <typename TBlocklike>
Status BlockBasedTable::GetDataBlockFromCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    const ReadOptions& read_options, CachableEntry<TBlocklike>* block,
    const UncompressionDict& uncompression_dict, BlockType block_type,
    GetContext* get_context) const {
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  assert(block);
  assert(block->IsEmpty());

  Status s;
  BlockContents* compressed_block = nullptr;
  Cache::Handle* block_cache_compressed_handle = nullptr;

  // Lookup uncompressed cache first
  if (block_cache != nullptr) {
    auto cache_handle = GetEntryFromCache(block_cache, block_cache_key,
                                          block_type, get_context);
    if (cache_handle != nullptr) {
      block->SetCachedValue(
          reinterpret_cast<TBlocklike*>(block_cache->Value(cache_handle)),
          block_cache, cache_handle);
      return s;
    }
  }

  // If not found, search from the compressed block cache.
  assert(block->IsEmpty());

  if (block_cache_compressed == nullptr) {
    return s;
  }

  assert(!compressed_block_cache_key.empty());
  block_cache_compressed_handle =
      block_cache_compressed->Lookup(compressed_block_cache_key);

  Statistics* statistics = rep_->ioptions.statistics;

  // if we found in the compressed cache, then uncompress and insert into
  // uncompressed cache
  if (block_cache_compressed_handle == nullptr) {
    RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
    return s;
  }

  // found compressed block
  RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
  compressed_block = reinterpret_cast<BlockContents*>(
      block_cache_compressed->Value(block_cache_compressed_handle));
  CompressionType compression_type = compressed_block->get_compression_type();
  assert(compression_type != kNoCompression);

  // Retrieve the uncompressed contents into a new buffer
  BlockContents contents;
  UncompressionContext context(compression_type);
  UncompressionInfo info(context, uncompression_dict, compression_type);
  s = UncompressBlockContents(
      info, compressed_block->data.data(), compressed_block->data.size(),
      &contents, rep_->table_options.format_version, rep_->ioptions,
      GetMemoryAllocator(rep_->table_options));

  // Insert uncompressed block into block cache
  if (s.ok()) {
    std::unique_ptr<TBlocklike> block_holder(
        BlocklikeTraits<TBlocklike>::Create(
            std::move(contents), read_amp_bytes_per_bit, statistics,
            rep_->blocks_definitely_zstd_compressed,
            rep_->table_options.filter_policy.get()));  // uncompressed block

    if (block_cache != nullptr && block_holder->own_bytes() &&
        read_options.fill_cache) {
      size_t charge = block_holder->ApproximateMemoryUsage();
      Cache::Handle* cache_handle = nullptr;
      s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                              &DeleteCachedEntry<TBlocklike>, &cache_handle);
      if (s.ok()) {
        assert(cache_handle != nullptr);
        block->SetCachedValue(block_holder.release(), block_cache,
                              cache_handle);

        UpdateCacheInsertionMetrics(block_type, get_context, charge,
                                    s.IsOkOverwritten());
      } else {
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
      }
    } else {
      block->SetOwnedValue(block_holder.release());
    }
  }

  // Release hold on compressed cache entry
  block_cache_compressed->Release(block_cache_compressed_handle);
  return s;
}
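
// Lookup flow in brief: 1) probe the uncompressed block cache and return on a
// hit; 2) otherwise probe the compressed block cache; 3) on a compressed hit,
// uncompress the block and, when read_options.fill_cache allows, promote the
// result into the uncompressed cache before handing it to the caller.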

1236
template <typename TBlocklike>
1237 1238 1239
Status BlockBasedTable::PutDataBlockToCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
1240
    CachableEntry<TBlocklike>* cached_block, BlockContents* raw_block_contents,
1241
    CompressionType raw_block_comp_type,
1242
    const UncompressionDict& uncompression_dict,
1243
    MemoryAllocator* memory_allocator, BlockType block_type,
1244 1245 1246 1247
    GetContext* get_context) const {
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  const uint32_t format_version = rep_->table_options.format_version;
  const size_t read_amp_bytes_per_bit =
1248 1249 1250
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
1251
  const Cache::Priority priority =
      rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
              (block_type == BlockType::kFilter ||
               block_type == BlockType::kCompressionDictionary ||
               block_type == BlockType::kIndex)
          ? Cache::Priority::HIGH
          : Cache::Priority::LOW;

  assert(cached_block);
  assert(cached_block->IsEmpty());

  Status s;
  Statistics* statistics = ioptions.statistics;

  std::unique_ptr<TBlocklike> block_holder;
  if (raw_block_comp_type != kNoCompression) {
    // Retrieve the uncompressed contents into a new buffer
    BlockContents uncompressed_block_contents;
    UncompressionContext context(raw_block_comp_type);
    UncompressionInfo info(context, uncompression_dict, raw_block_comp_type);
    s = UncompressBlockContents(info, raw_block_contents->data.data(),
                                raw_block_contents->data.size(),
                                &uncompressed_block_contents, format_version,
                                ioptions, memory_allocator);
    if (!s.ok()) {
      return s;
    }

    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(uncompressed_block_contents), read_amp_bytes_per_bit,
        statistics, rep_->blocks_definitely_zstd_compressed,
        rep_->table_options.filter_policy.get()));
  } else {
    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(*raw_block_contents), read_amp_bytes_per_bit, statistics,
        rep_->blocks_definitely_zstd_compressed,
        rep_->table_options.filter_policy.get()));
  }

  // Insert compressed block into compressed block cache.
  // Release the hold on the compressed cache entry immediately.
  if (block_cache_compressed != nullptr &&
      raw_block_comp_type != kNoCompression && raw_block_contents != nullptr &&
      raw_block_contents->own_bytes()) {
#ifndef NDEBUG
    assert(raw_block_contents->is_raw_block);
#endif  // NDEBUG

    // We cannot directly put raw_block_contents because this could point to
    // an object in the stack.
    BlockContents* block_cont_for_comp_cache =
        new BlockContents(std::move(*raw_block_contents));
    s = block_cache_compressed->Insert(
        compressed_block_cache_key, block_cont_for_comp_cache,
        block_cont_for_comp_cache->ApproximateMemoryUsage(),
        &DeleteCachedEntry<BlockContents>);
    if (s.ok()) {
      // Keep the code below from deleting this cached block.
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
    } else {
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
      delete block_cont_for_comp_cache;
    }
  }

  // Insert into the uncompressed block cache.
  if (block_cache != nullptr && block_holder->own_bytes()) {
    size_t charge = block_holder->ApproximateMemoryUsage();
    Cache::Handle* cache_handle = nullptr;
    s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                            &DeleteCachedEntry<TBlocklike>, &cache_handle,
                            priority);
    if (s.ok()) {
      assert(cache_handle != nullptr);
      cached_block->SetCachedValue(block_holder.release(), block_cache,
                                   cache_handle);

      UpdateCacheInsertionMetrics(block_type, get_context, charge,
                                  s.IsOkOverwritten());
    } else {
      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
    }
  } else {
    cached_block->SetOwnedValue(block_holder.release());
  }

  return s;
}
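
// Illustrative sketch (not part of this translation unit; assumes only the
// public RocksDB API): the two cache tiers filled above are configured by
// users via BlockBasedTableOptions, e.g.:
//
//   BlockBasedTableOptions bbto;
//   bbto.block_cache = NewLRUCache(512 << 20);             // uncompressed tier
//   bbto.block_cache_compressed = NewLRUCache(128 << 20);  // compressed tier
//   bbto.cache_index_and_filter_blocks_with_high_priority = true;
//   options.table_factory.reset(NewBlockBasedTableFactory(bbto));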

std::unique_ptr<FilterBlockReader> BlockBasedTable::CreateFilterBlockReader(
    FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context) {
  auto& rep = rep_;
  auto filter_type = rep->filter_type;
  if (filter_type == Rep::FilterType::kNoFilter) {
    return std::unique_ptr<FilterBlockReader>();
  }

  assert(rep->filter_policy);

  switch (filter_type) {
    case Rep::FilterType::kPartitionedFilter:
      return PartitionedFilterBlockReader::Create(
          this, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kBlockFilter:
      return BlockBasedFilterBlockReader::Create(
          this, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kFullFilter:
      return FullFilterBlockReader::Create(this, prefetch_buffer, use_cache,
                                           prefetch, pin, lookup_context);

    default:
      // filter_type is either kNoFilter (exited the function at the first if),
      // or it must be covered in this switch block
      assert(false);
      return std::unique_ptr<FilterBlockReader>();
  }
}

// disable_prefix_seek should be set to true when prefix_extractor found in SST
// differs from the one in mutable_cf_options and index type is HashBasedIndex
InternalIteratorBase<IndexValue>* BlockBasedTable::NewIndexIterator(
    const ReadOptions& read_options, bool disable_prefix_seek,
    IndexBlockIter* input_iter, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  assert(rep_ != nullptr);
  assert(rep_->index_reader != nullptr);

  // We don't return pinned data from index blocks, so no need
  // to set `block_contents_pinned`.
  return rep_->index_reader->NewIterator(read_options, disable_prefix_seek,
                                         input_iter, get_context,
                                         lookup_context);
}

template <>
DataBlockIter* BlockBasedTable::InitBlockIterator<DataBlockIter>(
    const Rep* rep, Block* block, BlockType block_type,
    DataBlockIter* input_iter, bool block_contents_pinned) {
  return block->NewDataIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      rep->get_global_seqno(block_type), input_iter, rep->ioptions.statistics,
      block_contents_pinned);
}

template <>
IndexBlockIter* BlockBasedTable::InitBlockIterator<IndexBlockIter>(
    const Rep* rep, Block* block, BlockType block_type,
    IndexBlockIter* input_iter, bool block_contents_pinned) {
  return block->NewIndexIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      rep->get_global_seqno(block_type), input_iter, rep->ioptions.statistics,
      /* total_order_seek */ true, rep->index_has_first_key,
      rep->index_key_includes_seq, rep->index_value_is_full,
      block_contents_pinned);
}

// If contents is nullptr, this function looks up the block caches for the
// data block referenced by handle, and reads the block from disk if necessary.
// If contents is non-null, it skips the cache lookup and disk read, since
// the caller has already read it. In both cases, if ro.fill_cache is true,
// it inserts the block into the block cache.
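//
// Informal read-path sketch for a block (assuming both cache tiers are
// configured): first probe the uncompressed block cache; on a miss, probe the
// compressed block cache and, on a hit there, uncompress and insert into the
// uncompressed cache; otherwise read from the file (possibly through
// prefetch_buffer) and, if ro.fill_cache is true, insert into the caches.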
template <typename TBlocklike>
Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    BlockContents* contents) const {
  assert(block_entry != nullptr);
  const bool no_io = (ro.read_tier == kBlockCacheTier);
  Cache* block_cache = rep_->table_options.block_cache.get();
  // No point in caching compressed blocks if the table never goes away
  Cache* block_cache_compressed =
      rep_->immortal_table ? nullptr
                           : rep_->table_options.block_cache_compressed.get();

  // First, try to get the block from the cache
  //
  // If either block cache is enabled, we'll try to read from it.
  Status s;
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice key /* key to the block cache */;
  Slice ckey /* key to the compressed block cache */;
  bool is_cache_hit = false;
  if (block_cache != nullptr || block_cache_compressed != nullptr) {
    // create key for block cache
    if (block_cache != nullptr) {
      key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                        handle, cache_key);
    }

    if (block_cache_compressed != nullptr) {
      ckey = GetCacheKey(rep_->compressed_cache_key_prefix,
                         rep_->compressed_cache_key_prefix_size, handle,
                         compressed_cache_key);
    }

    if (!contents) {
      s = GetDataBlockFromCache(key, ckey, block_cache, block_cache_compressed,
                                ro, block_entry, uncompression_dict, block_type,
                                get_context);
      if (block_entry->GetValue()) {
        // TODO(haoyu): Differentiate cache hit on uncompressed block cache and
        // compressed block cache.
        is_cache_hit = true;
      }
    }

    // Couldn't find the block in the cache. If I/O is allowed, read from the
    // file.
    if (block_entry->GetValue() == nullptr && !no_io && ro.fill_cache) {
      Statistics* statistics = rep_->ioptions.statistics;
      const bool maybe_compressed =
          block_type != BlockType::kFilter &&
          block_type != BlockType::kCompressionDictionary &&
          rep_->blocks_maybe_compressed;
      const bool do_uncompress = maybe_compressed && !block_cache_compressed;
      CompressionType raw_block_comp_type;
      BlockContents raw_block_contents;
      if (!contents) {
        StopWatch sw(rep_->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
        BlockFetcher block_fetcher(
            rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle,
            &raw_block_contents, rep_->ioptions, do_uncompress,
            maybe_compressed, block_type, uncompression_dict,
            rep_->persistent_cache_options,
            GetMemoryAllocator(rep_->table_options),
            GetMemoryAllocatorForCompressedBlock(rep_->table_options));
        s = block_fetcher.ReadBlockContents();
        raw_block_comp_type = block_fetcher.get_compression_type();
        contents = &raw_block_contents;
      } else {
        raw_block_comp_type = contents->get_compression_type();
      }

      if (s.ok()) {
        // If filling cache is allowed and a cache is configured, try to put
        // the block in the cache.
        s = PutDataBlockToCache(
            key, ckey, block_cache, block_cache_compressed, block_entry,
            contents, raw_block_comp_type, uncompression_dict,
            GetMemoryAllocator(rep_->table_options), block_type, get_context);
      }
    }
  }

  // Fill lookup_context.
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled() &&
      lookup_context) {
    size_t usage = 0;
    uint64_t nkeys = 0;
    if (block_entry->GetValue()) {
      // Approximate the number of keys in the block using restarts.
      nkeys =
          rep_->table_options.block_restart_interval *
          BlocklikeTraits<TBlocklike>::GetNumRestarts(*block_entry->GetValue());
      usage = block_entry->GetValue()->ApproximateMemoryUsage();
    }
    TraceType trace_block_type = TraceType::kTraceMax;
    switch (block_type) {
      case BlockType::kData:
        trace_block_type = TraceType::kBlockTraceDataBlock;
        break;
      case BlockType::kFilter:
        trace_block_type = TraceType::kBlockTraceFilterBlock;
        break;
      case BlockType::kCompressionDictionary:
        trace_block_type = TraceType::kBlockTraceUncompressionDictBlock;
        break;
      case BlockType::kRangeDeletion:
        trace_block_type = TraceType::kBlockTraceRangeDeletionBlock;
        break;
      case BlockType::kIndex:
        trace_block_type = TraceType::kBlockTraceIndexBlock;
        break;
      default:
        // This cannot happen.
        assert(false);
        break;
    }
    bool no_insert = no_io || !ro.fill_cache;
    if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(
            trace_block_type, lookup_context->caller)) {
      // Defer logging the access to Get() and MultiGet() to trace additional
      // information, e.g., referenced_key_exist_in_block.

      // Make a copy of the block key here since it will be logged later.
      lookup_context->FillLookupContext(
          is_cache_hit, no_insert, trace_block_type,
          /*block_size=*/usage, /*block_key=*/key.ToString(), nkeys);
    } else {
      // Avoid making a copy of block_key and cf_name when constructing the
      // access record.
      BlockCacheTraceRecord access_record(
          rep_->ioptions.env->NowMicros(),
          /*block_key=*/"", trace_block_type,
          /*block_size=*/usage, rep_->cf_id_for_tracing(),
          /*cf_name=*/"", rep_->level_for_tracing(),
          rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
          no_insert, lookup_context->get_id,
          lookup_context->get_from_user_specified_snapshot,
          /*referenced_key=*/"");
      block_cache_tracer_->WriteBlockAccess(access_record, key,
                                            rep_->cf_name_for_tracing(),
                                            lookup_context->referenced_key);
    }
  }

  assert(s.ok() || block_entry->GetValue() == nullptr);
  return s;
}

// This function reads multiple data blocks from disk using Env::MultiRead()
// and optionally inserts them into the block cache. It uses the scratch
// buffer provided by the caller, which is contiguous. If scratch is a nullptr
// it allocates a separate buffer for each block. Typically, if the blocks
// need to be uncompressed and there is no compressed block cache, callers
// can allocate a temporary scratch buffer in order to minimize memory
// allocations.
// If options.fill_cache is true, it inserts the blocks into cache. If it's
// false and scratch is non-null and the blocks are uncompressed, it copies
// the buffers to heap. In any case, the CachableEntry<Block> returned will
// own the data bytes.
// If compression is enabled and there is no compressed block cache,
// adjacent blocks are read out in one IO (combined read).
// batch - A MultiGetRange with only those keys with unique data blocks not
//         found in cache
// handles - A vector of block handles. Some of them may be NULL handles
// scratch - An optional contiguous buffer to read compressed blocks into
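//
// Worked example (informal): given two adjacent handles {offset=0, size=4091}
// and {offset=4096, size=4091} (block_size() adds the 5-byte trailer), the
// combined-read path issues a single FSReadRequest {offset=0, len=8192} and
// records per-block offsets 0 and 4096 within the shared buffer.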
void BlockBasedTable::RetrieveMultipleBlocks(
    const ReadOptions& options, const MultiGetRange* batch,
    const autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE>* handles,
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE>* statuses,
    autovector<CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE>* results,
    char* scratch, const UncompressionDict& uncompression_dict) const {
  RandomAccessFileReader* file = rep_->file.get();
  const Footer& footer = rep_->footer;
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  size_t read_amp_bytes_per_bit = rep_->table_options.read_amp_bytes_per_bit;
  MemoryAllocator* memory_allocator = GetMemoryAllocator(rep_->table_options);

  if (ioptions.allow_mmap_reads) {
    size_t idx_in_batch = 0;
    for (auto mget_iter = batch->begin(); mget_iter != batch->end();
         ++mget_iter, ++idx_in_batch) {
      BlockCacheLookupContext lookup_data_block_context(
          TableReaderCaller::kUserMultiGet);
      const BlockHandle& handle = (*handles)[idx_in_batch];
      if (handle.IsNull()) {
        continue;
      }

      (*statuses)[idx_in_batch] =
          RetrieveBlock(nullptr, options, handle, uncompression_dict,
                        &(*results)[idx_in_batch], BlockType::kData,
                        mget_iter->get_context, &lookup_data_block_context,
                        /* for_compaction */ false, /* use_cache */ true);
    }
    return;
  }

  // In direct IO mode, blocks share the direct io buffer.
  // Otherwise, blocks share the scratch buffer, if one was provided.
  const bool use_shared_buffer = file->use_direct_io() || scratch != nullptr;

  autovector<FSReadRequest, MultiGetContext::MAX_BATCH_SIZE> read_reqs;
  size_t buf_offset = 0;
  size_t idx_in_batch = 0;

  uint64_t prev_offset = 0;
  size_t prev_len = 0;
  autovector<size_t, MultiGetContext::MAX_BATCH_SIZE> req_idx_for_block;
  autovector<size_t, MultiGetContext::MAX_BATCH_SIZE> req_offset_for_block;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];
    if (handle.IsNull()) {
      continue;
    }

    size_t prev_end = static_cast<size_t>(prev_offset) + prev_len;

    // If the current block is adjacent to the previous one, compression is
    // enabled, and there is no compressed cache, we combine the two block
    // reads into one.
    // We don't combine block reads here in direct IO mode, because when doing
    // direct IO read, the block requests will be realigned and merged when
    // necessary.
    if (use_shared_buffer && !file->use_direct_io() &&
        prev_end == handle.offset()) {
      req_offset_for_block.emplace_back(prev_len);
      prev_len += block_size(handle);
    } else {
      // No compression, or the current block and the previous one are not
      // adjacent:
      // Step 1, create a new request for the previous blocks
      if (prev_len != 0) {
        FSReadRequest req;
        req.offset = prev_offset;
        req.len = prev_len;
        if (file->use_direct_io()) {
          req.scratch = nullptr;
        } else if (use_shared_buffer) {
          req.scratch = scratch + buf_offset;
          buf_offset += req.len;
        } else {
          req.scratch = new char[req.len];
        }
        read_reqs.emplace_back(req);
      }

      // Step 2, remember the previous block info
      prev_offset = handle.offset();
      prev_len = block_size(handle);
      req_offset_for_block.emplace_back(0);
    }
    req_idx_for_block.emplace_back(read_reqs.size());
  }
  // Handle the last block and process the pending last request
  if (prev_len != 0) {
    FSReadRequest req;
    req.offset = prev_offset;
    req.len = prev_len;
    if (file->use_direct_io()) {
      req.scratch = nullptr;
    } else if (use_shared_buffer) {
      req.scratch = scratch + buf_offset;
    } else {
      req.scratch = new char[req.len];
    }
    read_reqs.emplace_back(req);
  }

  AlignedBuf direct_io_buf;
  {
    IOOptions opts;
    IOStatus s = PrepareIOFromReadOptions(options, file->env(), opts);
    if (s.IsTimedOut()) {
      for (FSReadRequest& req : read_reqs) {
        req.status = s;
      }
    } else {
      file->MultiRead(opts, &read_reqs[0], read_reqs.size(), &direct_io_buf);
    }
  }

  idx_in_batch = 0;
  size_t valid_batch_idx = 0;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];

    if (handle.IsNull()) {
      continue;
    }

    assert(valid_batch_idx < req_idx_for_block.size());
    assert(valid_batch_idx < req_offset_for_block.size());
    assert(req_idx_for_block[valid_batch_idx] < read_reqs.size());
    size_t& req_idx = req_idx_for_block[valid_batch_idx];
    size_t& req_offset = req_offset_for_block[valid_batch_idx];
    valid_batch_idx++;
    FSReadRequest& req = read_reqs[req_idx];
    Status s = req.status;
    if (s.ok()) {
      if ((req.result.size() != req.len) ||
          (req_offset + block_size(handle) > req.result.size())) {
        s = Status::Corruption(
            "truncated block read from " + rep_->file->file_name() +
            " offset " + ToString(handle.offset()) + ", expected " +
            ToString(req.len) + " bytes, got " + ToString(req.result.size()));
      }
    }

    BlockContents raw_block_contents;
    if (!use_shared_buffer) {
      // We allocated a buffer for this block. Give ownership of it to
      // BlockContents so it can free the memory
      assert(req.result.data() == req.scratch);
      assert(req.result.size() == block_size(handle));
      assert(req_offset == 0);
      std::unique_ptr<char[]> raw_block(req.scratch);
      raw_block_contents = BlockContents(std::move(raw_block), handle.size());
    } else {
      // We used the scratch buffer or direct io buffer
      // which are shared by the blocks.
      // raw_block_contents does not have the ownership.
      raw_block_contents =
          BlockContents(Slice(req.result.data() + req_offset, handle.size()));
    }
#ifndef NDEBUG
    raw_block_contents.is_raw_block = true;
#endif

    if (s.ok() && options.verify_checksums) {
      PERF_TIMER_GUARD(block_checksum_time);
      const char* data = req.result.data();
      uint32_t expected =
          DecodeFixed32(data + req_offset + handle.size() + 1);
      // Since the scratch might be shared, the offset of the data block in
      // the buffer might not be 0. req.result.data() only points to the
      // begin address of each read request, so we need to add the offset
      // within the request. The checksum covers the block payload plus the
      // 1-byte compression type in the trailer (hence handle.size() + 1),
      // and the expected value is stored right after that.
      s = ROCKSDB_NAMESPACE::VerifyChecksum(footer.checksum(),
                                            data + req_offset,
                                            handle.size() + 1, expected);
      TEST_SYNC_POINT_CALLBACK("RetrieveMultipleBlocks:VerifyChecksum", &s);
    }

    if (s.ok()) {
      // When the blocks share the same underlying buffer (scratch or direct io
      // buffer), if the block is compressed, the shared buffer will be
      // uncompressed into heap during uncompressing; otherwise, we need to
      // manually copy the block into heap before inserting the block to block
      // cache.
      CompressionType compression_type =
          raw_block_contents.get_compression_type();
      if (use_shared_buffer && compression_type == kNoCompression) {
        Slice raw = Slice(req.result.data() + req_offset, block_size(handle));
        raw_block_contents = BlockContents(
            CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
            handle.size());
#ifndef NDEBUG
        raw_block_contents.is_raw_block = true;
#endif
      }
    }

    if (s.ok()) {
      if (options.fill_cache) {
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet);
        CachableEntry<Block>* block_entry = &(*results)[idx_in_batch];
        // MaybeReadBlockAndLoadToCache will insert into the block caches if
        // necessary. Since we're passing the raw block contents, it will
        // avoid looking up the block cache
        s = MaybeReadBlockAndLoadToCache(
            nullptr, options, handle, uncompression_dict, block_entry,
            BlockType::kData, mget_iter->get_context,
            &lookup_data_block_context, &raw_block_contents);

        // block_entry value could be null if no block cache is present, i.e.
        // BlockBasedTableOptions::no_block_cache is true and no compressed
        // block cache is configured. In that case, fall
        // through and set up the block explicitly
        if (block_entry->GetValue() != nullptr) {
          continue;
        }
      }

      CompressionType compression_type =
          raw_block_contents.get_compression_type();
      BlockContents contents;
      if (compression_type != kNoCompression) {
        UncompressionContext context(compression_type);
        UncompressionInfo info(context, uncompression_dict, compression_type);
        s = UncompressBlockContents(info, req.result.data() + req_offset,
                                    handle.size(), &contents, footer.version(),
                                    rep_->ioptions, memory_allocator);
      } else {
        // There are two cases here:
        // 1) the caller uses the shared buffer (scratch or direct io buffer);
        // 2) we use the request buffer.
        // If the scratch buffer or direct io buffer is used, we ensure that
        // all raw blocks are copied to the heap as single blocks above. If
        // the scratch buffer is not used, there is also no combined read, so
        // the raw block can be used directly.
        contents = std::move(raw_block_contents);
      }
      if (s.ok()) {
        (*results)[idx_in_batch].SetOwnedValue(new Block(
            std::move(contents), read_amp_bytes_per_bit, ioptions.statistics));
      }
    }
    (*statuses)[idx_in_batch] = s;
  }
}

template <typename TBlocklike>
Status BlockBasedTable::RetrieveBlock(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const {
  assert(block_entry);
  assert(block_entry->IsEmpty());

  Status s;
  if (use_cache) {
    s = MaybeReadBlockAndLoadToCache(prefetch_buffer, ro, handle,
                                     uncompression_dict, block_entry,
                                     block_type, get_context, lookup_context,
                                     /*contents=*/nullptr);

    if (!s.ok()) {
      return s;
    }

    if (block_entry->GetValue() != nullptr) {
      assert(s.ok());
      return s;
    }
  }

  assert(block_entry->IsEmpty());

  const bool no_io = ro.read_tier == kBlockCacheTier;
  if (no_io) {
    return Status::Incomplete("no blocking io");
  }

  const bool maybe_compressed =
      block_type != BlockType::kFilter &&
      block_type != BlockType::kCompressionDictionary &&
      rep_->blocks_maybe_compressed;
  const bool do_uncompress = maybe_compressed;
  std::unique_ptr<TBlocklike> block;

  {
    StopWatch sw(rep_->ioptions.env, rep_->ioptions.statistics,
                 READ_BLOCK_GET_MICROS);
    s = ReadBlockFromFile(
        rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle, &block,
        rep_->ioptions, do_uncompress, maybe_compressed, block_type,
        uncompression_dict, rep_->persistent_cache_options,
        block_type == BlockType::kData
            ? rep_->table_options.read_amp_bytes_per_bit
            : 0,
        GetMemoryAllocator(rep_->table_options), for_compaction,
        rep_->blocks_definitely_zstd_compressed,
        rep_->table_options.filter_policy.get());
  }

  if (!s.ok()) {
    return s;
  }

  block_entry->SetOwnedValue(block.release());

  assert(s.ok());
  return s;
}
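
// Typical call shape (informal sketch, mirroring the mmap path of
// RetrieveMultipleBlocks above): fetch a data block, consulting the block
// cache first:
//
//   CachableEntry<Block> block;
//   Status s = RetrieveBlock(/*prefetch_buffer=*/nullptr, read_options,
//                            handle, uncompression_dict, &block,
//                            BlockType::kData, get_context, lookup_context,
//                            /*for_compaction=*/false, /*use_cache=*/true);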

// Explicitly instantiate templates for each of the "blocklike" types we use.
// This makes it possible to keep the template definitions in the .cc file.
template Status BlockBasedTable::RetrieveBlock<BlockContents>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<BlockContents>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

template Status BlockBasedTable::RetrieveBlock<ParsedFullFilterBlock>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<ParsedFullFilterBlock>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

template Status BlockBasedTable::RetrieveBlock<Block>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

template Status BlockBasedTable::RetrieveBlock<UncompressionDict>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<UncompressionDict>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

BlockBasedTable::PartitionedIndexIteratorState::PartitionedIndexIteratorState(
    const BlockBasedTable* table,
    std::unordered_map<uint64_t, CachableEntry<Block>>* block_map)
    : table_(table), block_map_(block_map) {}

InternalIteratorBase<IndexValue>*
BlockBasedTable::PartitionedIndexIteratorState::NewSecondaryIterator(
    const BlockHandle& handle) {
  // Return a block iterator on the index partition
  auto block = block_map_->find(handle.offset());
  // block_map_ might not hold the partition if the block cache did not have
  // enough space for it; in that case, fall through and return an empty
  // iterator below.
  if (block != block_map_->end()) {
    const Rep* rep = table_->get_rep();
    assert(rep);

    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    return block->second.GetValue()->NewIndexIterator(
        &rep->internal_comparator, rep->internal_comparator.user_comparator(),
        rep->get_global_seqno(BlockType::kIndex), nullptr, kNullStats, true,
        rep->index_has_first_key, rep->index_key_includes_seq,
        rep->index_value_is_full);
  }
  // Create an empty iterator
  return new IndexBlockIter();
}

// This will be broken if the user specifies an unusual implementation
// of Options.comparator, or if the user specifies an unusual
// definition of prefixes in BlockBasedTableOptions.filter_policy.
// In particular, we require the following three properties:
//
// 1) key.starts_with(prefix(key))
// 2) Compare(prefix(key), key) <= 0.
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
//
// If read_options.read_tier == kBlockCacheTier, this method will do no I/O and
// will return true if the filter block is not in memory and not found in block
// cache.
//
// REQUIRES: this method shouldn't be called while the DB lock is held.
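//
// Example (informal): with a bytewise comparator and a fixed-length prefix
// extractor such as NewFixedPrefixTransform(3), prefix("abcXYZ") == "abc",
// which satisfies all three properties above: "abcXYZ" starts with "abc",
// "abc" <= "abcXYZ", and key ordering implies prefix ordering.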
bool BlockBasedTable::PrefixMayMatch(
    const Slice& internal_key, const ReadOptions& read_options,
    const SliceTransform* options_prefix_extractor,
    const bool need_upper_bound_check,
    BlockCacheLookupContext* lookup_context) const {
  if (!rep_->filter_policy) {
    return true;
  }

  const SliceTransform* prefix_extractor;

  if (rep_->table_prefix_extractor == nullptr) {
    if (need_upper_bound_check) {
      return true;
    }
    prefix_extractor = options_prefix_extractor;
  } else {
    prefix_extractor = rep_->table_prefix_extractor.get();
  }
  auto user_key = ExtractUserKey(internal_key);
  if (!prefix_extractor->InDomain(user_key)) {
    return true;
  }

  bool may_match = true;
  Status s;

  // First, try checking with the full filter
  FilterBlockReader* const filter = rep_->filter.get();
  bool filter_checked = true;
  if (filter != nullptr) {
    const bool no_io = read_options.read_tier == kBlockCacheTier;

    if (!filter->IsBlockBased()) {
      const Slice* const const_ikey_ptr = &internal_key;
      may_match = filter->RangeMayExist(
          read_options.iterate_upper_bound, user_key, prefix_extractor,
          rep_->internal_comparator.user_comparator(), const_ikey_ptr,
          &filter_checked, need_upper_bound_check, no_io, lookup_context);
    } else {
      // if prefix_extractor changed for the block based filter, skip the
      // filter
      if (need_upper_bound_check) {
        return true;
      }
      auto prefix = prefix_extractor->Transform(user_key);
      InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
      auto internal_prefix = internal_key_prefix.Encode();

      // To prevent any io operation in this method, we set `read_tier` to make
      // sure we always read index or filter only when they have already been
      // loaded to memory.
      ReadOptions no_io_read_options;
      no_io_read_options.read_tier = kBlockCacheTier;

      // Then, try to find it within each block
      // we already know prefix_extractor and prefix_extractor_name must match
      // because `CheckPrefixMayMatch` first checks `check_filter_ == true`
      std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
          no_io_read_options,
          /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
          /*get_context=*/nullptr, lookup_context));
      iiter->Seek(internal_prefix);

      if (!iiter->Valid()) {
        // we're past end of file
        // if it's incomplete, it means that we avoided I/O
        // and we're not really sure that we're past the end
        // of the file
        may_match = iiter->status().IsIncomplete();
      } else if ((rep_->index_key_includes_seq ? ExtractUserKey(iiter->key())
                                               : iiter->key())
                     .starts_with(ExtractUserKey(internal_prefix))) {
        // we need to check for this subtle case because our only
        // guarantee is that "the key is a string >= last key in that data
        // block" according to the doc/table_format.txt spec.
        //
        // Suppose iiter->key() starts with the desired prefix; it is not
        // necessarily the case that the corresponding data block will
        // contain the prefix, since iiter->key() need not be in the
        // block.  However, the next data block may contain the prefix, so
        // we return true to play it safe.
        may_match = true;
      } else if (filter->IsBlockBased()) {
        // iiter->key() does NOT start with the desired prefix.  Because
        // Seek() finds the first key that is >= the seek target, this
        // means that iiter->key() > prefix.  Thus, any data blocks coming
        // after the data block corresponding to iiter->key() cannot
        // possibly contain the key.  Thus, the corresponding data block
        // is the only one that could potentially contain the prefix.
        BlockHandle handle = iiter->value().handle;
        may_match = filter->PrefixMayMatch(
            prefix, prefix_extractor, handle.offset(), no_io,
            /*const_key_ptr=*/nullptr, /*get_context=*/nullptr, lookup_context);
      }
    }
  }

  if (filter_checked) {
    Statistics* statistics = rep_->ioptions.statistics;
    RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
    if (!may_match) {
      RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
    }
  }

  return may_match;
}

InternalIterator* BlockBasedTable::NewIterator(
    const ReadOptions& read_options, const SliceTransform* prefix_extractor,
    Arena* arena, bool skip_filters, TableReaderCaller caller,
    size_t compaction_readahead_size, bool allow_unprepared_value) {
  BlockCacheLookupContext lookup_context{caller};
  bool need_upper_bound_check =
      read_options.auto_prefix_mode ||
      PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
  std::unique_ptr<InternalIteratorBase<IndexValue>> index_iter(NewIndexIterator(
      read_options,
      need_upper_bound_check &&
          rep_->index_type == BlockBasedTableOptions::kHashSearch,
      /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context));
  if (arena == nullptr) {
    return new BlockBasedTableIterator(
        this, read_options, rep_->internal_comparator, std::move(index_iter),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, caller,
        compaction_readahead_size, allow_unprepared_value);
  } else {
    auto* mem = arena->AllocateAligned(sizeof(BlockBasedTableIterator));
    return new (mem) BlockBasedTableIterator(
        this, read_options, rep_->internal_comparator, std::move(index_iter),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, caller,
        compaction_readahead_size, allow_unprepared_value);
  }
}
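
// Informal usage sketch (internal API; names taken from this file): a full
// scan over the table without an arena:
//
//   std::unique_ptr<InternalIterator> it(table->NewIterator(
//       read_options, prefix_extractor, /*arena=*/nullptr,
//       /*skip_filters=*/false, TableReaderCaller::kUncategorized,
//       /*compaction_readahead_size=*/0, /*allow_unprepared_value=*/false));
//   for (it->SeekToFirst(); it->Valid(); it->Next()) {
//     // consume it->key() / it->value()
//   }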

FragmentedRangeTombstoneIterator* BlockBasedTable::NewRangeTombstoneIterator(
    const ReadOptions& read_options) {
  if (rep_->fragmented_range_dels == nullptr) {
    return nullptr;
  }
  SequenceNumber snapshot = kMaxSequenceNumber;
  if (read_options.snapshot != nullptr) {
    snapshot = read_options.snapshot->GetSequenceNumber();
  }
  return new FragmentedRangeTombstoneIterator(
      rep_->fragmented_range_dels, rep_->internal_comparator, snapshot);
}
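
// Semantics note (informal): the returned iterator exposes only tombstones
// visible at the requested snapshot; e.g. a range tombstone written at
// seqno 100 is invisible to a snapshot taken at seqno 90.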

bool BlockBasedTable::FullFilterKeyMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    const Slice& internal_key, const bool no_io,
    const SliceTransform* prefix_extractor, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return true;
  }
  Slice user_key = ExtractUserKey(internal_key);
  const Slice* const const_ikey_ptr = &internal_key;
  bool may_match = true;
  if (rep_->whole_key_filtering) {
    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    Slice user_key_without_ts = StripTimestampFromUserKey(user_key, ts_sz);
    may_match =
        filter->KeyMayMatch(user_key_without_ts, prefix_extractor, kNotValid,
                            no_io, const_ikey_ptr, get_context, lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0 &&
             prefix_extractor->InDomain(user_key) &&
             !filter->PrefixMayMatch(prefix_extractor->Transform(user_key),
                                     prefix_extractor, kNotValid, no_io,
                                     const_ikey_ptr, get_context,
                                     lookup_context)) {
    may_match = false;
  }
  if (may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, rep_->level);
  }
  return may_match;
}
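
// Decision summary (informal): the full filter is consulted on the whole key
// only when whole_key_filtering is set; otherwise the prefix path applies
// only when the read is not a total-order seek, the table was built with the
// same prefix extractor, and the key is in the extractor's domain.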

void BlockBasedTable::FullFilterKeysMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    MultiGetRange* range, const bool no_io,
    const SliceTransform* prefix_extractor,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return;
  }
  uint64_t before_keys = range->KeysLeft();
  assert(before_keys > 0);  // Caller should ensure
  if (rep_->whole_key_filtering) {
    filter->KeysMayMatch(range, prefix_extractor, kNotValid, no_io,
                         lookup_context);
    uint64_t after_keys = range->KeysLeft();
    if (after_keys) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE,
                 after_keys);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, after_keys,
                                rep_->level);
    }
    uint64_t filtered_keys = before_keys - after_keys;
    if (filtered_keys) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL, filtered_keys);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, filtered_keys,
                                rep_->level);
    }
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0) {
    filter->PrefixesMayMatch(range, prefix_extractor, kNotValid, false,
                             lookup_context);
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_PREFIX_CHECKED,
               before_keys);
    uint64_t after_keys = range->KeysLeft();
    uint64_t filtered_keys = before_keys - after_keys;
    if (filtered_keys) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_PREFIX_USEFUL,
                 filtered_keys);
    }
  }
}

Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
                            GetContext* get_context,
                            const SliceTransform* prefix_extractor,
                            bool skip_filters) {
  assert(key.size() >= 8);  // key must be internal key
  assert(get_context != nullptr);
  Status s;
  const bool no_io = read_options.read_tier == kBlockCacheTier;

  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;

  // First, check the full filter. If the full filter is not useful, go into
  // each block.
  uint64_t tracing_get_id = get_context->get_tracing_get_id();
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserGet, tracing_get_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
    // Trace the key since it contains both user key and sequence number.
    lookup_context.referenced_key = key.ToString();
    lookup_context.get_from_user_specified_snapshot =
        read_options.snapshot != nullptr;
  }
  const bool may_match =
      FullFilterKeyMayMatch(read_options, filter, key, no_io, prefix_extractor,
                            get_context, &lookup_context);
  if (!may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
  } else {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    bool matched = false;  // if such user key matched a key in SST
    bool done = false;
    for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
      IndexValue v = iiter->value();

      bool not_exist_in_filter =
          filter != nullptr && filter->IsBlockBased() == true &&
          !filter->KeyMayMatch(ExtractUserKeyAndStripTimestamp(key, ts_sz),
                               prefix_extractor, v.handle.offset(), no_io,
                               /*const_ikey_ptr=*/nullptr, get_context,
                               &lookup_context);

      if (not_exist_in_filter) {
        // Not found
        // TODO: think about interaction with Merge. If a user key cannot
        // cross one data block, we should be fine.
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
        break;
      }

      if (!v.first_internal_key.empty() && !skip_filters &&
          UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                  .Compare(ExtractUserKey(key),
                           ExtractUserKey(v.first_internal_key)) < 0) {
        // The requested key falls between highest key in previous block and
        // lowest key in current block.
        break;
      }

      BlockCacheLookupContext lookup_data_block_context{
          TableReaderCaller::kUserGet, tracing_get_id,
          /*get_from_user_specified_snapshot=*/read_options.snapshot !=
              nullptr};
      bool does_referenced_key_exist = false;
      DataBlockIter biter;
      uint64_t referenced_data_size = 0;
      NewDataBlockIterator<DataBlockIter>(
          read_options, v.handle, &biter, BlockType::kData, get_context,
          &lookup_data_block_context,
          /*s=*/Status(), /*prefetch_buffer*/ nullptr);

      if (no_io && biter.status().IsIncomplete()) {
        // couldn't get block from block_cache
        // Update Saver.state to Found because we are only looking for
        // whether we can guarantee the key is not there when "no_io" is set
        get_context->MarkKeyMayExist();
        break;
      }
      if (!biter.status().ok()) {
        s = biter.status();
        break;
      }

      bool may_exist = biter.SeekForGet(key);
      // If user-specified timestamp is supported, we cannot end the search
      // just because hash index lookup indicates the key+ts does not exist.
      if (!may_exist && ts_sz == 0) {
        // HashSeek cannot find the key in this block, and the iter is not
        // at the end of the block, i.e. the key cannot be in the following
        // blocks either. In this case, the seek_key cannot be found, so we
        // break from the top level for-loop.
        done = true;
      } else {
        // Call the *saver function on each entry/block until it returns false
        for (; biter.Valid(); biter.Next()) {
          ParsedInternalKey parsed_key;
          if (!ParseInternalKey(biter.key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }

          if (!get_context->SaveValue(
                  parsed_key, biter.value(), &matched,
                  biter.IsValuePinned() ? &biter : nullptr)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size = biter.key().size() + biter.value().size();
            }
            done = true;
            break;
          }
        }
        s = biter.status();
      }
      // Write the block cache access record.
      if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
        // Avoid making copies of block_key, cf_name, and referenced_key when
        // constructing the access record.
        Slice referenced_key;
        if (does_referenced_key_exist) {
          referenced_key = biter.key();
        } else {
          referenced_key = key;
        }
        BlockCacheTraceRecord access_record(
            rep_->ioptions.env->NowMicros(),
            /*block_key=*/"", lookup_data_block_context.block_type,
            lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
            /*cf_name=*/"", rep_->level_for_tracing(),
            rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
            lookup_data_block_context.is_cache_hit,
            lookup_data_block_context.no_insert,
            lookup_data_block_context.get_id,
            lookup_data_block_context.get_from_user_specified_snapshot,
            /*referenced_key=*/"", referenced_data_size,
            lookup_data_block_context.num_keys_in_block,
            does_referenced_key_exist);
        block_cache_tracer_->WriteBlockAccess(
            access_record, lookup_data_block_context.block_key,
            rep_->cf_name_for_tracing(), referenced_key);
      }

      if (done) {
        // Avoid the extra Next which is expensive in two-level indexes
        break;
      }
    }
    if (matched && filter != nullptr && !filter->IsBlockBased()) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                rep_->level);
    }
    if (s.ok() && !iiter->status().IsNotFound()) {
      s = iiter->status();
    }
  }

  return s;
}

2382 2383 2384 2385 2386
using MultiGetRange = MultiGetContext::Range;
void BlockBasedTable::MultiGet(const ReadOptions& read_options,
                               const MultiGetRange* mget_range,
                               const SliceTransform* prefix_extractor,
                               bool skip_filters) {
2387 2388 2389 2390 2391 2392
  if (mget_range->empty()) {
    // Caller should ensure non-empty (performance bug)
    assert(false);
    return;  // Nothing to do
  }

2393 2394
  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;
2395 2396
  MultiGetRange sst_file_range(*mget_range, mget_range->begin(),
                               mget_range->end());
2397 2398 2399 2400

  // First check the full filter
  // If full filter not useful, Then go into each block
  const bool no_io = read_options.read_tier == kBlockCacheTier;
2401
  uint64_t tracing_mget_id = BlockCacheTraceHelper::kReservedGetId;
2402
  if (sst_file_range.begin()->get_context) {
H
haoyuhuang 已提交
2403
    tracing_mget_id = sst_file_range.begin()->get_context->get_tracing_get_id();
2404
  }
2405 2406 2407
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserMultiGet, tracing_mget_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
2408 2409
  FullFilterKeysMayMatch(read_options, filter, &sst_file_range, no_io,
                         prefix_extractor, &lookup_context);
2410

2411
  if (!sst_file_range.empty()) {
2412 2413 2414 2415 2416 2417 2418 2419
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
2420 2421
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
2422
                         sst_file_range.begin()->get_context, &lookup_context);
2423
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
2424 2425 2426 2427
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

2428
    uint64_t offset = std::numeric_limits<uint64_t>::max();
A
anand76 已提交
2429 2430 2431 2432 2433 2434 2435 2436
    autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE> block_handles;
    autovector<CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE> results;
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE> statuses;
    char stack_buf[kMultiGetReadStackBufSize];
    std::unique_ptr<char[]> block_buf;
    {
      MultiGetRange data_block_range(sst_file_range, sst_file_range.begin(),
                                     sst_file_range.end());
2437

2438
      CachableEntry<UncompressionDict> uncompression_dict;
2439 2440 2441 2442 2443 2444 2445 2446 2447
      Status uncompression_dict_status;
      if (rep_->uncompression_dict_reader) {
        uncompression_dict_status =
            rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
                nullptr /* prefetch_buffer */, no_io,
                sst_file_range.begin()->get_context, &lookup_context,
                &uncompression_dict);
      }

2448 2449 2450 2451
      const UncompressionDict& dict = uncompression_dict.GetValue()
                                          ? *uncompression_dict.GetValue()
                                          : UncompressionDict::GetEmptyDict();

A
anand76 已提交
2452 2453 2454 2455 2456
      size_t total_len = 0;
      ReadOptions ro = read_options;
      ro.read_tier = kBlockCacheTier;

      for (auto miter = data_block_range.begin();
2457
           miter != data_block_range.end(); ++miter) {
A
anand76 已提交
2458 2459 2460 2461 2462 2463 2464 2465 2466
        const Slice& key = miter->ikey;
        iiter->Seek(miter->ikey);

        IndexValue v;
        if (iiter->Valid()) {
          v = iiter->value();
        }
        if (!iiter->Valid() ||
            (!v.first_internal_key.empty() && !skip_filters &&
2467 2468 2469
             UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                     .Compare(ExtractUserKey(key),
                              ExtractUserKey(v.first_internal_key)) < 0)) {
A
anand76 已提交
2470 2471
          // The requested key falls between highest key in previous block and
          // lowest key in current block.
2472 2473 2474
          if (!iiter->status().IsNotFound()) {
            *(miter->s) = iiter->status();
          }
A
anand76 已提交
2475 2476 2477 2478
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }
2479 2480

        if (!uncompression_dict_status.ok()) {
2481
          assert(!uncompression_dict_status.IsNotFound());
2482 2483 2484 2485 2486 2487
          *(miter->s) = uncompression_dict_status;
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }

A
anand76 已提交
2488 2489 2490 2491 2492 2493 2494 2495
        statuses.emplace_back();
        results.emplace_back();
        if (v.handle.offset() == offset) {
          // We're going to reuse the block for this key later on. No need to
          // look it up now. Place a null handle
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
          continue;
        }
2496 2497 2498
        // Lookup the cache for the given data block referenced by an index
        // iterator value (i.e BlockHandle). If it exists in the cache,
        // initialize block to the contents of the data block.
A
anand76 已提交
2499 2500
        offset = v.handle.offset();
        BlockHandle handle = v.handle;
2501 2502
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet);
2503 2504 2505 2506
        Status s = RetrieveBlock(
            nullptr, ro, handle, dict, &(results.back()), BlockType::kData,
            miter->get_context, &lookup_data_block_context,
            /* for_compaction */ false, /* use_cache */ true);
        if (s.IsIncomplete()) {
          s = Status::OK();
        }
        if (s.ok() && !results.back().IsEmpty()) {
          // Found it in the cache. Add NULL handle to indicate there is
          // nothing to read from disk
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
        } else {
          block_handles.emplace_back(handle);
          total_len += block_size(handle);
        }
      }

      if (total_len) {
        char* scratch = nullptr;
        // If using direct IO, then scratch is not used, so keep it nullptr.
        // If the blocks need to be uncompressed and we don't need the
        // compressed blocks, then we can use a contiguous block of
        // memory to read in all the blocks as it will be temporary
        // storage:
        // 1. If blocks are compressed and a compressed block cache exists,
        //    allocate individual heap buffers
        // 2. If blocks are uncompressed, allocate individual heap buffers
        // 3. If blocks are compressed and there is no compressed block cache,
        //    use the shared stack buffer, falling back to a single heap
        //    buffer when the total size does not fit on the stack
        if (!rep_->file->use_direct_io() &&
            rep_->table_options.block_cache_compressed == nullptr &&
            rep_->blocks_maybe_compressed) {
          if (total_len <= kMultiGetReadStackBufSize) {
            scratch = stack_buf;
          } else {
            scratch = new char[total_len];
            block_buf.reset(scratch);
          }
        }
        RetrieveMultipleBlocks(read_options, &data_block_range, &block_handles,
                               &statuses, &results, scratch, dict);
      }
    }

    DataBlockIter first_biter;
    DataBlockIter next_biter;
    size_t idx_in_batch = 0;
    for (auto miter = sst_file_range.begin(); miter != sst_file_range.end();
         ++miter) {
      Status s;
      GetContext* get_context = miter->get_context;
      const Slice& key = miter->ikey;
      bool matched = false;  // if such user key matched a key in SST
      bool done = false;
      bool first_block = true;
      do {
        DataBlockIter* biter = nullptr;
        bool reusing_block = true;
        uint64_t referenced_data_size = 0;
        bool does_referenced_key_exist = false;
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet, tracing_mget_id,
            /*get_from_user_specified_snapshot=*/read_options.snapshot !=
                nullptr);
        if (first_block) {
          if (!block_handles[idx_in_batch].IsNull() ||
              !results[idx_in_batch].IsEmpty()) {
            first_biter.Invalidate(Status::OK());
            NewDataBlockIterator<DataBlockIter>(
                read_options, results[idx_in_batch], &first_biter,
                statuses[idx_in_batch]);
            reusing_block = false;
          }
          biter = &first_biter;
          idx_in_batch++;
        } else {
          IndexValue v = iiter->value();
          if (!v.first_internal_key.empty() && !skip_filters &&
              UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                      .Compare(ExtractUserKey(key),
                               ExtractUserKey(v.first_internal_key)) < 0) {
            // The requested key falls between the highest key in the previous
            // block and the lowest key in the current block.
            break;
          }

          next_biter.Invalidate(Status::OK());
          NewDataBlockIterator<DataBlockIter>(
              read_options, iiter->value().handle, &next_biter,
              BlockType::kData, get_context, &lookup_data_block_context,
              Status(), nullptr);
          biter = &next_biter;
          reusing_block = false;
        }

        if (read_options.read_tier == kBlockCacheTier &&
            biter->status().IsIncomplete()) {
          // couldn't get block from block_cache
          // Mark the key as MayExist, since we are only looking for whether
          // we can guarantee the key is not there when "no_io" is set
          get_context->MarkKeyMayExist();
          break;
        }
        if (!biter->status().ok()) {
          s = biter->status();
          break;
        }

        bool may_exist = biter->SeekForGet(key);
        if (!may_exist) {
          // HashSeek cannot find the key in this block, and the iter is not
          // at the end of the block, i.e. the key cannot be in the following
          // blocks either. In this case, the seek_key cannot be found, so we
          // break from the top level for-loop.
          break;
        }

        // Call the *saver function on each entry/block until it returns false
        for (; biter->Valid(); biter->Next()) {
          ParsedInternalKey parsed_key;
          Cleanable dummy;
          Cleanable* value_pinner = nullptr;
          if (!ParseInternalKey(biter->key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }
          if (biter->IsValuePinned()) {
            if (reusing_block) {
              Cache* block_cache = rep_->table_options.block_cache.get();
              assert(biter->cache_handle() != nullptr);
              block_cache->Ref(biter->cache_handle());
              dummy.RegisterCleanup(&ReleaseCachedEntry, block_cache,
                                    biter->cache_handle());
              value_pinner = &dummy;
            } else {
              value_pinner = biter;
            }
          }
          if (!get_context->SaveValue(parsed_key, biter->value(), &matched,
                                      value_pinner)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size =
                  biter->key().size() + biter->value().size();
            }
            done = true;
            break;
          }
          s = biter->status();
        }
        // Write the block cache access.
        if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
          // Avoid making copy of block_key, cf_name, and referenced_key when
          // constructing the access record.
          Slice referenced_key;
          if (does_referenced_key_exist) {
            referenced_key = biter->key();
          } else {
            referenced_key = key;
          }
          BlockCacheTraceRecord access_record(
              rep_->ioptions.env->NowMicros(),
              /*block_key=*/"", lookup_data_block_context.block_type,
              lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
              /*cf_name=*/"", rep_->level_for_tracing(),
              rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
              lookup_data_block_context.is_cache_hit,
              lookup_data_block_context.no_insert,
              lookup_data_block_context.get_id,
              lookup_data_block_context.get_from_user_specified_snapshot,
              /*referenced_key=*/"", referenced_data_size,
              lookup_data_block_context.num_keys_in_block,
              does_referenced_key_exist);
          block_cache_tracer_->WriteBlockAccess(
              access_record, lookup_data_block_context.block_key,
              rep_->cf_name_for_tracing(), referenced_key);
        }
        s = biter->status();
        if (done) {
          // Avoid the extra Next which is expensive in two-level indexes
          break;
        }
        if (first_block) {
          iiter->Seek(key);
        }
        first_block = false;
        iiter->Next();
      } while (iiter->Valid());

      if (matched && filter != nullptr && !filter->IsBlockBased()) {
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                  rep_->level);
      }
      if (s.ok() && !iiter->status().IsNotFound()) {
        s = iiter->status();
      }
      *(miter->s) = s;
    }
  }
}

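// Editorial sketch (not part of the upstream source): the batched lookup
// flow implemented by MultiGet above, with the bookkeeping elided. The
// helpers SeekIndex() and ProbeBlockCache() are hypothetical stand-ins for
// the index seek and the kBlockCacheTier-only RetrieveBlock() call.
//
//   // Phase 1: probe the block cache only and remember which data blocks
//   // missed; hits get a null handle so they are not read again.
//   for (auto miter = range.begin(); miter != range.end(); ++miter) {
//     BlockHandle handle = SeekIndex(miter->ikey);
//     if (ProbeBlockCache(handle, &results)) {
//       block_handles.emplace_back(BlockHandle::NullBlockHandle());
//     } else {
//       block_handles.emplace_back(handle);  // to be read in phase 2
//     }
//   }
//   // Phase 2: one batched read for all the cache misses.
//   RetrieveMultipleBlocks(read_options, &range, &block_handles, &statuses,
//                          &results, scratch, uncompression_dict);
//   // Phase 3: serve each key from its now-resident block, as in the
//   // per-key loop above.
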
Status BlockBasedTable::Prefetch(const Slice* const begin,
                                 const Slice* const end) {
  auto& comparator = rep_->internal_comparator;
  UserComparatorWrapper user_comparator(comparator.user_comparator());
  // pre-condition
  if (begin && end && comparator.Compare(*begin, *end) > 0) {
    return Status::InvalidArgument(*begin, *end);
  }
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  IndexBlockIter iiter_on_stack;
  auto iiter = NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                                &iiter_on_stack, /*get_context=*/nullptr,
                                &lookup_context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }

  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }

  // indicates if we are on the last page that needs to be prefetched
  bool prefetching_boundary_page = false;

  for (begin ? iiter->Seek(*begin) : iiter->SeekToFirst(); iiter->Valid();
       iiter->Next()) {
    BlockHandle block_handle = iiter->value().handle;
    const bool is_user_key = !rep_->index_key_includes_seq;
    if (end &&
        ((!is_user_key && comparator.Compare(iiter->key(), *end) >= 0) ||
         (is_user_key &&
          user_comparator.Compare(iiter->key(), ExtractUserKey(*end)) >= 0))) {
      if (prefetching_boundary_page) {
        break;
      }

      // The index entry represents the last key in the data block.
      // We should load this page into memory as well, but no more
      prefetching_boundary_page = true;
    }

    // Load the block specified by the block_handle into the block cache
    DataBlockIter biter;

    NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), block_handle, &biter, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, &lookup_context, Status(),
        /*prefetch_buffer=*/nullptr);

    if (!biter.status().ok()) {
      // there was an unexpected error while pre-fetching
      return biter.status();
    }
  }

  return Status::OK();
}

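// Example usage (an editorial sketch; the bounds are hypothetical keys in
// the table's internal key format, since they are compared with the
// internal comparator above). Prefetch() warms the block cache ahead of an
// expected scan; either bound may be nullptr to start at the first block or
// run through the last one, and the block containing *end is still loaded,
// per the boundary-page logic above.
//
//   Slice begin = ...;  // lower bound
//   Slice end = ...;    // upper bound
//   Status s = table->Prefetch(&begin, &end);
//   // Returns InvalidArgument if begin > end, per the pre-condition check.
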
Status BlockBasedTable::VerifyChecksum(const ReadOptions& read_options,
                                       TableReaderCaller caller) {
  Status s;
  // Check Meta blocks
  std::unique_ptr<Block> metaindex;
  std::unique_ptr<InternalIterator> metaindex_iter;
  s = ReadMetaIndexBlock(nullptr /* prefetch buffer */, &metaindex,
                         &metaindex_iter);
  if (s.ok()) {
    s = VerifyChecksumInMetaBlocks(metaindex_iter.get());
    if (!s.ok()) {
      return s;
    }
  } else {
    return s;
  }
  // Check Data blocks
  IndexBlockIter iiter_on_stack;
  BlockCacheLookupContext context{caller};
  InternalIteratorBase<IndexValue>* iiter = NewIndexIterator(
      read_options, /*disable_prefix_seek=*/false, &iiter_on_stack,
      /*get_context=*/nullptr, &context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }
  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }
  s = VerifyChecksumInBlocks(read_options, iiter);
  return s;
}

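// Example usage (an editorial sketch; the readahead override is a
// hypothetical value). A scrub-style caller verifies every meta and data
// block in the file; read_options.readahead_size, when nonzero, replaces
// the kMaxAutoReadaheadSize default chosen in VerifyChecksumInBlocks()
// below.
//
//   ReadOptions ro;
//   ro.readahead_size = 4 * 1024 * 1024;
//   Status s =
//       table->VerifyChecksum(ro, TableReaderCaller::kUserVerifyChecksum);
//   if (!s.ok()) { /* a block failed its checksum or could not be read */ }
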
Status BlockBasedTable::VerifyChecksumInBlocks(
    const ReadOptions& read_options,
    InternalIteratorBase<IndexValue>* index_iter) {
  Status s;
  // We are scanning the whole file, so there is no need to do exponential
  // increasing of the buffer size.
  size_t readahead_size = (read_options.readahead_size != 0)
                              ? read_options.readahead_size
                              : kMaxAutoReadaheadSize;
  // FilePrefetchBuffer doesn't work in mmap mode and readahead is not
  // needed there.
  FilePrefetchBuffer prefetch_buffer(
      rep_->file.get(), readahead_size /* readahead_size */,
      readahead_size /* max_readahead_size */,
      !rep_->ioptions.allow_mmap_reads /* enable */);

  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle = index_iter->value().handle;
    BlockContents contents;
    BlockFetcher block_fetcher(
        rep_->file.get(), &prefetch_buffer, rep_->footer, ReadOptions(), handle,
        &contents, rep_->ioptions, false /* decompress */,
        false /*maybe_compressed*/, BlockType::kData,
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

BlockType BlockBasedTable::GetBlockTypeForMetaBlockByName(
    const Slice& meta_block_name) {
  if (meta_block_name.starts_with(kFilterBlockPrefix) ||
      meta_block_name.starts_with(kFullFilterBlockPrefix) ||
      meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
    return BlockType::kFilter;
  }

  if (meta_block_name == kPropertiesBlock) {
    return BlockType::kProperties;
  }

  if (meta_block_name == kCompressionDictBlock) {
    return BlockType::kCompressionDictionary;
  }

  if (meta_block_name == kRangeDelBlock) {
    return BlockType::kRangeDeletion;
  }

  if (meta_block_name == kHashIndexPrefixesBlock) {
    return BlockType::kHashIndexPrefixes;
  }

  if (meta_block_name == kHashIndexPrefixesMetadataBlock) {
    return BlockType::kHashIndexMetadata;
  }

  assert(false);
  return BlockType::kInvalid;
}

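// For illustration (an editorial sketch), the mapping above resolves the
// standard meta block names as follows; "filter.rocksdb.BuiltinBloomFilter"
// is a typical block-based filter block name.
//
//   GetBlockTypeForMetaBlockByName("filter.rocksdb.BuiltinBloomFilter")
//       -> BlockType::kFilter                 // kFilterBlockPrefix match
//   GetBlockTypeForMetaBlockByName("rocksdb.properties")
//       -> BlockType::kProperties             // kPropertiesBlock
//   GetBlockTypeForMetaBlockByName("rocksdb.compression_dictionary")
//       -> BlockType::kCompressionDictionary  // kCompressionDictBlock
//
// Unrecognized names assert in debug builds and map to BlockType::kInvalid.
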
Status BlockBasedTable::VerifyChecksumInMetaBlocks(
    InternalIteratorBase<Slice>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle;
    Slice input = index_iter->value();
    s = handle.DecodeFrom(&input);
    BlockContents contents;
    const Slice meta_block_name = index_iter->key();
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/,
        GetBlockTypeForMetaBlockByName(meta_block_name),
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (s.IsCorruption() && meta_block_name == kPropertiesBlock) {
      TableProperties* table_properties;
      s = TryReadPropertiesWithGlobalSeqno(nullptr /* prefetch_buffer */,
                                           index_iter->value(),
                                           &table_properties);
      delete table_properties;
    }
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

bool BlockBasedTable::TEST_BlockInCache(const BlockHandle& handle) const {
  assert(rep_ != nullptr);

  Cache* const cache = rep_->table_options.block_cache.get();
  if (cache == nullptr) {
    return false;
  }

  char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice cache_key =
      GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, handle,
                  cache_key_storage);

  Cache::Handle* const cache_handle = cache->Lookup(cache_key);
  if (cache_handle == nullptr) {
    return false;
  }

  cache->Release(cache_handle);

  return true;
}

bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
                                      const Slice& key) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
      options, /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
      /*get_context=*/nullptr, /*lookup_context=*/nullptr));
  iiter->Seek(key);
  assert(iiter->Valid());

  return TEST_BlockInCache(iiter->value().handle);
}

// REQUIRES: The following fields of rep_ should have already been populated:
//  1. file
//  2. index_handle,
//  3. options
//  4. internal_comparator
//  5. index_type
Status BlockBasedTable::CreateIndexReader(
    FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* preloaded_meta_index_iter, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context,
    std::unique_ptr<IndexReader>* index_reader) {
  // kHashSearch requires a non-empty prefix_extractor, but we bypass that
  // check here since we have no access to MutableCFOptions.
  // Instead, the need_upper_bound_check flag in
  // BlockBasedTable::NewIndexIterator covers this case: if prefix_extractor
  // does not match prefix_extractor_name from the table properties, Hash
  // Index is turned off by setting total_order_seek to true.

  switch (rep_->index_type) {
    case BlockBasedTableOptions::kTwoLevelIndexSearch: {
      return PartitionIndexReader::Create(this, prefetch_buffer, use_cache,
                                          prefetch, pin, lookup_context,
                                          index_reader);
    }
    case BlockBasedTableOptions::kBinarySearch:
      FALLTHROUGH_INTENDED;
    case BlockBasedTableOptions::kBinarySearchWithFirstKey: {
      return BinarySearchIndexReader::Create(this, prefetch_buffer, use_cache,
                                             prefetch, pin, lookup_context,
                                             index_reader);
    }
    case BlockBasedTableOptions::kHashSearch: {
      std::unique_ptr<Block> metaindex_guard;
      std::unique_ptr<InternalIterator> metaindex_iter_guard;
      auto meta_index_iter = preloaded_meta_index_iter;
      bool should_fallback = false;
      if (rep_->internal_prefix_transform.get() == nullptr) {
        ROCKS_LOG_WARN(rep_->ioptions.info_log,
                       "No prefix extractor passed in. Fall back to binary"
                       " search index.");
        should_fallback = true;
      } else if (meta_index_iter == nullptr) {
        auto s = ReadMetaIndexBlock(prefetch_buffer, &metaindex_guard,
                                    &metaindex_iter_guard);
        if (!s.ok()) {
          // we simply fall back to binary search in case there is any
          // problem with prefix hash index loading.
          ROCKS_LOG_WARN(rep_->ioptions.info_log,
                         "Unable to read the metaindex block."
                         " Fall back to binary search index.");
          should_fallback = true;
        }
        meta_index_iter = metaindex_iter_guard.get();
      }

      if (should_fallback) {
        return BinarySearchIndexReader::Create(this, prefetch_buffer, use_cache,
                                               prefetch, pin, lookup_context,
                                               index_reader);
      } else {
        return HashIndexReader::Create(this, prefetch_buffer, meta_index_iter,
                                       use_cache, prefetch, pin, lookup_context,
                                       index_reader);
      }
    }
    default: {
      std::string error_message =
          "Unrecognized index type: " + ToString(rep_->index_type);
      return Status::InvalidArgument(error_message.c_str());
    }
  }
}

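// In summary (editorial note), the factory above dispatches on
// BlockBasedTableOptions::IndexType as:
//
//   kTwoLevelIndexSearch      -> PartitionIndexReader
//   kBinarySearch,
//   kBinarySearchWithFirstKey -> BinarySearchIndexReader
//   kHashSearch               -> HashIndexReader, falling back to
//                                BinarySearchIndexReader when no prefix
//                                extractor is configured or the metaindex
//                                block cannot be read
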
uint64_t BlockBasedTable::ApproximateOffsetOf(
    const InternalIteratorBase<IndexValue>& index_iter) const {
  uint64_t result = 0;
  if (index_iter.Valid()) {
    BlockHandle handle = index_iter.value().handle;
    result = handle.offset();
  } else {
    // The iterator is past the last key in the file. If table_properties is
    // not available, approximate the offset by returning the offset of the
    // metaindex block (which is right near the end of the file).
    if (rep_->table_properties) {
      result = rep_->table_properties->data_size;
    }
    // table_properties is not present in the table.
    if (result == 0) {
      result = rep_->footer.metaindex_handle().offset();
    }
  }

  return result;
}

uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key,
                                              TableReaderCaller caller) {
  BlockCacheLookupContext context(caller);
  IndexBlockIter iiter_on_stack;
  ReadOptions ro;
  ro.total_order_seek = true;
  auto index_iter =
      NewIndexIterator(ro, /*disable_prefix_seek=*/true,
                       /*input_iter=*/&iiter_on_stack, /*get_context=*/nullptr,
                       /*lookup_context=*/&context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (index_iter != &iiter_on_stack) {
    iiter_unique_ptr.reset(index_iter);
  }

  index_iter->Seek(key);
  return ApproximateOffsetOf(*index_iter);
}

uint64_t BlockBasedTable::ApproximateSize(const Slice& start, const Slice& end,
                                          TableReaderCaller caller) {
  assert(rep_->internal_comparator.Compare(start, end) <= 0);

  BlockCacheLookupContext context(caller);
  IndexBlockIter iiter_on_stack;
  ReadOptions ro;
  ro.total_order_seek = true;
  auto index_iter =
      NewIndexIterator(ro, /*disable_prefix_seek=*/true,
                       /*input_iter=*/&iiter_on_stack, /*get_context=*/nullptr,
                       /*lookup_context=*/&context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (index_iter != &iiter_on_stack) {
    iiter_unique_ptr.reset(index_iter);
  }

  index_iter->Seek(start);
  uint64_t start_offset = ApproximateOffsetOf(*index_iter);
  index_iter->Seek(end);
  uint64_t end_offset = ApproximateOffsetOf(*index_iter);

  assert(end_offset >= start_offset);
  return end_offset - start_offset;
}

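// Example usage (an editorial sketch; start_ikey and end_ikey are
// hypothetical internal keys). Because both bounds resolve through index
// seeks, the estimate is block-granular:
//
//   uint64_t bytes = table->ApproximateSize(
//       start_ikey, end_ikey, TableReaderCaller::kUserApproximateSize);
//   // bytes == offset of end_ikey's block - offset of start_ikey's block
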
bool BlockBasedTable::TEST_FilterBlockInCache() const {
  assert(rep_ != nullptr);
  return TEST_BlockInCache(rep_->filter_handle);
}

bool BlockBasedTable::TEST_IndexBlockInCache() const {
  assert(rep_ != nullptr);

  return TEST_BlockInCache(rep_->footer.index_handle());
3082 3083
}

Status BlockBasedTable::GetKVPairsFromDataBlocks(
    std::vector<KVPairBlock>* kv_pair_blocks) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_contex=*/nullptr));

  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    // Cannot read Index Block
    return s;
  }

  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();

    if (!s.ok()) {
      break;
    }

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      // Error reading the block - Skipped
      continue;
    }

    KVPairBlock kv_pair_block;
    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        // Error reading the block - Skipped
        break;
      }
      const Slice& key = datablock_iter->key();
      const Slice& value = datablock_iter->value();
      std::string key_copy = std::string(key.data(), key.size());
      std::string value_copy = std::string(value.data(), value.size());

      kv_pair_block.push_back(
          std::make_pair(std::move(key_copy), std::move(value_copy)));
    }
    kv_pair_blocks->push_back(std::move(kv_pair_block));
  }
  return Status::OK();
}

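// Example usage (an editorial sketch), e.g. from sst_dump-style tooling
// that wants every key/value pair materialized block by block:
//
//   std::vector<KVPairBlock> kv_pair_blocks;
//   Status s = table->GetKVPairsFromDataBlocks(&kv_pair_blocks);
//   for (const KVPairBlock& block : kv_pair_blocks) {
//     for (const auto& kv : block) {
//       // kv.first is the internal key, kv.second the value; both are
//       // copies, so they outlive the table reader.
//     }
//   }
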
Status BlockBasedTable::DumpTable(WritableFile* out_file) {
  // Output Footer
  out_file->Append(
      "Footer Details:\n"
      "--------------------------------------\n"
      "  ");
  out_file->Append(rep_->footer.ToString().c_str());
  out_file->Append("\n");

  // Output MetaIndex
  out_file->Append(
      "Metaindex Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<Block> metaindex;
  std::unique_ptr<InternalIterator> metaindex_iter;
  Status s = ReadMetaIndexBlock(nullptr /* prefetch_buffer */, &metaindex,
                                &metaindex_iter);
  if (s.ok()) {
    for (metaindex_iter->SeekToFirst(); metaindex_iter->Valid();
         metaindex_iter->Next()) {
      s = metaindex_iter->status();
      if (!s.ok()) {
        return s;
      }
      if (metaindex_iter->key() == ROCKSDB_NAMESPACE::kPropertiesBlock) {
        out_file->Append("  Properties block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (metaindex_iter->key() ==
                 ROCKSDB_NAMESPACE::kCompressionDictBlock) {
        out_file->Append("  Compression dictionary block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (strstr(metaindex_iter->key().ToString().c_str(),
                        "filter.rocksdb.") != nullptr) {
        out_file->Append("  Filter block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (metaindex_iter->key() == ROCKSDB_NAMESPACE::kRangeDelBlock) {
        out_file->Append("  Range deletion block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      }
    }
    out_file->Append("\n");
  } else {
    return s;
  }

  // Output TableProperties
  const ROCKSDB_NAMESPACE::TableProperties* table_properties;
  table_properties = rep_->table_properties.get();

  if (table_properties != nullptr) {
    out_file->Append(
        "Table Properties:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(table_properties->ToString("\n  ", ": ").c_str());
    out_file->Append("\n");
  }

  if (rep_->filter) {
    out_file->Append(
        "Filter Details:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(rep_->filter->ToString().c_str());
    out_file->Append("\n");
  }

  // Output Index block
  s = DumpIndexBlock(out_file);
  if (!s.ok()) {
    return s;
  }

  // Output compression dictionary
  if (rep_->uncompression_dict_reader) {
    CachableEntry<UncompressionDict> uncompression_dict;
    s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
        nullptr /* prefetch_buffer */, false /* no_io */,
        nullptr /* get_context */, nullptr /* lookup_context */,
        &uncompression_dict);
    if (!s.ok()) {
      return s;
    }

    assert(uncompression_dict.GetValue());

    const Slice& raw_dict = uncompression_dict.GetValue()->GetRawDict();
    out_file->Append(
        "Compression Dictionary:\n"
        "--------------------------------------\n");
    out_file->Append("  size (bytes): ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(raw_dict.size()));
    out_file->Append("\n\n");
    out_file->Append("  HEX    ");
    out_file->Append(raw_dict.ToString(true).c_str());
    out_file->Append("\n\n");
  }

  // Output range deletions block
  auto* range_del_iter = NewRangeTombstoneIterator(ReadOptions());
  if (range_del_iter != nullptr) {
    range_del_iter->SeekToFirst();
    if (range_del_iter->Valid()) {
      out_file->Append(
          "Range deletions:\n"
          "--------------------------------------\n"
          "  ");
      for (; range_del_iter->Valid(); range_del_iter->Next()) {
        DumpKeyValue(range_del_iter->key(), range_del_iter->value(), out_file);
      }
      out_file->Append("\n");
    }
    delete range_del_iter;
  }
  // Output Data blocks
  s = DumpDataBlocks(out_file);

  return s;
}

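// Example usage (an editorial sketch; the output path and Env plumbing are
// hypothetical). DumpTable() renders the footer, metaindex, properties,
// filter, index, compression dictionary, range deletions, and data blocks
// in a human-readable form:
//
//   std::unique_ptr<WritableFile> out;
//   Status s = Env::Default()->NewWritableFile("/tmp/sst_dump.txt", &out,
//                                              EnvOptions());
//   if (s.ok()) {
//     s = table->DumpTable(out.get());
//   }
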
Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
  out_file->Append(
      "Index Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_contex=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Cannot read Index Block \n\n");
    return s;
  }

  out_file->Append("  Block key hex dump: Data block handle\n");
  out_file->Append("  Block key ascii\n\n");
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }
    Slice key = blockhandles_iter->key();
    Slice user_key;
    InternalKey ikey;
    if (!rep_->index_key_includes_seq) {
      user_key = key;
    } else {
      ikey.DecodeFrom(key);
      user_key = ikey.user_key();
    }

    out_file->Append("  HEX    ");
    out_file->Append(user_key.ToString(true).c_str());
    out_file->Append(": ");
    out_file->Append(blockhandles_iter->value()
                         .ToString(true, rep_->index_has_first_key)
                         .c_str());
    out_file->Append("\n");

    std::string str_key = user_key.ToString();
    std::string res_key("");
    char cspace = ' ';
    for (size_t i = 0; i < str_key.size(); i++) {
      res_key.append(&str_key[i], 1);
      res_key.append(1, cspace);
    }
    out_file->Append("  ASCII  ");
    out_file->Append(res_key.c_str());
    out_file->Append("\n  ------\n");
  }
  out_file->Append("\n");
  return Status::OK();
}

Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_contex=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Cannot read Index Block \n\n");
    return s;
  }

  uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
  uint64_t datablock_size_max = 0;
  uint64_t datablock_size_sum = 0;

  size_t block_id = 1;
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       block_id++, blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }

    BlockHandle bh = blockhandles_iter->value().handle;
    uint64_t datablock_size = bh.size();
    datablock_size_min = std::min(datablock_size_min, datablock_size);
    datablock_size_max = std::max(datablock_size_max, datablock_size);
    datablock_size_sum += datablock_size;

    out_file->Append("Data Block # ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(block_id));
    out_file->Append(" @ ");
    out_file->Append(blockhandles_iter->value().handle.ToString(true).c_str());
    out_file->Append("\n");
    out_file->Append("--------------------------------------\n");

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      out_file->Append("Error reading the block - Skipped \n\n");
      continue;
    }

    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        out_file->Append("Error reading the block - Skipped \n");
        break;
      }
      DumpKeyValue(datablock_iter->key(), datablock_iter->value(), out_file);
    }
    out_file->Append("\n");
  }

  uint64_t num_datablocks = block_id - 1;
  if (num_datablocks) {
    double datablock_size_avg =
        static_cast<double>(datablock_size_sum) / num_datablocks;
    out_file->Append("Data Block Summary:\n");
    out_file->Append("--------------------------------------");
    out_file->Append("\n  # data blocks: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(num_datablocks));
    out_file->Append("\n  min data block size: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(datablock_size_min));
    out_file->Append("\n  max data block size: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(datablock_size_max));
    out_file->Append("\n  avg data block size: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(datablock_size_avg));
    out_file->Append("\n");
  }

  return Status::OK();
}

void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,
                                   WritableFile* out_file) {
  InternalKey ikey;
  ikey.DecodeFrom(key);

  out_file->Append("  HEX    ");
  out_file->Append(ikey.user_key().ToString(true).c_str());
  out_file->Append(": ");
  out_file->Append(value.ToString(true).c_str());
  out_file->Append("\n");

  std::string str_key = ikey.user_key().ToString();
  std::string str_value = value.ToString();
  std::string res_key(""), res_value("");
  char cspace = ' ';
  for (size_t i = 0; i < str_key.size(); i++) {
    if (str_key[i] == '\0') {
      res_key.append("\\0", 2);
    } else {
      res_key.append(&str_key[i], 1);
    }
    res_key.append(1, cspace);
  }
  for (size_t i = 0; i < str_value.size(); i++) {
3423 3424 3425 3426 3427
    if (str_value[i] == '\0') {
      res_value.append("\\0", 2);
    } else {
      res_value.append(&str_value[i], 1);
    }
    res_value.append(1, cspace);
  }

  out_file->Append("  ASCII  ");
  out_file->Append(res_key.c_str());
  out_file->Append(": ");
  out_file->Append(res_value.c_str());
  out_file->Append("\n  ------\n");
}

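// For illustration (an editorial sketch), a user key "k1" with value "v1"
// would be rendered by DumpKeyValue() as:
//
//     HEX    6B31: 7631
//     ASCII  k 1 : v 1
//     ------
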
}  // namespace ROCKSDB_NAMESPACE