//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/block_based_table_reader.h"

#include <algorithm>
#include <array>
#include <limits>
#include <string>
#include <utility>
#include <vector>

#include "cache/sharded_cache.h"

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"
#include "file/file_prefetch_buffer.h"
#include "file/file_util.h"
#include "file/random_access_file_reader.h"
#include "monitoring/perf_context_imp.h"
#include "options/options_helper.h"
#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/table_properties.h"
#include "table/block_based/binary_search_index_reader.h"
#include "table/block_based/block.h"
#include "table/block_based/block_based_filter_block.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_based_table_iterator.h"
#include "table/block_based/block_prefix_index.h"
#include "table/block_based/filter_block.h"
#include "table/block_based/full_filter_block.h"
#include "table/block_based/hash_index_reader.h"
#include "table/block_based/partitioned_filter_block.h"
#include "table/block_based/partitioned_index_reader.h"
#include "table/block_fetcher.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/multiget_context.h"
#include "table/persistent_cache_helper.h"
#include "table/sst_file_writer_collectors.h"
#include "table/two_level_iterator.h"

#include "port/lang.h"
#include "test_util/sync_point.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace ROCKSDB_NAMESPACE {

extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

typedef BlockBasedTable::IndexReader IndexReader;

// Found that 256 KB readahead size provides the best performance, based on
// experiments, for auto readahead. Experiment data is in PR #3282.
const size_t BlockBasedTable::kMaxAutoReadaheadSize = 256 * 1024;

BlockBasedTable::~BlockBasedTable() {
  delete rep_;
}

std::atomic<uint64_t> BlockBasedTable::next_cache_key_id_(0);

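// BlocklikeTraits is a small compile-time adapter between the generic
// caching code and the concrete block types: for each cacheable type it
// knows how to build the in-memory object from raw BlockContents and how
// many restart points the object has (0 for non-data-block types). A rough
// sketch of how a new cacheable type would plug in (hypothetical `MyBlock`,
// for illustration only):
//
//   template <>
//   class BlocklikeTraits<MyBlock> {
//    public:
//     static MyBlock* Create(BlockContents&& contents, size_t, Statistics*,
//                            bool, const FilterPolicy*) {
//       return new MyBlock(std::move(contents));
//     }
//     static uint32_t GetNumRestarts(const MyBlock&) { return 0; }
//   };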
template <typename TBlocklike>
class BlocklikeTraits;

template <>
class BlocklikeTraits<BlockContents> {
 public:
  static BlockContents* Create(BlockContents&& contents,
                               size_t /* read_amp_bytes_per_bit */,
                               Statistics* /* statistics */,
                               bool /* using_zstd */,
                               const FilterPolicy* /* filter_policy */) {
    return new BlockContents(std::move(contents));
  }

  static uint32_t GetNumRestarts(const BlockContents& /* contents */) {
    return 0;
  }
};

template <>
class BlocklikeTraits<ParsedFullFilterBlock> {
 public:
  static ParsedFullFilterBlock* Create(BlockContents&& contents,
                                       size_t /* read_amp_bytes_per_bit */,
                                       Statistics* /* statistics */,
                                       bool /* using_zstd */,
                                       const FilterPolicy* filter_policy) {
    return new ParsedFullFilterBlock(filter_policy, std::move(contents));
  }

  static uint32_t GetNumRestarts(const ParsedFullFilterBlock& /* block */) {
    return 0;
  }
};

template <>
class BlocklikeTraits<Block> {
 public:
  static Block* Create(BlockContents&& contents, size_t read_amp_bytes_per_bit,
                       Statistics* statistics, bool /* using_zstd */,
                       const FilterPolicy* /* filter_policy */) {
    return new Block(std::move(contents), read_amp_bytes_per_bit, statistics);
  }

  static uint32_t GetNumRestarts(const Block& block) {
    return block.NumRestarts();
  }
};

template <>
class BlocklikeTraits<UncompressionDict> {
 public:
  static UncompressionDict* Create(BlockContents&& contents,
                                   size_t /* read_amp_bytes_per_bit */,
                                   Statistics* /* statistics */,
                                   bool using_zstd,
                                   const FilterPolicy* /* filter_policy */) {
    return new UncompressionDict(contents.data, std::move(contents.allocation),
                                 using_zstd);
  }

  static uint32_t GetNumRestarts(const UncompressionDict& /* dict */) {
    return 0;
  }
};

namespace {
// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result
// @param uncompression_dict Data for presetting the compression library's
//    dictionary.
template <typename TBlocklike>
Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<TBlocklike>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed, BlockType block_type,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options, size_t read_amp_bytes_per_bit,
    MemoryAllocator* memory_allocator, bool for_compaction, bool using_zstd,
    const FilterPolicy* filter_policy) {
  assert(result);

  BlockContents contents;
  BlockFetcher block_fetcher(
      file, prefetch_buffer, footer, options, handle, &contents, ioptions,
      do_uncompress, maybe_compressed, block_type, uncompression_dict,
      cache_options, memory_allocator, nullptr, for_compaction);
  Status s = block_fetcher.ReadBlockContents();
  if (s.ok()) {
    result->reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(contents), read_amp_bytes_per_bit, ioptions.statistics,
        using_zstd, filter_policy));
  }

  return s;
}
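
// Note: ReadBlockFromFile is instantiated once per TBlocklike that has a
// BlocklikeTraits specialization above (BlockContents, ParsedFullFilterBlock,
// Block, UncompressionDict); each trait simply ignores the Create() arguments
// that are irrelevant for its type.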

// Delete the entry residing in the cache.
template <class Entry>
void DeleteCachedEntry(const Slice& /*key*/, void* value) {
  auto entry = reinterpret_cast<Entry*>(value);
  delete entry;
}

// Release the cached entry and decrement its ref count.
// Do not force erase
void ReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, false /* force_erase */);
}
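
// A minimal sketch of how these helpers pair up with the block cache
// (assumed usage, mirroring the Insert()/Release() calls later in this
// file):
//
//   Cache::Handle* handle = nullptr;
//   s = cache->Insert(key, block, charge, &DeleteCachedEntry<Block>, &handle);
//   ...
//   ReleaseCachedEntry(cache, handle);  // drop the reference; the deleter
//                                       // runs once the entry is evicted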

// For hash based index, return true if prefix_extractor and
// prefix_extractor_block mismatch, false otherwise. This flag will be used
// as total_order_seek via NewIndexIterator
bool PrefixExtractorChanged(const TableProperties* table_properties,
                            const SliceTransform* prefix_extractor) {
  // BlockBasedTableOptions::kHashSearch requires prefix_extractor to be set.
  // Turn off hash index if prefix_extractor is not set; if prefix_extractor
  // is set but prefix_extractor_block is not set, also disable hash index
  if (prefix_extractor == nullptr || table_properties == nullptr ||
      table_properties->prefix_extractor_name.empty()) {
    return true;
  }

  // prefix_extractor and prefix_extractor_block are both non-empty
  if (table_properties->prefix_extractor_name.compare(
          prefix_extractor->Name()) != 0) {
    return true;
  } else {
    return false;
  }
}
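
// Illustrative example: a table written with prefix extractor
// "rocksdb.FixedPrefix.8" but read back with "rocksdb.FixedPrefix.4" in
// mutable_cf_options makes this function return true, so the reader falls
// back to total-order seek rather than trusting a mismatched hash index.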

CacheAllocationPtr CopyBufferToHeap(MemoryAllocator* allocator, Slice& buf) {
  CacheAllocationPtr heap_buf;
  heap_buf = AllocateBlock(buf.size(), allocator);
  memcpy(heap_buf.get(), buf.data(), buf.size());
  return heap_buf;
}
}  // namespace

void BlockBasedTable::UpdateCacheHitMetrics(BlockType block_type,
                                            GetContext* get_context,
                                            size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  PERF_COUNTER_ADD(block_cache_hit_count, 1);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_hit;
    get_context->get_context_stats_.num_cache_bytes_read += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_HIT);
    RecordTick(statistics, BLOCK_CACHE_BYTES_READ, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_HIT);
      }
      break;

    case BlockType::kCompressionDictionary:
      // TODO: introduce perf counter for compression dictionary hit count
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_HIT);
      }
      break;

    case BlockType::kIndex:
      PERF_COUNTER_ADD(block_cache_index_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_HIT);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_HIT);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheMissMetrics(BlockType block_type,
                                             GetContext* get_context) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce aggregate (not per-level) block cache miss count
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_miss;
  } else {
    RecordTick(statistics, BLOCK_CACHE_MISS);
  }

  // TODO: introduce perf counters for misses per block type
  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_MISS);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_MISS);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_MISS);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_MISS);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheInsertionMetrics(BlockType block_type,
                                                  GetContext* get_context,
                                                  size_t usage,
                                                  bool redundant) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce perf counters for block cache insertions
  if (get_context) {
    ++get_context->get_context_stats_.num_cache_add;
    if (redundant) {
      ++get_context->get_context_stats_.num_cache_add_redundant;
    }
    get_context->get_context_stats_.num_cache_bytes_write += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_ADD);
    if (redundant) {
      RecordTick(statistics, BLOCK_CACHE_ADD_REDUNDANT);
    }
    RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_add;
        if (redundant) {
          ++get_context->get_context_stats_.num_cache_filter_add_redundant;
        }
        get_context->get_context_stats_.num_cache_filter_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_FILTER_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_add;
        if (redundant) {
          ++get_context->get_context_stats_
                .num_cache_compression_dict_add_redundant;
        }
        get_context->get_context_stats_
            .num_cache_compression_dict_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
                   usage);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_add;
        if (redundant) {
          ++get_context->get_context_stats_.num_cache_index_add_redundant;
        }
        get_context->get_context_stats_.num_cache_index_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_INDEX_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usage);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_add;
        if (redundant) {
          ++get_context->get_context_stats_.num_cache_data_add_redundant;
        }
        get_context->get_context_stats_.num_cache_data_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
        if (redundant) {
          RecordTick(statistics, BLOCK_CACHE_DATA_ADD_REDUNDANT);
        }
        RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, usage);
      }
      break;
  }
}
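
// Note: callers pass Status::IsOkOverwritten() as `redundant`: the insert
// succeeded, but an equivalent entry was already resident (typically two
// readers racing to cache the same block), which the *_ADD_REDUNDANT
// tickers make visible.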

Cache::Handle* BlockBasedTable::GetEntryFromCache(
    Cache* block_cache, const Slice& key, BlockType block_type,
    GetContext* get_context) const {
  auto cache_handle = block_cache->Lookup(key, rep_->ioptions.statistics);

  if (cache_handle != nullptr) {
    UpdateCacheHitMetrics(block_type, get_context,
                          block_cache->GetUsage(cache_handle));
  } else {
    UpdateCacheMissMetrics(block_type, get_context);
  }

  return cache_handle;
}

// Helper function to setup the cache key's prefix for the Table.
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  rep->compressed_cache_key_prefix_size = 0;
  if (rep->table_options.block_cache != nullptr) {
    GenerateCachePrefix<Cache, FSRandomAccessFile>(
        rep->table_options.block_cache.get(), rep->file->file(),
        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
  }
  if (rep->table_options.persistent_cache != nullptr) {
    GenerateCachePrefix<PersistentCache, FSRandomAccessFile>(
        rep->table_options.persistent_cache.get(), rep->file->file(),
        &rep->persistent_cache_key_prefix[0],
        &rep->persistent_cache_key_prefix_size);
  }
  if (rep->table_options.block_cache_compressed != nullptr) {
    GenerateCachePrefix<Cache, FSRandomAccessFile>(
        rep->table_options.block_cache_compressed.get(), rep->file->file(),
        &rep->compressed_cache_key_prefix[0],
        &rep->compressed_cache_key_prefix_size);
  }
}
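
// Each of the three caches (block cache, persistent cache, compressed block
// cache) gets its own prefix derived from the cache object and the file, so
// every (cache, file) pair has a distinct key namespace; the per-block
// suffix is appended later by GetCacheKey().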

namespace {
// Return true if table_properties has `user_prop_name` with a `true` value
// or it doesn't contain this property (for backward compatibility).
bool IsFeatureSupported(const TableProperties& table_properties,
                        const std::string& user_prop_name, Logger* info_log) {
  auto& props = table_properties.user_collected_properties;
  auto pos = props.find(user_prop_name);
  // Older version doesn't have this value set. Skip this check.
  if (pos != props.end()) {
    if (pos->second == kPropFalse) {
      return false;
    } else if (pos->second != kPropTrue) {
      ROCKS_LOG_WARN(info_log, "Property %s has invalid value %s",
                     user_prop_name.c_str(), pos->second.c_str());
    }
  }
  return true;
}

// Caller has to ensure seqno is not nullptr.
Status GetGlobalSequenceNumber(const TableProperties& table_properties,
                               SequenceNumber largest_seqno,
                               SequenceNumber* seqno) {
  const auto& props = table_properties.user_collected_properties;
  const auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
  const auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);

  *seqno = kDisableGlobalSequenceNumber;
  if (version_pos == props.end()) {
    if (seqno_pos != props.end()) {
      std::array<char, 200> msg_buf;
      // This is not an external sst file, global_seqno is not supported.
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "A non-external sst file has global seqno property with value %s",
          seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  uint32_t version = DecodeFixed32(version_pos->second.c_str());
  if (version < 2) {
    if (seqno_pos != props.end() || version != 1) {
      std::array<char, 200> msg_buf;
      // This is a v1 external sst file, global_seqno is not supported.
      snprintf(msg_buf.data(), msg_buf.max_size(),
               "An external sst file with version %u has global seqno "
               "property with value %s",
               version, seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  // Since we have a plan to deprecate global_seqno, we do not return failure
  // if seqno_pos == props.end(). We rely on version_pos to detect whether the
  // SST is external.
  SequenceNumber global_seqno(0);
  if (seqno_pos != props.end()) {
    global_seqno = DecodeFixed64(seqno_pos->second.c_str());
  }
  // SstTableReader opens the table reader with kMaxSequenceNumber as
  // largest_seqno to denote it is unknown.
  if (largest_seqno < kMaxSequenceNumber) {
    if (global_seqno == 0) {
      global_seqno = largest_seqno;
    }
    if (global_seqno != largest_seqno) {
      std::array<char, 200> msg_buf;
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "An external sst file with version %u has global seqno property "
          "with value %s, while largest seqno in the file is %llu",
          version, seqno_pos->second.c_str(),
          static_cast<unsigned long long>(largest_seqno));
      return Status::Corruption(msg_buf.data());
    }
  }
  *seqno = global_seqno;

  if (global_seqno > kMaxSequenceNumber) {
    std::array<char, 200> msg_buf;
    snprintf(msg_buf.data(), msg_buf.max_size(),
             "An external sst file with version %u has global seqno property "
             "with value %llu, which is greater than kMaxSequenceNumber",
             version, static_cast<unsigned long long>(global_seqno));
    return Status::Corruption(msg_buf.data());
  }

  return Status::OK();
}
}  // namespace

Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
                                   size_t cache_key_prefix_size,
                                   const BlockHandle& handle, char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}
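
// For illustration: with a 3-byte prefix "abc" and a block at file offset
// 4096, the key is the bytes "abc" followed by the varint64 encoding of
// 4096. Distinct files get distinct prefixes and distinct blocks distinct
// offsets, so keys from different tables sharing one cache cannot collide.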

Status BlockBasedTable::Open(
    const ReadOptions& read_options, const ImmutableCFOptions& ioptions,
    const EnvOptions& env_options, const BlockBasedTableOptions& table_options,
    const InternalKeyComparator& internal_comparator,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    std::unique_ptr<TableReader>* table_reader,
    const SliceTransform* prefix_extractor,
    const bool prefetch_index_and_filter_in_cache, const bool skip_filters,
    const int level, const bool immortal_table,
    const SequenceNumber largest_seqno, const bool force_direct_prefetch,
    TailPrefetchStats* tail_prefetch_stats,
    BlockCacheTracer* const block_cache_tracer,
    size_t max_file_size_for_l0_meta_pin) {
  table_reader->reset();

  Status s;
  Footer footer;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;

  // Only retain read_options.deadline and read_options.io_timeout.
  // In the future, we may retain more options. Specifically, we ignore
  // verify_checksums and default to checksum verification anyway when
  // creating the index and filter readers.
  ReadOptions ro;
  ro.deadline = read_options.deadline;
  ro.io_timeout = read_options.io_timeout;

  // prefetch both index and filters, down to all partitions
  const bool prefetch_all = prefetch_index_and_filter_in_cache || level == 0;
  const bool preload_all = !table_options.cache_index_and_filter_blocks;

  if (!ioptions.allow_mmap_reads) {
    s = PrefetchTail(ro, file.get(), file_size, force_direct_prefetch,
                     tail_prefetch_stats, prefetch_all, preload_all,
                     &prefetch_buffer);
  } else {
    // Should not prefetch for mmap mode.
    prefetch_buffer.reset(new FilePrefetchBuffer(
        nullptr, 0, 0, false /* enable */, true /* track_min_offset */));
  }

  // Read in the following order:
  //    1. Footer
  //    2. [metaindex block]
  //    3. [meta block: properties]
  //    4. [meta block: range deletion tombstone]
  //    5. [meta block: compression dictionary]
  //    6. [meta block: index]
  //    7. [meta block: filter]
  IOOptions opts;
  s = PrepareIOFromReadOptions(ro, file->env(), opts);
  if (s.ok()) {
    s = ReadFooterFromFile(opts, file.get(), prefetch_buffer.get(), file_size,
                           &footer, kBlockBasedTableMagicNumber);
  }
  if (!s.ok()) {
    return s;
  }
  if (!BlockBasedTableSupportedVersion(footer.version())) {
    return Status::Corruption(
        "Unknown Footer version. Maybe this file was created with newer "
        "version of RocksDB?");
  }

  // We've successfully read the footer. We are ready to serve requests.
  // Better not mutate rep_ after the creation, e.g. internal_prefix_transform
  // raw pointer will be used to create HashIndexReader, whose reset may
  // access a dangling pointer.
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
                                      internal_comparator, skip_filters,
                                      file_size, level, immortal_table);
  rep->file = std::move(file);
  rep->footer = footer;
  rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
  // We need to wrap data with internal_prefix_transform to make sure it can
  // handle prefix correctly.
  if (prefix_extractor != nullptr) {
    rep->internal_prefix_transform.reset(
        new InternalKeySliceTransform(prefix_extractor));
  }
  SetupCacheKeyPrefix(rep);
  std::unique_ptr<BlockBasedTable> new_table(
      new BlockBasedTable(rep, block_cache_tracer));

  // page cache options
  rep->persistent_cache_options =
      PersistentCacheOptions(rep->table_options.persistent_cache,
                             std::string(rep->persistent_cache_key_prefix,
                                         rep->persistent_cache_key_prefix_size),
                             rep->ioptions.statistics);

  // Meta-blocks are not dictionary compressed. Explicitly set the dictionary
  // handle to null, otherwise it may be seen as uninitialized during the below
  // meta-block reads.
  rep->compression_dict_handle = BlockHandle::NullBlockHandle();

  // Read metaindex
  std::unique_ptr<Block> metaindex;
  std::unique_ptr<InternalIterator> metaindex_iter;
  s = new_table->ReadMetaIndexBlock(ro, prefetch_buffer.get(), &metaindex,
                                    &metaindex_iter);
  if (!s.ok()) {
    return s;
  }

  // Populates table_properties and some fields that depend on it,
  // such as index_type.
  s = new_table->ReadPropertiesBlock(ro, prefetch_buffer.get(),
                                     metaindex_iter.get(), largest_seqno);
  if (!s.ok()) {
    return s;
  }
  s = new_table->ReadRangeDelBlock(ro, prefetch_buffer.get(),
                                   metaindex_iter.get(), internal_comparator,
                                   &lookup_context);
  if (!s.ok()) {
    return s;
  }
  s = new_table->PrefetchIndexAndFilterBlocks(
      ro, prefetch_buffer.get(), metaindex_iter.get(), new_table.get(),
      prefetch_all, table_options, level, file_size,
      max_file_size_for_l0_meta_pin, &lookup_context);

  if (s.ok()) {
    // Update tail prefetch stats
    assert(prefetch_buffer.get() != nullptr);
    if (tail_prefetch_stats != nullptr) {
      assert(prefetch_buffer->min_offset_read() < file_size);
      tail_prefetch_stats->RecordEffectiveSize(
          static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
    }

    *table_reader = std::move(new_table);
  }

  return s;
}

Status BlockBasedTable::PrefetchTail(
    const ReadOptions& ro, RandomAccessFileReader* file, uint64_t file_size,
    bool force_direct_prefetch, TailPrefetchStats* tail_prefetch_stats,
    const bool prefetch_all, const bool preload_all,
    std::unique_ptr<FilePrefetchBuffer>* prefetch_buffer) {
  size_t tail_prefetch_size = 0;
  if (tail_prefetch_stats != nullptr) {
    // Multiple threads may get a 0 (no history) when running in parallel,
    // but it will get cleared after the first of them finishes.
    tail_prefetch_size = tail_prefetch_stats->GetSuggestedPrefetchSize();
  }
  if (tail_prefetch_size == 0) {
    // Before read footer, readahead backwards to prefetch data. Do more
    // readahead if we're going to read index/filter.
    // TODO: This may incorrectly select small readahead in case partitioned
    // index/filter is enabled and top-level partition pinning is enabled.
    // That's because we need to issue readahead before we read the properties,
    // at which point we don't yet know the index type.
    tail_prefetch_size = prefetch_all || preload_all ? 512 * 1024 : 4 * 1024;
  }
  size_t prefetch_off;
  size_t prefetch_len;
  if (file_size < tail_prefetch_size) {
    prefetch_off = 0;
    prefetch_len = static_cast<size_t>(file_size);
  } else {
    prefetch_off = static_cast<size_t>(file_size - tail_prefetch_size);
    prefetch_len = tail_prefetch_size;
  }
  TEST_SYNC_POINT_CALLBACK("BlockBasedTable::Open::TailPrefetchLen",
                           &tail_prefetch_size);
  Status s;
  // TODO should not have this special logic in the future.
  if (!file->use_direct_io() && !force_direct_prefetch) {
    prefetch_buffer->reset(new FilePrefetchBuffer(
        nullptr, 0, 0, false /* enable */, true /* track_min_offset */));
    s = file->Prefetch(prefetch_off, prefetch_len);
  } else {
    prefetch_buffer->reset(new FilePrefetchBuffer(
        nullptr, 0, 0, true /* enable */, true /* track_min_offset */));
    IOOptions opts;
    s = PrepareIOFromReadOptions(ro, file->env(), opts);
    if (s.ok()) {
      s = (*prefetch_buffer)->Prefetch(opts, file, prefetch_off, prefetch_len);
    }
  }

  return s;
}
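
// For example, with no prefetch history and
// cache_index_and_filter_blocks=false (preload_all == true), opening a table
// issues a single 512 KB tail read that typically covers the footer,
// metaindex, properties, index and filter in one I/O instead of several
// small reads.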

Status BlockBasedTable::TryReadPropertiesWithGlobalSeqno(
    const ReadOptions& ro, FilePrefetchBuffer* prefetch_buffer,
    const Slice& handle_value, TableProperties** table_properties) {
  assert(table_properties != nullptr);
  // If this is an external SST file ingested with write_global_seqno set to
  // true, then we expect the checksum mismatch because checksum was written
  // by SstFileWriter, but its global seqno in the properties block may have
  // been changed during ingestion. In this case, we read the properties
  // block, copy it to a memory buffer, change the global seqno to its
  // original value, i.e. 0, and verify the checksum again.
  BlockHandle props_block_handle;
  CacheAllocationPtr tmp_buf;
  Status s = ReadProperties(ro, handle_value, rep_->file.get(), prefetch_buffer,
                            rep_->footer, rep_->ioptions, table_properties,
                            false /* verify_checksum */, &props_block_handle,
                            &tmp_buf, false /* compression_type_missing */,
                            nullptr /* memory_allocator */);
  if (s.ok() && tmp_buf) {
    const auto seqno_pos_iter =
        (*table_properties)
            ->properties_offsets.find(
                ExternalSstFilePropertyNames::kGlobalSeqno);
    size_t block_size = static_cast<size_t>(props_block_handle.size());
    if (seqno_pos_iter != (*table_properties)->properties_offsets.end()) {
      uint64_t global_seqno_offset = seqno_pos_iter->second;
      EncodeFixed64(
          tmp_buf.get() + global_seqno_offset - props_block_handle.offset(), 0);
    }
    s = ROCKSDB_NAMESPACE::VerifyBlockChecksum(
        rep_->footer.checksum(), tmp_buf.get(), block_size,
        rep_->file->file_name(), props_block_handle.offset());
  }
  return s;
}

Status BlockBasedTable::ReadPropertiesBlock(
    const ReadOptions& ro, FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* meta_iter, const SequenceNumber largest_seqno) {
  bool found_properties_block = true;
  Status s;
  s = SeekToPropertiesBlock(meta_iter, &found_properties_block);

  if (!s.ok()) {
    ROCKS_LOG_WARN(rep_->ioptions.info_log,
                   "Error when seeking to properties block from file: %s",
                   s.ToString().c_str());
  } else if (found_properties_block) {
    s = meta_iter->status();
    TableProperties* table_properties = nullptr;
    if (s.ok()) {
      s = ReadProperties(
          ro, meta_iter->value(), rep_->file.get(), prefetch_buffer,
          rep_->footer, rep_->ioptions, &table_properties,
          true /* verify_checksum */, nullptr /* ret_block_handle */,
          nullptr /* ret_block_contents */,
          false /* compression_type_missing */, nullptr /* memory_allocator */);
    }
    IGNORE_STATUS_IF_ERROR(s);

    if (s.IsCorruption()) {
      s = TryReadPropertiesWithGlobalSeqno(
          ro, prefetch_buffer, meta_iter->value(), &table_properties);
      IGNORE_STATUS_IF_ERROR(s);
    }
    std::unique_ptr<TableProperties> props_guard;
    if (table_properties != nullptr) {
      props_guard.reset(table_properties);
    }

    if (!s.ok()) {
      ROCKS_LOG_WARN(rep_->ioptions.info_log,
                     "Encountered error while reading data from properties "
                     "block %s",
                     s.ToString().c_str());
    } else {
      assert(table_properties != nullptr);
      rep_->table_properties.reset(props_guard.release());
      rep_->blocks_maybe_compressed =
          rep_->table_properties->compression_name !=
          CompressionTypeToString(kNoCompression);
      rep_->blocks_definitely_zstd_compressed =
          (rep_->table_properties->compression_name ==
               CompressionTypeToString(kZSTD) ||
           rep_->table_properties->compression_name ==
               CompressionTypeToString(kZSTDNotFinalCompression));
    }
  } else {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Cannot find Properties block from file.");
  }
#ifndef ROCKSDB_LITE
  if (rep_->table_properties) {
    ParseSliceTransform(rep_->table_properties->prefix_extractor_name,
                        &(rep_->table_prefix_extractor));
  }
#endif  // ROCKSDB_LITE

  // Read the table properties, if provided.
  if (rep_->table_properties) {
    rep_->whole_key_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kWholeKeyFiltering,
                           rep_->ioptions.info_log);
    rep_->prefix_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kPrefixFiltering,
                           rep_->ioptions.info_log);

    rep_->index_key_includes_seq =
        rep_->table_properties->index_key_is_user_key == 0;
    rep_->index_value_is_full =
        rep_->table_properties->index_value_is_delta_encoded == 0;

    // Update index_type with the true type.
    // If table properties don't contain index type, we assume that the table
    // is in very old format and has kBinarySearch index type.
    auto& props = rep_->table_properties->user_collected_properties;
    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
    if (pos != props.end()) {
      rep_->index_type = static_cast<BlockBasedTableOptions::IndexType>(
          DecodeFixed32(pos->second.c_str()));
    }

    rep_->index_has_first_key =
        rep_->index_type == BlockBasedTableOptions::kBinarySearchWithFirstKey;

    s = GetGlobalSequenceNumber(*(rep_->table_properties), largest_seqno,
                                &(rep_->global_seqno));
    if (!s.ok()) {
      ROCKS_LOG_ERROR(rep_->ioptions.info_log, "%s", s.ToString().c_str());
    }
  }
  return s;
}

Status BlockBasedTable::ReadRangeDelBlock(
    const ReadOptions& read_options, FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* meta_iter,
    const InternalKeyComparator& internal_comparator,
    BlockCacheLookupContext* lookup_context) {
  Status s;
  bool found_range_del_block;
  BlockHandle range_del_handle;
  s = SeekToRangeDelBlock(meta_iter, &found_range_del_block, &range_del_handle);
  if (!s.ok()) {
    ROCKS_LOG_WARN(
        rep_->ioptions.info_log,
        "Error when seeking to range delete tombstones block from file: %s",
        s.ToString().c_str());
  } else if (found_range_del_block && !range_del_handle.IsNull()) {
    std::unique_ptr<InternalIterator> iter(NewDataBlockIterator<DataBlockIter>(
        read_options, range_del_handle,
        /*input_iter=*/nullptr, BlockType::kRangeDeletion,
        /*get_context=*/nullptr, lookup_context, Status(), prefetch_buffer));
    assert(iter != nullptr);
    s = iter->status();
    if (!s.ok()) {
      ROCKS_LOG_WARN(
          rep_->ioptions.info_log,
          "Encountered error while reading data from range del block %s",
          s.ToString().c_str());
      IGNORE_STATUS_IF_ERROR(s);
    } else {
      rep_->fragmented_range_dels =
          std::make_shared<FragmentedRangeTombstoneList>(std::move(iter),
                                                         internal_comparator);
    }
  }
  return s;
}

Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
    const ReadOptions& ro, FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* meta_iter, BlockBasedTable* new_table, bool prefetch_all,
    const BlockBasedTableOptions& table_options, const int level,
    size_t file_size, size_t max_file_size_for_l0_meta_pin,
    BlockCacheLookupContext* lookup_context) {
  Status s;

  // Find filter handle and filter type
  if (rep_->filter_policy) {
    for (auto filter_type :
         {Rep::FilterType::kFullFilter, Rep::FilterType::kPartitionedFilter,
          Rep::FilterType::kBlockFilter}) {
      std::string prefix;
      switch (filter_type) {
        case Rep::FilterType::kFullFilter:
          prefix = kFullFilterBlockPrefix;
          break;
        case Rep::FilterType::kPartitionedFilter:
          prefix = kPartitionedFilterBlockPrefix;
          break;
        case Rep::FilterType::kBlockFilter:
          prefix = kFilterBlockPrefix;
          break;
        default:
          assert(0);
      }
      std::string filter_block_key = prefix;
      filter_block_key.append(rep_->filter_policy->Name());
      if (FindMetaBlock(meta_iter, filter_block_key, &rep_->filter_handle)
              .ok()) {
        rep_->filter_type = filter_type;
        break;
      }
    }
  }

  // Find compression dictionary handle
  bool found_compression_dict = false;
  s = SeekToCompressionDictBlock(meta_iter, &found_compression_dict,
                                 &rep_->compression_dict_handle);
  if (!s.ok()) {
    return s;
  }

  BlockBasedTableOptions::IndexType index_type = rep_->index_type;

  const bool use_cache = table_options.cache_index_and_filter_blocks;

  // pin both index and filters, down to all partitions.
  const bool pin_all =
      rep_->table_options.pin_l0_filter_and_index_blocks_in_cache &&
      level == 0 && file_size <= max_file_size_for_l0_meta_pin;

  // prefetch the first level of index
  const bool prefetch_index =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
  // pin the first level of index
  const bool pin_index =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);

  std::unique_ptr<IndexReader> index_reader;
  s = new_table->CreateIndexReader(ro, prefetch_buffer, meta_iter, use_cache,
                                   prefetch_index, pin_index, lookup_context,
                                   &index_reader);
  if (!s.ok()) {
    return s;
  }

  rep_->index_reader = std::move(index_reader);

  // The partitions of partitioned index are always stored in cache. They
  // hence follow the configuration for pin and prefetch regardless of
  // the value of cache_index_and_filter_blocks
  if (prefetch_all) {
    s = rep_->index_reader->CacheDependencies(ro, pin_all);
  }
  if (!s.ok()) {
    return s;
  }

  // prefetch the first level of filter
  const bool prefetch_filter =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       rep_->filter_type == Rep::FilterType::kPartitionedFilter);
  // Partition filters cannot be enabled without partition indexes
  assert(!prefetch_filter || prefetch_index);
  // pin the first level of filter
  const bool pin_filter =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  rep_->filter_type == Rep::FilterType::kPartitionedFilter);

  if (rep_->filter_policy) {
    auto filter = new_table->CreateFilterBlockReader(
        ro, prefetch_buffer, use_cache, prefetch_filter, pin_filter,
        lookup_context);
    if (filter) {
      // Refer to the comment above about partitioned indexes always being
      // cached
      if (prefetch_all) {
        filter->CacheDependencies(ro, pin_all);
      }

      rep_->filter = std::move(filter);
    }
  }

  if (!rep_->compression_dict_handle.IsNull()) {
    std::unique_ptr<UncompressionDictReader> uncompression_dict_reader;
    s = UncompressionDictReader::Create(this, ro, prefetch_buffer, use_cache,
                                        prefetch_all, pin_all, lookup_context,
                                        &uncompression_dict_reader);
    if (!s.ok()) {
      return s;
    }

    rep_->uncompression_dict_reader = std::move(uncompression_dict_reader);
  }

  assert(s.ok());
  return s;
}
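
// Illustrative configuration: with a partitioned index and filter,
// pin_l0_filter_and_index_blocks_in_cache=true, and an L0 file no larger
// than max_file_size_for_l0_meta_pin, pin_all is true, so the partitions
// loaded by CacheDependencies() above remain pinned in cache for the
// lifetime of this table reader.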

void BlockBasedTable::SetupForCompaction() {
  switch (rep_->ioptions.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->file()->Hint(FSRandomAccessFile::kNormal);
      break;
    case Options::SEQUENTIAL:
      rep_->file->file()->Hint(FSRandomAccessFile::kSequential);
      break;
    case Options::WILLNEED:
      rep_->file->file()->Hint(FSRandomAccessFile::kWillNeed);
      break;
    default:
      assert(false);
  }
}

std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
    const {
  return rep_->table_properties;
}

size_t BlockBasedTable::ApproximateMemoryUsage() const {
  size_t usage = 0;
  if (rep_->filter) {
    usage += rep_->filter->ApproximateMemoryUsage();
  }
  if (rep_->index_reader) {
    usage += rep_->index_reader->ApproximateMemoryUsage();
  }
  if (rep_->uncompression_dict_reader) {
    usage += rep_->uncompression_dict_reader->ApproximateMemoryUsage();
  }
  return usage;
}

// Load the meta-index-block from the file. On success, return the loaded
// metaindex block and its iterator.
Status BlockBasedTable::ReadMetaIndexBlock(
    const ReadOptions& ro, FilePrefetchBuffer* prefetch_buffer,
    std::unique_ptr<Block>* metaindex_block,
    std::unique_ptr<InternalIterator>* iter) {
  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
  // it is an empty block.
  std::unique_ptr<Block> metaindex;
  Status s = ReadBlockFromFile(
      rep_->file.get(), prefetch_buffer, rep_->footer, ro,
      rep_->footer.metaindex_handle(), &metaindex, rep_->ioptions,
      true /* decompress */, true /*maybe_compressed*/, BlockType::kMetaIndex,
      UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options,
      0 /* read_amp_bytes_per_bit */, GetMemoryAllocator(rep_->table_options),
      false /* for_compaction */, rep_->blocks_definitely_zstd_compressed,
      nullptr /* filter_policy */);

  if (!s.ok()) {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Encountered error while reading data from meta index"
                    " block %s",
                    s.ToString().c_str());
    return s;
  }

  *metaindex_block = std::move(metaindex);
  // meta block uses bytewise comparator.
  iter->reset(metaindex_block->get()->NewDataIterator(
      BytewiseComparator(), kDisableGlobalSequenceNumber));
  return Status::OK();
}

template <typename TBlocklike>
Status BlockBasedTable::GetDataBlockFromCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    const ReadOptions& read_options, CachableEntry<TBlocklike>* block,
    const UncompressionDict& uncompression_dict, BlockType block_type,
    GetContext* get_context) const {
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  assert(block);
  assert(block->IsEmpty());

  Status s;
  BlockContents* compressed_block = nullptr;
  Cache::Handle* block_cache_compressed_handle = nullptr;

  // Lookup uncompressed cache first
  if (block_cache != nullptr) {
    auto cache_handle = GetEntryFromCache(block_cache, block_cache_key,
                                          block_type, get_context);
    if (cache_handle != nullptr) {
      block->SetCachedValue(
          reinterpret_cast<TBlocklike*>(block_cache->Value(cache_handle)),
          block_cache, cache_handle);
      return s;
    }
  }

  // If not found, search from the compressed block cache.
  assert(block->IsEmpty());

  if (block_cache_compressed == nullptr) {
    return s;
  }

  assert(!compressed_block_cache_key.empty());
  block_cache_compressed_handle =
      block_cache_compressed->Lookup(compressed_block_cache_key);

  Statistics* statistics = rep_->ioptions.statistics;

  // if we found in the compressed cache, then uncompress and insert into
  // uncompressed cache
  if (block_cache_compressed_handle == nullptr) {
    RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
    return s;
  }

  // found compressed block
  RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
  compressed_block = reinterpret_cast<BlockContents*>(
      block_cache_compressed->Value(block_cache_compressed_handle));
  CompressionType compression_type = compressed_block->get_compression_type();
  assert(compression_type != kNoCompression);

  // Retrieve the uncompressed contents into a new buffer
  BlockContents contents;
  UncompressionContext context(compression_type);
  UncompressionInfo info(context, uncompression_dict, compression_type);
  s = UncompressBlockContents(
      info, compressed_block->data.data(), compressed_block->data.size(),
      &contents, rep_->table_options.format_version, rep_->ioptions,
      GetMemoryAllocator(rep_->table_options));

  // Insert uncompressed block into block cache
  if (s.ok()) {
    std::unique_ptr<TBlocklike> block_holder(
        BlocklikeTraits<TBlocklike>::Create(
            std::move(contents), read_amp_bytes_per_bit, statistics,
            rep_->blocks_definitely_zstd_compressed,
            rep_->table_options.filter_policy.get()));  // uncompressed block

    if (block_cache != nullptr && block_holder->own_bytes() &&
        read_options.fill_cache) {
      size_t charge = block_holder->ApproximateMemoryUsage();
      Cache::Handle* cache_handle = nullptr;
      s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                              &DeleteCachedEntry<TBlocklike>, &cache_handle);
      if (s.ok()) {
        assert(cache_handle != nullptr);
        block->SetCachedValue(block_holder.release(), block_cache,
                              cache_handle);

        UpdateCacheInsertionMetrics(block_type, get_context, charge,
                                    s.IsOkOverwritten());
      } else {
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
      }
    } else {
      block->SetOwnedValue(block_holder.release());
    }
  }

  // Release hold on compressed cache entry
  block_cache_compressed->Release(block_cache_compressed_handle);
  return s;
}
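
// Lookup order above: (1) the uncompressed block cache; (2) the compressed
// block cache, in which case the block is uncompressed and, when
// read_options.fill_cache allows, promoted into the uncompressed cache so
// the next reader is served by step (1).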

template <typename TBlocklike>
Status BlockBasedTable::PutDataBlockToCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    CachableEntry<TBlocklike>* cached_block, BlockContents* raw_block_contents,
    CompressionType raw_block_comp_type,
    const UncompressionDict& uncompression_dict,
    MemoryAllocator* memory_allocator, BlockType block_type,
    GetContext* get_context) const {
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  const uint32_t format_version = rep_->table_options.format_version;
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  const Cache::Priority priority =
      rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
              (block_type == BlockType::kFilter ||
               block_type == BlockType::kCompressionDictionary ||
               block_type == BlockType::kIndex)
          ? Cache::Priority::HIGH
          : Cache::Priority::LOW;
  assert(cached_block);
  assert(cached_block->IsEmpty());

  Status s;
  Statistics* statistics = ioptions.statistics;

  std::unique_ptr<TBlocklike> block_holder;
  if (raw_block_comp_type != kNoCompression) {
    // Retrieve the uncompressed contents into a new buffer
    BlockContents uncompressed_block_contents;
    UncompressionContext context(raw_block_comp_type);
    UncompressionInfo info(context, uncompression_dict, raw_block_comp_type);
    s = UncompressBlockContents(info, raw_block_contents->data.data(),
                                raw_block_contents->data.size(),
                                &uncompressed_block_contents, format_version,
                                ioptions, memory_allocator);
    if (!s.ok()) {
      return s;
    }

    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(uncompressed_block_contents), read_amp_bytes_per_bit,
        statistics, rep_->blocks_definitely_zstd_compressed,
        rep_->table_options.filter_policy.get()));
  } else {
    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(*raw_block_contents), read_amp_bytes_per_bit, statistics,
        rep_->blocks_definitely_zstd_compressed,
        rep_->table_options.filter_policy.get()));
  }

  // Insert compressed block into compressed block cache.
  // Release the hold on the compressed cache entry immediately.
  if (block_cache_compressed != nullptr &&
      raw_block_comp_type != kNoCompression && raw_block_contents != nullptr &&
      raw_block_contents->own_bytes()) {
#ifndef NDEBUG
    assert(raw_block_contents->is_raw_block);
#endif  // NDEBUG

    // We cannot directly put raw_block_contents because this could point to
    // an object in the stack.
    BlockContents* block_cont_for_comp_cache =
        new BlockContents(std::move(*raw_block_contents));
    s = block_cache_compressed->Insert(
        compressed_block_cache_key, block_cont_for_comp_cache,
        block_cont_for_comp_cache->ApproximateMemoryUsage(),
        &DeleteCachedEntry<BlockContents>);
    if (s.ok()) {
      // Prevent the code below from deleting this cached block.
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
    } else {
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
      delete block_cont_for_comp_cache;
    }
  }

  // insert into uncompressed block cache
  if (block_cache != nullptr && block_holder->own_bytes()) {
    size_t charge = block_holder->ApproximateMemoryUsage();
    Cache::Handle* cache_handle = nullptr;
    s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                            &DeleteCachedEntry<TBlocklike>, &cache_handle,
                            priority);
    if (s.ok()) {
      assert(cache_handle != nullptr);
      cached_block->SetCachedValue(block_holder.release(), block_cache,
                                   cache_handle);

      UpdateCacheInsertionMetrics(block_type, get_context, charge,
                                  s.IsOkOverwritten());
    } else {
      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
    }
  } else {
    cached_block->SetOwnedValue(block_holder.release());
  }

  return s;
}

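// Creates the filter block reader matching the filter type recorded in the
// table: partitioned, block-based, or full filter. Returns an empty pointer
// when the table was built without a filter.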
std::unique_ptr<FilterBlockReader> BlockBasedTable::CreateFilterBlockReader(
    const ReadOptions& ro, FilePrefetchBuffer* prefetch_buffer, bool use_cache,
    bool prefetch, bool pin, BlockCacheLookupContext* lookup_context) {
  auto& rep = rep_;
  auto filter_type = rep->filter_type;
  if (filter_type == Rep::FilterType::kNoFilter) {
    return std::unique_ptr<FilterBlockReader>();
  }

  assert(rep->filter_policy);

  switch (filter_type) {
    case Rep::FilterType::kPartitionedFilter:
      return PartitionedFilterBlockReader::Create(
          this, ro, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kBlockFilter:
      return BlockBasedFilterBlockReader::Create(
          this, ro, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kFullFilter:
      return FullFilterBlockReader::Create(this, ro, prefetch_buffer, use_cache,
                                           prefetch, pin, lookup_context);

    default:
      // filter_type is either kNoFilter (exited the function at the first if),
      // or it must be covered in this switch block
      assert(false);
      return std::unique_ptr<FilterBlockReader>();
  }
}

// disable_prefix_seek should be set to true when prefix_extractor found in SST
// differs from the one in mutable_cf_options and index type is HashBasedIndex
InternalIteratorBase<IndexValue>* BlockBasedTable::NewIndexIterator(
    const ReadOptions& read_options, bool disable_prefix_seek,
    IndexBlockIter* input_iter, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  assert(rep_ != nullptr);
  assert(rep_->index_reader != nullptr);

  // We don't return pinned data from index blocks, so no need
  // to set `block_contents_pinned`.
  return rep_->index_reader->NewIterator(read_options, disable_prefix_seek,
                                         input_iter, get_context,
                                         lookup_context);
}

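// Template specializations that construct the appropriate iterator type over
// a block: a DataBlockIter for data blocks and an IndexBlockIter for index
// blocks.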
template <>
DataBlockIter* BlockBasedTable::InitBlockIterator<DataBlockIter>(
    const Rep* rep, Block* block, BlockType block_type,
    DataBlockIter* input_iter, bool block_contents_pinned) {
  return block->NewDataIterator(rep->internal_comparator.user_comparator(),
                                rep->get_global_seqno(block_type), input_iter,
                                rep->ioptions.statistics,
                                block_contents_pinned);
}

template <>
IndexBlockIter* BlockBasedTable::InitBlockIterator<IndexBlockIter>(
    const Rep* rep, Block* block, BlockType block_type,
    IndexBlockIter* input_iter, bool block_contents_pinned) {
  return block->NewIndexIterator(
      rep->internal_comparator.user_comparator(),
      rep->get_global_seqno(block_type), input_iter, rep->ioptions.statistics,
      /* total_order_seek */ true, rep->index_has_first_key,
      rep->index_key_includes_seq, rep->index_value_is_full,
      block_contents_pinned);
}

// If contents is nullptr, this function looks up the block caches for the
// data block referenced by handle, and reads the block from disk if
// necessary. If contents is non-null, it skips the cache lookup and disk
// read, since the caller has already read it. In both cases, if
// ro.fill_cache is true, it inserts the block into the block cache.
template <typename TBlocklike>
Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    BlockContents* contents) const {
  assert(block_entry != nullptr);
  const bool no_io = (ro.read_tier == kBlockCacheTier);
  Cache* block_cache = rep_->table_options.block_cache.get();
  Cache* block_cache_compressed =
      rep_->table_options.block_cache_compressed.get();

  // First, try to get the block from the cache
  //
  // If either block cache is enabled, we'll try to read from it.
  Status s;
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice key /* key to the block cache */;
  Slice ckey /* key to the compressed block cache */;
  bool is_cache_hit = false;
  if (block_cache != nullptr || block_cache_compressed != nullptr) {
    // create key for block cache
    if (block_cache != nullptr) {
      key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                        handle, cache_key);
    }

    if (block_cache_compressed != nullptr) {
      ckey = GetCacheKey(rep_->compressed_cache_key_prefix,
                         rep_->compressed_cache_key_prefix_size, handle,
                         compressed_cache_key);
    }

    if (!contents) {
      s = GetDataBlockFromCache(key, ckey, block_cache, block_cache_compressed,
                                ro, block_entry, uncompression_dict, block_type,
                                get_context);
      if (block_entry->GetValue()) {
        // TODO(haoyu): Differentiate cache hit on uncompressed block cache and
        // compressed block cache.
        is_cache_hit = true;
      }
    }

    // Can't find the block from the cache. If I/O is allowed, read from the
    // file.
    if (block_entry->GetValue() == nullptr && !no_io && ro.fill_cache) {
      Statistics* statistics = rep_->ioptions.statistics;
      const bool maybe_compressed =
          block_type != BlockType::kFilter &&
          block_type != BlockType::kCompressionDictionary &&
          rep_->blocks_maybe_compressed;
      const bool do_uncompress = maybe_compressed && !block_cache_compressed;
      CompressionType raw_block_comp_type;
      BlockContents raw_block_contents;
      if (!contents) {
        StopWatch sw(rep_->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
        BlockFetcher block_fetcher(
            rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle,
            &raw_block_contents, rep_->ioptions, do_uncompress,
            maybe_compressed, block_type, uncompression_dict,
            rep_->persistent_cache_options,
            GetMemoryAllocator(rep_->table_options),
            GetMemoryAllocatorForCompressedBlock(rep_->table_options));
        s = block_fetcher.ReadBlockContents();
        raw_block_comp_type = block_fetcher.get_compression_type();
        contents = &raw_block_contents;
      } else {
        raw_block_comp_type = contents->get_compression_type();
      }

      if (s.ok()) {
        // If filling cache is allowed and a cache is configured, try to put
        // the block into the cache.
        s = PutDataBlockToCache(
            key, ckey, block_cache, block_cache_compressed, block_entry,
            contents, raw_block_comp_type, uncompression_dict,
            GetMemoryAllocator(rep_->table_options), block_type, get_context);
      }
    }
  }

  // Fill lookup_context.
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled() &&
      lookup_context) {
    size_t usage = 0;
    uint64_t nkeys = 0;
    if (block_entry->GetValue()) {
      // Approximate the number of keys in the block using restarts.
      nkeys =
          rep_->table_options.block_restart_interval *
          BlocklikeTraits<TBlocklike>::GetNumRestarts(*block_entry->GetValue());
      usage = block_entry->GetValue()->ApproximateMemoryUsage();
    }
    TraceType trace_block_type = TraceType::kTraceMax;
    switch (block_type) {
      case BlockType::kData:
        trace_block_type = TraceType::kBlockTraceDataBlock;
        break;
      case BlockType::kFilter:
        trace_block_type = TraceType::kBlockTraceFilterBlock;
        break;
      case BlockType::kCompressionDictionary:
        trace_block_type = TraceType::kBlockTraceUncompressionDictBlock;
        break;
      case BlockType::kRangeDeletion:
        trace_block_type = TraceType::kBlockTraceRangeDeletionBlock;
        break;
      case BlockType::kIndex:
        trace_block_type = TraceType::kBlockTraceIndexBlock;
        break;
      default:
        // This cannot happen.
        assert(false);
        break;
    }
    bool no_insert = no_io || !ro.fill_cache;
    if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(
            trace_block_type, lookup_context->caller)) {
      // Defer logging the access to Get() and MultiGet() to trace additional
      // information, e.g., referenced_key_exist_in_block.

      // Make a copy of the block key here since it will be logged later.
      lookup_context->FillLookupContext(
          is_cache_hit, no_insert, trace_block_type,
          /*block_size=*/usage, /*block_key=*/key.ToString(), nkeys);
    } else {
      // Avoid making copy of block_key and cf_name when constructing the
      // access record.
      BlockCacheTraceRecord access_record(
          rep_->ioptions.env->NowMicros(),
          /*block_key=*/"", trace_block_type,
          /*block_size=*/usage, rep_->cf_id_for_tracing(),
          /*cf_name=*/"", rep_->level_for_tracing(),
          rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
          no_insert, lookup_context->get_id,
          lookup_context->get_from_user_specified_snapshot,
          /*referenced_key=*/"");
      block_cache_tracer_->WriteBlockAccess(access_record, key,
                                            rep_->cf_name_for_tracing(),
                                            lookup_context->referenced_key);
    }
  }

  assert(s.ok() || block_entry->GetValue() == nullptr);
  return s;
}

// This function reads multiple data blocks from disk using Env::MultiRead()
// and optionally inserts them into the block cache. It uses the scratch
// buffer provided by the caller, which is contiguous. If scratch is a nullptr
// it allocates a separate buffer for each block. Typically, if the blocks
// need to be uncompressed and there is no compressed block cache, callers
// can allocate a temporary scratch buffer in order to minimize memory
// allocations.
// If options.fill_cache is true, it inserts the blocks into cache. If it's
// false and scratch is non-null and the blocks are uncompressed, it copies
// the buffers to heap. In any case, the CachableEntry<Block> returned will
// own the data bytes.
// If compression is enabled and also there is no compressed block cache,
// the adjacent blocks are read out in one IO (combined read)
// batch - A MultiGetRange with only those keys with unique data blocks not
//         found in cache
// handles - A vector of block handles. Some of them may be null handles
// scratch - An optional contiguous buffer to read compressed blocks into
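// Example of a combined read: two adjacent compressed blocks with handles
// {offset=0, size=4096} and {offset=4101, size=8192} (each block is followed
// by a 5-byte trailer holding the compression type and checksum) are fetched
// with a single FSReadRequest {offset=0, len=12298}; each block is then
// sliced out of the shared buffer at its recorded req_offset. The numbers
// here are illustrative only.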
void BlockBasedTable::RetrieveMultipleBlocks(
    const ReadOptions& options, const MultiGetRange* batch,
    const autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE>* handles,
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE>* statuses,
    autovector<CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE>* results,
    char* scratch, const UncompressionDict& uncompression_dict) const {
  RandomAccessFileReader* file = rep_->file.get();
  const Footer& footer = rep_->footer;
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  size_t read_amp_bytes_per_bit = rep_->table_options.read_amp_bytes_per_bit;
  MemoryAllocator* memory_allocator = GetMemoryAllocator(rep_->table_options);

  if (ioptions.allow_mmap_reads) {
    size_t idx_in_batch = 0;
    for (auto mget_iter = batch->begin(); mget_iter != batch->end();
         ++mget_iter, ++idx_in_batch) {
      BlockCacheLookupContext lookup_data_block_context(
          TableReaderCaller::kUserMultiGet);
      const BlockHandle& handle = (*handles)[idx_in_batch];
      if (handle.IsNull()) {
        continue;
      }

      (*statuses)[idx_in_batch] =
          RetrieveBlock(nullptr, options, handle, uncompression_dict,
                        &(*results)[idx_in_batch], BlockType::kData,
                        mget_iter->get_context, &lookup_data_block_context,
                        /* for_compaction */ false, /* use_cache */ true);
    }
    return;
  }

  // In direct IO mode, blocks share the direct io buffer.
  // Otherwise, blocks share the scratch buffer.
  const bool use_shared_buffer = file->use_direct_io() || scratch != nullptr;

  autovector<FSReadRequest, MultiGetContext::MAX_BATCH_SIZE> read_reqs;
  size_t buf_offset = 0;
  size_t idx_in_batch = 0;

  uint64_t prev_offset = 0;
  size_t prev_len = 0;
  autovector<size_t, MultiGetContext::MAX_BATCH_SIZE> req_idx_for_block;
  autovector<size_t, MultiGetContext::MAX_BATCH_SIZE> req_offset_for_block;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];
    if (handle.IsNull()) {
      continue;
    }

    size_t prev_end = static_cast<size_t>(prev_offset) + prev_len;

    // If the current block is adjacent to the previous one and, at the same
    // time, compression is enabled and there is no compressed cache, we
    // combine the two block reads into one.
    // We don't combine block reads here in direct IO mode, because when doing
    // direct IO read, the block requests will be realigned and merged when
    // necessary.
    if (use_shared_buffer && !file->use_direct_io() &&
        prev_end == handle.offset()) {
      req_offset_for_block.emplace_back(prev_len);
      prev_len += block_size(handle);
    } else {
      // No compression, or the current block is not adjacent to the previous
      // one:
      // Step 1, create a new request for the previous blocks
      if (prev_len != 0) {
        FSReadRequest req;
        req.offset = prev_offset;
        req.len = prev_len;
        if (file->use_direct_io()) {
          req.scratch = nullptr;
        } else if (use_shared_buffer) {
          req.scratch = scratch + buf_offset;
          buf_offset += req.len;
        } else {
          req.scratch = new char[req.len];
        }
        read_reqs.emplace_back(req);
      }

      // Step 2, remember the previous block info
      prev_offset = handle.offset();
      prev_len = block_size(handle);
      req_offset_for_block.emplace_back(0);
    }
    req_idx_for_block.emplace_back(read_reqs.size());
  }
  // Handle the last block and process the pending last request
  if (prev_len != 0) {
    FSReadRequest req;
    req.offset = prev_offset;
    req.len = prev_len;
    if (file->use_direct_io()) {
      req.scratch = nullptr;
    } else if (use_shared_buffer) {
      req.scratch = scratch + buf_offset;
    } else {
      req.scratch = new char[req.len];
    }
    read_reqs.emplace_back(req);
  }

  AlignedBuf direct_io_buf;
  {
    IOOptions opts;
    IOStatus s = PrepareIOFromReadOptions(options, file->env(), opts);
    if (s.IsTimedOut()) {
      for (FSReadRequest& req : read_reqs) {
        req.status = s;
      }
    } else {
      file->MultiRead(opts, &read_reqs[0], read_reqs.size(), &direct_io_buf);
    }
  }

  idx_in_batch = 0;
  size_t valid_batch_idx = 0;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];

    if (handle.IsNull()) {
      continue;
    }

    assert(valid_batch_idx < req_idx_for_block.size());
    assert(valid_batch_idx < req_offset_for_block.size());
    assert(req_idx_for_block[valid_batch_idx] < read_reqs.size());
    size_t& req_idx = req_idx_for_block[valid_batch_idx];
    size_t& req_offset = req_offset_for_block[valid_batch_idx];
    valid_batch_idx++;
    FSReadRequest& req = read_reqs[req_idx];
    Status s = req.status;
    if (s.ok()) {
      if ((req.result.size() != req.len) ||
          (req_offset + block_size(handle) > req.result.size())) {
        s = Status::Corruption(
            "truncated block read from " + rep_->file->file_name() +
            " offset " + ToString(handle.offset()) + ", expected " +
            ToString(req.len) + " bytes, got " + ToString(req.result.size()));
      }
    }

    BlockContents raw_block_contents;
    if (s.ok()) {
      if (!use_shared_buffer) {
        // We allocated a buffer for this block. Give ownership of it to
        // BlockContents so it can free the memory
        assert(req.result.data() == req.scratch);
        assert(req.result.size() == block_size(handle));
        assert(req_offset == 0);
        std::unique_ptr<char[]> raw_block(req.scratch);
        raw_block_contents = BlockContents(std::move(raw_block), handle.size());
      } else {
        // We used the scratch buffer or direct io buffer, which is shared by
        // the blocks, so raw_block_contents does not take ownership.
        raw_block_contents =
            BlockContents(Slice(req.result.data() + req_offset, handle.size()));
      }
#ifndef NDEBUG
      raw_block_contents.is_raw_block = true;
#endif

      if (options.verify_checksums) {
        PERF_TIMER_GUARD(block_checksum_time);
        const char* data = req.result.data();
        // Since the scratch might be shared, the offset of the data block in
        // the buffer might not be 0. req.result.data() only points to the
        // begin address of each read request, so we need to add the offset
        // in each read request. Checksum is stored in the block trailer,
        // beyond the payload size.
        s = ROCKSDB_NAMESPACE::VerifyBlockChecksum(
            footer.checksum(), data + req_offset, handle.size(),
            rep_->file->file_name(), handle.offset());
        TEST_SYNC_POINT_CALLBACK("RetrieveMultipleBlocks:VerifyChecksum", &s);
      }
    } else if (!use_shared_buffer) {
      // Free the allocated scratch buffer.
      delete[] req.scratch;
    }

    if (s.ok()) {
      // When the blocks share the same underlying buffer (scratch or direct
      // io buffer), a compressed block ends up on the heap as a byproduct of
      // uncompression; an uncompressed block must be copied to the heap
      // manually before it can be inserted into the block cache.
      CompressionType compression_type =
          raw_block_contents.get_compression_type();
      if (use_shared_buffer && compression_type == kNoCompression) {
        Slice raw = Slice(req.result.data() + req_offset, block_size(handle));
        raw_block_contents = BlockContents(
            CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
            handle.size());
#ifndef NDEBUG
        raw_block_contents.is_raw_block = true;
#endif
      }
    }

anand76 已提交
1784 1785 1786 1787 1788 1789 1790 1791
    if (s.ok()) {
      if (options.fill_cache) {
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet);
        CachableEntry<Block>* block_entry = &(*results)[idx_in_batch];
        // MaybeReadBlockAndLoadToCache will insert into the block caches if
        // necessary. Since we're passing the raw block contents, it will
        // avoid looking up the block cache
1792 1793 1794 1795
        s = MaybeReadBlockAndLoadToCache(
            nullptr, options, handle, uncompression_dict, block_entry,
            BlockType::kData, mget_iter->get_context,
            &lookup_data_block_context, &raw_block_contents);
1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811

        // block_entry value could be null if no block cache is present, i.e
        // BlockBasedTableOptions::no_block_cache is true and no compressed
        // block cache is configured. In that case, fall
        // through and set up the block explicitly
        if (block_entry->GetValue() != nullptr) {
          continue;
        }
      }

      CompressionType compression_type =
          raw_block_contents.get_compression_type();
      BlockContents contents;
      if (compression_type != kNoCompression) {
        UncompressionContext context(compression_type);
        UncompressionInfo info(context, uncompression_dict, compression_type);
1812 1813
        s = UncompressBlockContents(info, req.result.data() + req_offset,
                                    handle.size(), &contents, footer.version(),
1814
                                    rep_->ioptions, memory_allocator);
A
anand76 已提交
1815
      } else {
1816 1817 1818 1819
        // There are two cases here:
        // 1) caller uses the shared buffer (scratch or direct io buffer);
        // 2) we use the requst buffer.
        // If scratch buffer or direct io buffer is used, we ensure that
1820 1821 1822 1823
        // all raw blocks are copyed to the heap as single blocks. If scratch
        // buffer is not used, we also have no combined read, so the raw
        // block can be used directly.
        contents = std::move(raw_block_contents);
A
anand76 已提交
1824
      }
1825
      if (s.ok()) {
1826 1827
        (*results)[idx_in_batch].SetOwnedValue(new Block(
            std::move(contents), read_amp_bytes_per_bit, ioptions.statistics));
1828
      }
A
anand76 已提交
1829 1830 1831 1832 1833
    }
    (*statuses)[idx_in_batch] = s;
  }
}

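// Retrieves a single block. When use_cache is true, the block cache(s) are
// consulted first through MaybeReadBlockAndLoadToCache; on a miss, or when
// use_cache is false, the block is read from the file and returned as an
// owned, uncached entry. Returns Status::Incomplete if I/O would be needed
// but the read options only permit cache reads.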
template <typename TBlocklike>
Status BlockBasedTable::RetrieveBlock(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const {
  assert(block_entry);
  assert(block_entry->IsEmpty());

  Status s;
  if (use_cache) {
    s = MaybeReadBlockAndLoadToCache(prefetch_buffer, ro, handle,
                                     uncompression_dict, block_entry,
                                     block_type, get_context, lookup_context,
                                     /*contents=*/nullptr);

    if (!s.ok()) {
      return s;
    }

    if (block_entry->GetValue() != nullptr) {
      assert(s.ok());
      return s;
    }
  }

  assert(block_entry->IsEmpty());

  const bool no_io = ro.read_tier == kBlockCacheTier;
  if (no_io) {
    return Status::Incomplete("no blocking io");
  }

  const bool maybe_compressed =
      block_type != BlockType::kFilter &&
      block_type != BlockType::kCompressionDictionary &&
      rep_->blocks_maybe_compressed;
  const bool do_uncompress = maybe_compressed;
  std::unique_ptr<TBlocklike> block;

  {
    StopWatch sw(rep_->ioptions.env, rep_->ioptions.statistics,
                 READ_BLOCK_GET_MICROS);
    s = ReadBlockFromFile(
        rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle, &block,
        rep_->ioptions, do_uncompress, maybe_compressed, block_type,
        uncompression_dict, rep_->persistent_cache_options,
        block_type == BlockType::kData
            ? rep_->table_options.read_amp_bytes_per_bit
            : 0,
        GetMemoryAllocator(rep_->table_options), for_compaction,
        rep_->blocks_definitely_zstd_compressed,
        rep_->table_options.filter_policy.get());
  }

  if (!s.ok()) {
    return s;
  }

  block_entry->SetOwnedValue(block.release());

  assert(s.ok());
  return s;
}

// Explicitly instantiate templates for each of the "blocklike" types we use.
// This makes it possible to keep the template definitions in the .cc file.
template Status BlockBasedTable::RetrieveBlock<BlockContents>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<BlockContents>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

template Status BlockBasedTable::RetrieveBlock<ParsedFullFilterBlock>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<ParsedFullFilterBlock>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

template Status BlockBasedTable::RetrieveBlock<Block>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

template Status BlockBasedTable::RetrieveBlock<UncompressionDict>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<UncompressionDict>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

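// Holds the state a partitioned index iterator needs to create second-level
// iterators: the owning table and a map from partition offset to the pinned
// index partition block.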
BlockBasedTable::PartitionedIndexIteratorState::PartitionedIndexIteratorState(
    const BlockBasedTable* table,
    std::unordered_map<uint64_t, CachableEntry<Block>>* block_map)
    : table_(table), block_map_(block_map) {}

InternalIteratorBase<IndexValue>*
BlockBasedTable::PartitionedIndexIteratorState::NewSecondaryIterator(
    const BlockHandle& handle) {
  // Return a block iterator on the index partition
  auto block = block_map_->find(handle.offset());
  // block_map_ may not contain the partition if the block cache did not have
  // space for it.
  if (block != block_map_->end()) {
    const Rep* rep = table_->get_rep();
    assert(rep);

    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    return block->second.GetValue()->NewIndexIterator(
        rep->internal_comparator.user_comparator(),
        rep->get_global_seqno(BlockType::kIndex), nullptr, kNullStats, true,
        rep->index_has_first_key, rep->index_key_includes_seq,
        rep->index_value_is_full);
  }
  // Create an empty iterator
  return new IndexBlockIter();
}

// This will be broken if the user specifies an unusual implementation
// of Options.comparator, or if the user specifies an unusual
// definition of prefixes in BlockBasedTableOptions.filter_policy.
// In particular, we require the following three properties:
//
// 1) key.starts_with(prefix(key))
// 2) Compare(prefix(key), key) <= 0.
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
//
// If read_options.read_tier == kBlockCacheTier, this method will do no I/O and
// will return true if the filter block is not in memory and not found in block
// cache.
//
// REQUIRES: this method shouldn't be called while the DB lock is held.
bool BlockBasedTable::PrefixMayMatch(
    const Slice& internal_key, const ReadOptions& read_options,
    const SliceTransform* options_prefix_extractor,
    const bool need_upper_bound_check,
    BlockCacheLookupContext* lookup_context) const {
  if (!rep_->filter_policy) {
    return true;
  }

  const SliceTransform* prefix_extractor;

  if (rep_->table_prefix_extractor == nullptr) {
    if (need_upper_bound_check) {
      return true;
    }
    prefix_extractor = options_prefix_extractor;
  } else {
    prefix_extractor = rep_->table_prefix_extractor.get();
  }
  auto user_key = ExtractUserKey(internal_key);
  if (!prefix_extractor->InDomain(user_key)) {
    return true;
  }

  bool may_match = true;
  Status s;

  // First, try to check with the full filter
  FilterBlockReader* const filter = rep_->filter.get();
  bool filter_checked = true;
  if (filter != nullptr) {
    const bool no_io = read_options.read_tier == kBlockCacheTier;

    if (!filter->IsBlockBased()) {
      const Slice* const const_ikey_ptr = &internal_key;
      may_match = filter->RangeMayExist(
          read_options.iterate_upper_bound, user_key, prefix_extractor,
          rep_->internal_comparator.user_comparator(), const_ikey_ptr,
          &filter_checked, need_upper_bound_check, no_io, lookup_context);
    } else {
      // if prefix_extractor changed for block based filter, skip filter
      if (need_upper_bound_check) {
        return true;
      }
      auto prefix = prefix_extractor->Transform(user_key);
      InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
      auto internal_prefix = internal_key_prefix.Encode();

      // To prevent any io operation in this method, we set `read_tier` to make
      // sure we always read index or filter only when they have already been
      // loaded to memory.
      ReadOptions no_io_read_options;
      no_io_read_options.read_tier = kBlockCacheTier;

      // Then, try to find it within each block
      // we already know prefix_extractor and prefix_extractor_name must match
      // because `CheckPrefixMayMatch` first checks `check_filter_ == true`
      std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
          no_io_read_options,
          /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
          /*get_context=*/nullptr, lookup_context));
      iiter->Seek(internal_prefix);

      if (!iiter->Valid()) {
        // we're past end of file
        // if it's incomplete, it means that we avoided I/O
        // and we're not really sure that we're past the end
        // of the file
        may_match = iiter->status().IsIncomplete();
      } else if ((rep_->index_key_includes_seq ? ExtractUserKey(iiter->key())
                                               : iiter->key())
                     .starts_with(ExtractUserKey(internal_prefix))) {
        // we need to check for this subtle case because our only
        // guarantee is that "the key is a string >= last key in that data
        // block" according to the doc/table_format.txt spec.
        //
        // Suppose iiter->key() starts with the desired prefix; it is not
        // necessarily the case that the corresponding data block will
        // contain the prefix, since iiter->key() need not be in the
        // block.  However, the next data block may contain the prefix, so
        // we return true to play it safe.
        may_match = true;
      } else if (filter->IsBlockBased()) {
        // iiter->key() does NOT start with the desired prefix.  Because
        // Seek() finds the first key that is >= the seek target, this
        // means that iiter->key() > prefix.  Thus, any data blocks coming
        // after the data block corresponding to iiter->key() cannot
        // possibly contain the key.  Thus, the corresponding data block
        // is the only one that could potentially contain the prefix.
        BlockHandle handle = iiter->value().handle;
        may_match = filter->PrefixMayMatch(
            prefix, prefix_extractor, handle.offset(), no_io,
            /*const_key_ptr=*/nullptr, /*get_context=*/nullptr, lookup_context);
      }
    }
  }

  if (filter_checked) {
    Statistics* statistics = rep_->ioptions.statistics;
    RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
    if (!may_match) {
      RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
    }
  }

  return may_match;
}

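// Returns a new iterator over the table contents. When an arena is supplied,
// the iterator is placement-constructed inside the arena so its lifetime is
// tied to the arena rather than to an individual heap allocation.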
InternalIterator* BlockBasedTable::NewIterator(
    const ReadOptions& read_options, const SliceTransform* prefix_extractor,
    Arena* arena, bool skip_filters, TableReaderCaller caller,
    size_t compaction_readahead_size, bool allow_unprepared_value) {
  BlockCacheLookupContext lookup_context{caller};
  bool need_upper_bound_check =
      read_options.auto_prefix_mode ||
      PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
  std::unique_ptr<InternalIteratorBase<IndexValue>> index_iter(NewIndexIterator(
      read_options,
      need_upper_bound_check &&
          rep_->index_type == BlockBasedTableOptions::kHashSearch,
      /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context));
  if (arena == nullptr) {
    return new BlockBasedTableIterator(
        this, read_options, rep_->internal_comparator, std::move(index_iter),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, caller,
        compaction_readahead_size, allow_unprepared_value);
  } else {
    auto* mem = arena->AllocateAligned(sizeof(BlockBasedTableIterator));
    return new (mem) BlockBasedTableIterator(
        this, read_options, rep_->internal_comparator, std::move(index_iter),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, caller,
        compaction_readahead_size, allow_unprepared_value);
  }
}

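// Returns an iterator over the table's fragmented range tombstones as of the
// read snapshot, or nullptr if the table has none.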
FragmentedRangeTombstoneIterator* BlockBasedTable::NewRangeTombstoneIterator(
    const ReadOptions& read_options) {
  if (rep_->fragmented_range_dels == nullptr) {
    return nullptr;
  }
  SequenceNumber snapshot = kMaxSequenceNumber;
  if (read_options.snapshot != nullptr) {
    snapshot = read_options.snapshot->GetSequenceNumber();
  }
  return new FragmentedRangeTombstoneIterator(
      rep_->fragmented_range_dels, rep_->internal_comparator, snapshot);
}

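// Probes the full (non-block-based) filter for a single key. Returns false
// only when the filter definitively rules the key out, matching on the whole
// key or on its prefix depending on the table configuration.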
bool BlockBasedTable::FullFilterKeyMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    const Slice& internal_key, const bool no_io,
    const SliceTransform* prefix_extractor, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return true;
  }
  Slice user_key = ExtractUserKey(internal_key);
  const Slice* const const_ikey_ptr = &internal_key;
  bool may_match = true;
  if (rep_->whole_key_filtering) {
    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    Slice user_key_without_ts = StripTimestampFromUserKey(user_key, ts_sz);
    may_match =
        filter->KeyMayMatch(user_key_without_ts, prefix_extractor, kNotValid,
                            no_io, const_ikey_ptr, get_context, lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0 &&
             prefix_extractor->InDomain(user_key) &&
             !filter->PrefixMayMatch(prefix_extractor->Transform(user_key),
                                     prefix_extractor, kNotValid, no_io,
                                     const_ikey_ptr, get_context,
                                     lookup_context)) {
    may_match = false;
  }
  if (may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, rep_->level);
  }
  return may_match;
}

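// MultiGet counterpart of FullFilterKeyMayMatch: probes the filter for a
// whole batch of keys in one pass, pruning filtered keys from the range and
// recording bloom filter statistics in bulk.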
void BlockBasedTable::FullFilterKeysMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    MultiGetRange* range, const bool no_io,
    const SliceTransform* prefix_extractor,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return;
  }
  uint64_t before_keys = range->KeysLeft();
  assert(before_keys > 0);  // Caller should ensure
  if (rep_->whole_key_filtering) {
    filter->KeysMayMatch(range, prefix_extractor, kNotValid, no_io,
                         lookup_context);
    uint64_t after_keys = range->KeysLeft();
    if (after_keys) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE,
                 after_keys);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, after_keys,
                                rep_->level);
    }
    uint64_t filtered_keys = before_keys - after_keys;
    if (filtered_keys) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL, filtered_keys);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, filtered_keys,
                                rep_->level);
    }
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0) {
    filter->PrefixesMayMatch(range, prefix_extractor, kNotValid, false,
                             lookup_context);
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_PREFIX_CHECKED,
               before_keys);
    uint64_t after_keys = range->KeysLeft();
    uint64_t filtered_keys = before_keys - after_keys;
    if (filtered_keys) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_PREFIX_USEFUL,
                 filtered_keys);
    }
  }
}

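// Point lookup: checks the full filter first, then walks the index and the
// candidate data blocks, feeding each entry to the GetContext saver until it
// signals completion.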
Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
                            GetContext* get_context,
                            const SliceTransform* prefix_extractor,
                            bool skip_filters) {
  assert(key.size() >= 8);  // key must be internal key
  assert(get_context != nullptr);
  Status s;
  const bool no_io = read_options.read_tier == kBlockCacheTier;

  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;

  // First check the full filter.
  // If the full filter is not useful, then go into each block.
  uint64_t tracing_get_id = get_context->get_tracing_get_id();
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserGet, tracing_get_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
    // Trace the key since it contains both user key and sequence number.
    lookup_context.referenced_key = key.ToString();
    lookup_context.get_from_user_specified_snapshot =
        read_options.snapshot != nullptr;
  }
  TEST_SYNC_POINT("BlockBasedTable::Get:BeforeFilterMatch");
  const bool may_match =
      FullFilterKeyMayMatch(read_options, filter, key, no_io, prefix_extractor,
                            get_context, &lookup_context);
  TEST_SYNC_POINT("BlockBasedTable::Get:AfterFilterMatch");
  if (!may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
  } else {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    bool matched = false;  // if such user key matched a key in SST
    bool done = false;
    for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
      IndexValue v = iiter->value();

      bool not_exist_in_filter =
          filter != nullptr && filter->IsBlockBased() == true &&
          !filter->KeyMayMatch(ExtractUserKeyAndStripTimestamp(key, ts_sz),
                               prefix_extractor, v.handle.offset(), no_io,
                               /*const_ikey_ptr=*/nullptr, get_context,
                               &lookup_context);

      if (not_exist_in_filter) {
        // Not found
        // TODO: think about interaction with Merge. If a user key cannot
        // cross one data block, we should be fine.
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
        break;
      }

      if (!v.first_internal_key.empty() && !skip_filters &&
          UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                  .Compare(ExtractUserKey(key),
                           ExtractUserKey(v.first_internal_key)) < 0) {
        // The requested key falls between the highest key in the previous
        // block and the lowest key in the current block.
        break;
      }

      BlockCacheLookupContext lookup_data_block_context{
          TableReaderCaller::kUserGet, tracing_get_id,
          /*get_from_user_specified_snapshot=*/read_options.snapshot !=
              nullptr};
      bool does_referenced_key_exist = false;
      DataBlockIter biter;
      uint64_t referenced_data_size = 0;
      NewDataBlockIterator<DataBlockIter>(
          read_options, v.handle, &biter, BlockType::kData, get_context,
          &lookup_data_block_context,
          /*s=*/Status(), /*prefetch_buffer*/ nullptr);

      if (no_io && biter.status().IsIncomplete()) {
        // couldn't get block from block_cache
        // Update Saver.state to Found because we are only looking for
        // whether we can guarantee the key is not there when "no_io" is set
        get_context->MarkKeyMayExist();
        break;
      }
      if (!biter.status().ok()) {
        s = biter.status();
        break;
      }

      bool may_exist = biter.SeekForGet(key);
      // If user-specified timestamp is supported, we cannot end the search
      // just because hash index lookup indicates the key+ts does not exist.
      if (!may_exist && ts_sz == 0) {
        // HashSeek cannot find the key in this block, and the iter is not at
        // the end of the block, i.e. the key cannot be in the following
        // blocks either. In this case, the seek_key cannot be found, so we
        // break from the top level for-loop.
        done = true;
      } else {
        // Call the *saver function on each entry/block until it returns false
        for (; biter.Valid(); biter.Next()) {
          ParsedInternalKey parsed_key;
          if (!ParseInternalKey(biter.key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }

          if (!get_context->SaveValue(
                  parsed_key, biter.value(), &matched,
                  biter.IsValuePinned() ? &biter : nullptr)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size = biter.key().size() + biter.value().size();
            }
            done = true;
            break;
          }
        }
        s = biter.status();
      }
      // Write the block cache access record.
      if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
        // Avoid making copy of block_key, cf_name, and referenced_key when
        // constructing the access record.
        Slice referenced_key;
        if (does_referenced_key_exist) {
          referenced_key = biter.key();
        } else {
          referenced_key = key;
        }
        BlockCacheTraceRecord access_record(
            rep_->ioptions.env->NowMicros(),
            /*block_key=*/"", lookup_data_block_context.block_type,
            lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
            /*cf_name=*/"", rep_->level_for_tracing(),
            rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
            lookup_data_block_context.is_cache_hit,
            lookup_data_block_context.no_insert,
            lookup_data_block_context.get_id,
            lookup_data_block_context.get_from_user_specified_snapshot,
            /*referenced_key=*/"", referenced_data_size,
            lookup_data_block_context.num_keys_in_block,
            does_referenced_key_exist);
        block_cache_tracer_->WriteBlockAccess(
            access_record, lookup_data_block_context.block_key,
            rep_->cf_name_for_tracing(), referenced_key);
      }

      if (done) {
        // Avoid the extra Next which is expensive in two-level indexes
        break;
      }
    }
    if (matched && filter != nullptr && !filter->IsBlockBased()) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                rep_->level);
    }
    if (s.ok() && !iiter->status().IsNotFound()) {
      s = iiter->status();
    }
  }

  return s;
}

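// Batched point lookup: like Get(), but the whole batch is pruned through the
// filter at once, and all data blocks missing from the block cache are read
// with a single MultiRead before each key is sought within its block.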
using MultiGetRange = MultiGetContext::Range;
void BlockBasedTable::MultiGet(const ReadOptions& read_options,
                               const MultiGetRange* mget_range,
                               const SliceTransform* prefix_extractor,
                               bool skip_filters) {
  if (mget_range->empty()) {
    // Caller should ensure non-empty (performance bug)
    assert(false);
    return;  // Nothing to do
  }

  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;
  MultiGetRange sst_file_range(*mget_range, mget_range->begin(),
                               mget_range->end());

  // First check the full filter.
  // If the full filter is not useful, then go into each block.
  const bool no_io = read_options.read_tier == kBlockCacheTier;
  uint64_t tracing_mget_id = BlockCacheTraceHelper::kReservedGetId;
  if (sst_file_range.begin()->get_context) {
    tracing_mget_id = sst_file_range.begin()->get_context->get_tracing_get_id();
  }
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserMultiGet, tracing_mget_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
  FullFilterKeysMayMatch(read_options, filter, &sst_file_range, no_io,
                         prefix_extractor, &lookup_context);

  if (!sst_file_range.empty()) {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         sst_file_range.begin()->get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    uint64_t offset = std::numeric_limits<uint64_t>::max();
    autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE> block_handles;
    autovector<CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE> results;
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE> statuses;
    char stack_buf[kMultiGetReadStackBufSize];
    std::unique_ptr<char[]> block_buf;
    {
      MultiGetRange data_block_range(sst_file_range, sst_file_range.begin(),
                                     sst_file_range.end());

      CachableEntry<UncompressionDict> uncompression_dict;
      Status uncompression_dict_status;
      bool uncompression_dict_inited = false;
      size_t total_len = 0;
      ReadOptions ro = read_options;
      ro.read_tier = kBlockCacheTier;

      for (auto miter = data_block_range.begin();
           miter != data_block_range.end(); ++miter) {
        const Slice& key = miter->ikey;
        iiter->Seek(miter->ikey);

        IndexValue v;
        if (iiter->Valid()) {
          v = iiter->value();
        }
        if (!iiter->Valid() ||
            (!v.first_internal_key.empty() && !skip_filters &&
             UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                     .Compare(ExtractUserKey(key),
                              ExtractUserKey(v.first_internal_key)) < 0)) {
          // The requested key falls between the highest key in the previous
          // block and the lowest key in the current block.
          if (!iiter->status().IsNotFound()) {
            *(miter->s) = iiter->status();
          }
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }

        if (!uncompression_dict_inited && rep_->uncompression_dict_reader) {
          uncompression_dict_status =
              rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
                  nullptr /* prefetch_buffer */, no_io,
                  sst_file_range.begin()->get_context, &lookup_context,
                  &uncompression_dict);
          uncompression_dict_inited = true;
        }

        if (!uncompression_dict_status.ok()) {
          assert(!uncompression_dict_status.IsNotFound());
          *(miter->s) = uncompression_dict_status;
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }

        statuses.emplace_back();
        results.emplace_back();
        if (v.handle.offset() == offset) {
          // We're going to reuse the block for this key later on. No need to
          // look it up now. Place a null handle
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
          continue;
        }
2496 2497 2498
        // Lookup the cache for the given data block referenced by an index
        // iterator value (i.e BlockHandle). If it exists in the cache,
        // initialize block to the contents of the data block.
A
anand76 已提交
2499 2500
        offset = v.handle.offset();
        BlockHandle handle = v.handle;
2501 2502
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet);
2503 2504 2505
        const UncompressionDict& dict = uncompression_dict.GetValue()
                                            ? *uncompression_dict.GetValue()
                                            : UncompressionDict::GetEmptyDict();
        Status s = RetrieveBlock(
            nullptr, ro, handle, dict, &(results.back()), BlockType::kData,
            miter->get_context, &lookup_data_block_context,
            /* for_compaction */ false, /* use_cache */ true);
        if (s.IsIncomplete()) {
          s = Status::OK();
        }
        if (s.ok() && !results.back().IsEmpty()) {
          // Found it in the cache. Add a null handle to indicate there is
          // nothing to read from disk.
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
        } else {
          block_handles.emplace_back(handle);
          total_len += block_size(handle);
        }
      }

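      // A nonzero total_len means at least one block missed the cache. When
      // conditions allow, all missed blocks are read into one contiguous
      // scratch buffer sized for the whole batch (see below).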
      if (total_len) {
        char* scratch = nullptr;
        const UncompressionDict& dict = uncompression_dict.GetValue()
                                            ? *uncompression_dict.GetValue()
                                            : UncompressionDict::GetEmptyDict();
        assert(uncompression_dict_inited || !rep_->uncompression_dict_reader);
        assert(uncompression_dict_status.ok());
        // If using direct IO, then scratch is not used, so keep it nullptr.
        // If the blocks need to be uncompressed and we don't need the
        // compressed blocks, then we can use a contiguous block of
        // memory to read in all the blocks as it will be temporary
        // storage
        // 1. If blocks are compressed and compressed block cache is there,
        //    alloc heap bufs
        // 2. If blocks are uncompressed, alloc heap bufs
        // 3. If blocks are compressed and no compressed block cache, use
        //    stack buf
        if (!rep_->file->use_direct_io() &&
            rep_->table_options.block_cache_compressed == nullptr &&
            rep_->blocks_maybe_compressed) {
          if (total_len <= kMultiGetReadStackBufSize) {
            scratch = stack_buf;
          } else {
            scratch = new char[total_len];
            block_buf.reset(scratch);
          }
        }
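        // Otherwise scratch stays nullptr and each missed block is read into
        // its own buffer (the shared scratch cannot be used with direct IO,
        // or when the block contents must outlive this temporary storage).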
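        // RetrieveMultipleBlocks issues the file reads for all missed blocks
        // in one batch and fills in one status and one result per block.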
        RetrieveMultipleBlocks(read_options, &data_block_range, &block_handles,
                               &statuses, &results, scratch, dict);
      }
    }

    DataBlockIter first_biter;
    DataBlockIter next_biter;
    size_t idx_in_batch = 0;
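    // Second pass: resolve each key, first against the data block fetched
    // above (first_biter), then against subsequent data blocks reached
    // through the index iterator (next_biter) if the search must continue.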
    for (auto miter = sst_file_range.begin(); miter != sst_file_range.end();
         ++miter) {
      Status s;
      GetContext* get_context = miter->get_context;
      const Slice& key = miter->ikey;
      bool matched = false;  // if such user key matched a key in SST
      bool done = false;
      bool first_block = true;
      do {
        DataBlockIter* biter = nullptr;
        bool reusing_block = true;
        uint64_t referenced_data_size = 0;
        bool does_referenced_key_exist = false;
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet, tracing_mget_id,
            /*get_from_user_specified_snapshot=*/read_options.snapshot !=
                nullptr);
        if (first_block) {
          if (!block_handles[idx_in_batch].IsNull() ||
              !results[idx_in_batch].IsEmpty()) {
            first_biter.Invalidate(Status::OK());
            NewDataBlockIterator<DataBlockIter>(
                read_options, results[idx_in_batch], &first_biter,
                statuses[idx_in_batch]);
            reusing_block = false;
          }
          biter = &first_biter;
          idx_in_batch++;
        } else {
          IndexValue v = iiter->value();
          if (!v.first_internal_key.empty() && !skip_filters &&
              UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                      .Compare(ExtractUserKey(key),
                               ExtractUserKey(v.first_internal_key)) < 0) {
            // The requested key falls between the highest key in the previous
            // block and the lowest key in the current block.
            break;
          }

          next_biter.Invalidate(Status::OK());
          NewDataBlockIterator<DataBlockIter>(
              read_options, iiter->value().handle, &next_biter,
              BlockType::kData, get_context, &lookup_data_block_context,
              Status(), nullptr);
          biter = &next_biter;
          reusing_block = false;
        }
        if (read_options.read_tier == kBlockCacheTier &&
            biter->status().IsIncomplete()) {
          // couldn't get block from block_cache
          // Update Saver.state to Found because we are only looking for
          // whether we can guarantee the key is not there when "no_io" is set
          get_context->MarkKeyMayExist();
          break;
        }
        if (!biter->status().ok()) {
          s = biter->status();
          break;
        }

        bool may_exist = biter->SeekForGet(key);
        if (!may_exist) {
          // HashSeek cannot find the key in this block, and since the iter is
          // not at the end of the block, the key cannot be in the following
          // blocks either. In this case, the seek_key cannot be found, so we
          // break from the top level for-loop.
          break;
        }
        // Call the *saver function on each entry/block until it returns false
        for (; biter->Valid(); biter->Next()) {
          ParsedInternalKey parsed_key;
          Cleanable dummy;
          Cleanable* value_pinner = nullptr;
          if (!ParseInternalKey(biter->key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }
          if (biter->IsValuePinned()) {
            if (reusing_block) {
              Cache* block_cache = rep_->table_options.block_cache.get();
              assert(biter->cache_handle() != nullptr);
              block_cache->Ref(biter->cache_handle());
              dummy.RegisterCleanup(&ReleaseCachedEntry, block_cache,
                                    biter->cache_handle());
              value_pinner = &dummy;
            } else {
              value_pinner = biter;
            }
          }
          if (!get_context->SaveValue(parsed_key, biter->value(), &matched,
                                      value_pinner)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size =
                  biter->key().size() + biter->value().size();
            }
            done = true;
            break;
          }
          s = biter->status();
        }
        // Write the block cache access.
        if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
          // Avoid making copy of block_key, cf_name, and referenced_key when
          // constructing the access record.
          Slice referenced_key;
          if (does_referenced_key_exist) {
            referenced_key = biter->key();
          } else {
            referenced_key = key;
          }
          BlockCacheTraceRecord access_record(
              rep_->ioptions.env->NowMicros(),
              /*block_key=*/"", lookup_data_block_context.block_type,
              lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
              /*cf_name=*/"", rep_->level_for_tracing(),
              rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
              lookup_data_block_context.is_cache_hit,
              lookup_data_block_context.no_insert,
              lookup_data_block_context.get_id,
              lookup_data_block_context.get_from_user_specified_snapshot,
              /*referenced_key=*/"", referenced_data_size,
              lookup_data_block_context.num_keys_in_block,
              does_referenced_key_exist);
          block_cache_tracer_->WriteBlockAccess(
              access_record, lookup_data_block_context.block_key,
              rep_->cf_name_for_tracing(), referenced_key);
        }
        s = biter->status();
        if (done) {
          // Avoid the extra Next which is expensive in two-level indexes
          break;
        }
        if (first_block) {
          iiter->Seek(key);
        }
        first_block = false;
        iiter->Next();
      } while (iiter->Valid());

      if (matched && filter != nullptr && !filter->IsBlockBased()) {
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                  rep_->level);
      }
      if (s.ok() && !iiter->status().IsNotFound()) {
        s = iiter->status();
      }
      *(miter->s) = s;
    }
  }
}

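// Prefetch loads the data blocks overlapping [begin, end] into the block
// cache. A minimal usage sketch (the bounds are internal keys; the names
// below are hypothetical):
//
//   Slice begin_key = ..., end_key = ...;
//   Status s = table->Prefetch(&begin_key, &end_key);
//
// A null begin starts from the first block; a null end prefetches through
// the end of the file.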
Status BlockBasedTable::Prefetch(const Slice* const begin,
                                 const Slice* const end) {
  auto& comparator = rep_->internal_comparator;
  UserComparatorWrapper user_comparator(comparator.user_comparator());
  // pre-condition
  if (begin && end && comparator.Compare(*begin, *end) > 0) {
    return Status::InvalidArgument(*begin, *end);
  }
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  IndexBlockIter iiter_on_stack;
  auto iiter = NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                                &iiter_on_stack, /*get_context=*/nullptr,
                                &lookup_context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }

  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }

  // indicates if we are on the last page that needs to be prefetched
  bool prefetching_boundary_page = false;

  for (begin ? iiter->Seek(*begin) : iiter->SeekToFirst(); iiter->Valid();
       iiter->Next()) {
    BlockHandle block_handle = iiter->value().handle;
    const bool is_user_key = !rep_->index_key_includes_seq;
    if (end &&
        ((!is_user_key && comparator.Compare(iiter->key(), *end) >= 0) ||
         (is_user_key &&
          user_comparator.Compare(iiter->key(), ExtractUserKey(*end)) >= 0))) {
      if (prefetching_boundary_page) {
        break;
      }

      // The index entry represents the last key in the data block.
      // We should load this page into memory as well, but no more
      prefetching_boundary_page = true;
    }

    // Load the block specified by the block_handle into the block cache
    DataBlockIter biter;

    NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), block_handle, &biter, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, &lookup_context, Status(),
        /*prefetch_buffer=*/nullptr);

    if (!biter.status().ok()) {
      // there was an unexpected error while pre-fetching
      return biter.status();
    }
  }

  return Status::OK();
}

Status BlockBasedTable::VerifyChecksum(const ReadOptions& read_options,
                                       TableReaderCaller caller) {
  Status s;
  // Check Meta blocks
  std::unique_ptr<Block> metaindex;
  std::unique_ptr<InternalIterator> metaindex_iter;
  ReadOptions ro;
  s = ReadMetaIndexBlock(ro, nullptr /* prefetch buffer */, &metaindex,
                         &metaindex_iter);
  if (s.ok()) {
    s = VerifyChecksumInMetaBlocks(metaindex_iter.get());
    if (!s.ok()) {
      return s;
    }
  } else {
    return s;
  }
  // Check Data blocks
  IndexBlockIter iiter_on_stack;
  BlockCacheLookupContext context{caller};
  InternalIteratorBase<IndexValue>* iiter = NewIndexIterator(
      read_options, /*disable_prefix_seek=*/false, &iiter_on_stack,
      /*get_context=*/nullptr, &context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }
  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }
  s = VerifyChecksumInBlocks(read_options, iiter);
  return s;
}

Status BlockBasedTable::VerifyChecksumInBlocks(
    const ReadOptions& read_options,
    InternalIteratorBase<IndexValue>* index_iter) {
  Status s;
  // We are scanning the whole file, so there is no need to exponentially
  // increase the readahead buffer size.
  size_t readahead_size = (read_options.readahead_size != 0)
                              ? read_options.readahead_size
                              : kMaxAutoReadaheadSize;
  // FilePrefetchBuffer doesn't work in mmap mode and readahead is not
  // needed there.
  FilePrefetchBuffer prefetch_buffer(
      rep_->file.get(), readahead_size /* readahead_size */,
      readahead_size /* max_readahead_size */,
      !rep_->ioptions.allow_mmap_reads /* enable */);
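  // With the settings above, a whole-file scan reads ahead in fixed-size
  // chunks (kMaxAutoReadaheadSize unless read_options.readahead_size
  // overrides it).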
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle = index_iter->value().handle;
    BlockContents contents;
    BlockFetcher block_fetcher(
        rep_->file.get(), &prefetch_buffer, rep_->footer, ReadOptions(), handle,
        &contents, rep_->ioptions, false /* decompress */,
        false /*maybe_compressed*/, BlockType::kData,
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      break;
    }
  }
  if (s.ok()) {
    // In the case of two level indexes, we would have exited the above loop
    // by checking index_iter->Valid(), but Valid() might have returned false
    // due to an IO error. So check the index_iter status
    s = index_iter->status();
  }
  return s;
}

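// Maps a metaindex key (a meta block's name) to its BlockType. A name that
// matches no known meta block is a bug: assert in debug builds, kInvalid in
// release builds.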
BlockType BlockBasedTable::GetBlockTypeForMetaBlockByName(
    const Slice& meta_block_name) {
  if (meta_block_name.starts_with(kFilterBlockPrefix) ||
      meta_block_name.starts_with(kFullFilterBlockPrefix) ||
      meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
    return BlockType::kFilter;
  }

  if (meta_block_name == kPropertiesBlock) {
    return BlockType::kProperties;
  }

  if (meta_block_name == kCompressionDictBlock) {
    return BlockType::kCompressionDictionary;
  }

  if (meta_block_name == kRangeDelBlock) {
    return BlockType::kRangeDeletion;
  }

  if (meta_block_name == kHashIndexPrefixesBlock) {
    return BlockType::kHashIndexPrefixes;
  }

  if (meta_block_name == kHashIndexPrefixesMetadataBlock) {
    return BlockType::kHashIndexMetadata;
  }

  assert(false);
  return BlockType::kInvalid;
}

Status BlockBasedTable::VerifyChecksumInMetaBlocks(
    InternalIteratorBase<Slice>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle;
    Slice input = index_iter->value();
    s = handle.DecodeFrom(&input);
    BlockContents contents;
    const Slice meta_block_name = index_iter->key();
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/,
        GetBlockTypeForMetaBlockByName(meta_block_name),
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
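    // A checksum mismatch on the properties block may just mean a global
    // sequence number was written into the block in place after the checksum
    // was computed (ingested external files), so retry with that in mind.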
    if (s.IsCorruption() && meta_block_name == kPropertiesBlock) {
      TableProperties* table_properties;
      ReadOptions ro;
      s = TryReadPropertiesWithGlobalSeqno(ro, nullptr /* prefetch_buffer */,
                                           index_iter->value(),
                                           &table_properties);
      delete table_properties;
    }
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

bool BlockBasedTable::TEST_BlockInCache(const BlockHandle& handle) const {
  assert(rep_ != nullptr);

  Cache* const cache = rep_->table_options.block_cache.get();
  if (cache == nullptr) {
    return false;
  }

  char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice cache_key =
      GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, handle,
                  cache_key_storage);
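  // The cache key is this table's unique prefix plus the varint-encoded
  // block offset, i.e. the same key under which the block would have been
  // inserted.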

  Cache::Handle* const cache_handle = cache->Lookup(cache_key);
  if (cache_handle == nullptr) {
    return false;
  }

  cache->Release(cache_handle);

  return true;
}

bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
                                      const Slice& key) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
      options, /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
      /*get_context=*/nullptr, /*lookup_context=*/nullptr));
  iiter->Seek(key);
  assert(iiter->Valid());

  return TEST_BlockInCache(iiter->value().handle);
}

// REQUIRES: The following fields of rep_ should have already been populated:
//  1. file
//  2. index_handle,
//  3. options
//  4. internal_comparator
//  5. index_type
Status BlockBasedTable::CreateIndexReader(
    const ReadOptions& ro, FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* preloaded_meta_index_iter, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context,
    std::unique_ptr<IndexReader>* index_reader) {
  // kHashSearch requires non-empty prefix_extractor but bypass checking
  // prefix_extractor here since we have no access to MutableCFOptions.
  // Add need_upper_bound_check flag in BlockBasedTable::NewIndexIterator.
  // If prefix_extractor does not match prefix_extractor_name from table
  // properties, turn off Hash Index by setting total_order_seek to true.

  switch (rep_->index_type) {
    case BlockBasedTableOptions::kTwoLevelIndexSearch: {
      return PartitionIndexReader::Create(this, ro, prefetch_buffer, use_cache,
                                          prefetch, pin, lookup_context,
                                          index_reader);
    }
    case BlockBasedTableOptions::kBinarySearch:
      FALLTHROUGH_INTENDED;
    case BlockBasedTableOptions::kBinarySearchWithFirstKey: {
      return BinarySearchIndexReader::Create(this, ro, prefetch_buffer,
                                             use_cache, prefetch, pin,
                                             lookup_context, index_reader);
    }
    case BlockBasedTableOptions::kHashSearch: {
      std::unique_ptr<Block> metaindex_guard;
      std::unique_ptr<InternalIterator> metaindex_iter_guard;
      auto meta_index_iter = preloaded_meta_index_iter;
      bool should_fallback = false;
      if (rep_->internal_prefix_transform.get() == nullptr) {
        ROCKS_LOG_WARN(rep_->ioptions.info_log,
                       "No prefix extractor passed in. Fall back to binary"
                       " search index.");
        should_fallback = true;
      } else if (meta_index_iter == nullptr) {
        auto s = ReadMetaIndexBlock(ro, prefetch_buffer, &metaindex_guard,
                                    &metaindex_iter_guard);
        if (!s.ok()) {
          // we simply fall back to binary search in case there is any
          // problem with prefix hash index loading.
          ROCKS_LOG_WARN(rep_->ioptions.info_log,
                         "Unable to read the metaindex block."
                         " Fall back to binary search index.");
          should_fallback = true;
        }
        meta_index_iter = metaindex_iter_guard.get();
      }

      if (should_fallback) {
        return BinarySearchIndexReader::Create(this, ro, prefetch_buffer,
                                               use_cache, prefetch, pin,
                                               lookup_context, index_reader);
      } else {
        return HashIndexReader::Create(this, ro, prefetch_buffer,
                                       meta_index_iter, use_cache, prefetch,
                                       pin, lookup_context, index_reader);
      }
    }
    default: {
      std::string error_message =
          "Unrecognized index type: " + ToString(rep_->index_type);
      return Status::InvalidArgument(error_message.c_str());
    }
  }
}

uint64_t BlockBasedTable::ApproximateDataOffsetOf(
    const InternalIteratorBase<IndexValue>& index_iter,
    uint64_t data_size) const {
  if (index_iter.Valid()) {
    BlockHandle handle = index_iter.value().handle;
    return handle.offset();
  } else {
    // The iterator is past the last key in the file.
    return data_size;
  }
}

uint64_t BlockBasedTable::GetApproximateDataSize() {
  // Should be in table properties unless super old version
  if (rep_->table_properties) {
    return rep_->table_properties->data_size;
  }
  // Fall back to rough estimate from footer
  return rep_->footer.metaindex_handle().offset();
}

uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key,
                                              TableReaderCaller caller) {
  uint64_t data_size = GetApproximateDataSize();
  if (UNLIKELY(data_size == 0)) {
    // Hmm. Let's just split in half to avoid skewing one way or another,
    // since we don't know whether we're operating on lower bound or
    // upper bound.
    return rep_->file_size / 2;
  }

  BlockCacheLookupContext context(caller);
  IndexBlockIter iiter_on_stack;
  ReadOptions ro;
  ro.total_order_seek = true;
  auto index_iter =
      NewIndexIterator(ro, /*disable_prefix_seek=*/true,
                       /*input_iter=*/&iiter_on_stack, /*get_context=*/nullptr,
                       /*lookup_context=*/&context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (index_iter != &iiter_on_stack) {
    iiter_unique_ptr.reset(index_iter);
  }

  index_iter->Seek(key);

  uint64_t offset = ApproximateDataOffsetOf(*index_iter, data_size);
  // Pro-rate file metadata (incl filters) size-proportionally across data
  // blocks.
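  // Example: a key whose data offset is one quarter of data_size yields an
  // estimate of file_size / 4.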
  double size_ratio =
      static_cast<double>(offset) / static_cast<double>(data_size);
  return static_cast<uint64_t>(size_ratio *
                               static_cast<double>(rep_->file_size));
}

uint64_t BlockBasedTable::ApproximateSize(const Slice& start, const Slice& end,
                                          TableReaderCaller caller) {
  assert(rep_->internal_comparator.Compare(start, end) <= 0);

  uint64_t data_size = GetApproximateDataSize();
  if (UNLIKELY(data_size == 0)) {
    // Hmm. Assume whole file is involved, since we have lower and upper
    // bound.
    return rep_->file_size;
  }

  BlockCacheLookupContext context(caller);
  IndexBlockIter iiter_on_stack;
  ReadOptions ro;
  ro.total_order_seek = true;
  auto index_iter =
      NewIndexIterator(ro, /*disable_prefix_seek=*/true,
                       /*input_iter=*/&iiter_on_stack, /*get_context=*/nullptr,
                       /*lookup_context=*/&context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (index_iter != &iiter_on_stack) {
    iiter_unique_ptr.reset(index_iter);
  }

  index_iter->Seek(start);
  uint64_t start_offset = ApproximateDataOffsetOf(*index_iter, data_size);
  index_iter->Seek(end);
  uint64_t end_offset = ApproximateDataOffsetOf(*index_iter, data_size);

  assert(end_offset >= start_offset);
  // Pro-rate file metadata (incl filters) size-proportionally across data
  // blocks.
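  // Example: a range spanning half of the data bytes yields an estimate of
  // file_size / 2.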
  double size_ratio = static_cast<double>(end_offset - start_offset) /
                      static_cast<double>(data_size);
  return static_cast<uint64_t>(size_ratio *
                               static_cast<double>(rep_->file_size));
}

bool BlockBasedTable::TEST_FilterBlockInCache() const {
  assert(rep_ != nullptr);
  return TEST_BlockInCache(rep_->filter_handle);
}

bool BlockBasedTable::TEST_IndexBlockInCache() const {
  assert(rep_ != nullptr);

  return TEST_BlockInCache(rep_->footer.index_handle());
}

Status BlockBasedTable::GetKVPairsFromDataBlocks(
    std::vector<KVPairBlock>* kv_pair_blocks) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));

  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    // Cannot read Index Block
    return s;
  }

  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();

    if (!s.ok()) {
      break;
    }

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      // Error reading the block - Skipped
      continue;
    }

    KVPairBlock kv_pair_block;
    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        // Error reading the block - Skipped
        break;
      }
      const Slice& key = datablock_iter->key();
      const Slice& value = datablock_iter->value();
      std::string key_copy = std::string(key.data(), key.size());
      std::string value_copy = std::string(value.data(), value.size());

      kv_pair_block.push_back(
          std::make_pair(std::move(key_copy), std::move(value_copy)));
    }
    kv_pair_blocks->push_back(std::move(kv_pair_block));
  }
  return Status::OK();
}

Status BlockBasedTable::DumpTable(WritableFile* out_file) {
  // Output Footer
  out_file->Append(
      "Footer Details:\n"
      "--------------------------------------\n"
      "  ");
  out_file->Append(rep_->footer.ToString().c_str());
  out_file->Append("\n");

  // Output MetaIndex
  out_file->Append(
      "Metaindex Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<Block> metaindex;
  std::unique_ptr<InternalIterator> metaindex_iter;
  ReadOptions ro;
  Status s = ReadMetaIndexBlock(ro, nullptr /* prefetch_buffer */, &metaindex,
                                &metaindex_iter);
  if (s.ok()) {
    for (metaindex_iter->SeekToFirst(); metaindex_iter->Valid();
         metaindex_iter->Next()) {
      s = metaindex_iter->status();
      if (!s.ok()) {
        return s;
      }
      if (metaindex_iter->key() == ROCKSDB_NAMESPACE::kPropertiesBlock) {
        out_file->Append("  Properties block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (metaindex_iter->key() ==
                 ROCKSDB_NAMESPACE::kCompressionDictBlock) {
        out_file->Append("  Compression dictionary block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (strstr(metaindex_iter->key().ToString().c_str(),
                        "filter.rocksdb.") != nullptr) {
        out_file->Append("  Filter block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (metaindex_iter->key() == ROCKSDB_NAMESPACE::kRangeDelBlock) {
        out_file->Append("  Range deletion block handle: ");
        out_file->Append(metaindex_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      }
    }
    out_file->Append("\n");
  } else {
    return s;
  }

  // Output TableProperties
  const ROCKSDB_NAMESPACE::TableProperties* table_properties;
  table_properties = rep_->table_properties.get();

  if (table_properties != nullptr) {
    out_file->Append(
        "Table Properties:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(table_properties->ToString("\n  ", ": ").c_str());
    out_file->Append("\n");
  }
  if (rep_->filter) {
    out_file->Append(
        "Filter Details:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(rep_->filter->ToString().c_str());
    out_file->Append("\n");
  }

  // Output Index block
  s = DumpIndexBlock(out_file);
  if (!s.ok()) {
    return s;
  }

  // Output compression dictionary
  if (rep_->uncompression_dict_reader) {
    CachableEntry<UncompressionDict> uncompression_dict;
    s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
        nullptr /* prefetch_buffer */, false /* no_io */,
        nullptr /* get_context */, nullptr /* lookup_context */,
        &uncompression_dict);
    if (!s.ok()) {
      return s;
    }
    assert(uncompression_dict.GetValue());

    const Slice& raw_dict = uncompression_dict.GetValue()->GetRawDict();
    out_file->Append(
        "Compression Dictionary:\n"
        "--------------------------------------\n");
    out_file->Append("  size (bytes): ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(raw_dict.size()));
    out_file->Append("\n\n");
    out_file->Append("  HEX    ");
    out_file->Append(raw_dict.ToString(true).c_str());
    out_file->Append("\n\n");
  }

  // Output range deletions block
  auto* range_del_iter = NewRangeTombstoneIterator(ReadOptions());
  if (range_del_iter != nullptr) {
    range_del_iter->SeekToFirst();
    if (range_del_iter->Valid()) {
      out_file->Append(
          "Range deletions:\n"
          "--------------------------------------\n"
          "  ");
      for (; range_del_iter->Valid(); range_del_iter->Next()) {
        DumpKeyValue(range_del_iter->key(), range_del_iter->value(), out_file);
      }
      out_file->Append("\n");
    }
    delete range_del_iter;
  }
  // Output Data blocks
  s = DumpDataBlocks(out_file);

  return s;
}

Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
  out_file->Append(
      "Index Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Can not read Index Block \n\n");
    return s;
  }

  out_file->Append("  Block key hex dump: Data block handle\n");
  out_file->Append("  Block key ascii\n\n");
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }
    Slice key = blockhandles_iter->key();
    Slice user_key;
    InternalKey ikey;
    if (!rep_->index_key_includes_seq) {
      user_key = key;
    } else {
      ikey.DecodeFrom(key);
      user_key = ikey.user_key();
    }

    out_file->Append("  HEX    ");
    out_file->Append(user_key.ToString(true).c_str());
    out_file->Append(": ");
    out_file->Append(blockhandles_iter->value()
                         .ToString(true, rep_->index_has_first_key)
                         .c_str());
    out_file->Append("\n");

    std::string str_key = user_key.ToString();
    std::string res_key("");
    char cspace = ' ';
    for (size_t i = 0; i < str_key.size(); i++) {
      res_key.append(&str_key[i], 1);
      res_key.append(1, cspace);
    }
    out_file->Append("  ASCII  ");
    out_file->Append(res_key.c_str());
    out_file->Append("\n  ------\n");
  }
  out_file->Append("\n");
  return Status::OK();
}

Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Can not read Index Block \n\n");
    return s;
  }

  uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
  uint64_t datablock_size_max = 0;
  uint64_t datablock_size_sum = 0;

  size_t block_id = 1;
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       block_id++, blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }

    BlockHandle bh = blockhandles_iter->value().handle;
    uint64_t datablock_size = bh.size();
    datablock_size_min = std::min(datablock_size_min, datablock_size);
    datablock_size_max = std::max(datablock_size_max, datablock_size);
    datablock_size_sum += datablock_size;

    out_file->Append("Data Block # ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(block_id));
    out_file->Append(" @ ");
    out_file->Append(blockhandles_iter->value().handle.ToString(true).c_str());
    out_file->Append("\n");
    out_file->Append("--------------------------------------\n");

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      out_file->Append("Error reading the block - Skipped \n\n");
      continue;
    }

    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        out_file->Append("Error reading the block - Skipped \n");
        break;
      }
      DumpKeyValue(datablock_iter->key(), datablock_iter->value(), out_file);
    }
    out_file->Append("\n");
  }

  uint64_t num_datablocks = block_id - 1;
  if (num_datablocks) {
    double datablock_size_avg =
        static_cast<double>(datablock_size_sum) / num_datablocks;
    out_file->Append("Data Block Summary:\n");
    out_file->Append("--------------------------------------");
    out_file->Append("\n  # data blocks: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(num_datablocks));
    out_file->Append("\n  min data block size: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(datablock_size_min));
    out_file->Append("\n  max data block size: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(datablock_size_max));
    out_file->Append("\n  avg data block size: ");
    out_file->Append(ROCKSDB_NAMESPACE::ToString(datablock_size_avg));
    out_file->Append("\n");
  }

  return Status::OK();
}

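// Writes one key/value pair in the dump format used above. For a
// hypothetical pair ("key", "value") the output is roughly:
//
//   HEX    6B6579: 76616C7565
//   ASCII  k e y : v a l u e
//   ------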
void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,
                                   WritableFile* out_file) {
  InternalKey ikey;
  ikey.DecodeFrom(key);

  out_file->Append("  HEX    ");
  out_file->Append(ikey.user_key().ToString(true).c_str());
  out_file->Append(": ");
  out_file->Append(value.ToString(true).c_str());
  out_file->Append("\n");

  std::string str_key = ikey.user_key().ToString();
  std::string str_value = value.ToString();
  std::string res_key(""), res_value("");
  char cspace = ' ';
  for (size_t i = 0; i < str_key.size(); i++) {
    if (str_key[i] == '\0') {
      res_key.append("\\0", 2);
    } else {
      res_key.append(&str_key[i], 1);
    }
    res_key.append(1, cspace);
  }
  for (size_t i = 0; i < str_value.size(); i++) {
    if (str_value[i] == '\0') {
      res_value.append("\\0", 2);
    } else {
      res_value.append(&str_value[i], 1);
    }
    res_value.append(1, cspace);
  }

  out_file->Append("  ASCII  ");
  out_file->Append(res_key.c_str());
  out_file->Append(": ");
  out_file->Append(res_value.c_str());
  out_file->Append("\n  ------\n");
}

}  // namespace ROCKSDB_NAMESPACE