//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/block_based_table_reader.h"

#include <algorithm>
#include <array>
#include <limits>
#include <string>
#include <utility>
#include <vector>

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"

#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/table_properties.h"

#include "table/block_based/block.h"
#include "table/block_based/block_based_filter_block.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_prefix_index.h"
#include "table/block_based/filter_block.h"
#include "table/block_based/full_filter_block.h"
#include "table/block_based/partitioned_filter_block.h"
#include "table/block_fetcher.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/multiget_context.h"
#include "table/persistent_cache_helper.h"
#include "table/sst_file_writer_collectors.h"
#include "table/two_level_iterator.h"

#include "monitoring/perf_context_imp.h"
#include "test_util/sync_point.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/file_reader_writer.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace rocksdb {

extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

typedef BlockBasedTable::IndexReader IndexReader;

BlockBasedTable::~BlockBasedTable() {
  delete rep_;
}

std::atomic<uint64_t> BlockBasedTable::next_cache_key_id_(0);

namespace {
// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result
// @param uncompression_dict Data for presetting the compression library's
//    dictionary.
Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<Block>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed, BlockType block_type,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options, SequenceNumber global_seqno,
    size_t read_amp_bytes_per_bit, MemoryAllocator* memory_allocator,
    bool for_compaction = false) {
  assert(result);

  BlockContents contents;
  BlockFetcher block_fetcher(
      file, prefetch_buffer, footer, options, handle, &contents, ioptions,
      do_uncompress, maybe_compressed, block_type, uncompression_dict,
      cache_options, memory_allocator, nullptr, for_compaction);
  Status s = block_fetcher.ReadBlockContents();
  if (s.ok()) {
    result->reset(new Block(std::move(contents), global_seqno,
                            read_amp_bytes_per_bit, ioptions.statistics));
  }

  return s;
}

Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<BlockContents>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed, BlockType block_type,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options,
    SequenceNumber /* global_seqno */, size_t /* read_amp_bytes_per_bit */,
    MemoryAllocator* memory_allocator, bool for_compaction = false) {
  assert(result);

  result->reset(new BlockContents);

  BlockFetcher block_fetcher(
      file, prefetch_buffer, footer, options, handle, result->get(), ioptions,
      do_uncompress, maybe_compressed, block_type, uncompression_dict,
      cache_options, memory_allocator, nullptr, for_compaction);

  const Status s = block_fetcher.ReadBlockContents();
  if (!s.ok()) {
    result->reset();
  }

  return s;
}

inline MemoryAllocator* GetMemoryAllocator(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache.get()
             ? table_options.block_cache->memory_allocator()
             : nullptr;
}

inline MemoryAllocator* GetMemoryAllocatorForCompressedBlock(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache_compressed.get()
             ? table_options.block_cache_compressed->memory_allocator()
             : nullptr;
}

// Delete the entry residing in the cache.
template <class Entry>
void DeleteCachedEntry(const Slice& /*key*/, void* value) {
  auto entry = reinterpret_cast<Entry*>(value);
  delete entry;
}

// Release the cached entry and decrement its ref count.
void ForceReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, true /* force_erase */);
}

// Release the cached entry and decrement its ref count.
// Do not force erase.
void ReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, false /* force_erase */);
}

// For a hash-based index, return true if prefix_extractor and
// prefix_extractor_block mismatch, false otherwise. The result is used
// as total_order_seek via NewIndexIterator.
bool PrefixExtractorChanged(const TableProperties* table_properties,
                            const SliceTransform* prefix_extractor) {
  // BlockBasedTableOptions::kHashSearch requires prefix_extractor to be set.
  // Turn off hash index if prefix_extractor is not set; if prefix_extractor
  // is set but prefix_extractor_block is not set, also disable hash index.
  if (prefix_extractor == nullptr || table_properties == nullptr ||
      table_properties->prefix_extractor_name.empty()) {
    return true;
  }

  // prefix_extractor and prefix_extractor_block are both non-empty
  if (table_properties->prefix_extractor_name.compare(
          prefix_extractor->Name()) != 0) {
    return true;
  } else {
    return false;
  }
}

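// Copy the contents of `buf` into a freshly allocated heap buffer (using the
// given allocator, if any) so the result can be owned independently of the
// source I/O buffer.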
CacheAllocationPtr CopyBufferToHeap(MemoryAllocator* allocator, Slice& buf) {
  CacheAllocationPtr heap_buf;
  heap_buf = AllocateBlock(buf.size(), allocator);
  memcpy(heap_buf.get(), buf.data(), buf.size());
  return heap_buf;
}

}  // namespace

// Encapsulates common functionality for the various index reader
// implementations. Provides access to the index block regardless of whether
// it is owned by the reader or stored in the cache, or whether it is pinned
// in the cache or not.
class BlockBasedTable::IndexReaderCommon : public BlockBasedTable::IndexReader {
 public:
  IndexReaderCommon(const BlockBasedTable* t,
                    CachableEntry<Block>&& index_block)
      : table_(t), index_block_(std::move(index_block)) {
    assert(table_ != nullptr);
  }

 protected:
  static Status ReadIndexBlock(const BlockBasedTable* table,
                               FilePrefetchBuffer* prefetch_buffer,
                               const ReadOptions& read_options, bool use_cache,
                               GetContext* get_context,
                               BlockCacheLookupContext* lookup_context,
                               CachableEntry<Block>* index_block);

  const BlockBasedTable* table() const { return table_; }

  const InternalKeyComparator* internal_comparator() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);

    return &table_->get_rep()->internal_comparator;
  }

  bool index_has_first_key() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_has_first_key;
  }

  bool index_key_includes_seq() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_key_includes_seq;
  }

  bool index_value_is_full() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_value_is_full;
  }

  bool cache_index_blocks() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->table_options.cache_index_and_filter_blocks;
  }

  Status GetOrReadIndexBlock(bool no_io, GetContext* get_context,
                             BlockCacheLookupContext* lookup_context,
                             CachableEntry<Block>* index_block) const;

  size_t ApproximateIndexBlockMemoryUsage() const {
    assert(!index_block_.GetOwnValue() || index_block_.GetValue() != nullptr);
    return index_block_.GetOwnValue()
               ? index_block_.GetValue()->ApproximateMemoryUsage()
               : 0;
  }

 private:
  const BlockBasedTable* table_;
  CachableEntry<Block> index_block_;
};

Status BlockBasedTable::IndexReaderCommon::ReadIndexBlock(
    const BlockBasedTable* table, FilePrefetchBuffer* prefetch_buffer,
    const ReadOptions& read_options, bool use_cache, GetContext* get_context,
    BlockCacheLookupContext* lookup_context,
    CachableEntry<Block>* index_block) {
  PERF_TIMER_GUARD(read_index_block_nanos);

  assert(table != nullptr);
  assert(index_block != nullptr);
  assert(index_block->IsEmpty());

  const Rep* const rep = table->get_rep();
  assert(rep != nullptr);

  const Status s = table->RetrieveBlock(
      prefetch_buffer, read_options, rep->footer.index_handle(),
      UncompressionDict::GetEmptyDict(), index_block, BlockType::kIndex,
      get_context, lookup_context, /* for_compaction */ false, use_cache);

  return s;
}

Status BlockBasedTable::IndexReaderCommon::GetOrReadIndexBlock(
    bool no_io, GetContext* get_context,
    BlockCacheLookupContext* lookup_context,
    CachableEntry<Block>* index_block) const {
  assert(index_block != nullptr);

  if (!index_block_.IsEmpty()) {
    index_block->SetUnownedValue(index_block_.GetValue());
    return Status::OK();
  }

  ReadOptions read_options;
  if (no_io) {
    read_options.read_tier = kBlockCacheTier;
  }

  return ReadIndexBlock(table_, /*prefetch_buffer=*/nullptr, read_options,
                        cache_index_blocks(), get_context, lookup_context,
                        index_block);
}

// Index that allows binary search lookup in a two-level index structure.
class PartitionIndexReader : public BlockBasedTable::IndexReaderCommon {
 public:
  // Read the partition index from the file and create an instance for
  // `PartitionIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
  static Status Create(const BlockBasedTable* table,
                       FilePrefetchBuffer* prefetch_buffer, bool use_cache,
                       bool prefetch, bool pin,
                       BlockCacheLookupContext* lookup_context,
                       std::unique_ptr<IndexReader>* index_reader) {
    assert(table != nullptr);
    assert(table->get_rep());
    assert(!pin || prefetch);
    assert(index_reader != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(), use_cache,
                         /*get_context=*/nullptr, lookup_context, &index_block);
      if (!s.ok()) {
        return s;
      }

      if (use_cache && !pin) {
        index_block.Reset();
      }
    }

    index_reader->reset(
        new PartitionIndexReader(table, std::move(index_block)));

    return Status::OK();
  }

  // Return a two-level iterator: the first level iterates over the partition
  // index.
  InternalIteratorBase<IndexValue>* NewIterator(
      const ReadOptions& read_options, bool /* disable_prefix_seek */,
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
    CachableEntry<Block> index_block;
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

      return NewErrorInternalIterator<IndexValue>(s);
    }

    InternalIteratorBase<IndexValue>* it = nullptr;

    Statistics* kNullStats = nullptr;
    // Filters are already checked before seeking the index
    if (!partition_map_.empty()) {
      // We don't return pinned data from index blocks, so no need
      // to set `block_contents_pinned`.
      it = NewTwoLevelIterator(
          new BlockBasedTable::PartitionedIndexIteratorState(table(),
                                                             &partition_map_),
          index_block.GetValue()->NewIndexIterator(
              internal_comparator(), internal_comparator()->user_comparator(),
              nullptr, kNullStats, true, index_has_first_key(),
              index_key_includes_seq(), index_value_is_full()));
    } else {
      ReadOptions ro;
      ro.fill_cache = read_options.fill_cache;
      // We don't return pinned data from index blocks, so no need
      // to set `block_contents_pinned`.
      it = new BlockBasedTableIterator<IndexBlockIter, IndexValue>(
          table(), ro, *internal_comparator(),
          index_block.GetValue()->NewIndexIterator(
              internal_comparator(), internal_comparator()->user_comparator(),
              nullptr, kNullStats, true, index_has_first_key(),
              index_key_includes_seq(), index_value_is_full()),
          false, true, /* prefix_extractor */ nullptr, BlockType::kIndex,
          lookup_context ? lookup_context->caller
                         : TableReaderCaller::kUncategorized);
    }

    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;

    // TODO(myabandeh): Update TwoLevelIterator to be able to make use of
    // on-stack BlockIter while the state is on heap. Currently it assumes
    // the first level iter is always on heap and will attempt to delete it
    // in its destructor.
  }

  void CacheDependencies(bool pin) override {
    // Before reading the partitions, prefetch them to avoid lots of IOs.
    BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
    const BlockBasedTable::Rep* rep = table()->rep_;
    IndexBlockIter biter;
    BlockHandle handle;
    Statistics* kNullStats = nullptr;

    CachableEntry<Block> index_block;
    Status s = GetOrReadIndexBlock(false /* no_io */, nullptr /* get_context */,
                                   &lookup_context, &index_block);
    if (!s.ok()) {
      ROCKS_LOG_WARN(rep->ioptions.info_log,
                     "Error retrieving top-level index block while trying to "
                     "cache index partitions: %s",
                     s.ToString().c_str());
      return;
    }

    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    index_block.GetValue()->NewIndexIterator(
        internal_comparator(), internal_comparator()->user_comparator(), &biter,
        kNullStats, true, index_has_first_key(), index_key_includes_seq(),
        index_value_is_full());
    // Index partitions are assumed to be consecutive. Prefetch them all.
    // Read the first block offset
    biter.SeekToFirst();
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
    handle = biter.value().handle;
    uint64_t prefetch_off = handle.offset();

    // Read the last block's offset
    biter.SeekToLast();
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
    handle = biter.value().handle;
    uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize;
    uint64_t prefetch_len = last_off - prefetch_off;
    std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
    auto& file = rep->file;
    prefetch_buffer.reset(new FilePrefetchBuffer());
    s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
                                  static_cast<size_t>(prefetch_len));

    // After prefetch, read the partitions one by one
    biter.SeekToFirst();
    auto ro = ReadOptions();
    for (; biter.Valid(); biter.Next()) {
      handle = biter.value().handle;
      CachableEntry<Block> block;
      // TODO: Support counter batch update for partitioned index and
      // filter blocks
      s = table()->MaybeReadBlockAndLoadToCache(
          prefetch_buffer.get(), ro, handle, UncompressionDict::GetEmptyDict(),
          &block, BlockType::kIndex, /*get_context=*/nullptr, &lookup_context,
          /*contents=*/nullptr);

      assert(s.ok() || block.GetValue() == nullptr);
      if (s.ok() && block.GetValue() != nullptr) {
        if (block.IsCached()) {
          if (pin) {
            partition_map_[handle.offset()] = std::move(block);
          }
        }
      }
    }
  }

  size_t ApproximateMemoryUsage() const override {
    size_t usage = ApproximateIndexBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size(const_cast<PartitionIndexReader*>(this));
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    // TODO(myabandeh): more accurate estimate of partition_map_ mem usage
    return usage;
  }

 private:
  PartitionIndexReader(const BlockBasedTable* t,
                       CachableEntry<Block>&& index_block)
      : IndexReaderCommon(t, std::move(index_block)) {}

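  // Maps each partition's block offset to its pinned index block entry,
  // populated by CacheDependencies() when pin is true.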
  std::unordered_map<uint64_t, CachableEntry<Block>> partition_map_;
};

// Index that allows binary search lookup for the first key of each block.
// This class can be viewed as a thin wrapper for `Block` class which already
// supports binary search.
class BinarySearchIndexReader : public BlockBasedTable::IndexReaderCommon {
 public:
  // Read index from the file and create an instance for
  // `BinarySearchIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
  static Status Create(const BlockBasedTable* table,
                       FilePrefetchBuffer* prefetch_buffer, bool use_cache,
                       bool prefetch, bool pin,
                       BlockCacheLookupContext* lookup_context,
                       std::unique_ptr<IndexReader>* index_reader) {
    assert(table != nullptr);
    assert(table->get_rep());
    assert(!pin || prefetch);
    assert(index_reader != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(), use_cache,
                         /*get_context=*/nullptr, lookup_context, &index_block);
      if (!s.ok()) {
        return s;
      }

      if (use_cache && !pin) {
        index_block.Reset();
      }
    }

    index_reader->reset(
        new BinarySearchIndexReader(table, std::move(index_block)));

    return Status::OK();
  }

  InternalIteratorBase<IndexValue>* NewIterator(
      const ReadOptions& read_options, bool /* disable_prefix_seek */,
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
    CachableEntry<Block> index_block;
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

      return NewErrorInternalIterator<IndexValue>(s);
    }

    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    auto it = index_block.GetValue()->NewIndexIterator(
        internal_comparator(), internal_comparator()->user_comparator(), iter,
        kNullStats, true, index_has_first_key(), index_key_includes_seq(),
        index_value_is_full());

    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;
  }

  size_t ApproximateMemoryUsage() const override {
    size_t usage = ApproximateIndexBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size(const_cast<BinarySearchIndexReader*>(this));
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
  }

 private:
  BinarySearchIndexReader(const BlockBasedTable* t,
                          CachableEntry<Block>&& index_block)
      : IndexReaderCommon(t, std::move(index_block)) {}
};

// Index that leverages an internal hash table to quicken the lookup for a given
// key.
class HashIndexReader : public BlockBasedTable::IndexReaderCommon {
 public:
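  // Read the prefix hash index metadata from the file and create an instance
  // of `HashIndexReader`. If the prefix metadata blocks are missing or
  // unreadable, the reader still succeeds and falls back to plain binary
  // search, so Create may return OK without a prefix index.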
  static Status Create(const BlockBasedTable* table,
                       FilePrefetchBuffer* prefetch_buffer,
                       InternalIterator* meta_index_iter, bool use_cache,
                       bool prefetch, bool pin,
                       BlockCacheLookupContext* lookup_context,
                       std::unique_ptr<IndexReader>* index_reader) {
    assert(table != nullptr);
    assert(index_reader != nullptr);
    assert(!pin || prefetch);

    const BlockBasedTable::Rep* rep = table->get_rep();
    assert(rep != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(), use_cache,
                         /*get_context=*/nullptr, lookup_context, &index_block);
      if (!s.ok()) {
        return s;
      }

      if (use_cache && !pin) {
        index_block.Reset();
      }
    }

    // Note, failure to create prefix hash index does not need to be a
    // hard error. We can still fall back to the original binary search index.
    // So, Create will succeed regardless, from this point on.

    index_reader->reset(new HashIndexReader(table, std::move(index_block)));

    // Get prefixes block
    BlockHandle prefixes_handle;
    Status s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock,
                             &prefixes_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    // Get index metadata block
    BlockHandle prefixes_meta_handle;
    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock,
                      &prefixes_meta_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    RandomAccessFileReader* const file = rep->file.get();
    const Footer& footer = rep->footer;
    const ImmutableCFOptions& ioptions = rep->ioptions;
    const PersistentCacheOptions& cache_options = rep->persistent_cache_options;
    MemoryAllocator* const memory_allocator =
        GetMemoryAllocator(rep->table_options);

    // Read contents for the blocks
    BlockContents prefixes_contents;
    BlockFetcher prefixes_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_handle,
        &prefixes_contents, ioptions, true /*decompress*/,
        true /*maybe_compressed*/, BlockType::kHashIndexPrefixes,
        UncompressionDict::GetEmptyDict(), cache_options, memory_allocator);
    s = prefixes_block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      return s;
    }
    BlockContents prefixes_meta_contents;
    BlockFetcher prefixes_meta_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_meta_handle,
        &prefixes_meta_contents, ioptions, true /*decompress*/,
        true /*maybe_compressed*/, BlockType::kHashIndexMetadata,
        UncompressionDict::GetEmptyDict(), cache_options, memory_allocator);
    s = prefixes_meta_block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    BlockPrefixIndex* prefix_index = nullptr;
    s = BlockPrefixIndex::Create(rep->internal_prefix_transform.get(),
                                 prefixes_contents.data,
                                 prefixes_meta_contents.data, &prefix_index);
    // TODO: log error
    if (s.ok()) {
      HashIndexReader* const hash_index_reader =
          static_cast<HashIndexReader*>(index_reader->get());
      hash_index_reader->prefix_index_.reset(prefix_index);
    }

    return Status::OK();
  }

  InternalIteratorBase<IndexValue>* NewIterator(
      const ReadOptions& read_options, bool disable_prefix_seek,
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
    CachableEntry<Block> index_block;
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

      return NewErrorInternalIterator<IndexValue>(s);
    }

    Statistics* kNullStats = nullptr;
    const bool total_order_seek =
        read_options.total_order_seek || disable_prefix_seek;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    auto it = index_block.GetValue()->NewIndexIterator(
        internal_comparator(), internal_comparator()->user_comparator(), iter,
        kNullStats, total_order_seek, index_has_first_key(),
        index_key_includes_seq(), index_value_is_full(),
        false /* block_contents_pinned */, prefix_index_.get());

    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;
  }

  size_t ApproximateMemoryUsage() const override {
    size_t usage = ApproximateIndexBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size(const_cast<HashIndexReader*>(this));
#else
    if (prefix_index_) {
      usage += prefix_index_->ApproximateMemoryUsage();
    }
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
  }

 private:
  HashIndexReader(const BlockBasedTable* t, CachableEntry<Block>&& index_block)
      : IndexReaderCommon(t, std::move(index_block)) {}

  std::unique_ptr<BlockPrefixIndex> prefix_index_;
};

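// Update statistics and, when a GetContext is supplied, the per-Get counters
// for a block cache hit on the given block type. The miss and insertion
// variants below follow the same pattern.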
void BlockBasedTable::UpdateCacheHitMetrics(BlockType block_type,
                                            GetContext* get_context,
                                            size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  PERF_COUNTER_ADD(block_cache_hit_count, 1);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_hit;
    get_context->get_context_stats_.num_cache_bytes_read += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_HIT);
    RecordTick(statistics, BLOCK_CACHE_BYTES_READ, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_HIT);
      }
      break;

    case BlockType::kCompressionDictionary:
      // TODO: introduce perf counter for compression dictionary hit count
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_HIT);
      }
      break;

    case BlockType::kIndex:
      PERF_COUNTER_ADD(block_cache_index_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_HIT);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_HIT);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheMissMetrics(BlockType block_type,
                                             GetContext* get_context) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce aggregate (not per-level) block cache miss count
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_miss;
  } else {
    RecordTick(statistics, BLOCK_CACHE_MISS);
  }

  // TODO: introduce perf counters for misses per block type
  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_MISS);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_MISS);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_MISS);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_MISS);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheInsertionMetrics(BlockType block_type,
                                                  GetContext* get_context,
                                                  size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce perf counters for block cache insertions
  if (get_context) {
    ++get_context->get_context_stats_.num_cache_add;
    get_context->get_context_stats_.num_cache_bytes_write += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_ADD);
    RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_add;
        get_context->get_context_stats_.num_cache_filter_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
        RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_add;
        get_context->get_context_stats_
            .num_cache_compression_dict_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD);
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
                   usage);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_add;
        get_context->get_context_stats_.num_cache_index_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
        RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usage);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_add;
        get_context->get_context_stats_.num_cache_data_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
        RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, usage);
      }
      break;
  }
}

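// Look up `key` in the block cache and record the resulting hit or miss in
// the metrics appropriate for `block_type`.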
Cache::Handle* BlockBasedTable::GetEntryFromCache(
    Cache* block_cache, const Slice& key, BlockType block_type,
    GetContext* get_context) const {
  auto cache_handle = block_cache->Lookup(key, rep_->ioptions.statistics);

  if (cache_handle != nullptr) {
    UpdateCacheHitMetrics(block_type, get_context,
                          block_cache->GetUsage(cache_handle));
  } else {
    UpdateCacheMissMetrics(block_type, get_context);
  }

  return cache_handle;
}

// Helper function to set up the cache key's prefix for the Table.
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  rep->compressed_cache_key_prefix_size = 0;
  if (rep->table_options.block_cache != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file->file(),
                        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
  }
  if (rep->table_options.persistent_cache != nullptr) {
    GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(),
                        &rep->persistent_cache_key_prefix[0],
                        &rep->persistent_cache_key_prefix_size);
  }
  if (rep->table_options.block_cache_compressed != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
                        rep->file->file(), &rep->compressed_cache_key_prefix[0],
                        &rep->compressed_cache_key_prefix_size);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, RandomAccessFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (cc && *size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, WritableFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (*size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

namespace {
// Return true if `user_prop_name` in table_properties has a `true` value,
// or if the property is missing (for backward compatibility).
bool IsFeatureSupported(const TableProperties& table_properties,
                        const std::string& user_prop_name, Logger* info_log) {
  auto& props = table_properties.user_collected_properties;
  auto pos = props.find(user_prop_name);
  // Older version doesn't have this value set. Skip this check.
  if (pos != props.end()) {
    if (pos->second == kPropFalse) {
      return false;
    } else if (pos->second != kPropTrue) {
      ROCKS_LOG_WARN(info_log, "Property %s has invalid value %s",
                     user_prop_name.c_str(), pos->second.c_str());
    }
  }
  return true;
}

// Caller has to ensure seqno is not nullptr.
Status GetGlobalSequenceNumber(const TableProperties& table_properties,
                               SequenceNumber largest_seqno,
                               SequenceNumber* seqno) {
  const auto& props = table_properties.user_collected_properties;
  const auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
  const auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);

  *seqno = kDisableGlobalSequenceNumber;
  if (version_pos == props.end()) {
    if (seqno_pos != props.end()) {
      std::array<char, 200> msg_buf;
      // This is not an external sst file, global_seqno is not supported.
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "A non-external sst file has a global seqno property with value %s",
          seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  uint32_t version = DecodeFixed32(version_pos->second.c_str());
  if (version < 2) {
    if (seqno_pos != props.end() || version != 1) {
      std::array<char, 200> msg_buf;
      // This is a v1 external sst file, global_seqno is not supported.
      snprintf(msg_buf.data(), msg_buf.max_size(),
               "An external sst file with version %u has a global seqno "
               "property with value %s",
               version, seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  // Since we have a plan to deprecate global_seqno, we do not return failure
  // if seqno_pos == props.end(). We rely on version_pos to detect whether the
  // SST is external.
  SequenceNumber global_seqno(0);
  if (seqno_pos != props.end()) {
    global_seqno = DecodeFixed64(seqno_pos->second.c_str());
  }
  // SstTableReader opens the table reader with kMaxSequenceNumber as
  // largest_seqno to denote that it is unknown.
  if (largest_seqno < kMaxSequenceNumber) {
    if (global_seqno == 0) {
      global_seqno = largest_seqno;
    }
    if (global_seqno != largest_seqno) {
      std::array<char, 200> msg_buf;
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "An external sst file with version %u has a global seqno property "
          "with value %s, while largest seqno in the file is %llu",
          version, seqno_pos->second.c_str(),
          static_cast<unsigned long long>(largest_seqno));
      return Status::Corruption(msg_buf.data());
    }
  }
  *seqno = global_seqno;

  if (global_seqno > kMaxSequenceNumber) {
    std::array<char, 200> msg_buf;
    snprintf(msg_buf.data(), msg_buf.max_size(),
             "An external sst file with version %u has a global seqno property "
             "with value %llu, which is greater than kMaxSequenceNumber",
             version, static_cast<unsigned long long>(global_seqno));
    return Status::Corruption(msg_buf.data());
  }

  return Status::OK();
}
}  // namespace

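// The resulting cache key layout is the per-file prefix (the file's unique
// ID, or a varint-encoded cache ID when no file ID is available) followed by
// the varint64-encoded block offset, as assembled below.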
Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
                                   size_t cache_key_prefix_size,
                                   const BlockHandle& handle, char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}

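// Open the table file: prefetch the tail, read the footer, metaindex,
// properties, range tombstone, and index/filter metadata, and on success
// hand back a fully constructed reader in *table_reader.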
Status BlockBasedTable::Open(
    const ImmutableCFOptions& ioptions, const EnvOptions& env_options,
    const BlockBasedTableOptions& table_options,
    const InternalKeyComparator& internal_comparator,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    std::unique_ptr<TableReader>* table_reader,
    const SliceTransform* prefix_extractor,
    const bool prefetch_index_and_filter_in_cache, const bool skip_filters,
    const int level, const bool immortal_table,
    const SequenceNumber largest_seqno, TailPrefetchStats* tail_prefetch_stats,
    BlockCacheTracer* const block_cache_tracer) {
  table_reader->reset();

  Status s;
  Footer footer;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;

  // prefetch both index and filters, down to all partitions
  const bool prefetch_all = prefetch_index_and_filter_in_cache || level == 0;
  const bool preload_all = !table_options.cache_index_and_filter_blocks;

  s = PrefetchTail(file.get(), file_size, tail_prefetch_stats, prefetch_all,
                   preload_all, &prefetch_buffer);

  // Read in the following order:
  //    1. Footer
  //    2. [metaindex block]
  //    3. [meta block: properties]
  //    4. [meta block: range deletion tombstone]
  //    5. [meta block: compression dictionary]
  //    6. [meta block: index]
  //    7. [meta block: filter]
  s = ReadFooterFromFile(file.get(), prefetch_buffer.get(), file_size, &footer,
                         kBlockBasedTableMagicNumber);
  if (!s.ok()) {
    return s;
  }
  if (!BlockBasedTableSupportedVersion(footer.version())) {
    return Status::Corruption(
        "Unknown Footer version. Maybe this file was created with newer "
        "version of RocksDB?");
  }

  // We've successfully read the footer. We are ready to serve requests.
  // Better not mutate rep_ after the creation, e.g. the
  // internal_prefix_transform raw pointer will be used to create
  // HashIndexReader, whose reset may access a dangling pointer.
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
                                      internal_comparator, skip_filters, level,
                                      immortal_table);
  rep->file = std::move(file);
  rep->footer = footer;
  rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
  // We need to wrap data with internal_prefix_transform to make sure it can
  // handle prefix correctly.
  rep->internal_prefix_transform.reset(
      new InternalKeySliceTransform(prefix_extractor));
  SetupCacheKeyPrefix(rep);
  std::unique_ptr<BlockBasedTable> new_table(
      new BlockBasedTable(rep, block_cache_tracer));

  // page cache options
  rep->persistent_cache_options =
      PersistentCacheOptions(rep->table_options.persistent_cache,
                             std::string(rep->persistent_cache_key_prefix,
                                         rep->persistent_cache_key_prefix_size),
                             rep->ioptions.statistics);

  // Meta-blocks are not dictionary compressed. Explicitly set the dictionary
  // handle to null, otherwise it may be seen as uninitialized during the below
  // meta-block reads.
  rep->compression_dict_handle = BlockHandle::NullBlockHandle();

  // Read metaindex
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  s = new_table->ReadMetaBlock(prefetch_buffer.get(), &meta, &meta_iter);
  if (!s.ok()) {
    return s;
  }

  // Populates table_properties and some fields that depend on it,
  // such as index_type.
  s = new_table->ReadPropertiesBlock(prefetch_buffer.get(), meta_iter.get(),
                                     largest_seqno);
  if (!s.ok()) {
    return s;
  }
  s = new_table->ReadRangeDelBlock(prefetch_buffer.get(), meta_iter.get(),
                                   internal_comparator, &lookup_context);
  if (!s.ok()) {
    return s;
  }
  s = new_table->PrefetchIndexAndFilterBlocks(
      prefetch_buffer.get(), meta_iter.get(), new_table.get(), prefetch_all,
      table_options, level, &lookup_context);

  if (s.ok()) {
    // Update tail prefetch stats
    assert(prefetch_buffer.get() != nullptr);
    if (tail_prefetch_stats != nullptr) {
      assert(prefetch_buffer->min_offset_read() < file_size);
      tail_prefetch_stats->RecordEffectiveSize(
          static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
    }

    *table_reader = std::move(new_table);
  }

  return s;
}

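// Prefetch the tail of the file (footer plus, heuristically, the meta blocks)
// so subsequent reads in Open() can be served from the buffer; the prefetch
// size is taken from tail_prefetch_stats when history is available.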
Status BlockBasedTable::PrefetchTail(
    RandomAccessFileReader* file, uint64_t file_size,
    TailPrefetchStats* tail_prefetch_stats, const bool prefetch_all,
    const bool preload_all,
    std::unique_ptr<FilePrefetchBuffer>* prefetch_buffer) {
  size_t tail_prefetch_size = 0;
  if (tail_prefetch_stats != nullptr) {
    // Multiple threads may get a 0 (no history) when running in parallel,
    // but it will get cleared after the first of them finishes.
    tail_prefetch_size = tail_prefetch_stats->GetSuggestedPrefetchSize();
  }
  if (tail_prefetch_size == 0) {
    // Before read footer, readahead backwards to prefetch data. Do more
    // readahead if we're going to read index/filter.
    // TODO: This may incorrectly select small readahead in case partitioned
    // index/filter is enabled and top-level partition pinning is enabled.
    // That's because we need to issue readahead before we read the properties,
    // at which point we don't yet know the index type.
    tail_prefetch_size = prefetch_all || preload_all ? 512 * 1024 : 4 * 1024;
  }
  size_t prefetch_off;
  size_t prefetch_len;
  if (file_size < tail_prefetch_size) {
    prefetch_off = 0;
    prefetch_len = static_cast<size_t>(file_size);
  } else {
    prefetch_off = static_cast<size_t>(file_size - tail_prefetch_size);
    prefetch_len = tail_prefetch_size;
  }
  TEST_SYNC_POINT_CALLBACK("BlockBasedTable::Open::TailPrefetchLen",
                           &tail_prefetch_size);
  Status s;
  // TODO should not have this special logic in the future.
  if (!file->use_direct_io()) {
    prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, false, true));
    s = file->Prefetch(prefetch_off, prefetch_len);
  } else {
    prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, true, true));
    s = (*prefetch_buffer)->Prefetch(file, prefetch_off, prefetch_len);
  }
  return s;
}

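// Compute the checksum of buf[0, len) with the given algorithm and compare it
// against `expected`; returns Corruption on a mismatch or an unknown type.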
Status VerifyChecksum(const ChecksumType type, const char* buf, size_t len,
                      uint32_t expected) {
  Status s;
  uint32_t actual = 0;
  switch (type) {
    case kNoChecksum:
      break;
    case kCRC32c:
      expected = crc32c::Unmask(expected);
      actual = crc32c::Value(buf, len);
      break;
    case kxxHash:
      actual = XXH32(buf, static_cast<int>(len), 0);
      break;
    case kxxHash64:
      actual = static_cast<uint32_t>(XXH64(buf, static_cast<int>(len), 0) &
                                     uint64_t{0xffffffff});
      break;
    default:
      s = Status::Corruption("unknown checksum type");
  }
  if (s.ok() && actual != expected) {
    s = Status::Corruption("properties block checksum mismatched");
  }
  return s;
}

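// Re-read the properties block without checksum verification, zero out the
// global seqno field in a private copy of the block, and verify the checksum
// against that copy (see the comment in the function body for why).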
Status BlockBasedTable::TryReadPropertiesWithGlobalSeqno(
    FilePrefetchBuffer* prefetch_buffer, const Slice& handle_value,
    TableProperties** table_properties) {
  assert(table_properties != nullptr);
  // If this is an external SST file ingested with write_global_seqno set to
  // true, then we expect the checksum mismatch because checksum was written
  // by SstFileWriter, but its global seqno in the properties block may have
  // been changed during ingestion. In this case, we read the properties
  // block, copy it to a memory buffer, change the global seqno to its
  // original value, i.e. 0, and verify the checksum again.
  BlockHandle props_block_handle;
  CacheAllocationPtr tmp_buf;
  Status s = ReadProperties(handle_value, rep_->file.get(), prefetch_buffer,
                            rep_->footer, rep_->ioptions, table_properties,
                            false /* verify_checksum */, &props_block_handle,
                            &tmp_buf, false /* compression_type_missing */,
                            nullptr /* memory_allocator */);
  if (s.ok() && tmp_buf) {
    const auto seqno_pos_iter =
        (*table_properties)
            ->properties_offsets.find(
                ExternalSstFilePropertyNames::kGlobalSeqno);
    size_t block_size = static_cast<size_t>(props_block_handle.size());
    if (seqno_pos_iter != (*table_properties)->properties_offsets.end()) {
      uint64_t global_seqno_offset = seqno_pos_iter->second;
      EncodeFixed64(
          tmp_buf.get() + global_seqno_offset - props_block_handle.offset(), 0);
    }
    uint32_t value = DecodeFixed32(tmp_buf.get() + block_size + 1);
    s = rocksdb::VerifyChecksum(rep_->footer.checksum(), tmp_buf.get(),
                                block_size + 1, value);
  }
  return s;
}

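// Locate and parse the properties meta block, then populate the Rep fields
// derived from it (compression, filtering flags, index type, global seqno).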
Status BlockBasedTable::ReadPropertiesBlock(
1295
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
1296
    const SequenceNumber largest_seqno) {
1297
  bool found_properties_block = true;
1298 1299
  Status s;
  s = SeekToPropertiesBlock(meta_iter, &found_properties_block);
1300

1301
  if (!s.ok()) {
1302
    ROCKS_LOG_WARN(rep_->ioptions.info_log,
1303 1304
                   "Error when seeking to properties block from file: %s",
                   s.ToString().c_str());
1305
  } else if (found_properties_block) {
K
Kai Liu 已提交
1306
    s = meta_iter->status();
K
kailiu 已提交
1307
    TableProperties* table_properties = nullptr;
K
Kai Liu 已提交
1308
    if (s.ok()) {
1309
      s = ReadProperties(
1310 1311
          meta_iter->value(), rep_->file.get(), prefetch_buffer, rep_->footer,
          rep_->ioptions, &table_properties, true /* verify_checksum */,
1312 1313 1314 1315 1316
          nullptr /* ret_block_handle */, nullptr /* ret_block_contents */,
          false /* compression_type_missing */, nullptr /* memory_allocator */);
    }

    if (s.IsCorruption()) {
1317 1318
      s = TryReadPropertiesWithGlobalSeqno(prefetch_buffer, meta_iter->value(),
                                           &table_properties);
1319 1320 1321 1322
    }
    std::unique_ptr<TableProperties> props_guard;
    if (table_properties != nullptr) {
      props_guard.reset(table_properties);
K
Kai Liu 已提交
1323
    }
J
jorlow@chromium.org 已提交
1324

K
Kai Liu 已提交
1325
    if (!s.ok()) {
1326
      ROCKS_LOG_WARN(rep_->ioptions.info_log,
1327 1328 1329
                     "Encountered error while reading data from properties "
                     "block %s",
                     s.ToString().c_str());
K
kailiu 已提交
1330
    } else {
1331
      assert(table_properties != nullptr);
1332 1333 1334 1335 1336 1337
      rep_->table_properties.reset(props_guard.release());
      rep_->blocks_maybe_compressed =
          rep_->table_properties->compression_name !=
          CompressionTypeToString(kNoCompression);
      rep_->blocks_definitely_zstd_compressed =
          (rep_->table_properties->compression_name ==
1338
               CompressionTypeToString(kZSTD) ||
1339
           rep_->table_properties->compression_name ==
1340
               CompressionTypeToString(kZSTDNotFinalCompression));
K
Kai Liu 已提交
1341
    }
1342
  } else {
1343
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
1344
                    "Cannot find Properties block from file.");
K
Kai Liu 已提交
1345
  }
1346
#ifndef ROCKSDB_LITE
1347 1348 1349
  if (rep_->table_properties) {
    ParseSliceTransform(rep_->table_properties->prefix_extractor_name,
                        &(rep_->table_prefix_extractor));
1350 1351
  }
#endif  // ROCKSDB_LITE
K
Kai Liu 已提交
1352

  // Apply settings from the table properties, if they were found.
  if (rep_->table_properties) {
    rep_->whole_key_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kWholeKeyFiltering,
                           rep_->ioptions.info_log);
    rep_->prefix_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kPrefixFiltering,
                           rep_->ioptions.info_log);

    rep_->index_key_includes_seq =
        rep_->table_properties->index_key_is_user_key == 0;
    rep_->index_value_is_full =
        rep_->table_properties->index_value_is_delta_encoded == 0;

    // Update index_type with the true type.
    // If table properties don't contain index type, we assume that the table
    // is in a very old format and has the kBinarySearch index type.
    auto& props = rep_->table_properties->user_collected_properties;
    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
    if (pos != props.end()) {
      rep_->index_type = static_cast<BlockBasedTableOptions::IndexType>(
          DecodeFixed32(pos->second.c_str()));
    }

    rep_->index_has_first_key =
        rep_->index_type == BlockBasedTableOptions::kBinarySearchWithFirstKey;

    s = GetGlobalSequenceNumber(*(rep_->table_properties), largest_seqno,
                                &(rep_->global_seqno));
    if (!s.ok()) {
      ROCKS_LOG_ERROR(rep_->ioptions.info_log, "%s", s.ToString().c_str());
    }
  }
  return s;
}

Status BlockBasedTable::ReadRangeDelBlock(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    const InternalKeyComparator& internal_comparator,
    BlockCacheLookupContext* lookup_context) {
  Status s;
  bool found_range_del_block;
  BlockHandle range_del_handle;
  s = SeekToRangeDelBlock(meta_iter, &found_range_del_block, &range_del_handle);
  if (!s.ok()) {
    ROCKS_LOG_WARN(
        rep_->ioptions.info_log,
        "Error when seeking to range delete tombstones block from file: %s",
        s.ToString().c_str());
  } else if (found_range_del_block && !range_del_handle.IsNull()) {
    ReadOptions read_options;
    std::unique_ptr<InternalIterator> iter(NewDataBlockIterator<DataBlockIter>(
        read_options, range_del_handle,
        /*input_iter=*/nullptr, BlockType::kRangeDeletion,
        /*get_context=*/nullptr, lookup_context, Status(), prefetch_buffer));
    assert(iter != nullptr);
    s = iter->status();
    if (!s.ok()) {
      ROCKS_LOG_WARN(
          rep_->ioptions.info_log,
          "Encountered error while reading data from range del block %s",
          s.ToString().c_str());
    } else {
      rep_->fragmented_range_dels =
          std::make_shared<FragmentedRangeTombstoneList>(std::move(iter),
                                                         internal_comparator);
    }
  }
  return s;
}
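
// For intuition (an illustrative example, not code from this file): the
// FragmentedRangeTombstoneList built above splits overlapping tombstones into
// non-overlapping fragments so lookups can binary-search them. E.g., the
// tombstones [a, e)@30 and [c, g)@20 become
//   [a, c) -> {30},  [c, e) -> {30, 20},  [e, g) -> {20}.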

Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    BlockBasedTable* new_table, bool prefetch_all,
    const BlockBasedTableOptions& table_options, const int level,
    BlockCacheLookupContext* lookup_context) {
  Status s;

  // Find filter handle and filter type
  if (rep_->filter_policy) {
    for (auto filter_type :
         {Rep::FilterType::kFullFilter, Rep::FilterType::kPartitionedFilter,
          Rep::FilterType::kBlockFilter}) {
      std::string prefix;
      switch (filter_type) {
        case Rep::FilterType::kFullFilter:
          prefix = kFullFilterBlockPrefix;
          break;
        case Rep::FilterType::kPartitionedFilter:
          prefix = kPartitionedFilterBlockPrefix;
          break;
        case Rep::FilterType::kBlockFilter:
          prefix = kFilterBlockPrefix;
          break;
        default:
          assert(0);
      }
      std::string filter_block_key = prefix;
      filter_block_key.append(rep_->filter_policy->Name());
      if (FindMetaBlock(meta_iter, filter_block_key, &rep_->filter_handle)
              .ok()) {
        rep_->filter_type = filter_type;
        break;
      }
    }
  }
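
  // The meta-index keys probed above are the filter-type prefix concatenated
  // with FilterPolicy::Name(). An illustrative sketch, assuming the built-in
  // Bloom policy (whose Name() is "rocksdb.BuiltinBloomFilter"):
  //
  //   "fullfilter.rocksdb.BuiltinBloomFilter"         <- kFullFilterBlockPrefix
  //   "partitionedfilter.rocksdb.BuiltinBloomFilter"  <- kPartitionedFilterBlockPrefix
  //   "filter.rocksdb.BuiltinBloomFilter"             <- kFilterBlockPrefix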

  // Find compression dictionary handle
  bool found_compression_dict = false;
  s = SeekToCompressionDictBlock(meta_iter, &found_compression_dict,
                                 &rep_->compression_dict_handle);
  if (!s.ok()) {
    return s;
  }

  BlockBasedTableOptions::IndexType index_type = rep_->index_type;

  const bool use_cache = table_options.cache_index_and_filter_blocks;

  // pin both index and filters, down to all partitions
  const bool pin_all =
      rep_->table_options.pin_l0_filter_and_index_blocks_in_cache && level == 0;

  // prefetch the first level of index
  const bool prefetch_index =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
  // pin the first level of index
  const bool pin_index =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);

  std::unique_ptr<IndexReader> index_reader;
  s = new_table->CreateIndexReader(prefetch_buffer, meta_iter, use_cache,
                                   prefetch_index, pin_index, lookup_context,
                                   &index_reader);
  if (!s.ok()) {
    return s;
  }

  rep_->index_reader = std::move(index_reader);

  // The partitions of a partitioned index are always stored in the block
  // cache. They hence follow the configuration for pin and prefetch
  // regardless of the value of cache_index_and_filter_blocks.
  if (prefetch_all) {
    rep_->index_reader->CacheDependencies(pin_all);
  }

  // prefetch the first level of filter
  const bool prefetch_filter =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       rep_->filter_type == Rep::FilterType::kPartitionedFilter);
  // Partition filters cannot be enabled without partition indexes
  assert(!prefetch_filter || prefetch_index);
  // pin the first level of filter
  const bool pin_filter =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  rep_->filter_type == Rep::FilterType::kPartitionedFilter);

  if (rep_->filter_policy) {
    auto filter = new_table->CreateFilterBlockReader(
        prefetch_buffer, use_cache, prefetch_filter, pin_filter,
        lookup_context);
    if (filter) {
      // Refer to the comment above about partitioned indexes always being
      // cached
      if (prefetch_all) {
        filter->CacheDependencies(pin_all);
      }

      rep_->filter = std::move(filter);
    }
  }

  if (!rep_->compression_dict_handle.IsNull()) {
    std::unique_ptr<UncompressionDictReader> uncompression_dict_reader;
    s = UncompressionDictReader::Create(this, prefetch_buffer, use_cache,
                                        prefetch_all, pin_all, lookup_context,
                                        &uncompression_dict_reader);
    if (!s.ok()) {
      return s;
    }

    rep_->uncompression_dict_reader = std::move(uncompression_dict_reader);
  }

  assert(s.ok());
  return s;
}
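
// Informal summary (derived from the logic above, not a separate contract) of
// how prefetch/pin are decided for the top-level index and filter:
//
//   prefetch_all == true               -> prefetch index and filter, and
//                                         cache all their partitions
//   pin_all (pin_l0_... && level == 0) -> additionally pin what is cached
//   pin_top_level_index_and_filter with a two-level index or partitioned
//   filter                             -> prefetch and pin just the top level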

void BlockBasedTable::SetupForCompaction() {
  switch (rep_->ioptions.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->file()->Hint(RandomAccessFile::NORMAL);
      break;
    case Options::SEQUENTIAL:
      rep_->file->file()->Hint(RandomAccessFile::SEQUENTIAL);
      break;
    case Options::WILLNEED:
      rep_->file->file()->Hint(RandomAccessFile::WILLNEED);
      break;
    default:
      assert(false);
  }
}
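
// A minimal configuration sketch (illustrative; plain RocksDB options, not
// code from this file) for the hint consumed above:
//
//   // Options options;
//   // options.access_hint_on_compaction_start = Options::SEQUENTIAL;
//
// SetupForCompaction() then forwards the hint to the underlying
// RandomAccessFile, which may translate it to e.g. posix_fadvise.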

std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
    const {
  return rep_->table_properties;
}

size_t BlockBasedTable::ApproximateMemoryUsage() const {
  size_t usage = 0;
  if (rep_->filter) {
    usage += rep_->filter->ApproximateMemoryUsage();
  }
  if (rep_->index_reader) {
    usage += rep_->index_reader->ApproximateMemoryUsage();
  }
  if (rep_->uncompression_dict_reader) {
    usage += rep_->uncompression_dict_reader->ApproximateMemoryUsage();
  }
  return usage;
}

// Load the meta block from the file. On success, return the loaded meta block
// and its iterator.
Status BlockBasedTable::ReadMetaBlock(FilePrefetchBuffer* prefetch_buffer,
                                      std::unique_ptr<Block>* meta_block,
                                      std::unique_ptr<InternalIterator>* iter) {
  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
  // it is an empty block.
  std::unique_ptr<Block> meta;
  Status s = ReadBlockFromFile(
      rep_->file.get(), prefetch_buffer, rep_->footer, ReadOptions(),
      rep_->footer.metaindex_handle(), &meta, rep_->ioptions,
      true /* decompress */, true /*maybe_compressed*/, BlockType::kMetaIndex,
      UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options,
      kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */,
      GetMemoryAllocator(rep_->table_options));

  if (!s.ok()) {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Encountered error while reading data from the meta-index"
                    " block %s",
                    s.ToString().c_str());
    return s;
  }

  *meta_block = std::move(meta);
  // meta block uses bytewise comparator.
  iter->reset(meta_block->get()->NewDataIterator(BytewiseComparator(),
                                                 BytewiseComparator()));
  return Status::OK();
}
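
// A caller-side sketch (illustrative) of how the pieces above compose during
// table open: read the meta-index first, then locate the other meta blocks
// through its iterator.
//
//   // std::unique_ptr<Block> metaindex;
//   // std::unique_ptr<InternalIterator> metaindex_iter;
//   // Status s = new_table->ReadMetaBlock(prefetch_buffer, &metaindex,
//   //                                     &metaindex_iter);
//   // if (s.ok()) {
//   //   s = new_table->ReadPropertiesBlock(prefetch_buffer,
//   //                                      metaindex_iter.get(),
//   //                                      largest_seqno);
//   // }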

template <typename TBlocklike>
class BlocklikeTraits;

template <>
class BlocklikeTraits<BlockContents> {
 public:
  static BlockContents* Create(BlockContents&& contents,
                               SequenceNumber /* global_seqno */,
                               size_t /* read_amp_bytes_per_bit */,
                               Statistics* /* statistics */) {
    return new BlockContents(std::move(contents));
  }

  static uint32_t GetNumRestarts(const BlockContents& /* contents */) {
    return 0;
  }
};

template <>
class BlocklikeTraits<Block> {
 public:
  static Block* Create(BlockContents&& contents, SequenceNumber global_seqno,
                       size_t read_amp_bytes_per_bit, Statistics* statistics) {
    return new Block(std::move(contents), global_seqno, read_amp_bytes_per_bit,
                     statistics);
  }

  static uint32_t GetNumRestarts(const Block& block) {
    return block.NumRestarts();
  }
};
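
// Usage sketch (illustrative) for the traits above: the templated cache
// helpers below call BlocklikeTraits<TBlocklike>::Create() so that one code
// path can materialize either a parsed Block or raw BlockContents.
//
//   // BlockContents contents = ...;  // e.g. produced by a BlockFetcher
//   // std::unique_ptr<Block> parsed(BlocklikeTraits<Block>::Create(
//   //     std::move(contents), global_seqno, read_amp_bytes_per_bit,
//   //     statistics));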

template <typename TBlocklike>
Status BlockBasedTable::GetDataBlockFromCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    const ReadOptions& read_options, CachableEntry<TBlocklike>* block,
    const UncompressionDict& uncompression_dict, BlockType block_type,
    GetContext* get_context) const {
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  assert(block);
  assert(block->IsEmpty());

  Status s;
  BlockContents* compressed_block = nullptr;
  Cache::Handle* block_cache_compressed_handle = nullptr;

  // Lookup uncompressed cache first
  if (block_cache != nullptr) {
    auto cache_handle = GetEntryFromCache(block_cache, block_cache_key,
                                          block_type, get_context);
    if (cache_handle != nullptr) {
      block->SetCachedValue(
          reinterpret_cast<TBlocklike*>(block_cache->Value(cache_handle)),
          block_cache, cache_handle);
      return s;
    }
  }

  // If not found, search the compressed block cache.
  assert(block->IsEmpty());

  if (block_cache_compressed == nullptr) {
    return s;
  }

  assert(!compressed_block_cache_key.empty());
  block_cache_compressed_handle =
      block_cache_compressed->Lookup(compressed_block_cache_key);

  Statistics* statistics = rep_->ioptions.statistics;

  // if we found the block in the compressed cache, uncompress it and insert
  // it into the uncompressed cache
  if (block_cache_compressed_handle == nullptr) {
    RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
    return s;
  }

  // found compressed block
  RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
  compressed_block = reinterpret_cast<BlockContents*>(
      block_cache_compressed->Value(block_cache_compressed_handle));
  CompressionType compression_type = compressed_block->get_compression_type();
  assert(compression_type != kNoCompression);

  // Retrieve the uncompressed contents into a new buffer
  BlockContents contents;
  UncompressionContext context(compression_type);
  UncompressionInfo info(context, uncompression_dict, compression_type);
  s = UncompressBlockContents(
      info, compressed_block->data.data(), compressed_block->data.size(),
      &contents, rep_->table_options.format_version, rep_->ioptions,
      GetMemoryAllocator(rep_->table_options));

  // Insert uncompressed block into block cache
  if (s.ok()) {
    std::unique_ptr<TBlocklike> block_holder(
        BlocklikeTraits<TBlocklike>::Create(
            std::move(contents), rep_->get_global_seqno(block_type),
            read_amp_bytes_per_bit, statistics));  // uncompressed block

    if (block_cache != nullptr && block_holder->own_bytes() &&
        read_options.fill_cache) {
      size_t charge = block_holder->ApproximateMemoryUsage();
      Cache::Handle* cache_handle = nullptr;
      s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                              &DeleteCachedEntry<TBlocklike>, &cache_handle);
      if (s.ok()) {
        assert(cache_handle != nullptr);
        block->SetCachedValue(block_holder.release(), block_cache,
                              cache_handle);

        UpdateCacheInsertionMetrics(block_type, get_context, charge);
      } else {
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
      }
    } else {
      block->SetOwnedValue(block_holder.release());
    }
  }

  // Release hold on compressed cache entry
  block_cache_compressed->Release(block_cache_compressed_handle);
  return s;
}
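
// Call-shape sketch (illustrative; the real call site is in
// MaybeReadBlockAndLoadToCache below):
//
//   // CachableEntry<Block> block;
//   // Status s = GetDataBlockFromCache(key, ckey, block_cache,
//   //                                  block_cache_compressed, read_options,
//   //                                  &block, uncompression_dict,
//   //                                  BlockType::kData, get_context);
//
// On success, `block` either holds a cache handle (cached value) or owns a
// freshly uncompressed block promoted from the compressed cache.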

template <typename TBlocklike>
Status BlockBasedTable::PutDataBlockToCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    CachableEntry<TBlocklike>* cached_block, BlockContents* raw_block_contents,
    CompressionType raw_block_comp_type,
    const UncompressionDict& uncompression_dict, SequenceNumber seq_no,
    MemoryAllocator* memory_allocator, BlockType block_type,
    GetContext* get_context) const {
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  const uint32_t format_version = rep_->table_options.format_version;
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  const Cache::Priority priority =
      rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
              (block_type == BlockType::kFilter ||
               block_type == BlockType::kCompressionDictionary ||
               block_type == BlockType::kIndex)
          ? Cache::Priority::HIGH
          : Cache::Priority::LOW;
  assert(cached_block);
  assert(cached_block->IsEmpty());

  Status s;
  Statistics* statistics = ioptions.statistics;

  std::unique_ptr<TBlocklike> block_holder;
  if (raw_block_comp_type != kNoCompression) {
    // Retrieve the uncompressed contents into a new buffer
    BlockContents uncompressed_block_contents;
    UncompressionContext context(raw_block_comp_type);
    UncompressionInfo info(context, uncompression_dict, raw_block_comp_type);
    s = UncompressBlockContents(info, raw_block_contents->data.data(),
                                raw_block_contents->data.size(),
                                &uncompressed_block_contents, format_version,
                                ioptions, memory_allocator);
    if (!s.ok()) {
      return s;
    }

    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(uncompressed_block_contents), seq_no, read_amp_bytes_per_bit,
        statistics));
  } else {
    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(*raw_block_contents), seq_no, read_amp_bytes_per_bit,
        statistics));
  }

  // Insert compressed block into compressed block cache.
  // Release the hold on the compressed cache entry immediately.
  if (block_cache_compressed != nullptr &&
      raw_block_comp_type != kNoCompression && raw_block_contents != nullptr &&
      raw_block_contents->own_bytes()) {
#ifndef NDEBUG
    assert(raw_block_contents->is_raw_block);
#endif  // NDEBUG

    // We cannot directly put raw_block_contents because this could point to
    // an object on the stack.
    BlockContents* block_cont_for_comp_cache =
        new BlockContents(std::move(*raw_block_contents));
    s = block_cache_compressed->Insert(
        compressed_block_cache_key, block_cont_for_comp_cache,
        block_cont_for_comp_cache->ApproximateMemoryUsage(),
        &DeleteCachedEntry<BlockContents>);
    if (s.ok()) {
      // Avoid having the code below delete this cached block.
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
    } else {
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
      delete block_cont_for_comp_cache;
    }
  }

  // insert into uncompressed block cache
  if (block_cache != nullptr && block_holder->own_bytes()) {
    size_t charge = block_holder->ApproximateMemoryUsage();
    Cache::Handle* cache_handle = nullptr;
    s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                            &DeleteCachedEntry<TBlocklike>, &cache_handle,
                            priority);
    if (s.ok()) {
      assert(cache_handle != nullptr);
      cached_block->SetCachedValue(block_holder.release(), block_cache,
                                   cache_handle);

      UpdateCacheInsertionMetrics(block_type, get_context, charge);
    } else {
      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
    }
  } else {
    cached_block->SetOwnedValue(block_holder.release());
  }

  return s;
}
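
// Note on the Cache::Priority computed above: when
// cache_index_and_filter_blocks_with_high_priority is set, filter, index, and
// compression-dictionary blocks are inserted at HIGH priority so scans over
// data blocks are less likely to evict them. A configuration sketch
// (illustrative):
//
//   // BlockBasedTableOptions table_options;
//   // table_options.cache_index_and_filter_blocks = true;
//   // table_options.cache_index_and_filter_blocks_with_high_priority = true;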

std::unique_ptr<FilterBlockReader> BlockBasedTable::CreateFilterBlockReader(
    FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context) {
  auto& rep = rep_;
  auto filter_type = rep->filter_type;
  if (filter_type == Rep::FilterType::kNoFilter) {
    return std::unique_ptr<FilterBlockReader>();
  }

  assert(rep->filter_policy);

  switch (filter_type) {
    case Rep::FilterType::kPartitionedFilter:
      return PartitionedFilterBlockReader::Create(
          this, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kBlockFilter:
      return BlockBasedFilterBlockReader::Create(
          this, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kFullFilter:
      return FullFilterBlockReader::Create(this, prefetch_buffer, use_cache,
                                           prefetch, pin, lookup_context);

    default:
      // filter_type is either kNoFilter (exited the function at the first if),
      // or it must be covered in this switch block
      assert(false);
      return std::unique_ptr<FilterBlockReader>();
  }
}

// disable_prefix_seek should be set to true when prefix_extractor found in SST
// differs from the one in mutable_cf_options and index type is HashBasedIndex
InternalIteratorBase<IndexValue>* BlockBasedTable::NewIndexIterator(
    const ReadOptions& read_options, bool disable_prefix_seek,
    IndexBlockIter* input_iter, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  assert(rep_ != nullptr);
  assert(rep_->index_reader != nullptr);

  // We don't return pinned data from index blocks, so no need
  // to set `block_contents_pinned`.
  return rep_->index_reader->NewIterator(read_options, disable_prefix_seek,
                                         input_iter, get_context,
                                         lookup_context);
}

// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
// If input_iter is null, a new iterator is allocated and returned.
// If input_iter is not null, this iterator is updated and returned.
template <typename TBlockIter>
TBlockIter* BlockBasedTable::NewDataBlockIterator(
    const ReadOptions& ro, const BlockHandle& handle, TBlockIter* input_iter,
    BlockType block_type, GetContext* get_context,
    BlockCacheLookupContext* lookup_context, Status s,
    FilePrefetchBuffer* prefetch_buffer, bool for_compaction) const {
  PERF_TIMER_GUARD(new_table_block_iter_nanos);

  TBlockIter* iter = input_iter != nullptr ? input_iter : new TBlockIter;
  if (!s.ok()) {
    iter->Invalidate(s);
    return iter;
  }

  UncompressionDict uncompression_dict;
  if (rep_->uncompression_dict_reader) {
    const bool no_io = (ro.read_tier == kBlockCacheTier);
    s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
        prefetch_buffer, no_io, get_context, lookup_context,
        &uncompression_dict);
    if (!s.ok()) {
      iter->Invalidate(s);
      return iter;
    }
  }

  CachableEntry<Block> block;
  s = RetrieveBlock(prefetch_buffer, ro, handle, uncompression_dict, &block,
                    block_type, get_context, lookup_context, for_compaction,
                    /* use_cache */ true);

  if (!s.ok()) {
    assert(block.IsEmpty());
    iter->Invalidate(s);
    return iter;
  }

  assert(block.GetValue() != nullptr);

  // Block contents are pinned and it is still pinned after the iterator
  // is destroyed as long as cleanup functions are moved to another object,
  // when:
  // 1. block cache handle is set to be released in cleanup function, or
  // 2. it's pointing to immortal source. If own_bytes is true then we are
  //    not reading data from the original source, whether immortal or not.
  //    Otherwise, the block is pinned iff the source is immortal.
  const bool block_contents_pinned =
      block.IsCached() ||
      (!block.GetValue()->own_bytes() && rep_->immortal_table);
  iter = InitBlockIterator<TBlockIter>(rep_, block.GetValue(), iter,
                                       block_contents_pinned);

  if (!block.IsCached()) {
    if (!ro.fill_cache && rep_->cache_key_prefix_size != 0) {
      // insert a dummy record to block cache to track the memory usage
      Cache* const block_cache = rep_->table_options.block_cache.get();
      Cache::Handle* cache_handle = nullptr;
      // There are two other types of cache keys: 1) SST cache key added in
      // `MaybeReadBlockAndLoadToCache` 2) dummy cache key added in
      // `write_buffer_manager`. Use longer prefix (41 bytes) to differentiate
      // from SST cache key (31 bytes), and use a non-zero prefix to
      // differentiate from `write_buffer_manager`.
      const size_t kExtraCacheKeyPrefix = kMaxVarint64Length * 4 + 1;
      char cache_key[kExtraCacheKeyPrefix + kMaxVarint64Length];
      // Prefix: use rep_->cache_key_prefix padded by 0s
      memset(cache_key, 0, kExtraCacheKeyPrefix + kMaxVarint64Length);
      assert(rep_->cache_key_prefix_size != 0);
      assert(rep_->cache_key_prefix_size <= kExtraCacheKeyPrefix);
      memcpy(cache_key, rep_->cache_key_prefix, rep_->cache_key_prefix_size);
      char* end = EncodeVarint64(cache_key + kExtraCacheKeyPrefix,
                                 next_cache_key_id_++);
      assert(end - cache_key <=
             static_cast<int>(kExtraCacheKeyPrefix + kMaxVarint64Length));
      const Slice unique_key(cache_key, static_cast<size_t>(end - cache_key));
      s = block_cache->Insert(unique_key, nullptr,
                              block.GetValue()->ApproximateMemoryUsage(),
                              nullptr, &cache_handle);

      if (s.ok()) {
        assert(cache_handle != nullptr);
        iter->RegisterCleanup(&ForceReleaseCachedEntry, block_cache,
                              cache_handle);
      }
    }
  } else {
    iter->SetCacheHandle(block.GetCacheHandle());
  }

  block.TransferTo(iter);

  return iter;
}
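
// Key-layout sketch (illustrative) for the dummy cache entry inserted above,
// assuming kMaxVarint64Length == 10 so kExtraCacheKeyPrefix == 41:
//
//   bytes [0, cache_key_prefix_size)   rep_->cache_key_prefix (non-zero)
//   bytes [cache_key_prefix_size, 41)  zero padding
//   bytes [41, 41 + varint length)     varint64 of next_cache_key_id_
//
// The 41-byte prefix cannot collide with the 31-byte SST block cache keys,
// and the non-zero prefix bytes distinguish it from write_buffer_manager's
// dummy entries.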

template <>
DataBlockIter* BlockBasedTable::InitBlockIterator<DataBlockIter>(
    const Rep* rep, Block* block, DataBlockIter* input_iter,
    bool block_contents_pinned) {
  return block->NewDataIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      input_iter, rep->ioptions.statistics, block_contents_pinned);
}

template <>
IndexBlockIter* BlockBasedTable::InitBlockIterator<IndexBlockIter>(
    const Rep* rep, Block* block, IndexBlockIter* input_iter,
    bool block_contents_pinned) {
  return block->NewIndexIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      input_iter, rep->ioptions.statistics, /* total_order_seek */ true,
      rep->index_has_first_key, rep->index_key_includes_seq,
      rep->index_value_is_full, block_contents_pinned);
}

// Convert an uncompressed data block (i.e., CachableEntry<Block>)
// into an iterator over the contents of the corresponding block.
// If input_iter is null, a new iterator is allocated and returned.
// If input_iter is not null, this iterator is updated and returned.
template <typename TBlockIter>
TBlockIter* BlockBasedTable::NewDataBlockIterator(
    const ReadOptions& ro, CachableEntry<Block>& block, TBlockIter* input_iter,
    Status s) const {
  PERF_TIMER_GUARD(new_table_block_iter_nanos);

  TBlockIter* iter = input_iter != nullptr ? input_iter : new TBlockIter;
  if (!s.ok()) {
    iter->Invalidate(s);
    return iter;
  }

  assert(block.GetValue() != nullptr);
  // Block contents are pinned and it is still pinned after the iterator
  // is destroyed as long as cleanup functions are moved to another object,
  // when:
  // 1. block cache handle is set to be released in cleanup function, or
  // 2. it's pointing to immortal source. If own_bytes is true then we are
  //    not reading data from the original source, whether immortal or not.
  //    Otherwise, the block is pinned iff the source is immortal.
  const bool block_contents_pinned =
      block.IsCached() ||
      (!block.GetValue()->own_bytes() && rep_->immortal_table);
  iter = InitBlockIterator<TBlockIter>(rep_, block.GetValue(), iter,
                                       block_contents_pinned);

  if (!block.IsCached()) {
    if (!ro.fill_cache && rep_->cache_key_prefix_size != 0) {
      // insert a dummy record to block cache to track the memory usage
      Cache* const block_cache = rep_->table_options.block_cache.get();
      Cache::Handle* cache_handle = nullptr;
      // There are two other types of cache keys: 1) SST cache key added in
      // `MaybeReadBlockAndLoadToCache` 2) dummy cache key added in
      // `write_buffer_manager`. Use longer prefix (41 bytes) to differentiate
      // from SST cache key (31 bytes), and use a non-zero prefix to
      // differentiate from `write_buffer_manager`.
      const size_t kExtraCacheKeyPrefix = kMaxVarint64Length * 4 + 1;
      char cache_key[kExtraCacheKeyPrefix + kMaxVarint64Length];
      // Prefix: use rep_->cache_key_prefix padded by 0s
      memset(cache_key, 0, kExtraCacheKeyPrefix + kMaxVarint64Length);
      assert(rep_->cache_key_prefix_size != 0);
      assert(rep_->cache_key_prefix_size <= kExtraCacheKeyPrefix);
      memcpy(cache_key, rep_->cache_key_prefix, rep_->cache_key_prefix_size);
      char* end = EncodeVarint64(cache_key + kExtraCacheKeyPrefix,
                                 next_cache_key_id_++);
      assert(end - cache_key <=
             static_cast<int>(kExtraCacheKeyPrefix + kMaxVarint64Length));
      const Slice unique_key(cache_key, static_cast<size_t>(end - cache_key));
      s = block_cache->Insert(unique_key, nullptr,
                              block.GetValue()->ApproximateMemoryUsage(),
                              nullptr, &cache_handle);
      if (s.ok()) {
        assert(cache_handle != nullptr);
        iter->RegisterCleanup(&ForceReleaseCachedEntry, block_cache,
                              cache_handle);
      }
    }
  } else {
    iter->SetCacheHandle(block.GetCacheHandle());
  }

  block.TransferTo(iter);
  return iter;
}

// Lookup the cache for the given data block referenced by an index iterator
// value (i.e., BlockHandle). If it exists in the cache, initialize block to
// the contents of the data block.
Status BlockBasedTable::GetDataBlockFromCache(
    const ReadOptions& ro, const BlockHandle& handle,
    const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block, BlockType block_type,
    GetContext* get_context) const {
  BlockCacheLookupContext lookup_data_block_context(
      TableReaderCaller::kUserMultiGet);
  assert(block_type == BlockType::kData);
  Status s = RetrieveBlock(nullptr, ro, handle, uncompression_dict, block,
                           block_type, get_context, &lookup_data_block_context,
                           /* for_compaction */ false, /* use_cache */ true);
  if (s.IsIncomplete()) {
    s = Status::OK();
  }

  return s;
}

// If contents is nullptr, this function looks up the block caches for the
// data block referenced by handle, and reads the block from disk if
// necessary.
// If contents is non-null, it skips the cache lookup and disk read, since
// the caller has already read it. In both cases, if ro.fill_cache is true,
// it inserts the block into the block cache.
template <typename TBlocklike>
Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    BlockContents* contents) const {
  assert(block_entry != nullptr);
  const bool no_io = (ro.read_tier == kBlockCacheTier);
  Cache* block_cache = rep_->table_options.block_cache.get();
  // No point in caching compressed blocks if the table never goes away
  Cache* block_cache_compressed =
      rep_->immortal_table ? nullptr
                           : rep_->table_options.block_cache_compressed.get();

  // First, try to get the block from the cache
  //
  // If either block cache is enabled, we'll try to read from it.
  Status s;
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice key /* key to the block cache */;
  Slice ckey /* key to the compressed block cache */;
  bool is_cache_hit = false;
  if (block_cache != nullptr || block_cache_compressed != nullptr) {
    // create key for block cache
    if (block_cache != nullptr) {
      key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                        handle, cache_key);
    }

    if (block_cache_compressed != nullptr) {
      ckey = GetCacheKey(rep_->compressed_cache_key_prefix,
                         rep_->compressed_cache_key_prefix_size, handle,
                         compressed_cache_key);
    }

    if (!contents) {
      s = GetDataBlockFromCache(key, ckey, block_cache, block_cache_compressed,
                                ro, block_entry, uncompression_dict, block_type,
                                get_context);
      if (block_entry->GetValue()) {
        // TODO(haoyu): Differentiate cache hit on uncompressed block cache and
        // compressed block cache.
        is_cache_hit = true;
      }
    }

    // Can't find the block in the cache. If I/O is allowed, read it from the
    // file.
    if (block_entry->GetValue() == nullptr && !no_io && ro.fill_cache) {
      Statistics* statistics = rep_->ioptions.statistics;
      const bool maybe_compressed =
          block_type != BlockType::kFilter &&
          block_type != BlockType::kCompressionDictionary &&
          rep_->blocks_maybe_compressed;
      const bool do_uncompress = maybe_compressed && !block_cache_compressed;
      CompressionType raw_block_comp_type;
      BlockContents raw_block_contents;
      if (!contents) {
        StopWatch sw(rep_->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
        BlockFetcher block_fetcher(
            rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle,
            &raw_block_contents, rep_->ioptions, do_uncompress,
            maybe_compressed, block_type, uncompression_dict,
            rep_->persistent_cache_options,
            GetMemoryAllocator(rep_->table_options),
            GetMemoryAllocatorForCompressedBlock(rep_->table_options));
        s = block_fetcher.ReadBlockContents();
        raw_block_comp_type = block_fetcher.get_compression_type();
        contents = &raw_block_contents;
      } else {
        raw_block_comp_type = contents->get_compression_type();
      }

      if (s.ok()) {
        SequenceNumber seq_no = rep_->get_global_seqno(block_type);
        // If filling cache is allowed and a cache is configured, try to put
        // the block into the cache.
        s = PutDataBlockToCache(key, ckey, block_cache, block_cache_compressed,
                                block_entry, contents,
                                raw_block_comp_type, uncompression_dict, seq_no,
                                GetMemoryAllocator(rep_->table_options),
                                block_type, get_context);
      }
    }
  }

  // Fill lookup_context.
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled() &&
      lookup_context) {
    size_t usage = 0;
    uint64_t nkeys = 0;
    if (block_entry->GetValue()) {
      // Approximate the number of keys in the block using restarts.
      nkeys =
          rep_->table_options.block_restart_interval *
          BlocklikeTraits<TBlocklike>::GetNumRestarts(*block_entry->GetValue());
      usage = block_entry->GetValue()->ApproximateMemoryUsage();
    }
    TraceType trace_block_type = TraceType::kTraceMax;
    switch (block_type) {
      case BlockType::kData:
        trace_block_type = TraceType::kBlockTraceDataBlock;
        break;
      case BlockType::kFilter:
        trace_block_type = TraceType::kBlockTraceFilterBlock;
        break;
      case BlockType::kCompressionDictionary:
        trace_block_type = TraceType::kBlockTraceUncompressionDictBlock;
        break;
      case BlockType::kRangeDeletion:
        trace_block_type = TraceType::kBlockTraceRangeDeletionBlock;
        break;
      case BlockType::kIndex:
        trace_block_type = TraceType::kBlockTraceIndexBlock;
        break;
      default:
        // This cannot happen.
        assert(false);
        break;
    }
    bool no_insert = no_io || !ro.fill_cache;
    if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(
            trace_block_type, lookup_context->caller)) {
      // Defer logging the access to Get() and MultiGet() to trace additional
      // information, e.g., referenced_key_exist_in_block.

      // Make a copy of the block key here since it will be logged later.
      lookup_context->FillLookupContext(
          is_cache_hit, no_insert, trace_block_type,
          /*block_size=*/usage, /*block_key=*/key.ToString(), nkeys);
    } else {
      // Avoid making a copy of block_key and cf_name when constructing the
      // access record.
      BlockCacheTraceRecord access_record(
          rep_->ioptions.env->NowMicros(),
          /*block_key=*/"", trace_block_type,
          /*block_size=*/usage, rep_->cf_id_for_tracing(),
          /*cf_name=*/"", rep_->level_for_tracing(),
          rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
          no_insert, lookup_context->get_id,
          lookup_context->get_from_user_specified_snapshot,
          /*referenced_key=*/"");
      block_cache_tracer_->WriteBlockAccess(access_record, key,
                                            rep_->cf_name_for_tracing(),
                                            lookup_context->referenced_key);
    }
  }

  assert(s.ok() || block_entry->GetValue() == nullptr);
  return s;
}
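
// Decision-flow sketch (an informal restatement of the method above):
//
//   // if contents == nullptr: look the block up in the cache(s)
//   //   hit  -> fill block_entry, trace, done
//   //   miss -> if (!no_io && ro.fill_cache): read via BlockFetcher
//   // if we now have contents (read or caller-supplied):
//   //   PutDataBlockToCache() into the uncompressed and, when configured,
//   //   compressed cache
//   // otherwise block_entry stays empty and the caller may fall back to a
//   // cache-bypassing read, as RetrieveBlock() does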

// This function reads multiple data blocks from disk using Env::MultiRead()
// and optionally inserts them into the block cache. It uses the scratch
// buffer provided by the caller, which is contiguous. If scratch is a nullptr
// it allocates a separate buffer for each block. Typically, if the blocks
// need to be uncompressed and there is no compressed block cache, callers
// can allocate a temporary scratch buffer in order to minimize memory
// allocations.
// If options.fill_cache is true, it inserts the blocks into the cache. If it's
// false and scratch is non-null and the blocks are uncompressed, it copies
// the buffers to heap. In any case, the CachableEntry<Block> returned will
// own the data bytes.
// batch - A MultiGetRange with only those keys with unique data blocks not
//         found in cache
// handles - A vector of block handles. Some of them may be NULL handles
// scratch - An optional contiguous buffer to read compressed blocks into
void BlockBasedTable::MaybeLoadBlocksToCache(
    const ReadOptions& options,
    const MultiGetRange* batch,
    const autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE>*  handles,
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE>* statuses,
    autovector<
      CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE>* results,
    char* scratch,
    const UncompressionDict& uncompression_dict) const {

  RandomAccessFileReader* file = rep_->file.get();
  const Footer& footer = rep_->footer;
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  SequenceNumber global_seqno = rep_->get_global_seqno(BlockType::kData);
  size_t read_amp_bytes_per_bit = rep_->table_options.read_amp_bytes_per_bit;
  MemoryAllocator* memory_allocator = GetMemoryAllocator(rep_->table_options);

  if (file->use_direct_io() || ioptions.allow_mmap_reads) {
    size_t idx_in_batch = 0;
    for (auto mget_iter = batch->begin(); mget_iter != batch->end();
         ++mget_iter, ++idx_in_batch) {
      BlockCacheLookupContext lookup_data_block_context(
          TableReaderCaller::kUserMultiGet);
      const BlockHandle& handle = (*handles)[idx_in_batch];
      if (handle.IsNull()) {
        continue;
      }

      (*statuses)[idx_in_batch] =
          RetrieveBlock(nullptr, options, handle, uncompression_dict,
                        &(*results)[idx_in_batch], BlockType::kData,
                        mget_iter->get_context, &lookup_data_block_context,
                        /* for_compaction */ false, /* use_cache */ true);
    }
    return;
  }

  autovector<ReadRequest, MultiGetContext::MAX_BATCH_SIZE> read_reqs;
  size_t buf_offset = 0;
  size_t idx_in_batch = 0;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];
    if (handle.IsNull()) {
      continue;
    }

    ReadRequest req;
    req.len = handle.size() + kBlockTrailerSize;
    if (scratch == nullptr) {
      req.scratch = new char[req.len];
    } else {
      req.scratch = scratch + buf_offset;
      buf_offset += req.len;
    }
    req.offset = handle.offset();
    req.status = Status::OK();
    read_reqs.emplace_back(req);
  }

  file->MultiRead(&read_reqs[0], read_reqs.size());

  size_t read_req_idx = 0;
  idx_in_batch = 0;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];

    if (handle.IsNull()) {
      continue;
    }

    ReadRequest& req = read_reqs[read_req_idx++];
    Status s = req.status;
    if (s.ok()) {
      if (req.result.size() != handle.size() + kBlockTrailerSize) {
        s = Status::Corruption("truncated block read from " +
                               rep_->file->file_name() + " offset " +
                               ToString(handle.offset()) + ", expected " +
                               ToString(handle.size() + kBlockTrailerSize) +
                               " bytes, got " + ToString(req.result.size()));
      }
    }

    BlockContents raw_block_contents;
    if (s.ok()) {
      if (scratch == nullptr) {
        // We allocated a buffer for this block. Give ownership of it to
        // BlockContents so it can free the memory
        assert(req.result.data() == req.scratch);
        std::unique_ptr<char[]> raw_block(req.scratch);
        raw_block_contents = BlockContents(std::move(raw_block),
                                 handle.size());
      } else {
        // We used the scratch buffer, so no need to free anything
        raw_block_contents = BlockContents(Slice(req.scratch,
                                 handle.size()));
      }
#ifndef NDEBUG
      raw_block_contents.is_raw_block = true;
#endif
      if (options.verify_checksums) {
        PERF_TIMER_GUARD(block_checksum_time);
        const char* data = req.result.data();
        uint32_t expected = DecodeFixed32(data + handle.size() + 1);
        s = rocksdb::VerifyChecksum(footer.checksum(), req.result.data(),
                                    handle.size() + 1, expected);
      }
    }
    if (s.ok()) {
      if (options.fill_cache) {
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet);
        CachableEntry<Block>* block_entry = &(*results)[idx_in_batch];
        // MaybeReadBlockAndLoadToCache will insert into the block caches if
        // necessary. Since we're passing the raw block contents, it will
        // avoid looking up the block cache
        s = MaybeReadBlockAndLoadToCache(nullptr, options, handle,
              uncompression_dict, block_entry, BlockType::kData,
              mget_iter->get_context, &lookup_data_block_context,
              &raw_block_contents);
      } else {
        CompressionType compression_type =
                raw_block_contents.get_compression_type();
        BlockContents contents;
        if (compression_type != kNoCompression) {
          UncompressionContext context(compression_type);
          UncompressionInfo info(context, uncompression_dict, compression_type);
          s = UncompressBlockContents(info, req.result.data(), handle.size(),
                    &contents, footer.version(), rep_->ioptions,
                    memory_allocator);
        } else {
          if (scratch != nullptr) {
            // If we used the scratch buffer, then the contents need to be
            // copied to heap
            Slice raw = Slice(req.result.data(), handle.size());
            contents = BlockContents(CopyBufferToHeap(
                  GetMemoryAllocator(rep_->table_options), raw),
                  handle.size());
          } else {
            contents = std::move(raw_block_contents);
          }
        }
        if (s.ok()) {
          (*results)[idx_in_batch].SetOwnedValue(new Block(std::move(contents),
                global_seqno, read_amp_bytes_per_bit, ioptions.statistics));
        }
      }
    }
    (*statuses)[idx_in_batch] = s;
  }
}
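
// Scratch-layout sketch (illustrative) for the buffered-I/O path above: with
// a caller-supplied `scratch`, the ReadRequests carve consecutive regions out
// of it, one per non-null handle, each handle.size() + kBlockTrailerSize
// bytes:
//
//   scratch: | block 0 + trailer | block 1 + trailer | ...
//             ^ req[0].scratch    ^ req[1].scratch
//
// With scratch == nullptr, each request owns a freshly allocated buffer that
// is later handed off to BlockContents.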

template <typename TBlocklike>
Status BlockBasedTable::RetrieveBlock(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const {
  assert(block_entry);
  assert(block_entry->IsEmpty());

  Status s;
  if (use_cache) {
    s = MaybeReadBlockAndLoadToCache(prefetch_buffer, ro, handle,
                                     uncompression_dict, block_entry,
                                     block_type, get_context, lookup_context,
                                     /*contents=*/nullptr);

    if (!s.ok()) {
      return s;
    }

    if (block_entry->GetValue() != nullptr) {
      assert(s.ok());
      return s;
    }
  }

  assert(block_entry->IsEmpty());

  const bool no_io = ro.read_tier == kBlockCacheTier;
  if (no_io) {
    return Status::Incomplete("no blocking io");
  }

  const bool maybe_compressed =
      block_type != BlockType::kFilter &&
      block_type != BlockType::kCompressionDictionary &&
      rep_->blocks_maybe_compressed;
  const bool do_uncompress = maybe_compressed;
  std::unique_ptr<TBlocklike> block;

  {
    StopWatch sw(rep_->ioptions.env, rep_->ioptions.statistics,
                 READ_BLOCK_GET_MICROS);
    s = ReadBlockFromFile(
        rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle, &block,
        rep_->ioptions, do_uncompress, maybe_compressed, block_type,
        uncompression_dict, rep_->persistent_cache_options,
        rep_->get_global_seqno(block_type),
        block_type == BlockType::kData
            ? rep_->table_options.read_amp_bytes_per_bit
            : 0,
        GetMemoryAllocator(rep_->table_options), for_compaction);
  }

  if (!s.ok()) {
    return s;
  }

  block_entry->SetOwnedValue(block.release());

  assert(s.ok());
  return s;
}

// Explicitly instantiate templates for both "blocklike" types we use.
// This makes it possible to keep the template definitions in the .cc file.
template Status BlockBasedTable::RetrieveBlock<BlockContents>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<BlockContents>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

template Status BlockBasedTable::RetrieveBlock<Block>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction, bool use_cache) const;

BlockBasedTable::PartitionedIndexIteratorState::PartitionedIndexIteratorState(
    const BlockBasedTable* table,
    std::unordered_map<uint64_t, CachableEntry<Block>>* block_map)
    : table_(table), block_map_(block_map) {}

InternalIteratorBase<IndexValue>*
BlockBasedTable::PartitionedIndexIteratorState::NewSecondaryIterator(
    const BlockHandle& handle) {
  // Return a block iterator on the index partition
  auto block = block_map_->find(handle.offset());
  // block_map_ may not contain the partition if the block cache did not have
  // space for it; in that case, fall through and return an empty iterator
  if (block != block_map_->end()) {
    const Rep* rep = table_->get_rep();
    assert(rep);

    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    return block->second.GetValue()->NewIndexIterator(
        &rep->internal_comparator, rep->internal_comparator.user_comparator(),
        nullptr, kNullStats, true, rep->index_has_first_key,
        rep->index_key_includes_seq, rep->index_value_is_full);
  }
  // Create an empty iterator
  return new IndexBlockIter();
}

// This will be broken if the user specifies an unusual implementation
// of Options.comparator, or if the user specifies an unusual
// definition of prefixes in BlockBasedTableOptions.filter_policy.
// In particular, we require the following three properties:
//
// 1) key.starts_with(prefix(key))
// 2) Compare(prefix(key), key) <= 0.
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
//
// Otherwise, this method guarantees no I/O will be incurred.
//
// REQUIRES: this method shouldn't be called while the DB lock is held.
bool BlockBasedTable::PrefixMayMatch(
    const Slice& internal_key, const ReadOptions& read_options,
    const SliceTransform* options_prefix_extractor,
    const bool need_upper_bound_check,
    BlockCacheLookupContext* lookup_context) const {
  if (!rep_->filter_policy) {
    return true;
  }

  const SliceTransform* prefix_extractor;

  if (rep_->table_prefix_extractor == nullptr) {
    if (need_upper_bound_check) {
      return true;
    }
    prefix_extractor = options_prefix_extractor;
  } else {
    prefix_extractor = rep_->table_prefix_extractor.get();
  }
  auto user_key = ExtractUserKey(internal_key);
  if (!prefix_extractor->InDomain(user_key)) {
    return true;
  }

  bool may_match = true;
  Status s;

  // First, try checking with the full filter
  FilterBlockReader* const filter = rep_->filter.get();
  bool filter_checked = true;
  if (filter != nullptr) {
    if (!filter->IsBlockBased()) {
      const Slice* const const_ikey_ptr = &internal_key;
      may_match = filter->RangeMayExist(
          read_options.iterate_upper_bound, user_key, prefix_extractor,
          rep_->internal_comparator.user_comparator(), const_ikey_ptr,
          &filter_checked, need_upper_bound_check, lookup_context);
    } else {
      // if prefix_extractor changed for block based filter, skip filter
      if (need_upper_bound_check) {
        return true;
      }
      auto prefix = prefix_extractor->Transform(user_key);
      InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
      auto internal_prefix = internal_key_prefix.Encode();

      // To prevent any io operation in this method, we set `read_tier` to make
      // sure we always read index or filter only when they have already been
      // loaded to memory.
      ReadOptions no_io_read_options;
      no_io_read_options.read_tier = kBlockCacheTier;

      // Then, try to find it within each block
      // we already know prefix_extractor and prefix_extractor_name must match
      // because `CheckPrefixMayMatch` first checks `check_filter_ == true`
      std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
          no_io_read_options,
          /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
          /*get_context=*/nullptr, lookup_context));
      iiter->Seek(internal_prefix);

      if (!iiter->Valid()) {
        // we're past end of file
        // if it's incomplete, it means that we avoided I/O
        // and we're not really sure that we're past the end
        // of the file
        may_match = iiter->status().IsIncomplete();
      } else if ((rep_->index_key_includes_seq ? ExtractUserKey(iiter->key())
                                               : iiter->key())
                     .starts_with(ExtractUserKey(internal_prefix))) {
        // we need to check for this subtle case because our only
        // guarantee is that "the key is a string >= last key in that data
        // block" according to the doc/table_format.txt spec.
        //
        // Suppose iiter->key() starts with the desired prefix; it is not
        // necessarily the case that the corresponding data block will
        // contain the prefix, since iiter->key() need not be in the
        // block.  However, the next data block may contain the prefix, so
        // we return true to play it safe.
        may_match = true;
      } else if (filter->IsBlockBased()) {
        // iiter->key() does NOT start with the desired prefix.  Because
        // Seek() finds the first key that is >= the seek target, this
        // means that iiter->key() > prefix.  Thus, any data blocks coming
        // after the data block corresponding to iiter->key() cannot
        // possibly contain the key.  Thus, the corresponding data block
        // is the only one that could potentially contain the prefix.
        BlockHandle handle = iiter->value().handle;
        may_match = filter->PrefixMayMatch(
            prefix, prefix_extractor, handle.offset(), /*no_io=*/false,
            /*const_key_ptr=*/nullptr, /*get_context=*/nullptr, lookup_context);
      }
    }
  }

  if (filter_checked) {
    Statistics* statistics = rep_->ioptions.statistics;
    RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
    if (!may_match) {
      RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
    }
  }

  return may_match;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Seek(const Slice& target) {
  SeekImpl(&target);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToFirst() {
  SeekImpl(nullptr);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekImpl(
    const Slice* target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  if (target && !CheckPrefixMayMatch(*target)) {
    ResetDataIter();
    return;
  }

  bool need_seek_index = true;
  if (block_iter_points_to_real_block_ && block_iter_.Valid()) {
    // Reseek.
    prev_block_offset_ = index_iter_->value().handle.offset();

    if (target) {
      // We can avoid an index seek if:
      // 1. The new seek key is larger than the current key
      // 2. The new seek key is within the upper bound of the block
      // Since we don't necessarily know the internal key for either
      // the current key or the upper bound, we check user keys and
      // exclude the equality case. Considering internal keys would
      // improve the boundary cases, but it would complicate the code.
      if (user_comparator_.Compare(ExtractUserKey(*target),
                                   block_iter_.user_key()) > 0 &&
          user_comparator_.Compare(ExtractUserKey(*target),
                                   index_iter_->user_key()) < 0) {
        need_seek_index = false;
      }
    }
  }

  if (need_seek_index) {
    if (target) {
      index_iter_->Seek(*target);
    } else {
      index_iter_->SeekToFirst();
    }

    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  IndexValue v = index_iter_->value();
  const bool same_block = block_iter_points_to_real_block_ &&
                          v.handle.offset() == prev_block_offset_;

  // TODO(kolmike): Remove the != kBlockCacheTier condition.
  if (!v.first_internal_key.empty() && !same_block &&
      (!target || icomp_.Compare(*target, v.first_internal_key) <= 0) &&
      read_options_.read_tier != kBlockCacheTier) {
    // Index contains the first key of the block, and it's >= target.
    // We can defer reading the block.
    is_at_first_key_from_index_ = true;
    ResetDataIter();
  } else {
    // Need to use the data block.
    if (!same_block) {
      InitDataBlock();
    }

    if (target) {
      block_iter_.Seek(*target);
    } else {
      block_iter_.SeekToFirst();
    }
    FindKeyForward();
  }

  CheckDataBlockWithinUpperBound();
  CheckOutOfBound();

  if (target) {
    assert(!Valid() || ((block_type_ == BlockType::kIndex &&
                         !table_->get_rep()->index_key_includes_seq)
                            ? (user_comparator_.Compare(ExtractUserKey(*target),
                                                        key()) <= 0)
                            : (icomp_.Compare(*target, key()) <= 0)));
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekForPrev(
    const Slice& target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  if (!CheckPrefixMayMatch(target)) {
    ResetDataIter();
    return;
  }

  SavePrevIndexValue();

  // Call Seek() rather than SeekForPrev() in the index block, because the
  // target data block will likely contain the position for `target`, the
  // same as Seek(), rather than before.
  // For example, if we have three data blocks, each containing two keys:
  //   [2, 4]  [6, 8] [10, 12]
  //  (the keys in the index block would be [4, 8, 12])
  // and the user calls SeekForPrev(7), we need to go to the second block,
  // just like if they call Seek(7).
  // The only case where the block is different is when they seek to a
  // position on the boundary. For example, if they SeekForPrev(5), we should
  // go to the first block, rather than the second. However, we don't have
  // the information to distinguish the two unless we read the second block.
  // In this case, we'll end up with reading two blocks.
  index_iter_->Seek(target);

  if (!index_iter_->Valid()) {
    if (!index_iter_->status().ok()) {
      ResetDataIter();
      return;
    }

    index_iter_->SeekToLast();
    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  InitDataBlock();

  block_iter_.SeekForPrev(target);

  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
  assert(!block_iter_.Valid() ||
         icomp_.Compare(target, block_iter_.key()) >= 0);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToLast() {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  SavePrevIndexValue();
  index_iter_->SeekToLast();
  if (!index_iter_->Valid()) {
    ResetDataIter();
    return;
  }
  InitDataBlock();
  block_iter_.SeekToLast();
  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Next() {
  if (is_at_first_key_from_index_ && !MaterializeCurrentBlock()) {
    return;
  }
  assert(block_iter_points_to_real_block_);
  block_iter_.Next();
  FindKeyForward();
  CheckOutOfBound();
}

template <class TBlockIter, typename TValue>
bool BlockBasedTableIterator<TBlockIter, TValue>::NextAndGetResult(
    IterateResult* result) {
  Next();
  bool is_valid = Valid();
  if (is_valid) {
    result->key = key();
    result->may_be_out_of_upper_bound = MayBeOutOfUpperBound();
  }
  return is_valid;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Prev() {
  if (is_at_first_key_from_index_) {
    is_at_first_key_from_index_ = false;

    index_iter_->Prev();
    if (!index_iter_->Valid()) {
      return;
    }

    InitDataBlock();
    block_iter_.SeekToLast();
  } else {
    assert(block_iter_points_to_real_block_);
    block_iter_.Prev();
  }

  FindKeyBackward();
}

// Found that 256 KB readahead size provides the best performance, based on
// experiments, for auto readahead. Experiment data is in PR #3282.
template <class TBlockIter, typename TValue>
const size_t
    BlockBasedTableIterator<TBlockIter, TValue>::kMaxAutoReadaheadSize =
        256 * 1024;
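// Growth policy (applied in InitDataBlock() below): after
// kMinNumFileReadsToStartAutoReadahead sequential reads, each buffered-I/O
// prefetch doubles readahead_size_ until it is capped at
// kMaxAutoReadaheadSize.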

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::InitDataBlock() {
  BlockHandle data_block_handle = index_iter_->value().handle;
  if (!block_iter_points_to_real_block_ ||
      data_block_handle.offset() != prev_block_offset_ ||
      // if previous attempt of reading the block missed cache, try again
      block_iter_.status().IsIncomplete()) {
    if (block_iter_points_to_real_block_) {
      ResetDataIter();
    }
    auto* rep = table_->get_rep();

    // Prefetch additional data for range scans (iterators). Enabled only for
    // user reads.
    // Implicit auto readahead:
    //   Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
    // Explicit user requested readahead:
    //   Enabled from the very first IO when ReadOptions.readahead_size is set.
    if (lookup_context_.caller != TableReaderCaller::kCompaction) {
      if (read_options_.readahead_size == 0) {
        // Implicit auto readahead
        num_file_reads_++;
        if (num_file_reads_ > kMinNumFileReadsToStartAutoReadahead) {
          if (!rep->file->use_direct_io() &&
              (data_block_handle.offset() +
                   static_cast<size_t>(data_block_handle.size()) +
                   kBlockTrailerSize >
               readahead_limit_)) {
            // Buffered I/O
            // Discarding the return status of Prefetch calls intentionally, as
            // we can fall back to reading from disk if Prefetch fails.
            rep->file->Prefetch(data_block_handle.offset(), readahead_size_);
            readahead_limit_ = static_cast<size_t>(data_block_handle.offset() +
                                                   readahead_size_);
            // Keep exponentially increasing readahead size until
            // kMaxAutoReadaheadSize.
            readahead_size_ =
                std::min(kMaxAutoReadaheadSize, readahead_size_ * 2);
          } else if (rep->file->use_direct_io() && !prefetch_buffer_) {
            // Direct I/O
            // Let FilePrefetchBuffer take care of the readahead.
            prefetch_buffer_.reset(
                new FilePrefetchBuffer(rep->file.get(), kInitAutoReadaheadSize,
                                       kMaxAutoReadaheadSize));
          }
        }
      } else if (!prefetch_buffer_) {
        // Explicit user requested readahead
        // The actual condition is:
        // if (read_options_.readahead_size != 0 && !prefetch_buffer_)
        prefetch_buffer_.reset(new FilePrefetchBuffer(
            rep->file.get(), read_options_.readahead_size,
            read_options_.readahead_size));
      }
    } else if (!prefetch_buffer_) {
      prefetch_buffer_.reset(
          new FilePrefetchBuffer(rep->file.get(), compaction_readahead_size_,
                                 compaction_readahead_size_));
    }

    Status s;
    table_->NewDataBlockIterator<TBlockIter>(
        read_options_, data_block_handle, &block_iter_, block_type_,
        /*get_context=*/nullptr, &lookup_context_, s, prefetch_buffer_.get(),
        /*for_compaction=*/lookup_context_.caller ==
            TableReaderCaller::kCompaction);
    block_iter_points_to_real_block_ = true;
    CheckDataBlockWithinUpperBound();
  }
}

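// Resolves a block read deferred by `is_at_first_key_from_index_`: the data
// block is fetched only once its contents (beyond the first key, which the
// index already supplied) are needed, and the block's actual first key is
// cross-checked against the index entry to catch corruption.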
template <class TBlockIter, typename TValue>
bool BlockBasedTableIterator<TBlockIter, TValue>::MaterializeCurrentBlock() {
  assert(is_at_first_key_from_index_);
  assert(!block_iter_points_to_real_block_);
  assert(index_iter_->Valid());

  is_at_first_key_from_index_ = false;
  InitDataBlock();
  assert(block_iter_points_to_real_block_);
  block_iter_.SeekToFirst();

  if (!block_iter_.Valid() ||
      icomp_.Compare(block_iter_.key(),
                     index_iter_->value().first_internal_key) != 0) {
    // Uh oh.
    block_iter_.Invalidate(Status::Corruption(
        "first key in index doesn't match first key in block"));
    return false;
  }

  return true;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyForward() {
  // This method's code is kept short to make it likely to be inlined.

  assert(!is_out_of_bound_);
  assert(block_iter_points_to_real_block_);

  if (!block_iter_.Valid()) {
    // This is the only call site of FindBlockForward(), but it's extracted into
    // a separate method to keep FindKeyForward() short and likely to be
    // inlined. When transitioning to a different block, we call
    // FindBlockForward(), which is much longer and is probably not inlined.
    FindBlockForward();
  } else {
    // This is the fast path that avoids a function call.
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindBlockForward() {
  // TODO the while loop inherits from two-level-iterator. We don't know
  // whether a block can be empty so it can be replaced by an "if".
  do {
    if (!block_iter_.status().ok()) {
      return;
    }
    // Whether next data block is out of upper bound, if there is one.
    const bool next_block_is_out_of_bound =
        read_options_.iterate_upper_bound != nullptr &&
        block_iter_points_to_real_block_ && !data_block_within_upper_bound_;
    assert(!next_block_is_out_of_bound ||
           user_comparator_.Compare(*read_options_.iterate_upper_bound,
                                    index_iter_->user_key()) <= 0);
    ResetDataIter();
    index_iter_->Next();
    if (next_block_is_out_of_bound) {
      // The next block is out of bound. No need to read it.
      TEST_SYNC_POINT_CALLBACK("BlockBasedTableIterator:out_of_bound", nullptr);
      // We need to make sure this is not the last data block before setting
      // is_out_of_bound_, since the index key for the last data block can be
      // larger than smallest key of the next file on the same level.
      if (index_iter_->Valid()) {
        is_out_of_bound_ = true;
      }
      return;
    }

    if (!index_iter_->Valid()) {
      return;
    }

    IndexValue v = index_iter_->value();

    // TODO(kolmike): Remove the != kBlockCacheTier condition.
    if (!v.first_internal_key.empty() &&
        read_options_.read_tier != kBlockCacheTier) {
      // Index contains the first key of the block. Defer reading the block.
      is_at_first_key_from_index_ = true;
      return;
    }

    InitDataBlock();
    block_iter_.SeekToFirst();
  } while (!block_iter_.Valid());
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyBackward() {
  while (!block_iter_.Valid()) {
    if (!block_iter_.status().ok()) {
      return;
    }

    ResetDataIter();
    index_iter_->Prev();

    if (index_iter_->Valid()) {
      InitDataBlock();
      block_iter_.SeekToLast();
    } else {
      return;
    }
  }

  // We could have checked the lower bound here too, but we opt not to do it
  // for code simplicity.
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::CheckOutOfBound() {
  if (read_options_.iterate_upper_bound != nullptr && Valid()) {
    is_out_of_bound_ = user_comparator_.Compare(
                           *read_options_.iterate_upper_bound, user_key()) <= 0;
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter,
                             TValue>::CheckDataBlockWithinUpperBound() {
  if (read_options_.iterate_upper_bound != nullptr &&
      block_iter_points_to_real_block_) {
    data_block_within_upper_bound_ =
        (user_comparator_.Compare(*read_options_.iterate_upper_bound,
                                  index_iter_->user_key()) > 0);
  }
}

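// A minimal usage sketch (not compiled here; assumes `table` points to an
// open BlockBasedTable and relies on the default compaction readahead size):
//
//   ReadOptions ro;
//   std::unique_ptr<InternalIterator> iter(table->NewIterator(
//       ro, /*prefix_extractor=*/nullptr, /*arena=*/nullptr,
//       /*skip_filters=*/false, TableReaderCaller::kUncategorized));
//   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
//     // iter->key() is an internal key; iter->value() is the user value.
//   }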
InternalIterator* BlockBasedTable::NewIterator(
    const ReadOptions& read_options, const SliceTransform* prefix_extractor,
    Arena* arena, bool skip_filters, TableReaderCaller caller,
    size_t compaction_readahead_size) {
  BlockCacheLookupContext lookup_context{caller};
  bool need_upper_bound_check =
      PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
  if (arena == nullptr) {
    return new BlockBasedTableIterator<DataBlockIter>(
        this, read_options, rep_->internal_comparator,
        NewIndexIterator(
            read_options,
            need_upper_bound_check &&
                rep_->index_type == BlockBasedTableOptions::kHashSearch,
            /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
        compaction_readahead_size);
  } else {
    auto* mem =
        arena->AllocateAligned(sizeof(BlockBasedTableIterator<DataBlockIter>));
    return new (mem) BlockBasedTableIterator<DataBlockIter>(
        this, read_options, rep_->internal_comparator,
        NewIndexIterator(read_options, need_upper_bound_check,
                         /*input_iter=*/nullptr, /*get_context=*/nullptr,
                         &lookup_context),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
        compaction_readahead_size);
  }
}

FragmentedRangeTombstoneIterator* BlockBasedTable::NewRangeTombstoneIterator(
    const ReadOptions& read_options) {
  if (rep_->fragmented_range_dels == nullptr) {
    return nullptr;
  }
  SequenceNumber snapshot = kMaxSequenceNumber;
  if (read_options.snapshot != nullptr) {
    snapshot = read_options.snapshot->GetSequenceNumber();
  }
  return new FragmentedRangeTombstoneIterator(
      rep_->fragmented_range_dels, rep_->internal_comparator, snapshot);
}

bool BlockBasedTable::FullFilterKeyMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    const Slice& internal_key, const bool no_io,
    const SliceTransform* prefix_extractor, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return true;
  }
  Slice user_key = ExtractUserKey(internal_key);
  const Slice* const const_ikey_ptr = &internal_key;
  bool may_match = true;
  if (rep_->whole_key_filtering) {
    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    Slice user_key_without_ts = StripTimestampFromUserKey(user_key, ts_sz);
    may_match =
        filter->KeyMayMatch(user_key_without_ts, prefix_extractor, kNotValid,
                            no_io, const_ikey_ptr, get_context, lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0 &&
             prefix_extractor->InDomain(user_key) &&
             !filter->PrefixMayMatch(prefix_extractor->Transform(user_key),
                                     prefix_extractor, kNotValid, no_io,
                                     const_ikey_ptr, get_context,
                                     lookup_context)) {
    may_match = false;
  }
  if (may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, rep_->level);
  }
  return may_match;
}

void BlockBasedTable::FullFilterKeysMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    MultiGetRange* range, const bool no_io,
    const SliceTransform* prefix_extractor,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return;
  }
  if (rep_->whole_key_filtering) {
    filter->KeysMayMatch(range, prefix_extractor, kNotValid, no_io,
                         lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0) {
    for (auto iter = range->begin(); iter != range->end(); ++iter) {
      Slice user_key = iter->lkey->user_key();

      if (!prefix_extractor->InDomain(user_key)) {
        range->SkipKey(iter);
      }
    }
    filter->PrefixesMayMatch(range, prefix_extractor, kNotValid, false,
                             lookup_context);
  }
}

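// Point lookup. The fast path rejects the key via the full filter without
// touching the index; otherwise we walk index entries at or after `key`,
// optionally consult a block-based filter per data block, and feed each
// candidate entry to GetContext::SaveValue() until it signals completion.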
Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
                            GetContext* get_context,
                            const SliceTransform* prefix_extractor,
                            bool skip_filters) {
  assert(key.size() >= 8);  // key must be internal key
  assert(get_context != nullptr);
  Status s;
  const bool no_io = read_options.read_tier == kBlockCacheTier;

  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;

  // First check the full filter
  // If the full filter is not useful, then go into each block
  uint64_t tracing_get_id = get_context->get_tracing_get_id();
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserGet, tracing_get_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
    // Trace the key since it contains both user key and sequence number.
    lookup_context.referenced_key = key.ToString();
    lookup_context.get_from_user_specified_snapshot =
        read_options.snapshot != nullptr;
  }
  const bool may_match =
      FullFilterKeyMayMatch(read_options, filter, key, no_io, prefix_extractor,
                            get_context, &lookup_context);
  if (!may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
  } else {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    bool matched = false;  // if such user key matched a key in SST
    bool done = false;
    for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
      IndexValue v = iiter->value();

      bool not_exist_in_filter =
          filter != nullptr && filter->IsBlockBased() == true &&
          !filter->KeyMayMatch(ExtractUserKeyAndStripTimestamp(key, ts_sz),
                               prefix_extractor, v.handle.offset(), no_io,
                               /*const_ikey_ptr=*/nullptr, get_context,
                               &lookup_context);

      if (not_exist_in_filter) {
        // Not found
        // TODO: think about interaction with Merge. If a user key cannot
        // cross one data block, we should be fine.
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
        break;
      }

      if (!v.first_internal_key.empty() && !skip_filters &&
          UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                  .Compare(ExtractUserKey(key),
                           ExtractUserKey(v.first_internal_key)) < 0) {
        // The requested key falls between highest key in previous block and
        // lowest key in current block.
        break;
      }

      BlockCacheLookupContext lookup_data_block_context{
          TableReaderCaller::kUserGet, tracing_get_id,
          /*get_from_user_specified_snapshot=*/read_options.snapshot !=
              nullptr};
      bool does_referenced_key_exist = false;
      DataBlockIter biter;
      uint64_t referenced_data_size = 0;
      NewDataBlockIterator<DataBlockIter>(
          read_options, v.handle, &biter, BlockType::kData, get_context,
          &lookup_data_block_context,
          /*s=*/Status(), /*prefetch_buffer*/ nullptr);

      if (no_io && biter.status().IsIncomplete()) {
        // couldn't get block from block_cache
        // Update Saver.state to Found because we are only looking for
        // whether we can guarantee the key is not there when "no_io" is set
        get_context->MarkKeyMayExist();
        break;
      }
      if (!biter.status().ok()) {
        s = biter.status();
        break;
      }

      bool may_exist = biter.SeekForGet(key);
      // If user-specified timestamp is supported, we cannot end the search
      // just because hash index lookup indicates the key+ts does not exist.
      if (!may_exist && ts_sz == 0) {
        // The hash index cannot find the key in this block, and the iter is
        // not at the end of the block, i.e. the key cannot be in the
        // following blocks either. In this case, the seek_key cannot be
        // found, so we break from the top level for-loop.
        done = true;
      } else {
        // Call the *saver function on each entry/block until it returns false
        for (; biter.Valid(); biter.Next()) {
          ParsedInternalKey parsed_key;
          if (!ParseInternalKey(biter.key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }

          if (!get_context->SaveValue(
                  parsed_key, biter.value(), &matched,
                  biter.IsValuePinned() ? &biter : nullptr)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size = biter.key().size() + biter.value().size();
            }
            done = true;
            break;
          }
        }
        s = biter.status();
      }
      // Write the block cache access record.
      if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
        // Avoid making copy of block_key, cf_name, and referenced_key when
        // constructing the access record.
        Slice referenced_key;
        if (does_referenced_key_exist) {
          referenced_key = biter.key();
        } else {
          referenced_key = key;
        }
        BlockCacheTraceRecord access_record(
            rep_->ioptions.env->NowMicros(),
            /*block_key=*/"", lookup_data_block_context.block_type,
            lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
            /*cf_name=*/"", rep_->level_for_tracing(),
            rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
            lookup_data_block_context.is_cache_hit,
            lookup_data_block_context.no_insert,
            lookup_data_block_context.get_id,
            lookup_data_block_context.get_from_user_specified_snapshot,
            /*referenced_key=*/"", referenced_data_size,
            lookup_data_block_context.num_keys_in_block,
            does_referenced_key_exist);
        block_cache_tracer_->WriteBlockAccess(
            access_record, lookup_data_block_context.block_key,
            rep_->cf_name_for_tracing(), referenced_key);
      }

      if (done) {
        // Avoid the extra Next which is expensive in two-level indexes
        break;
      }
    }
    if (matched && filter != nullptr && !filter->IsBlockBased()) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                rep_->level);
    }
    if (s.ok()) {
      s = iiter->status();
    }
  }

  return s;
}

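// Batched point lookup. Phase one walks the index once per key, skips keys
// the index rules out, reuses block handles shared by adjacent keys, and
// gathers cache misses so the missing blocks can be read together via
// MaybeLoadBlocksToCache(). Phase two runs the per-key search over the
// materialized blocks, mirroring Get().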
using MultiGetRange = MultiGetContext::Range;
void BlockBasedTable::MultiGet(const ReadOptions& read_options,
                               const MultiGetRange* mget_range,
                               const SliceTransform* prefix_extractor,
                               bool skip_filters) {
  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;
  MultiGetRange sst_file_range(*mget_range, mget_range->begin(),
                               mget_range->end());

  // First check the full filter
  // If the full filter is not useful, then go into each block
  const bool no_io = read_options.read_tier == kBlockCacheTier;
  uint64_t tracing_mget_id = BlockCacheTraceHelper::kReservedGetId;
  if (!sst_file_range.empty() && sst_file_range.begin()->get_context) {
    tracing_mget_id = sst_file_range.begin()->get_context->get_tracing_get_id();
  }
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserMultiGet, tracing_mget_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
  FullFilterKeysMayMatch(read_options, filter, &sst_file_range, no_io,
                         prefix_extractor, &lookup_context);

  if (skip_filters || !sst_file_range.empty()) {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         sst_file_range.begin()->get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    uint64_t offset = std::numeric_limits<uint64_t>::max();
    autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE> block_handles;
    autovector<CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE> results;
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE> statuses;
    static const size_t kMultiGetReadStackBufSize = 8192;
    char stack_buf[kMultiGetReadStackBufSize];
    std::unique_ptr<char[]> block_buf;
    {
      MultiGetRange data_block_range(sst_file_range, sst_file_range.begin(),
                                     sst_file_range.end());

      UncompressionDict uncompression_dict;
      Status uncompression_dict_status;
      if (rep_->uncompression_dict_reader) {
        uncompression_dict_status =
            rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
                nullptr /* prefetch_buffer */, no_io,
                sst_file_range.begin()->get_context, &lookup_context,
                &uncompression_dict);
      }

      size_t total_len = 0;
      ReadOptions ro = read_options;
      ro.read_tier = kBlockCacheTier;

      for (auto miter = data_block_range.begin();
           miter != data_block_range.end(); ++miter) {
        const Slice& key = miter->ikey;
        iiter->Seek(miter->ikey);

        IndexValue v;
        if (iiter->Valid()) {
          v = iiter->value();
        }
        if (!iiter->Valid() ||
            (!v.first_internal_key.empty() && !skip_filters &&
             UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                     .Compare(ExtractUserKey(key),
                              ExtractUserKey(v.first_internal_key)) < 0)) {
          // The requested key falls between highest key in previous block and
          // lowest key in current block.
          *(miter->s) = iiter->status();
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }

        if (!uncompression_dict_status.ok()) {
          *(miter->s) = uncompression_dict_status;
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }

        statuses.emplace_back();
        results.emplace_back();
        if (v.handle.offset() == offset) {
          // We're going to reuse the block for this key later on. No need to
          // look it up now. Place a null handle
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
          continue;
        }
        offset = v.handle.offset();
        BlockHandle handle = v.handle;
        Status s = GetDataBlockFromCache(ro, handle, uncompression_dict,
                                         &(results.back()), BlockType::kData,
                                         miter->get_context);
        if (s.ok() && !results.back().IsEmpty()) {
          // Found it in the cache. Add NULL handle to indicate there is
          // nothing to read from disk
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
        } else {
          block_handles.emplace_back(handle);
          total_len += handle.size();
        }
      }

      if (total_len) {
        char* scratch = nullptr;
        // If the blocks need to be uncompressed and we don't need the
        // compressed blocks, then we can use a contiguous block of
        // memory to read in all the blocks as it will be temporary
        // storage
        // 1. If blocks are compressed and compressed block cache is there,
        //    alloc heap bufs
        // 2. If blocks are uncompressed, alloc heap bufs
        // 3. If blocks are compressed and no compressed block cache, use
        //    stack buf
        if (rep_->table_options.block_cache_compressed == nullptr &&
            rep_->blocks_maybe_compressed) {
          if (total_len <= kMultiGetReadStackBufSize) {
            scratch = stack_buf;
          } else {
            scratch = new char[total_len];
            block_buf.reset(scratch);
          }
        }
        MaybeLoadBlocksToCache(read_options, &data_block_range, &block_handles,
                               &statuses, &results, scratch,
                               uncompression_dict);
      }
    }

    DataBlockIter first_biter;
    DataBlockIter next_biter;
    size_t idx_in_batch = 0;
    for (auto miter = sst_file_range.begin(); miter != sst_file_range.end();
         ++miter) {
      Status s;
      GetContext* get_context = miter->get_context;
      const Slice& key = miter->ikey;
      bool matched = false;  // if such user key matched a key in SST
      bool done = false;
      bool first_block = true;
      do {
        DataBlockIter* biter = nullptr;
        bool reusing_block = true;
        uint64_t referenced_data_size = 0;
        bool does_referenced_key_exist = false;
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet, tracing_mget_id,
            /*get_from_user_specified_snapshot=*/read_options.snapshot !=
                nullptr);
        if (first_block) {
          if (!block_handles[idx_in_batch].IsNull() ||
              !results[idx_in_batch].IsEmpty()) {
            first_biter.Invalidate(Status::OK());
            NewDataBlockIterator<DataBlockIter>(
                read_options, results[idx_in_batch], &first_biter,
                statuses[idx_in_batch]);
            reusing_block = false;
          }
          biter = &first_biter;
          idx_in_batch++;
        } else {
          IndexValue v = iiter->value();
          if (!v.first_internal_key.empty() && !skip_filters &&
              UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                      .Compare(ExtractUserKey(key),
                               ExtractUserKey(v.first_internal_key)) < 0) {
            // The requested key falls between highest key in previous block
            // and lowest key in current block.
            break;
          }

          next_biter.Invalidate(Status::OK());
          NewDataBlockIterator<DataBlockIter>(
              read_options, iiter->value().handle, &next_biter,
              BlockType::kData, get_context, &lookup_data_block_context,
              Status(), nullptr);
          biter = &next_biter;
          reusing_block = false;
        }

        if (read_options.read_tier == kBlockCacheTier &&
            biter->status().IsIncomplete()) {
          // couldn't get block from block_cache
          // Update Saver.state to Found because we are only looking for
          // whether we can guarantee the key is not there when "no_io" is set
          get_context->MarkKeyMayExist();
          break;
        }
        if (!biter->status().ok()) {
          s = biter->status();
          break;
        }

        bool may_exist = biter->SeekForGet(key);
        if (!may_exist) {
          // The hash index cannot find the key in this block, and the iter is
          // not at the end of the block, i.e. the key cannot be in the
          // following blocks either. In this case, the seek_key cannot be
          // found, so we break from the loop.
          break;
        }

        // Call the *saver function on each entry/block until it returns false
        for (; biter->Valid(); biter->Next()) {
          ParsedInternalKey parsed_key;
          Cleanable dummy;
          Cleanable* value_pinner = nullptr;
          if (!ParseInternalKey(biter->key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }
          if (biter->IsValuePinned()) {
            if (reusing_block) {
              Cache* block_cache = rep_->table_options.block_cache.get();
              assert(biter->cache_handle() != nullptr);
              block_cache->Ref(biter->cache_handle());
              dummy.RegisterCleanup(&ReleaseCachedEntry, block_cache,
                                    biter->cache_handle());
              value_pinner = &dummy;
            } else {
              value_pinner = biter;
            }
          }
          if (!get_context->SaveValue(parsed_key, biter->value(), &matched,
                                      value_pinner)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size =
                  biter->key().size() + biter->value().size();
            }
            done = true;
            break;
          }
          s = biter->status();
        }
        // Write the block cache access.
        if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
          // Avoid making copy of block_key, cf_name, and referenced_key when
          // constructing the access record.
          Slice referenced_key;
          if (does_referenced_key_exist) {
            referenced_key = biter->key();
          } else {
            referenced_key = key;
          }
          BlockCacheTraceRecord access_record(
              rep_->ioptions.env->NowMicros(),
              /*block_key=*/"", lookup_data_block_context.block_type,
              lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
              /*cf_name=*/"", rep_->level_for_tracing(),
              rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
              lookup_data_block_context.is_cache_hit,
              lookup_data_block_context.no_insert,
              lookup_data_block_context.get_id,
              lookup_data_block_context.get_from_user_specified_snapshot,
              /*referenced_key=*/"", referenced_data_size,
              lookup_data_block_context.num_keys_in_block,
              does_referenced_key_exist);
          block_cache_tracer_->WriteBlockAccess(
              access_record, lookup_data_block_context.block_key,
              rep_->cf_name_for_tracing(), referenced_key);
        }
        s = biter->status();
        if (done) {
          // Avoid the extra Next which is expensive in two-level indexes
          break;
        }
        if (first_block) {
          iiter->Seek(key);
        }
        first_block = false;
        iiter->Next();
      } while (iiter->Valid());

      if (matched && filter != nullptr && !filter->IsBlockBased()) {
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                  rep_->level);
      }
      if (s.ok()) {
        s = iiter->status();
      }
      *(miter->s) = s;
    }
  }
}

Status BlockBasedTable::Prefetch(const Slice* const begin,
                                 const Slice* const end) {
  auto& comparator = rep_->internal_comparator;
  UserComparatorWrapper user_comparator(comparator.user_comparator());
  // pre-condition
  if (begin && end && comparator.Compare(*begin, *end) > 0) {
    return Status::InvalidArgument(*begin, *end);
  }
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  IndexBlockIter iiter_on_stack;
  auto iiter = NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                                &iiter_on_stack, /*get_context=*/nullptr,
                                &lookup_context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }

  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }

  // indicates if we are on the last page that needs to be pre-fetched
  bool prefetching_boundary_page = false;

  for (begin ? iiter->Seek(*begin) : iiter->SeekToFirst(); iiter->Valid();
       iiter->Next()) {
    BlockHandle block_handle = iiter->value().handle;
    const bool is_user_key = !rep_->index_key_includes_seq;
    if (end &&
        ((!is_user_key && comparator.Compare(iiter->key(), *end) >= 0) ||
         (is_user_key &&
          user_comparator.Compare(iiter->key(), ExtractUserKey(*end)) >= 0))) {
      if (prefetching_boundary_page) {
        break;
      }

      // The index entry represents the last key in the data block.
      // We should load this page into memory as well, but no more
      prefetching_boundary_page = true;
    }

    // Load the block specified by the block_handle into the block cache
    DataBlockIter biter;

    NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), block_handle, &biter, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, &lookup_context, Status(),
        /*prefetch_buffer=*/nullptr);

    if (!biter.status().ok()) {
      // there was an unexpected error while pre-fetching
      return biter.status();
    }
  }

  return Status::OK();
}

Status BlockBasedTable::VerifyChecksum(TableReaderCaller caller) {
  Status s;
  // Check Meta blocks
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  s = ReadMetaBlock(nullptr /* prefetch buffer */, &meta, &meta_iter);
  if (s.ok()) {
    s = VerifyChecksumInMetaBlocks(meta_iter.get());
    if (!s.ok()) {
      return s;
    }
  } else {
    return s;
  }
  // Check Data blocks
  IndexBlockIter iiter_on_stack;
  BlockCacheLookupContext context{caller};
  InternalIteratorBase<IndexValue>* iiter = NewIndexIterator(
      ReadOptions(), /*need_upper_bound_check=*/false, &iiter_on_stack,
      /*get_context=*/nullptr, &context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }
  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }
  s = VerifyChecksumInBlocks(iiter);
  return s;
}

Status BlockBasedTable::VerifyChecksumInBlocks(
    InternalIteratorBase<IndexValue>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle = index_iter->value().handle;
    BlockContents contents;
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/, BlockType::kData,
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

BlockType BlockBasedTable::GetBlockTypeForMetaBlockByName(
    const Slice& meta_block_name) {
  if (meta_block_name.starts_with(kFilterBlockPrefix) ||
      meta_block_name.starts_with(kFullFilterBlockPrefix) ||
      meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
    return BlockType::kFilter;
  }

  if (meta_block_name == kPropertiesBlock) {
    return BlockType::kProperties;
  }

  if (meta_block_name == kCompressionDictBlock) {
    return BlockType::kCompressionDictionary;
  }

  if (meta_block_name == kRangeDelBlock) {
    return BlockType::kRangeDeletion;
  }

  if (meta_block_name == kHashIndexPrefixesBlock) {
    return BlockType::kHashIndexPrefixes;
  }

  if (meta_block_name == kHashIndexPrefixesMetadataBlock) {
    return BlockType::kHashIndexMetadata;
  }

  assert(false);
  return BlockType::kInvalid;
}

Status BlockBasedTable::VerifyChecksumInMetaBlocks(
    InternalIteratorBase<Slice>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle;
    Slice input = index_iter->value();
    s = handle.DecodeFrom(&input);
    BlockContents contents;
    const Slice meta_block_name = index_iter->key();
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/,
        GetBlockTypeForMetaBlockByName(meta_block_name),
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (s.IsCorruption() && meta_block_name == kPropertiesBlock) {
      TableProperties* table_properties;
      s = TryReadPropertiesWithGlobalSeqno(nullptr /* prefetch_buffer */,
                                           index_iter->value(),
                                           &table_properties);
      delete table_properties;
    }
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

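// Test-only helper: probes the block cache with the key this table would use
// for `handle` (cache key prefix + block offset), without inserting anything.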
bool BlockBasedTable::TEST_BlockInCache(const BlockHandle& handle) const {
  assert(rep_ != nullptr);

  Cache* const cache = rep_->table_options.block_cache.get();
  if (cache == nullptr) {
    return false;
  }

  char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice cache_key =
      GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, handle,
                  cache_key_storage);

  Cache::Handle* const cache_handle = cache->Lookup(cache_key);
  if (cache_handle == nullptr) {
    return false;
  }

  cache->Release(cache_handle);

  return true;
}

bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
                                      const Slice& key) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
      options, /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
      /*get_context=*/nullptr, /*lookup_context=*/nullptr));
  iiter->Seek(key);
  assert(iiter->Valid());

  return TEST_BlockInCache(iiter->value().handle);
}

// REQUIRES: The following fields of rep_ should have already been populated:
//  1. file
//  2. index_handle,
//  3. options
//  4. internal_comparator
//  5. index_type
Status BlockBasedTable::CreateIndexReader(
3871 3872
    FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* preloaded_meta_index_iter, bool use_cache, bool prefetch,
3873 3874
    bool pin, BlockCacheLookupContext* lookup_context,
    std::unique_ptr<IndexReader>* index_reader) {
3875 3876
  // kHashSearch requires a non-empty prefix_extractor, but we bypass that
  // check here since we have no access to MutableCFOptions.
  // The need_upper_bound_check flag is set in
  // BlockBasedTable::NewIndexIterator instead: if prefix_extractor does not
  // match prefix_extractor_name from the table properties, Hash Index is
  // turned off by setting total_order_seek to true.

  switch (rep_->index_type) {
    case BlockBasedTableOptions::kTwoLevelIndexSearch: {
      return PartitionIndexReader::Create(this, prefetch_buffer, use_cache,
                                          prefetch, pin, lookup_context,
                                          index_reader);
    }
    case BlockBasedTableOptions::kBinarySearch:
    case BlockBasedTableOptions::kBinarySearchWithFirstKey: {
      return BinarySearchIndexReader::Create(this, prefetch_buffer, use_cache,
                                             prefetch, pin, lookup_context,
                                             index_reader);
    }
    case BlockBasedTableOptions::kHashSearch: {
      std::unique_ptr<Block> meta_guard;
      std::unique_ptr<InternalIterator> meta_iter_guard;
      auto meta_index_iter = preloaded_meta_index_iter;
      if (meta_index_iter == nullptr) {
        auto s = ReadMetaBlock(prefetch_buffer, &meta_guard, &meta_iter_guard);
        if (!s.ok()) {
          // We simply fall back to binary search in case there is any
          // problem with loading the prefix hash index.
          ROCKS_LOG_WARN(rep_->ioptions.info_log,
                         "Unable to read the metaindex block."
                         " Fall back to binary search index.");
          return BinarySearchIndexReader::Create(this, prefetch_buffer,
                                                 use_cache, prefetch, pin,
                                                 lookup_context, index_reader);
        }
        meta_index_iter = meta_iter_guard.get();
      }

      return HashIndexReader::Create(this, prefetch_buffer, meta_index_iter,
                                     use_cache, prefetch, pin, lookup_context,
                                     index_reader);
    }
    default: {
      std::string error_message =
          "Unrecognized index type: " + ToString(rep_->index_type);
      return Status::InvalidArgument(error_message.c_str());
    }
  }
}
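
// Illustrative configuration feeding the switch above (a sketch, not
// exercised here): the index type is chosen via BlockBasedTableOptions at
// table-build time.
//
//   BlockBasedTableOptions table_options;
//   table_options.index_type = BlockBasedTableOptions::kHashSearch;
//   Options options;
//   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
//   // kHashSearch additionally requires a prefix extractor:
//   options.prefix_extractor.reset(NewFixedPrefixTransform(3));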

uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key,
                                              TableReaderCaller caller) {
  BlockCacheLookupContext context(caller);
  IndexBlockIter iiter_on_stack;
  auto index_iter =
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/&iiter_on_stack, /*get_context=*/nullptr,
                       /*lookup_context=*/&context);

  index_iter->Seek(key);
  uint64_t result;
  if (index_iter->Valid()) {
    BlockHandle handle = index_iter->value().handle;
    result = handle.offset();
  } else {
    // key is past the last key in the file. If table_properties is not
    // available, approximate the offset by returning the offset of the
    // metaindex block (which is right near the end of the file).
    result = 0;
    if (rep_->table_properties) {
      result = rep_->table_properties->data_size;
    }
    // Fall back when table_properties is absent or reports a zero data_size.
    if (result == 0) {
      result = rep_->footer.metaindex_handle().offset();
    }
  }

  if (index_iter != &iiter_on_stack) {
    delete index_iter;
  }

  return result;
}
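
// ApproximateOffsetOf() above is typically reached through the public
// size-estimation API; a hedged usage sketch:
//
//   Range r("key_start", "key_end");
//   uint64_t size = 0;
//   db->GetApproximateSizes(&r, 1, &size);
//   // the estimate is derived from index offsets like the one computed above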

bool BlockBasedTable::TEST_FilterBlockInCache() const {
  assert(rep_ != nullptr);
  return TEST_BlockInCache(rep_->filter_handle);
}

bool BlockBasedTable::TEST_IndexBlockInCache() const {
  assert(rep_ != nullptr);

  return TEST_BlockInCache(rep_->footer.index_handle());
}

Status BlockBasedTable::GetKVPairsFromDataBlocks(
    std::vector<KVPairBlock>* kv_pair_blocks) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));

  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    // Cannot read Index Block
    return s;
  }

  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();

    if (!s.ok()) {
      break;
    }

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      // Error reading the block - Skipped
      continue;
    }

    KVPairBlock kv_pair_block;
    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        // Error reading the block - Skipped
        break;
      }
      const Slice& key = datablock_iter->key();
      const Slice& value = datablock_iter->value();
      std::string key_copy = std::string(key.data(), key.size());
      std::string value_copy = std::string(value.data(), value.size());

      kv_pair_block.push_back(
          std::make_pair(std::move(key_copy), std::move(value_copy)));
    }
    kv_pair_blocks->push_back(std::move(kv_pair_block));
  }
  return Status::OK();
}
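
// Minimal usage sketch for GetKVPairsFromDataBlocks() above; KVPairBlock is a
// vector of key/value string pairs, one KVPairBlock per data block:
//
//   std::vector<KVPairBlock> kv_pair_blocks;
//   Status s = table->GetKVPairsFromDataBlocks(&kv_pair_blocks);
//   for (const KVPairBlock& block : kv_pair_blocks) {
//     for (const auto& kv : block) {
//       // kv.first holds the internal key, kv.second the value
//     }
//   }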

Status BlockBasedTable::DumpTable(WritableFile* out_file) {
  // Output Footer
  out_file->Append(
      "Footer Details:\n"
      "--------------------------------------\n"
      "  ");
  out_file->Append(rep_->footer.ToString().c_str());
  out_file->Append("\n");

  // Output MetaIndex
  out_file->Append(
      "Metaindex Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  Status s = ReadMetaBlock(nullptr /* prefetch_buffer */, &meta, &meta_iter);
  if (s.ok()) {
    for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) {
      s = meta_iter->status();
      if (!s.ok()) {
        return s;
      }
      if (meta_iter->key() == rocksdb::kPropertiesBlock) {
        out_file->Append("  Properties block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (meta_iter->key() == rocksdb::kCompressionDictBlock) {
        out_file->Append("  Compression dictionary block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (strstr(meta_iter->key().ToString().c_str(),
                        "filter.rocksdb.") != nullptr) {
        out_file->Append("  Filter block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (meta_iter->key() == rocksdb::kRangeDelBlock) {
        out_file->Append("  Range deletion block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      }
    }
    out_file->Append("\n");
  } else {
    return s;
  }

  // Output TableProperties
  const rocksdb::TableProperties* table_properties;
  table_properties = rep_->table_properties.get();

  if (table_properties != nullptr) {
    out_file->Append(
        "Table Properties:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(table_properties->ToString("\n  ", ": ").c_str());
    out_file->Append("\n");
  }

  if (rep_->filter) {
    out_file->Append(
        "Filter Details:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(rep_->filter->ToString().c_str());
    out_file->Append("\n");
  }

  // Output Index block
  s = DumpIndexBlock(out_file);
  if (!s.ok()) {
    return s;
  }

  // Output compression dictionary
  if (rep_->uncompression_dict_reader) {
    UncompressionDict uncompression_dict;
    s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
        nullptr /* prefetch_buffer */, false /* no_io */,
        nullptr /* get_context */, nullptr /* lookup_context */,
        &uncompression_dict);
    if (!s.ok()) {
      return s;
    }

    const Slice& raw_dict = uncompression_dict.GetRawDict();
    out_file->Append(
        "Compression Dictionary:\n"
        "--------------------------------------\n");
    out_file->Append("  size (bytes): ");
    out_file->Append(rocksdb::ToString(raw_dict.size()));
    out_file->Append("\n\n");
    out_file->Append("  HEX    ");
    out_file->Append(raw_dict.ToString(true).c_str());
    out_file->Append("\n\n");
  }

  // Output range deletions block
  auto* range_del_iter = NewRangeTombstoneIterator(ReadOptions());
  if (range_del_iter != nullptr) {
    range_del_iter->SeekToFirst();
    if (range_del_iter->Valid()) {
      out_file->Append(
          "Range deletions:\n"
          "--------------------------------------\n"
          "  ");
      for (; range_del_iter->Valid(); range_del_iter->Next()) {
        DumpKeyValue(range_del_iter->key(), range_del_iter->value(), out_file);
      }
      out_file->Append("\n");
    }
    delete range_del_iter;
  }
  // Output Data blocks
  s = DumpDataBlocks(out_file);

  return s;
}
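
// DumpTable() above backs the raw mode of the sst_dump tool; a hedged
// command-line sketch (the output file name is an assumption based on
// sst_dump's conventions):
//
//   sst_dump --file=/path/to/table.sst --command=raw
//   # writes the dump produced above to /path/to/table_dump.txt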

Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
  out_file->Append(
      "Index Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Cannot read Index Block\n\n");
    return s;
  }

  out_file->Append("  Block key hex dump: Data block handle\n");
  out_file->Append("  Block key ascii\n\n");
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }
    Slice key = blockhandles_iter->key();
    Slice user_key;
    InternalKey ikey;
    if (!rep_->index_key_includes_seq) {
      user_key = key;
    } else {
      ikey.DecodeFrom(key);
      user_key = ikey.user_key();
    }

    out_file->Append("  HEX    ");
    out_file->Append(user_key.ToString(true).c_str());
    out_file->Append(": ");
    out_file->Append(blockhandles_iter->value()
                         .ToString(true, rep_->index_has_first_key)
                         .c_str());
    out_file->Append("\n");

    std::string str_key = user_key.ToString();
    std::string res_key("");
    char cspace = ' ';
    for (size_t i = 0; i < str_key.size(); i++) {
      res_key.append(&str_key[i], 1);
      res_key.append(1, cspace);
    }
    out_file->Append("  ASCII  ");
    out_file->Append(res_key.c_str());
    out_file->Append("\n  ------\n");
  }
  out_file->Append("\n");
  return Status::OK();
}
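
// The index dump written above looks roughly like this (illustrative only;
// the handle rendering depends on index_has_first_key):
//
//   Index Details:
//   --------------------------------------
//     Block key hex dump: Data block handle
//     Block key ascii
//
//     HEX    6B6579: <data block handle>
//     ASCII  k e y
//     ------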

Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Cannot read Index Block\n\n");
    return s;
  }

  uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
  uint64_t datablock_size_max = 0;
  uint64_t datablock_size_sum = 0;

  size_t block_id = 1;
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       block_id++, blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }

    BlockHandle bh = blockhandles_iter->value().handle;
    uint64_t datablock_size = bh.size();
    datablock_size_min = std::min(datablock_size_min, datablock_size);
    datablock_size_max = std::max(datablock_size_max, datablock_size);
    datablock_size_sum += datablock_size;

    out_file->Append("Data Block # ");
    out_file->Append(rocksdb::ToString(block_id));
    out_file->Append(" @ ");
    out_file->Append(bh.ToString(true).c_str());
    out_file->Append("\n");
    out_file->Append("--------------------------------------\n");

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      out_file->Append("Error reading the block - Skipped \n\n");
      continue;
    }

    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        out_file->Append("Error reading the block - Skipped \n");
        break;
      }
      DumpKeyValue(datablock_iter->key(), datablock_iter->value(), out_file);
    }
    out_file->Append("\n");
  }

  uint64_t num_datablocks = block_id - 1;
  if (num_datablocks) {
    double datablock_size_avg =
        static_cast<double>(datablock_size_sum) / num_datablocks;
    out_file->Append("Data Block Summary:\n");
    out_file->Append("--------------------------------------");
    out_file->Append("\n  # data blocks: ");
    out_file->Append(rocksdb::ToString(num_datablocks));
    out_file->Append("\n  min data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_min));
    out_file->Append("\n  max data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_max));
    out_file->Append("\n  avg data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_avg));
    out_file->Append("\n");
  }

  return Status::OK();
}

void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,
                                   WritableFile* out_file) {
  InternalKey ikey;
  ikey.DecodeFrom(key);

  out_file->Append("  HEX    ");
  out_file->Append(ikey.user_key().ToString(true).c_str());
  out_file->Append(": ");
  out_file->Append(value.ToString(true).c_str());
  out_file->Append("\n");

  std::string str_key = ikey.user_key().ToString();
  std::string str_value = value.ToString();
  std::string res_key(""), res_value("");
  char cspace = ' ';
  for (size_t i = 0; i < str_key.size(); i++) {
    if (str_key[i] == '\0') {
      res_key.append("\\0", 2);
    } else {
      res_key.append(&str_key[i], 1);
    }
    res_key.append(1, cspace);
  }
  for (size_t i = 0; i < str_value.size(); i++) {
    if (str_value[i] == '\0') {
      res_value.append("\\0", 2);
    } else {
      res_value.append(&str_value[i], 1);
    }
    res_value.append(1, cspace);
  }

  out_file->Append("  ASCII  ");
  out_file->Append(res_key.c_str());
  out_file->Append(": ");
  out_file->Append(res_value.c_str());
  out_file->Append("\n  ------\n");
}
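
// Illustrative output of DumpKeyValue() above for a user key "a\0b" with
// value "v"; '\0' bytes are escaped so the ASCII dump stays readable:
//
//   HEX    610062: 76
//   ASCII  a \0 b : v
//   ------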

}  // namespace rocksdb