//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/block_based_table_reader.h"

#include <algorithm>
#include <array>
#include <limits>
#include <string>
#include <utility>
#include <vector>

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"

#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/table_properties.h"

#include "table/block_based/block.h"
#include "table/block_based/block_based_filter_block.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_prefix_index.h"
#include "table/block_based/filter_block.h"
#include "table/block_based/full_filter_block.h"
#include "table/block_based/partitioned_filter_block.h"
#include "table/block_fetcher.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/multiget_context.h"
#include "table/persistent_cache_helper.h"
#include "table/sst_file_writer_collectors.h"
#include "table/two_level_iterator.h"

#include "monitoring/perf_context_imp.h"
#include "test_util/sync_point.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/file_reader_writer.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace rocksdb {

extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

typedef BlockBasedTable::IndexReader IndexReader;

BlockBasedTable::~BlockBasedTable() {
  delete rep_;
}

std::atomic<uint64_t> BlockBasedTable::next_cache_key_id_(0);

namespace {
// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result
// @param uncompression_dict Data for presetting the compression library's
//    dictionary.
Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<Block>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed, BlockType block_type,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options, SequenceNumber global_seqno,
    size_t read_amp_bytes_per_bit, MemoryAllocator* memory_allocator,
    bool for_compaction = false) {
  assert(result);

  BlockContents contents;
  BlockFetcher block_fetcher(
      file, prefetch_buffer, footer, options, handle, &contents, ioptions,
      do_uncompress, maybe_compressed, block_type, uncompression_dict,
      cache_options, memory_allocator, nullptr, for_compaction);
  Status s = block_fetcher.ReadBlockContents();
  if (s.ok()) {
    result->reset(new Block(std::move(contents), global_seqno,
                            read_amp_bytes_per_bit, ioptions.statistics));
  }

  return s;
}

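// Overload of ReadBlockFromFile that fills a raw BlockContents object rather
// than constructing a Block; consequently the global sequence number and
// read-amp bitmap parameters are unused in this variant.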
Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<BlockContents>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed, BlockType block_type,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options,
    SequenceNumber /* global_seqno */, size_t /* read_amp_bytes_per_bit */,
    MemoryAllocator* memory_allocator, bool for_compaction = false) {
  assert(result);

  result->reset(new BlockContents);

  BlockFetcher block_fetcher(
      file, prefetch_buffer, footer, options, handle, result->get(), ioptions,
      do_uncompress, maybe_compressed, block_type, uncompression_dict,
      cache_options, memory_allocator, nullptr, for_compaction);

  const Status s = block_fetcher.ReadBlockContents();
  if (!s.ok()) {
    result->reset();
  }

  return s;
}

inline MemoryAllocator* GetMemoryAllocator(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache.get()
             ? table_options.block_cache->memory_allocator()
             : nullptr;
}

inline MemoryAllocator* GetMemoryAllocatorForCompressedBlock(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache_compressed.get()
             ? table_options.block_cache_compressed->memory_allocator()
             : nullptr;
}

// Delete the entry residing in the cache.
template <class Entry>
void DeleteCachedEntry(const Slice& /*key*/, void* value) {
  auto entry = reinterpret_cast<Entry*>(value);
  delete entry;
}

// Release the cached entry and decrement its ref count.
void ForceReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, true /* force_erase */);
}

// Release the cached entry and decrement its ref count.
// Do not force erase
void ReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, false /* force_erase */);
}

// For a hash-based index, return true if prefix_extractor and
// prefix_extractor_block mismatch, false otherwise. This flag will be used
// as total_order_seek via NewIndexIterator
bool PrefixExtractorChanged(const TableProperties* table_properties,
                            const SliceTransform* prefix_extractor) {
  // BlockBasedTableOptions::kHashSearch requires prefix_extractor to be set.
  // Turn off hash index if prefix_extractor is not set; if prefix_extractor
  // is set but prefix_extractor_block is not set, also disable hash index
  if (prefix_extractor == nullptr || table_properties == nullptr ||
      table_properties->prefix_extractor_name.empty()) {
    return true;
  }

  // prefix_extractor and prefix_extractor_block are both non-empty
  if (table_properties->prefix_extractor_name.compare(
          prefix_extractor->Name()) != 0) {
    return true;
  } else {
    return false;
  }
}

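// Copy the contents of `buf` into a newly allocated heap buffer obtained from
// `allocator` (or the default allocator if none is provided), giving the
// caller an owned copy that is independent of the source slice's lifetime.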
CacheAllocationPtr CopyBufferToHeap(MemoryAllocator* allocator, Slice& buf) {
  CacheAllocationPtr heap_buf;
  heap_buf = AllocateBlock(buf.size(), allocator);
  memcpy(heap_buf.get(), buf.data(), buf.size());
  return heap_buf;
}

}  // namespace

// Encapsulates common functionality for the various index reader
// implementations. Provides access to the index block regardless of whether
// it is owned by the reader or stored in the cache, or whether it is pinned
// in the cache or not.
class BlockBasedTable::IndexReaderCommon : public BlockBasedTable::IndexReader {
 public:
  IndexReaderCommon(const BlockBasedTable* t,
                    CachableEntry<Block>&& index_block)
      : table_(t), index_block_(std::move(index_block)) {
    assert(table_ != nullptr);
  }

 protected:
  static Status ReadIndexBlock(const BlockBasedTable* table,
                               FilePrefetchBuffer* prefetch_buffer,
                               const ReadOptions& read_options,
                               GetContext* get_context,
                               BlockCacheLookupContext* lookup_context,
                               CachableEntry<Block>* index_block);

  const BlockBasedTable* table() const { return table_; }

  const InternalKeyComparator* internal_comparator() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);

    return &table_->get_rep()->internal_comparator;
  }

  bool index_has_first_key() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_has_first_key;
  }

  bool index_key_includes_seq() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_key_includes_seq;
  }

  bool index_value_is_full() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_value_is_full;
  }

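  // Return the index block held by this reader if it owns one; otherwise look
  // it up in (or read it into) the block cache. When `no_io` is set, the
  // lookup is restricted to the block cache tier.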
  Status GetOrReadIndexBlock(bool no_io, GetContext* get_context,
                             BlockCacheLookupContext* lookup_context,
                             CachableEntry<Block>* index_block) const;

  size_t ApproximateIndexBlockMemoryUsage() const {
    assert(!index_block_.GetOwnValue() || index_block_.GetValue() != nullptr);
    return index_block_.GetOwnValue()
               ? index_block_.GetValue()->ApproximateMemoryUsage()
               : 0;
  }

 private:
  const BlockBasedTable* table_;
  CachableEntry<Block> index_block_;
};

Status BlockBasedTable::IndexReaderCommon::ReadIndexBlock(
    const BlockBasedTable* table, FilePrefetchBuffer* prefetch_buffer,
    const ReadOptions& read_options, GetContext* get_context,
    BlockCacheLookupContext* lookup_context,
    CachableEntry<Block>* index_block) {
  PERF_TIMER_GUARD(read_index_block_nanos);

  assert(table != nullptr);
  assert(index_block != nullptr);
  assert(index_block->IsEmpty());

  const Rep* const rep = table->get_rep();
  assert(rep != nullptr);

  const Status s = table->RetrieveBlock(
      prefetch_buffer, read_options, rep->footer.index_handle(),
      UncompressionDict::GetEmptyDict(), index_block, BlockType::kIndex,
      get_context, lookup_context);

  return s;
}

Status BlockBasedTable::IndexReaderCommon::GetOrReadIndexBlock(
    bool no_io, GetContext* get_context,
    BlockCacheLookupContext* lookup_context,
    CachableEntry<Block>* index_block) const {
  assert(index_block != nullptr);

  if (!index_block_.IsEmpty()) {
    index_block->SetUnownedValue(index_block_.GetValue());
    return Status::OK();
  }

  ReadOptions read_options;
  if (no_io) {
    read_options.read_tier = kBlockCacheTier;
  }

  return ReadIndexBlock(table_, /*prefetch_buffer=*/nullptr, read_options,
                        get_context, lookup_context, index_block);
}

// Index that allows binary search lookup in a two-level index structure.
class PartitionIndexReader : public BlockBasedTable::IndexReaderCommon {
 public:
  // Read the partition index from the file and create an instance for
  // `PartitionIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
  static Status Create(const BlockBasedTable* table,
                       FilePrefetchBuffer* prefetch_buffer, bool use_cache,
                       bool prefetch, bool pin,
                       BlockCacheLookupContext* lookup_context,
                       std::unique_ptr<IndexReader>* index_reader) {
    assert(table != nullptr);
    assert(table->get_rep());
    assert(!pin || prefetch);
    assert(index_reader != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(),
                         /*get_context=*/nullptr, lookup_context, &index_block);
      if (!s.ok()) {
        return s;
      }

      if (use_cache && !pin) {
        index_block.Reset();
      }
    }

    index_reader->reset(
        new PartitionIndexReader(table, std::move(index_block)));

    return Status::OK();
  }

  // return a two-level iterator: first level is on the partition index
  InternalIteratorBase<IndexValue>* NewIterator(
      const ReadOptions& read_options, bool /* disable_prefix_seek */,
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
    CachableEntry<Block> index_block;
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

      return NewErrorInternalIterator<IndexValue>(s);
    }

    InternalIteratorBase<IndexValue>* it = nullptr;

    Statistics* kNullStats = nullptr;
    // Filters are already checked before seeking the index
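    // When the partitions were loaded and pinned by CacheDependencies(),
    // serve them from partition_map_ through a two-level iterator; otherwise
    // fall back to an iterator that reads partitions on demand.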
    if (!partition_map_.empty()) {
      // We don't return pinned data from index blocks, so no need
      // to set `block_contents_pinned`.
      it = NewTwoLevelIterator(
          new BlockBasedTable::PartitionedIndexIteratorState(table(),
                                                             &partition_map_),
          index_block.GetValue()->NewIndexIterator(
              internal_comparator(), internal_comparator()->user_comparator(),
              nullptr, kNullStats, true, index_has_first_key(),
              index_key_includes_seq(), index_value_is_full()));
    } else {
      ReadOptions ro;
      ro.fill_cache = read_options.fill_cache;
      // We don't return pinned data from index blocks, so no need
      // to set `block_contents_pinned`.
      it = new BlockBasedTableIterator<IndexBlockIter, IndexValue>(
          table(), ro, *internal_comparator(),
          index_block.GetValue()->NewIndexIterator(
              internal_comparator(), internal_comparator()->user_comparator(),
              nullptr, kNullStats, true, index_has_first_key(),
              index_key_includes_seq(), index_value_is_full()),
          false, true, /* prefix_extractor */ nullptr, BlockType::kIndex,
          lookup_context ? lookup_context->caller
                         : TableReaderCaller::kUncategorized);
    }

    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;

    // TODO(myabandeh): Update TwoLevelIterator to be able to make use of
    // on-stack BlockIter while the state is on heap. Currently it assumes
    // the first level iter is always on heap and will attempt to delete it
    // in its destructor.
  }

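  // Prefetch and (optionally) pin all index partitions referenced by the
  // top-level index so later reads do not need to hit the file one partition
  // at a time.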
  void CacheDependencies(bool pin) override {
    // Before reading partitions, prefetch them to avoid lots of IOs
    BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
    const BlockBasedTable::Rep* rep = table()->rep_;
    IndexBlockIter biter;
    BlockHandle handle;
    Statistics* kNullStats = nullptr;

    CachableEntry<Block> index_block;
    Status s = GetOrReadIndexBlock(false /* no_io */, nullptr /* get_context */,
                                   &lookup_context, &index_block);
    if (!s.ok()) {
      ROCKS_LOG_WARN(rep->ioptions.info_log,
                     "Error retrieving top-level index block while trying to "
                     "cache index partitions: %s",
                     s.ToString().c_str());
      return;
    }

    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    index_block.GetValue()->NewIndexIterator(
        internal_comparator(), internal_comparator()->user_comparator(), &biter,
        kNullStats, true, index_has_first_key(), index_key_includes_seq(),
        index_value_is_full());
    // Index partitions are assumed to be consecutive. Prefetch them all.
    // Read the first block offset
    biter.SeekToFirst();
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
    handle = biter.value().handle;
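    // The prefetch range spans from the start of the first partition to the
    // end of the last partition's block, including its trailer.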
    uint64_t prefetch_off = handle.offset();

    // Read the last block's offset
    biter.SeekToLast();
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
    handle = biter.value().handle;
    uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize;
    uint64_t prefetch_len = last_off - prefetch_off;
    std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
    auto& file = rep->file;
    prefetch_buffer.reset(new FilePrefetchBuffer());
    s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
                                  static_cast<size_t>(prefetch_len));

    // After prefetch, read the partitions one by one
    biter.SeekToFirst();
    auto ro = ReadOptions();
    for (; biter.Valid(); biter.Next()) {
      handle = biter.value().handle;
      CachableEntry<Block> block;
      // TODO: Support counter batch update for partitioned index and
      // filter blocks
      s = table()->MaybeReadBlockAndLoadToCache(
          prefetch_buffer.get(), ro, handle, UncompressionDict::GetEmptyDict(),
          &block, BlockType::kIndex, /*get_context=*/nullptr, &lookup_context,
          /*contents=*/nullptr);

      assert(s.ok() || block.GetValue() == nullptr);
      if (s.ok() && block.GetValue() != nullptr) {
        if (block.IsCached()) {
          if (pin) {
            partition_map_[handle.offset()] = std::move(block);
          }
        }
      }
    }
  }

  size_t ApproximateMemoryUsage() const override {
    size_t usage = ApproximateIndexBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size(const_cast<PartitionIndexReader*>(this));
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    // TODO(myabandeh): more accurate estimate of partition_map_ mem usage
    return usage;
  }

 private:
  PartitionIndexReader(const BlockBasedTable* t,
                       CachableEntry<Block>&& index_block)
      : IndexReaderCommon(t, std::move(index_block)) {}

  std::unordered_map<uint64_t, CachableEntry<Block>> partition_map_;
};

// Index that allows binary search lookup for the first key of each block.
// This class can be viewed as a thin wrapper for `Block` class which already
// supports binary search.
class BinarySearchIndexReader : public BlockBasedTable::IndexReaderCommon {
 public:
  // Read index from the file and create an instance for
  // `BinarySearchIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
  static Status Create(const BlockBasedTable* table,
                       FilePrefetchBuffer* prefetch_buffer, bool use_cache,
                       bool prefetch, bool pin,
                       BlockCacheLookupContext* lookup_context,
                       std::unique_ptr<IndexReader>* index_reader) {
    assert(table != nullptr);
    assert(table->get_rep());
    assert(!pin || prefetch);
    assert(index_reader != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(),
                         /*get_context=*/nullptr, lookup_context, &index_block);
      if (!s.ok()) {
        return s;
      }

      if (use_cache && !pin) {
        index_block.Reset();
      }
    }

    index_reader->reset(
        new BinarySearchIndexReader(table, std::move(index_block)));

    return Status::OK();
  }

  InternalIteratorBase<IndexValue>* NewIterator(
      const ReadOptions& read_options, bool /* disable_prefix_seek */,
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
    CachableEntry<Block> index_block;
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

      return NewErrorInternalIterator<IndexValue>(s);
    }

    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    auto it = index_block.GetValue()->NewIndexIterator(
        internal_comparator(), internal_comparator()->user_comparator(), iter,
        kNullStats, true, index_has_first_key(), index_key_includes_seq(),
        index_value_is_full());

    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;
  }

  size_t ApproximateMemoryUsage() const override {
    size_t usage = ApproximateIndexBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size(const_cast<BinarySearchIndexReader*>(this));
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
  }

 private:
  BinarySearchIndexReader(const BlockBasedTable* t,
                          CachableEntry<Block>&& index_block)
      : IndexReaderCommon(t, std::move(index_block)) {}
};

// Index that leverages an internal hash table to quicken the lookup for a given
// key.
class HashIndexReader : public BlockBasedTable::IndexReaderCommon {
 public:
  static Status Create(const BlockBasedTable* table,
                       FilePrefetchBuffer* prefetch_buffer,
                       InternalIterator* meta_index_iter, bool use_cache,
                       bool prefetch, bool pin,
                       BlockCacheLookupContext* lookup_context,
                       std::unique_ptr<IndexReader>* index_reader) {
    assert(table != nullptr);
    assert(index_reader != nullptr);
    assert(!pin || prefetch);

    const BlockBasedTable::Rep* rep = table->get_rep();
    assert(rep != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(),
                         /*get_context=*/nullptr, lookup_context, &index_block);
      if (!s.ok()) {
        return s;
      }

      if (use_cache && !pin) {
        index_block.Reset();
      }
    }

    // Note, failure to create prefix hash index does not need to be a
    // hard error. We can still fall back to the original binary search index.
    // So, Create will succeed regardless, from this point on.

    index_reader->reset(new HashIndexReader(table, std::move(index_block)));

    // Get prefixes block
    BlockHandle prefixes_handle;
    Status s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock,
                             &prefixes_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    // Get index metadata block
    BlockHandle prefixes_meta_handle;
    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock,
                      &prefixes_meta_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    RandomAccessFileReader* const file = rep->file.get();
    const Footer& footer = rep->footer;
    const ImmutableCFOptions& ioptions = rep->ioptions;
    const PersistentCacheOptions& cache_options = rep->persistent_cache_options;
    MemoryAllocator* const memory_allocator =
        GetMemoryAllocator(rep->table_options);

    // Read contents for the blocks
    BlockContents prefixes_contents;
    BlockFetcher prefixes_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_handle,
        &prefixes_contents, ioptions, true /*decompress*/,
        true /*maybe_compressed*/, BlockType::kHashIndexPrefixes,
        UncompressionDict::GetEmptyDict(), cache_options, memory_allocator);
    s = prefixes_block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      return s;
    }
    BlockContents prefixes_meta_contents;
    BlockFetcher prefixes_meta_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_meta_handle,
        &prefixes_meta_contents, ioptions, true /*decompress*/,
        true /*maybe_compressed*/, BlockType::kHashIndexMetadata,
        UncompressionDict::GetEmptyDict(), cache_options, memory_allocator);
    s = prefixes_meta_block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    BlockPrefixIndex* prefix_index = nullptr;
    s = BlockPrefixIndex::Create(rep->internal_prefix_transform.get(),
                                 prefixes_contents.data,
                                 prefixes_meta_contents.data, &prefix_index);
    // TODO: log error
    if (s.ok()) {
      HashIndexReader* const hash_index_reader =
          static_cast<HashIndexReader*>(index_reader->get());
      hash_index_reader->prefix_index_.reset(prefix_index);
    }

    return Status::OK();
  }

  InternalIteratorBase<IndexValue>* NewIterator(
      const ReadOptions& read_options, bool disable_prefix_seek,
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
    CachableEntry<Block> index_block;
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

      return NewErrorInternalIterator<IndexValue>(s);
    }

    Statistics* kNullStats = nullptr;
    const bool total_order_seek =
        read_options.total_order_seek || disable_prefix_seek;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    auto it = index_block.GetValue()->NewIndexIterator(
        internal_comparator(), internal_comparator()->user_comparator(), iter,
        kNullStats, total_order_seek, index_has_first_key(),
        index_key_includes_seq(), index_value_is_full(),
        false /* block_contents_pinned */, prefix_index_.get());

    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;
  }

  size_t ApproximateMemoryUsage() const override {
    size_t usage = ApproximateIndexBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size(const_cast<HashIndexReader*>(this));
#else
    if (prefix_index_) {
      usage += prefix_index_->ApproximateMemoryUsage();
    }
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
  }

 private:
  HashIndexReader(const BlockBasedTable* t, CachableEntry<Block>&& index_block)
      : IndexReaderCommon(t, std::move(index_block)) {}

  std::unique_ptr<BlockPrefixIndex> prefix_index_;
};

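// The following three helpers update statistics and per-GetContext counters
// for block cache hits, misses, and insertions, broken down by block type.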
void BlockBasedTable::UpdateCacheHitMetrics(BlockType block_type,
                                            GetContext* get_context,
                                            size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  PERF_COUNTER_ADD(block_cache_hit_count, 1);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_hit;
    get_context->get_context_stats_.num_cache_bytes_read += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_HIT);
    RecordTick(statistics, BLOCK_CACHE_BYTES_READ, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_HIT);
      }
      break;

    case BlockType::kCompressionDictionary:
      // TODO: introduce perf counter for compression dictionary hit count
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_HIT);
      }
      break;

    case BlockType::kIndex:
      PERF_COUNTER_ADD(block_cache_index_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_HIT);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_HIT);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheMissMetrics(BlockType block_type,
                                             GetContext* get_context) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce aggregate (not per-level) block cache miss count
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_miss;
  } else {
    RecordTick(statistics, BLOCK_CACHE_MISS);
  }

  // TODO: introduce perf counters for misses per block type
  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_MISS);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_MISS);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_MISS);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_MISS);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheInsertionMetrics(BlockType block_type,
                                                  GetContext* get_context,
                                                  size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce perf counters for block cache insertions
  if (get_context) {
    ++get_context->get_context_stats_.num_cache_add;
    get_context->get_context_stats_.num_cache_bytes_write += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_ADD);
    RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_add;
        get_context->get_context_stats_.num_cache_filter_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
        RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_add;
        get_context->get_context_stats_
            .num_cache_compression_dict_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD);
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
                   usage);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_add;
        get_context->get_context_stats_.num_cache_index_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
        RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usage);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_add;
        get_context->get_context_stats_.num_cache_data_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
        RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, usage);
      }
      break;
  }
}

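// Look up a block in the block cache and record a hit or miss (plus usage
// statistics) for the given block type. Returns nullptr on a cache miss.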
Cache::Handle* BlockBasedTable::GetEntryFromCache(
    Cache* block_cache, const Slice& key, BlockType block_type,
    GetContext* get_context) const {
  auto cache_handle = block_cache->Lookup(key, rep_->ioptions.statistics);

  if (cache_handle != nullptr) {
    UpdateCacheHitMetrics(block_type, get_context,
                          block_cache->GetUsage(cache_handle));
  } else {
    UpdateCacheMissMetrics(block_type, get_context);
  }

  return cache_handle;
}

// Helper function to setup the cache key's prefix for the Table.
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  rep->compressed_cache_key_prefix_size = 0;
  if (rep->table_options.block_cache != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file->file(),
                        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
  }
  if (rep->table_options.persistent_cache != nullptr) {
    GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(),
                        &rep->persistent_cache_key_prefix[0],
                        &rep->persistent_cache_key_prefix_size);
  }
  if (rep->table_options.block_cache_compressed != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
                        rep->file->file(), &rep->compressed_cache_key_prefix[0],
                        &rep->compressed_cache_key_prefix_size);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, RandomAccessFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (cc && *size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, WritableFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (*size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

namespace {
// Return true if table_properties has `user_prop_name` set to a `true` value,
// or if it doesn't contain this property (for backward compatibility).
bool IsFeatureSupported(const TableProperties& table_properties,
                        const std::string& user_prop_name, Logger* info_log) {
  auto& props = table_properties.user_collected_properties;
  auto pos = props.find(user_prop_name);
  // Older version doesn't have this value set. Skip this check.
  if (pos != props.end()) {
    if (pos->second == kPropFalse) {
      return false;
    } else if (pos->second != kPropTrue) {
      ROCKS_LOG_WARN(info_log, "Property %s has invalidate value %s",
                     user_prop_name.c_str(), pos->second.c_str());
    }
  }
  return true;
}

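// Determine the global sequence number to apply to this file, if any, from
// its user-collected properties. Non-external files and v1 external SST files
// must not carry a global seqno; for version 2 and above the property is
// validated against `largest_seqno` when the latter is known.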
// Caller has to ensure seqno is not nullptr.
Status GetGlobalSequenceNumber(const TableProperties& table_properties,
                               SequenceNumber largest_seqno,
                               SequenceNumber* seqno) {
  const auto& props = table_properties.user_collected_properties;
  const auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
  const auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);

  *seqno = kDisableGlobalSequenceNumber;
  if (version_pos == props.end()) {
    if (seqno_pos != props.end()) {
      std::array<char, 200> msg_buf;
      // This is not an external sst file, global_seqno is not supported.
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "A non-external sst file have global seqno property with value %s",
          seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  uint32_t version = DecodeFixed32(version_pos->second.c_str());
  if (version < 2) {
    if (seqno_pos != props.end() || version != 1) {
      std::array<char, 200> msg_buf;
      // This is a v1 external sst file, global_seqno is not supported.
      snprintf(msg_buf.data(), msg_buf.max_size(),
               "An external sst file with version %u have global seqno "
               "property with value %s",
               version, seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  // Since we have a plan to deprecate global_seqno, we do not return failure
  // if seqno_pos == props.end(). We rely on version_pos to detect whether the
  // SST is external.
  SequenceNumber global_seqno(0);
  if (seqno_pos != props.end()) {
    global_seqno = DecodeFixed64(seqno_pos->second.c_str());
  }
  // SstTableReader open table reader with kMaxSequenceNumber as largest_seqno
  // to denote it is unknown.
  if (largest_seqno < kMaxSequenceNumber) {
    if (global_seqno == 0) {
      global_seqno = largest_seqno;
    }
    if (global_seqno != largest_seqno) {
      std::array<char, 200> msg_buf;
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "An external sst file with version %u have global seqno property "
          "with value %s, while largest seqno in the file is %llu",
          version, seqno_pos->second.c_str(),
          static_cast<unsigned long long>(largest_seqno));
      return Status::Corruption(msg_buf.data());
    }
  }
  *seqno = global_seqno;

  if (global_seqno > kMaxSequenceNumber) {
    std::array<char, 200> msg_buf;
    snprintf(msg_buf.data(), msg_buf.max_size(),
             "An external sst file with version %u have global seqno property "
             "with value %llu, which is greater than kMaxSequenceNumber",
             version, static_cast<unsigned long long>(global_seqno));
    return Status::Corruption(msg_buf.data());
  }

  return Status::OK();
}
}  // namespace

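// Build a block cache key by appending the varint-encoded block offset to the
// file's cache key prefix.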
Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
                                   size_t cache_key_prefix_size,
                                   const BlockHandle& handle, char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}

Status BlockBasedTable::Open(
    const ImmutableCFOptions& ioptions, const EnvOptions& env_options,
    const BlockBasedTableOptions& table_options,
    const InternalKeyComparator& internal_comparator,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    std::unique_ptr<TableReader>* table_reader,
    const SliceTransform* prefix_extractor,
    const bool prefetch_index_and_filter_in_cache, const bool skip_filters,
    const int level, const bool immortal_table,
    const SequenceNumber largest_seqno, TailPrefetchStats* tail_prefetch_stats,
    BlockCacheTracer* const block_cache_tracer) {
  table_reader->reset();

  Status s;
  Footer footer;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;

  // prefetch both index and filters, down to all partitions
  const bool prefetch_all = prefetch_index_and_filter_in_cache || level == 0;
  const bool preload_all = !table_options.cache_index_and_filter_blocks;

  s = PrefetchTail(file.get(), file_size, tail_prefetch_stats, prefetch_all,
                   preload_all, &prefetch_buffer);

  // Read in the following order:
  //    1. Footer
  //    2. [metaindex block]
  //    3. [meta block: properties]
  //    4. [meta block: range deletion tombstone]
  //    5. [meta block: compression dictionary]
  //    6. [meta block: index]
  //    7. [meta block: filter]
  s = ReadFooterFromFile(file.get(), prefetch_buffer.get(), file_size, &footer,
                         kBlockBasedTableMagicNumber);
  if (!s.ok()) {
    return s;
  }
  if (!BlockBasedTableSupportedVersion(footer.version())) {
    return Status::Corruption(
        "Unknown Footer version. Maybe this file was created with newer "
        "version of RocksDB?");
  }

  // We've successfully read the footer. We are ready to serve requests.
  // Better not mutate rep_ after the creation. eg. internal_prefix_transform
  // raw pointer will be used to create HashIndexReader, whose reset may
  // access a dangling pointer.
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
                                      internal_comparator, skip_filters, level,
                                      immortal_table);
  rep->file = std::move(file);
  rep->footer = footer;
  rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
  // We need to wrap data with internal_prefix_transform to make sure it can
  // handle prefix correctly.
  rep->internal_prefix_transform.reset(
      new InternalKeySliceTransform(prefix_extractor));
  SetupCacheKeyPrefix(rep);
  std::unique_ptr<BlockBasedTable> new_table(
      new BlockBasedTable(rep, block_cache_tracer));

  // page cache options
  rep->persistent_cache_options =
      PersistentCacheOptions(rep->table_options.persistent_cache,
                             std::string(rep->persistent_cache_key_prefix,
                                         rep->persistent_cache_key_prefix_size),
                             rep->ioptions.statistics);

  // Meta-blocks are not dictionary compressed. Explicitly set the dictionary
  // handle to null, otherwise it may be seen as uninitialized during the below
  // meta-block reads.
  rep->compression_dict_handle = BlockHandle::NullBlockHandle();

  // Read metaindex
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  s = new_table->ReadMetaBlock(prefetch_buffer.get(), &meta, &meta_iter);
  if (!s.ok()) {
    return s;
  }

  // Populates table_properties and some fields that depend on it,
  // such as index_type.
  s = new_table->ReadPropertiesBlock(prefetch_buffer.get(), meta_iter.get(),
                                     largest_seqno);
  if (!s.ok()) {
    return s;
  }
  s = new_table->ReadRangeDelBlock(prefetch_buffer.get(), meta_iter.get(),
                                   internal_comparator, &lookup_context);
  if (!s.ok()) {
    return s;
  }
  s = new_table->PrefetchIndexAndFilterBlocks(
      prefetch_buffer.get(), meta_iter.get(), new_table.get(), prefetch_all,
      table_options, level, &lookup_context);

  if (s.ok()) {
    // Update tail prefetch stats
    assert(prefetch_buffer.get() != nullptr);
    if (tail_prefetch_stats != nullptr) {
      assert(prefetch_buffer->min_offset_read() < file_size);
      tail_prefetch_stats->RecordEffectiveSize(
          static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
    }

    *table_reader = std::move(new_table);
  }

  return s;
}

Status BlockBasedTable::PrefetchTail(
    RandomAccessFileReader* file, uint64_t file_size,
    TailPrefetchStats* tail_prefetch_stats, const bool prefetch_all,
    const bool preload_all,
    std::unique_ptr<FilePrefetchBuffer>* prefetch_buffer) {
  size_t tail_prefetch_size = 0;
  if (tail_prefetch_stats != nullptr) {
    // Multiple threads may get a 0 (no history) when running in parallel,
    // but it will get cleared after the first of them finishes.
    tail_prefetch_size = tail_prefetch_stats->GetSuggestedPrefetchSize();
  }
  if (tail_prefetch_size == 0) {
    // Before read footer, readahead backwards to prefetch data. Do more
    // readahead if we're going to read index/filter.
    // TODO: This may incorrectly select small readahead in case partitioned
    // index/filter is enabled and top-level partition pinning is enabled.
    // That's because we need to issue readahead before we read the properties,
    // at which point we don't yet know the index type.
    tail_prefetch_size = prefetch_all || preload_all ? 512 * 1024 : 4 * 1024;
  }
  size_t prefetch_off;
  size_t prefetch_len;
  if (file_size < tail_prefetch_size) {
    prefetch_off = 0;
    prefetch_len = static_cast<size_t>(file_size);
  } else {
    prefetch_off = static_cast<size_t>(file_size - tail_prefetch_size);
    prefetch_len = tail_prefetch_size;
  }
  TEST_SYNC_POINT_CALLBACK("BlockBasedTable::Open::TailPrefetchLen",
                           &tail_prefetch_size);
  Status s;
  // TODO should not have this special logic in the future.
  if (!file->use_direct_io()) {
    prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, false, true));
    s = file->Prefetch(prefetch_off, prefetch_len);
  } else {
    prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, true, true));
    s = (*prefetch_buffer)->Prefetch(file, prefetch_off, prefetch_len);
  }
  return s;
}

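// Compute a checksum of type `type` over buf[0, len) and compare it with the
// expected value read from the file (unmasked first in the kCRC32c case).
// Returns Status::Corruption on a mismatch or an unknown checksum type.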
Status VerifyChecksum(const ChecksumType type, const char* buf, size_t len,
                      uint32_t expected) {
  Status s;
  uint32_t actual = 0;
  switch (type) {
    case kNoChecksum:
      break;
    case kCRC32c:
      expected = crc32c::Unmask(expected);
      actual = crc32c::Value(buf, len);
      break;
    case kxxHash:
      actual = XXH32(buf, static_cast<int>(len), 0);
      break;
    case kxxHash64:
      actual = static_cast<uint32_t>(XXH64(buf, static_cast<int>(len), 0) &
                                     uint64_t{0xffffffff});
      break;
    default:
      s = Status::Corruption("unknown checksum type");
  }
  if (s.ok() && actual != expected) {
    s = Status::Corruption("properties block checksum mismatched");
  }
  return s;
}

Status BlockBasedTable::TryReadPropertiesWithGlobalSeqno(
    FilePrefetchBuffer* prefetch_buffer, const Slice& handle_value,
    TableProperties** table_properties) {
  assert(table_properties != nullptr);
  // If this is an external SST file ingested with write_global_seqno set to
  // true, then we expect the checksum mismatch because checksum was written
  // by SstFileWriter, but its global seqno in the properties block may have
  // been changed during ingestion. In this case, we read the properties
  // block, copy it to a memory buffer, change the global seqno to its
  // original value, i.e. 0, and verify the checksum again.
  BlockHandle props_block_handle;
  CacheAllocationPtr tmp_buf;
  Status s = ReadProperties(handle_value, rep_->file.get(), prefetch_buffer,
                            rep_->footer, rep_->ioptions, table_properties,
                            false /* verify_checksum */, &props_block_handle,
                            &tmp_buf, false /* compression_type_missing */,
                            nullptr /* memory_allocator */);
  if (s.ok() && tmp_buf) {
    const auto seqno_pos_iter =
        (*table_properties)
            ->properties_offsets.find(
                ExternalSstFilePropertyNames::kGlobalSeqno);
    size_t block_size = static_cast<size_t>(props_block_handle.size());
    if (seqno_pos_iter != (*table_properties)->properties_offsets.end()) {
      uint64_t global_seqno_offset = seqno_pos_iter->second;
      EncodeFixed64(
          tmp_buf.get() + global_seqno_offset - props_block_handle.offset(), 0);
    }
    uint32_t value = DecodeFixed32(tmp_buf.get() + block_size + 1);
    s = rocksdb::VerifyChecksum(rep_->footer.checksum(), tmp_buf.get(),
                                block_size + 1, value);
  }
  return s;
}

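// Read the properties meta-block (falling back to the global-seqno rewrite
// above when the checksum does not match) and cache the fields derived from
// it, such as the index type, compression flags, and global sequence number.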
Status BlockBasedTable::ReadPropertiesBlock(
1288
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
1289
    const SequenceNumber largest_seqno) {
1290
  bool found_properties_block = true;
1291 1292
  Status s;
  s = SeekToPropertiesBlock(meta_iter, &found_properties_block);
1293

1294
  if (!s.ok()) {
1295
    ROCKS_LOG_WARN(rep_->ioptions.info_log,
1296 1297
                   "Error when seeking to properties block from file: %s",
                   s.ToString().c_str());
1298
  } else if (found_properties_block) {
K
Kai Liu 已提交
1299
    s = meta_iter->status();
K
kailiu 已提交
1300
    TableProperties* table_properties = nullptr;
K
Kai Liu 已提交
1301
    if (s.ok()) {
1302
      s = ReadProperties(
1303 1304
          meta_iter->value(), rep_->file.get(), prefetch_buffer, rep_->footer,
          rep_->ioptions, &table_properties, true /* verify_checksum */,
1305 1306 1307 1308 1309
          nullptr /* ret_block_handle */, nullptr /* ret_block_contents */,
          false /* compression_type_missing */, nullptr /* memory_allocator */);
    }

    if (s.IsCorruption()) {
1310 1311
      s = TryReadPropertiesWithGlobalSeqno(prefetch_buffer, meta_iter->value(),
                                           &table_properties);
1312 1313 1314 1315
    }
    std::unique_ptr<TableProperties> props_guard;
    if (table_properties != nullptr) {
      props_guard.reset(table_properties);
K
Kai Liu 已提交
1316
    }
J
jorlow@chromium.org 已提交
1317

K
Kai Liu 已提交
1318
    if (!s.ok()) {
1319
      ROCKS_LOG_WARN(rep_->ioptions.info_log,
1320 1321 1322
                     "Encountered error while reading data from properties "
                     "block %s",
                     s.ToString().c_str());
K
kailiu 已提交
1323
    } else {
1324
      assert(table_properties != nullptr);
1325 1326 1327 1328 1329 1330
      rep_->table_properties.reset(props_guard.release());
      rep_->blocks_maybe_compressed =
          rep_->table_properties->compression_name !=
          CompressionTypeToString(kNoCompression);
      rep_->blocks_definitely_zstd_compressed =
          (rep_->table_properties->compression_name ==
1331
               CompressionTypeToString(kZSTD) ||
1332
           rep_->table_properties->compression_name ==
1333
               CompressionTypeToString(kZSTDNotFinalCompression));
K
Kai Liu 已提交
1334
    }
1335
  } else {
1336
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
1337
                    "Cannot find Properties block from file.");
K
Kai Liu 已提交
1338
  }
#ifndef ROCKSDB_LITE
  if (rep_->table_properties) {
    ParseSliceTransform(rep_->table_properties->prefix_extractor_name,
                        &(rep_->table_prefix_extractor));
  }
#endif  // ROCKSDB_LITE

  // Read the table properties, if provided.
  if (rep_->table_properties) {
    rep_->whole_key_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kWholeKeyFiltering,
                           rep_->ioptions.info_log);
    rep_->prefix_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kPrefixFiltering,
                           rep_->ioptions.info_log);

    rep_->index_key_includes_seq =
        rep_->table_properties->index_key_is_user_key == 0;
    rep_->index_value_is_full =
        rep_->table_properties->index_value_is_delta_encoded == 0;

    // Update index_type with the true type.
    // If table properties don't contain index type, we assume that the table
    // is in very old format and has kBinarySearch index type.
    auto& props = rep_->table_properties->user_collected_properties;
    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
    if (pos != props.end()) {
      rep_->index_type = static_cast<BlockBasedTableOptions::IndexType>(
          DecodeFixed32(pos->second.c_str()));
    }

    rep_->index_has_first_key =
        rep_->index_type == BlockBasedTableOptions::kBinarySearchWithFirstKey;

    s = GetGlobalSequenceNumber(*(rep_->table_properties), largest_seqno,
                                &(rep_->global_seqno));
    if (!s.ok()) {
      ROCKS_LOG_ERROR(rep_->ioptions.info_log, "%s", s.ToString().c_str());
    }
  }
  return s;
}

Status BlockBasedTable::ReadRangeDelBlock(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    const InternalKeyComparator& internal_comparator,
    BlockCacheLookupContext* lookup_context) {
  Status s;
  bool found_range_del_block;
  BlockHandle range_del_handle;
  s = SeekToRangeDelBlock(meta_iter, &found_range_del_block, &range_del_handle);
  if (!s.ok()) {
    ROCKS_LOG_WARN(
        rep_->ioptions.info_log,
        "Error when seeking to range delete tombstones block from file: %s",
        s.ToString().c_str());
  } else if (found_range_del_block && !range_del_handle.IsNull()) {
    ReadOptions read_options;
    std::unique_ptr<InternalIterator> iter(NewDataBlockIterator<DataBlockIter>(
        read_options, range_del_handle,
        /*input_iter=*/nullptr, BlockType::kRangeDeletion,
        /*get_context=*/nullptr, lookup_context, Status(), prefetch_buffer));
    assert(iter != nullptr);
    s = iter->status();
    if (!s.ok()) {
      ROCKS_LOG_WARN(
          rep_->ioptions.info_log,
          "Encountered error while reading data from range del block %s",
          s.ToString().c_str());
    } else {
      rep_->fragmented_range_dels =
          std::make_shared<FragmentedRangeTombstoneList>(std::move(iter),
                                                         internal_comparator);
    }
  }
  return s;
}

Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    BlockBasedTable* new_table, bool prefetch_all,
    const BlockBasedTableOptions& table_options, const int level,
    BlockCacheLookupContext* lookup_context) {
  Status s;

  // Find filter handle and filter type
  if (rep_->filter_policy) {
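    // Probe the meta-index with each known filter-block name prefix; the
    // first entry named "<prefix><filter policy name>" determines both the
    // filter type and the handle of its filter block.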
    for (auto filter_type :
         {Rep::FilterType::kFullFilter, Rep::FilterType::kPartitionedFilter,
          Rep::FilterType::kBlockFilter}) {
      std::string prefix;
      switch (filter_type) {
        case Rep::FilterType::kFullFilter:
          prefix = kFullFilterBlockPrefix;
          break;
        case Rep::FilterType::kPartitionedFilter:
          prefix = kPartitionedFilterBlockPrefix;
          break;
        case Rep::FilterType::kBlockFilter:
          prefix = kFilterBlockPrefix;
          break;
        default:
          assert(0);
      }
      std::string filter_block_key = prefix;
      filter_block_key.append(rep_->filter_policy->Name());
      if (FindMetaBlock(meta_iter, filter_block_key, &rep_->filter_handle)
              .ok()) {
        rep_->filter_type = filter_type;
        break;
      }
    }
  }

  // Find compression dictionary handle
  bool found_compression_dict = false;
  s = SeekToCompressionDictBlock(meta_iter, &found_compression_dict,
                                 &rep_->compression_dict_handle);
  if (!s.ok()) {
    return s;
  }

  BlockBasedTableOptions::IndexType index_type = rep_->index_type;

  const bool use_cache = table_options.cache_index_and_filter_blocks;

  // pin both index and filters, down to all partitions
  const bool pin_all =
      rep_->table_options.pin_l0_filter_and_index_blocks_in_cache && level == 0;

  // prefetch the first level of index
  const bool prefetch_index =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
  // pin the first level of index
  const bool pin_index =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);

  std::unique_ptr<IndexReader> index_reader;
  s = new_table->CreateIndexReader(prefetch_buffer, meta_iter, use_cache,
                                   prefetch_index, pin_index, lookup_context,
                                   &index_reader);
  if (!s.ok()) {
    return s;
  }

  rep_->index_reader = std::move(index_reader);

  // The partitions of a partitioned index are always stored in the block
  // cache; hence they follow the pin and prefetch configuration regardless of
  // the value of cache_index_and_filter_blocks
  if (prefetch_all) {
    rep_->index_reader->CacheDependencies(pin_all);
  }

  // prefetch the first level of filter
  const bool prefetch_filter =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       rep_->filter_type == Rep::FilterType::kPartitionedFilter);
  // Partition filters cannot be enabled without partition indexes
  assert(!prefetch_filter || prefetch_index);
  // pin the first level of filter
  const bool pin_filter =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  rep_->filter_type == Rep::FilterType::kPartitionedFilter);

  if (rep_->filter_policy) {
    auto filter = new_table->CreateFilterBlockReader(
        prefetch_buffer, use_cache, prefetch_filter, pin_filter,
        lookup_context);
    if (filter) {
      // Refer to the comment above about partitioned indexes always being cached
      if (prefetch_all) {
        filter->CacheDependencies(pin_all);
      }

      rep_->filter = std::move(filter);
    }
  }

  if (!rep_->compression_dict_handle.IsNull()) {
    std::unique_ptr<UncompressionDictReader> uncompression_dict_reader;
    s = UncompressionDictReader::Create(this, prefetch_buffer, use_cache,
                                        prefetch_all, pin_all, lookup_context,
                                        &uncompression_dict_reader);
    if (!s.ok()) {
      return s;
    }

    rep_->uncompression_dict_reader = std::move(uncompression_dict_reader);
  }

  assert(s.ok());
  return s;
}

void BlockBasedTable::SetupForCompaction() {
  switch (rep_->ioptions.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->file()->Hint(RandomAccessFile::NORMAL);
      break;
    case Options::SEQUENTIAL:
      rep_->file->file()->Hint(RandomAccessFile::SEQUENTIAL);
      break;
    case Options::WILLNEED:
      rep_->file->file()->Hint(RandomAccessFile::WILLNEED);
      break;
    default:
      assert(false);
  }
}

std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
    const {
  return rep_->table_properties;
}

size_t BlockBasedTable::ApproximateMemoryUsage() const {
  size_t usage = 0;
  if (rep_->filter) {
    usage += rep_->filter->ApproximateMemoryUsage();
  }
  if (rep_->index_reader) {
    usage += rep_->index_reader->ApproximateMemoryUsage();
  }
  if (rep_->uncompression_dict_reader) {
    usage += rep_->uncompression_dict_reader->ApproximateMemoryUsage();
  }
  return usage;
}

// Load the meta-block from the file. On success, return the loaded meta block
// and its iterator.
Status BlockBasedTable::ReadMetaBlock(FilePrefetchBuffer* prefetch_buffer,
                                      std::unique_ptr<Block>* meta_block,
                                      std::unique_ptr<InternalIterator>* iter) {
  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
  // it is an empty block.
  std::unique_ptr<Block> meta;
  Status s = ReadBlockFromFile(
      rep_->file.get(), prefetch_buffer, rep_->footer, ReadOptions(),
      rep_->footer.metaindex_handle(), &meta, rep_->ioptions,
      true /* decompress */, true /*maybe_compressed*/, BlockType::kMetaIndex,
      UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options,
      kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */,
      GetMemoryAllocator(rep_->table_options));

  if (!s.ok()) {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Encountered error while reading data from metaindex"
                    " block %s",
                    s.ToString().c_str());
    return s;
  }

  *meta_block = std::move(meta);
  // meta block uses bytewise comparator.
  iter->reset(meta_block->get()->NewDataIterator(BytewiseComparator(),
                                                 BytewiseComparator()));
  return Status::OK();
}

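// BlocklikeTraits abstracts over the two cached block representations used by
// the templated cache helpers below: a raw BlockContents object and a parsed
// Block. It provides a uniform way to construct each type from read contents
// and to query its restart count (used when tracing cache accesses).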
template <typename TBlocklike>
class BlocklikeTraits;

template <>
class BlocklikeTraits<BlockContents> {
 public:
  static BlockContents* Create(BlockContents&& contents,
                               SequenceNumber /* global_seqno */,
                               size_t /* read_amp_bytes_per_bit */,
                               Statistics* /* statistics */) {
    return new BlockContents(std::move(contents));
  }

  static uint32_t GetNumRestarts(const BlockContents& /* contents */) {
    return 0;
  }
};

template <>
class BlocklikeTraits<Block> {
 public:
  static Block* Create(BlockContents&& contents, SequenceNumber global_seqno,
                       size_t read_amp_bytes_per_bit, Statistics* statistics) {
    return new Block(std::move(contents), global_seqno, read_amp_bytes_per_bit,
                     statistics);
  }

  static uint32_t GetNumRestarts(const Block& block) {
    return block.NumRestarts();
  }
};

template <typename TBlocklike>
Status BlockBasedTable::GetDataBlockFromCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    const ReadOptions& read_options, CachableEntry<TBlocklike>* block,
    const UncompressionDict& uncompression_dict, BlockType block_type,
    GetContext* get_context) const {
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  assert(block);
  assert(block->IsEmpty());

  Status s;
  BlockContents* compressed_block = nullptr;
  Cache::Handle* block_cache_compressed_handle = nullptr;

  // Lookup uncompressed cache first
  if (block_cache != nullptr) {
    auto cache_handle = GetEntryFromCache(block_cache, block_cache_key,
                                          block_type, get_context);
    if (cache_handle != nullptr) {
      block->SetCachedValue(
          reinterpret_cast<TBlocklike*>(block_cache->Value(cache_handle)),
          block_cache, cache_handle);
      return s;
    }
  }

  // If not found, search the compressed block cache.
  assert(block->IsEmpty());

  if (block_cache_compressed == nullptr) {
    return s;
  }

  assert(!compressed_block_cache_key.empty());
  block_cache_compressed_handle =
      block_cache_compressed->Lookup(compressed_block_cache_key);

  Statistics* statistics = rep_->ioptions.statistics;

  // if found in the compressed cache, uncompress the block and insert it
  // into the uncompressed cache
  if (block_cache_compressed_handle == nullptr) {
    RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
    return s;
  }

  // found compressed block
  RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
  compressed_block = reinterpret_cast<BlockContents*>(
      block_cache_compressed->Value(block_cache_compressed_handle));
  CompressionType compression_type = compressed_block->get_compression_type();
  assert(compression_type != kNoCompression);

  // Retrieve the uncompressed contents into a new buffer
  BlockContents contents;
  UncompressionContext context(compression_type);
  UncompressionInfo info(context, uncompression_dict, compression_type);
  s = UncompressBlockContents(
      info, compressed_block->data.data(), compressed_block->data.size(),
      &contents, rep_->table_options.format_version, rep_->ioptions,
      GetMemoryAllocator(rep_->table_options));

  // Insert uncompressed block into block cache
  if (s.ok()) {
    std::unique_ptr<TBlocklike> block_holder(
        BlocklikeTraits<TBlocklike>::Create(
            std::move(contents), rep_->get_global_seqno(block_type),
            read_amp_bytes_per_bit, statistics));  // uncompressed block

    if (block_cache != nullptr && block_holder->own_bytes() &&
        read_options.fill_cache) {
      size_t charge = block_holder->ApproximateMemoryUsage();
      Cache::Handle* cache_handle = nullptr;
      s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                              &DeleteCachedEntry<TBlocklike>, &cache_handle);
      if (s.ok()) {
        assert(cache_handle != nullptr);
        block->SetCachedValue(block_holder.release(), block_cache,
                              cache_handle);

        UpdateCacheInsertionMetrics(block_type, get_context, charge);
      } else {
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
      }
    } else {
      block->SetOwnedValue(block_holder.release());
    }
  }

  // Release hold on compressed cache entry
  block_cache_compressed->Release(block_cache_compressed_handle);
  return s;
}

template <typename TBlocklike>
Status BlockBasedTable::PutDataBlockToCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    CachableEntry<TBlocklike>* cached_block, BlockContents* raw_block_contents,
    CompressionType raw_block_comp_type,
    const UncompressionDict& uncompression_dict, SequenceNumber seq_no,
    MemoryAllocator* memory_allocator, BlockType block_type,
    GetContext* get_context) const {
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  const uint32_t format_version = rep_->table_options.format_version;
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
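  // Metadata blocks (filter, index, and compression dictionary) may be
  // inserted with high priority so that, when
  // cache_index_and_filter_blocks_with_high_priority is enabled, they are
  // evicted after data blocks.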
  const Cache::Priority priority =
      rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
              (block_type == BlockType::kFilter ||
               block_type == BlockType::kCompressionDictionary ||
               block_type == BlockType::kIndex)
          ? Cache::Priority::HIGH
          : Cache::Priority::LOW;
  assert(cached_block);
  assert(cached_block->IsEmpty());

  Status s;
  Statistics* statistics = ioptions.statistics;

  std::unique_ptr<TBlocklike> block_holder;
  if (raw_block_comp_type != kNoCompression) {
    // Retrieve the uncompressed contents into a new buffer
    BlockContents uncompressed_block_contents;
    UncompressionContext context(raw_block_comp_type);
    UncompressionInfo info(context, uncompression_dict, raw_block_comp_type);
    s = UncompressBlockContents(info, raw_block_contents->data.data(),
                                raw_block_contents->data.size(),
                                &uncompressed_block_contents, format_version,
                                ioptions, memory_allocator);
    if (!s.ok()) {
      return s;
    }

    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(uncompressed_block_contents), seq_no, read_amp_bytes_per_bit,
        statistics));
  } else {
    block_holder.reset(BlocklikeTraits<TBlocklike>::Create(
        std::move(*raw_block_contents), seq_no, read_amp_bytes_per_bit,
        statistics));
  }

  // Insert compressed block into compressed block cache.
  // Release the hold on the compressed cache entry immediately.
  if (block_cache_compressed != nullptr &&
      raw_block_comp_type != kNoCompression && raw_block_contents != nullptr &&
      raw_block_contents->own_bytes()) {
#ifndef NDEBUG
    assert(raw_block_contents->is_raw_block);
#endif  // NDEBUG

    // We cannot directly put raw_block_contents because this could point to
    // an object on the stack.
    BlockContents* block_cont_for_comp_cache =
        new BlockContents(std::move(*raw_block_contents));
    s = block_cache_compressed->Insert(
        compressed_block_cache_key, block_cont_for_comp_cache,
        block_cont_for_comp_cache->ApproximateMemoryUsage(),
        &DeleteCachedEntry<BlockContents>);
    if (s.ok()) {
      // The compressed block cache now owns the entry; do not delete it here.
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
    } else {
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
      delete block_cont_for_comp_cache;
    }
  }

  // insert into uncompressed block cache
  if (block_cache != nullptr && block_holder->own_bytes()) {
    size_t charge = block_holder->ApproximateMemoryUsage();
    Cache::Handle* cache_handle = nullptr;
    s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                            &DeleteCachedEntry<TBlocklike>, &cache_handle,
                            priority);
    if (s.ok()) {
      assert(cache_handle != nullptr);
      cached_block->SetCachedValue(block_holder.release(), block_cache,
                                   cache_handle);

      UpdateCacheInsertionMetrics(block_type, get_context, charge);
    } else {
      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
    }
  } else {
    cached_block->SetOwnedValue(block_holder.release());
  }

  return s;
}

std::unique_ptr<FilterBlockReader> BlockBasedTable::CreateFilterBlockReader(
    FilePrefetchBuffer* prefetch_buffer, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context) {
  auto& rep = rep_;
  auto filter_type = rep->filter_type;
  if (filter_type == Rep::FilterType::kNoFilter) {
    return std::unique_ptr<FilterBlockReader>();
  }

  assert(rep->filter_policy);

  switch (filter_type) {
    case Rep::FilterType::kPartitionedFilter:
      return PartitionedFilterBlockReader::Create(
          this, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kBlockFilter:
      return BlockBasedFilterBlockReader::Create(
          this, prefetch_buffer, use_cache, prefetch, pin, lookup_context);

    case Rep::FilterType::kFullFilter:
      return FullFilterBlockReader::Create(this, prefetch_buffer, use_cache,
                                           prefetch, pin, lookup_context);

    default:
      // filter_type is either kNoFilter (exited the function at the first if),
      // or it must be covered in this switch block
      assert(false);
      return std::unique_ptr<FilterBlockReader>();
  }
}

// disable_prefix_seek should be set to true when prefix_extractor found in SST
// differs from the one in mutable_cf_options and index type is HashBasedIndex
InternalIteratorBase<IndexValue>* BlockBasedTable::NewIndexIterator(
    const ReadOptions& read_options, bool disable_prefix_seek,
    IndexBlockIter* input_iter, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  assert(rep_ != nullptr);
  assert(rep_->index_reader != nullptr);

  // We don't return pinned data from index blocks, so no need
  // to set `block_contents_pinned`.
  return rep_->index_reader->NewIterator(read_options, disable_prefix_seek,
                                         input_iter, get_context,
                                         lookup_context);
}

// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
// If input_iter is null, a new iterator is allocated and returned
// If input_iter is not null, this iterator is updated and returned
template <typename TBlockIter>
TBlockIter* BlockBasedTable::NewDataBlockIterator(
    const ReadOptions& ro, const BlockHandle& handle, TBlockIter* input_iter,
    BlockType block_type, GetContext* get_context,
    BlockCacheLookupContext* lookup_context, Status s,
    FilePrefetchBuffer* prefetch_buffer, bool for_compaction) const {
  PERF_TIMER_GUARD(new_table_block_iter_nanos);

  TBlockIter* iter = input_iter != nullptr ? input_iter : new TBlockIter;
  if (!s.ok()) {
    iter->Invalidate(s);
    return iter;
  }

  UncompressionDict uncompression_dict;
  if (rep_->uncompression_dict_reader) {
    const bool no_io = (ro.read_tier == kBlockCacheTier);
    s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
        prefetch_buffer, no_io, get_context, lookup_context,
        &uncompression_dict);
    if (!s.ok()) {
      iter->Invalidate(s);
      return iter;
    }
  }

  CachableEntry<Block> block;
  s = RetrieveBlock(prefetch_buffer, ro, handle, uncompression_dict, &block,
                    block_type, get_context, lookup_context, for_compaction);

  if (!s.ok()) {
    assert(block.IsEmpty());
    iter->Invalidate(s);
    return iter;
  }

  assert(block.GetValue() != nullptr);

  // Block contents are pinned and it is still pinned after the iterator
  // is destroyed as long as cleanup functions are moved to another object,
  // when:
  // 1. block cache handle is set to be released in cleanup function, or
  // 2. it's pointing to immortal source. If own_bytes is true then we are
  //    not reading data from the original source, whether immortal or not.
  //    Otherwise, the block is pinned iff the source is immortal.
  const bool block_contents_pinned =
      block.IsCached() ||
      (!block.GetValue()->own_bytes() && rep_->immortal_table);
  iter = InitBlockIterator<TBlockIter>(rep_, block.GetValue(), iter,
                                       block_contents_pinned);

  if (!block.IsCached()) {
    if (!ro.fill_cache && rep_->cache_key_prefix_size != 0) {
      // insert a dummy record to block cache to track the memory usage
      Cache* const block_cache = rep_->table_options.block_cache.get();
      Cache::Handle* cache_handle = nullptr;
      // There are two other types of cache keys: 1) SST cache key added in
      // `MaybeReadBlockAndLoadToCache` 2) dummy cache key added in
      // `write_buffer_manager`. Use longer prefix (41 bytes) to differentiate
      // from SST cache key(31 bytes), and use non-zero prefix to
      // differentiate from `write_buffer_manager`
      const size_t kExtraCacheKeyPrefix = kMaxVarint64Length * 4 + 1;
      char cache_key[kExtraCacheKeyPrefix + kMaxVarint64Length];
      // Prefix: use rep_->cache_key_prefix padded by 0s
      memset(cache_key, 0, kExtraCacheKeyPrefix + kMaxVarint64Length);
      assert(rep_->cache_key_prefix_size != 0);
      assert(rep_->cache_key_prefix_size <= kExtraCacheKeyPrefix);
      memcpy(cache_key, rep_->cache_key_prefix, rep_->cache_key_prefix_size);
      char* end = EncodeVarint64(cache_key + kExtraCacheKeyPrefix,
                                 next_cache_key_id_++);
      assert(end - cache_key <=
             static_cast<int>(kExtraCacheKeyPrefix + kMaxVarint64Length));
      const Slice unique_key(cache_key, static_cast<size_t>(end - cache_key));
      s = block_cache->Insert(unique_key, nullptr,
                              block.GetValue()->ApproximateMemoryUsage(),
                              nullptr, &cache_handle);

      if (s.ok()) {
        assert(cache_handle != nullptr);
        iter->RegisterCleanup(&ForceReleaseCachedEntry, block_cache,
                              cache_handle);
      }
    }
  } else {
    iter->SetCacheHandle(block.GetCacheHandle());
  }

  block.TransferTo(iter);

  return iter;
}

template <>
DataBlockIter* BlockBasedTable::InitBlockIterator<DataBlockIter>(
    const Rep* rep, Block* block, DataBlockIter* input_iter,
    bool block_contents_pinned) {
  return block->NewDataIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      input_iter, rep->ioptions.statistics, block_contents_pinned);
}

template <>
IndexBlockIter* BlockBasedTable::InitBlockIterator<IndexBlockIter>(
    const Rep* rep, Block* block, IndexBlockIter* input_iter,
    bool block_contents_pinned) {
  return block->NewIndexIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      input_iter, rep->ioptions.statistics, /* total_order_seek */ true,
      rep->index_has_first_key, rep->index_key_includes_seq,
      rep->index_value_is_full, block_contents_pinned);
}

// Convert an uncompressed data block (i.e., CachableEntry<Block>)
// into an iterator over the contents of the corresponding block.
// If input_iter is null, a new iterator is allocated and returned
// If input_iter is not null, this iterator is updated and returned
template <typename TBlockIter>
TBlockIter* BlockBasedTable::NewDataBlockIterator(
    const ReadOptions& ro, CachableEntry<Block>& block, TBlockIter* input_iter,
    Status s) const {
  PERF_TIMER_GUARD(new_table_block_iter_nanos);

  TBlockIter* iter = input_iter != nullptr ? input_iter : new TBlockIter;
  if (!s.ok()) {
    iter->Invalidate(s);
    return iter;
  }

  assert(block.GetValue() != nullptr);
  // Block contents are pinned and it is still pinned after the iterator
  // is destroyed as long as cleanup functions are moved to another object,
  // when:
  // 1. block cache handle is set to be released in cleanup function, or
  // 2. it's pointing to immortal source. If own_bytes is true then we are
  //    not reading data from the original source, whether immortal or not.
  //    Otherwise, the block is pinned iff the source is immortal.
  const bool block_contents_pinned =
      block.IsCached() ||
      (!block.GetValue()->own_bytes() && rep_->immortal_table);
  iter = InitBlockIterator<TBlockIter>(rep_, block.GetValue(), iter,
                                       block_contents_pinned);

  if (!block.IsCached()) {
    if (!ro.fill_cache && rep_->cache_key_prefix_size != 0) {
      // insert a dummy record to block cache to track the memory usage
      Cache* const block_cache = rep_->table_options.block_cache.get();
      Cache::Handle* cache_handle = nullptr;
      // There are two other types of cache keys: 1) SST cache key added in
      // `MaybeReadBlockAndLoadToCache` 2) dummy cache key added in
      // `write_buffer_manager`. Use longer prefix (41 bytes) to differentiate
      // from SST cache key(31 bytes), and use non-zero prefix to
      // differentiate from `write_buffer_manager`
      const size_t kExtraCacheKeyPrefix = kMaxVarint64Length * 4 + 1;
      char cache_key[kExtraCacheKeyPrefix + kMaxVarint64Length];
      // Prefix: use rep_->cache_key_prefix padded by 0s
      memset(cache_key, 0, kExtraCacheKeyPrefix + kMaxVarint64Length);
      assert(rep_->cache_key_prefix_size != 0);
      assert(rep_->cache_key_prefix_size <= kExtraCacheKeyPrefix);
      memcpy(cache_key, rep_->cache_key_prefix, rep_->cache_key_prefix_size);
      char* end = EncodeVarint64(cache_key + kExtraCacheKeyPrefix,
                                 next_cache_key_id_++);
      assert(end - cache_key <=
             static_cast<int>(kExtraCacheKeyPrefix + kMaxVarint64Length));
      const Slice unique_key(cache_key, static_cast<size_t>(end - cache_key));
      s = block_cache->Insert(unique_key, nullptr,
                              block.GetValue()->ApproximateMemoryUsage(),
                              nullptr, &cache_handle);
      if (s.ok()) {
        assert(cache_handle != nullptr);
        iter->RegisterCleanup(&ForceReleaseCachedEntry, block_cache,
                              cache_handle);
      }
    }
  } else {
    iter->SetCacheHandle(block.GetCacheHandle());
  }

  block.TransferTo(iter);
  return iter;
}

// Look up the cache for the given data block referenced by an index iterator
// value (i.e., BlockHandle). If it exists in the cache, initialize block to
// the contents of the data block.
Status BlockBasedTable::GetDataBlockFromCache(
    const ReadOptions& ro, const BlockHandle& handle,
    const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block, BlockType block_type,
    GetContext* get_context) const {
  BlockCacheLookupContext lookup_data_block_context(
      TableReaderCaller::kUserMultiGet);
  Status s = RetrieveBlock(nullptr, ro, handle, uncompression_dict, block,
                    block_type, get_context, &lookup_data_block_context);
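  // RetrieveBlock returns Status::Incomplete() when the read options disallow
  // I/O and the block is not already cached; treat that as a clean cache miss
  // so the caller can go on to read the block from the file.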
  if (s.IsIncomplete()) {
    s = Status::OK();
  }

  return s;
}

// If contents is nullptr, this function looks up the block caches for the
// data block referenced by handle, and reads the block from disk if necessary.
// If contents is non-null, it skips the cache lookup and disk read, since
// the caller has already read it. In both cases, if ro.fill_cache is true,
// it inserts the block into the block cache.
template <typename TBlocklike>
Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    BlockContents* contents) const {
  assert(block_entry != nullptr);
  const bool no_io = (ro.read_tier == kBlockCacheTier);
  Cache* block_cache = rep_->table_options.block_cache.get();
  // No point in caching compressed blocks if the table never goes away
  Cache* block_cache_compressed =
      rep_->immortal_table ? nullptr
                           : rep_->table_options.block_cache_compressed.get();

  // First, try to get the block from the cache
  //
  // If either block cache is enabled, we'll try to read from it.
  Status s;
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice key /* key to the block cache */;
  Slice ckey /* key to the compressed block cache */;
  bool is_cache_hit = false;
  if (block_cache != nullptr || block_cache_compressed != nullptr) {
    // create key for block cache
    if (block_cache != nullptr) {
      key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                        handle, cache_key);
    }

    if (block_cache_compressed != nullptr) {
      ckey = GetCacheKey(rep_->compressed_cache_key_prefix,
                         rep_->compressed_cache_key_prefix_size, handle,
                         compressed_cache_key);
    }

    if (!contents) {
      s = GetDataBlockFromCache(key, ckey, block_cache, block_cache_compressed,
                                ro, block_entry, uncompression_dict, block_type,
                                get_context);
      if (block_entry->GetValue()) {
        // TODO(haoyu): Differentiate cache hit on uncompressed block cache and
        // compressed block cache.
        is_cache_hit = true;
      }
    }

    // Couldn't find the block in the cache. If I/O is allowed, read it from
    // the file.
    if (block_entry->GetValue() == nullptr && !no_io && ro.fill_cache) {
      Statistics* statistics = rep_->ioptions.statistics;
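      // Filter and compression-dictionary blocks are written uncompressed, so
      // decompression is only relevant for the remaining block types, and
      // only if the table contains compressed blocks at all.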
      const bool maybe_compressed =
          block_type != BlockType::kFilter &&
          block_type != BlockType::kCompressionDictionary &&
          rep_->blocks_maybe_compressed;
      const bool do_uncompress = maybe_compressed && !block_cache_compressed;
      CompressionType raw_block_comp_type;
      BlockContents raw_block_contents;
      if (!contents) {
        StopWatch sw(rep_->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
        BlockFetcher block_fetcher(
            rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle,
            &raw_block_contents, rep_->ioptions, do_uncompress,
            maybe_compressed, block_type, uncompression_dict,
            rep_->persistent_cache_options,
            GetMemoryAllocator(rep_->table_options),
            GetMemoryAllocatorForCompressedBlock(rep_->table_options));
        s = block_fetcher.ReadBlockContents();
        raw_block_comp_type = block_fetcher.get_compression_type();
        contents = &raw_block_contents;
      } else {
        raw_block_comp_type = contents->get_compression_type();
      }

      if (s.ok()) {
        SequenceNumber seq_no = rep_->get_global_seqno(block_type);
        // If filling cache is allowed and a cache is configured, try to put the
        // block to the cache.
        s = PutDataBlockToCache(key, ckey, block_cache, block_cache_compressed,
                                block_entry, contents,
                                raw_block_comp_type, uncompression_dict, seq_no,
                                GetMemoryAllocator(rep_->table_options),
                                block_type, get_context);
      }
    }
  }

  // Fill lookup_context.
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled() &&
      lookup_context) {
    size_t usage = 0;
    uint64_t nkeys = 0;
    if (block_entry->GetValue()) {
      // Approximate the number of keys in the block using restarts.
      nkeys =
          rep_->table_options.block_restart_interval *
          BlocklikeTraits<TBlocklike>::GetNumRestarts(*block_entry->GetValue());
      usage = block_entry->GetValue()->ApproximateMemoryUsage();
    }
    TraceType trace_block_type = TraceType::kTraceMax;
    switch (block_type) {
      case BlockType::kData:
        trace_block_type = TraceType::kBlockTraceDataBlock;
        break;
      case BlockType::kFilter:
        trace_block_type = TraceType::kBlockTraceFilterBlock;
        break;
      case BlockType::kCompressionDictionary:
        trace_block_type = TraceType::kBlockTraceUncompressionDictBlock;
        break;
      case BlockType::kRangeDeletion:
        trace_block_type = TraceType::kBlockTraceRangeDeletionBlock;
        break;
      case BlockType::kIndex:
        trace_block_type = TraceType::kBlockTraceIndexBlock;
        break;
      default:
        // This cannot happen.
        assert(false);
        break;
    }
    bool no_insert = no_io || !ro.fill_cache;
    if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(
            trace_block_type, lookup_context->caller)) {
      // Defer logging the access to Get() and MultiGet() to trace additional
      // information, e.g., referenced_key_exist_in_block.

      // Make a copy of the block key here since it will be logged later.
      lookup_context->FillLookupContext(
          is_cache_hit, no_insert, trace_block_type,
          /*block_size=*/usage, /*block_key=*/key.ToString(), nkeys);
    } else {
      // Avoid making copy of block_key and cf_name when constructing the access
      // record.
      BlockCacheTraceRecord access_record(
          rep_->ioptions.env->NowMicros(),
          /*block_key=*/"", trace_block_type,
          /*block_size=*/usage, rep_->cf_id_for_tracing(),
          /*cf_name=*/"", rep_->level_for_tracing(),
          rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
2236 2237 2238
          no_insert, lookup_context->get_id,
          lookup_context->get_from_user_specified_snapshot,
          /*referenced_key=*/"");
2239 2240
      block_cache_tracer_->WriteBlockAccess(access_record, key,
                                            rep_->cf_name_for_tracing(),
2241
                                            lookup_context->referenced_key);
2242 2243 2244
    }
  }

  assert(s.ok() || block_entry->GetValue() == nullptr);
  return s;
}

// This function reads multiple data blocks from disk using Env::MultiRead()
// and optionally inserts them into the block cache. It uses the scratch
// buffer provided by the caller, which is contiguous. If scratch is a nullptr
// it allocates a separate buffer for each block. Typically, if the blocks
// need to be uncompressed and there is no compressed block cache, callers
// can allocate a temporary scratch buffer in order to minimize memory
// allocations.
// If options.fill_cache is true, it inserts the blocks into cache. If it's
// false and scratch is non-null and the blocks are uncompressed, it copies
// the buffers to heap. In any case, the CachableEntry<Block> returned will
// own the data bytes.
// batch - A MultiGetRange with only those keys with unique data blocks not
//         found in cache
// handles - A vector of block handles. Some of them may be NULL handles
// scratch - An optional contiguous buffer to read compressed blocks into
void BlockBasedTable::MaybeLoadBlocksToCache(
    const ReadOptions& options,
    const MultiGetRange* batch,
    const autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE>*  handles,
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE>* statuses,
    autovector<
      CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE>* results,
    char* scratch,
    const UncompressionDict& uncompression_dict) const {

  RandomAccessFileReader* file = rep_->file.get();
  const Footer& footer = rep_->footer;
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  SequenceNumber global_seqno = rep_->get_global_seqno(BlockType::kData);
  size_t read_amp_bytes_per_bit = rep_->table_options.read_amp_bytes_per_bit;
  MemoryAllocator* memory_allocator = GetMemoryAllocator(rep_->table_options);

  if (file->use_direct_io() || ioptions.allow_mmap_reads) {
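    // Direct I/O and mmap reads do not use the MultiRead batching below; read
    // each block individually through RetrieveBlock, which also consults the
    // block cache.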
    size_t idx_in_batch = 0;
    for (auto mget_iter = batch->begin(); mget_iter != batch->end();
         ++mget_iter, ++idx_in_batch) {
      BlockCacheLookupContext lookup_data_block_context(
          TableReaderCaller::kUserMultiGet);
      const BlockHandle& handle = (*handles)[idx_in_batch];
      if (handle.IsNull()) {
        continue;
      }

      (*statuses)[idx_in_batch] = RetrieveBlock(nullptr, options, handle,
            uncompression_dict, &(*results)[idx_in_batch], BlockType::kData,
            mget_iter->get_context, &lookup_data_block_context);
    }
    return;
  }

  autovector<ReadRequest, MultiGetContext::MAX_BATCH_SIZE> read_reqs;
  size_t buf_offset = 0;
  size_t idx_in_batch = 0;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];
    if (handle.IsNull()) {
      continue;
    }

    ReadRequest req;
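    // Each read covers the block payload plus the block trailer (a 1-byte
    // compression type followed by a 4-byte checksum).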
    req.len = handle.size() + kBlockTrailerSize;
    if (scratch == nullptr) {
      req.scratch = new char[req.len];
    } else {
      req.scratch = scratch + buf_offset;
      buf_offset += req.len;
    }
    req.offset = handle.offset();
    req.status = Status::OK();
    read_reqs.emplace_back(req);
  }

  file->MultiRead(&read_reqs[0], read_reqs.size());

  size_t read_req_idx = 0;
  idx_in_batch = 0;
  for (auto mget_iter = batch->begin(); mget_iter != batch->end();
       ++mget_iter, ++idx_in_batch) {
    const BlockHandle& handle = (*handles)[idx_in_batch];

    if (handle.IsNull()) {
      continue;
    }

    ReadRequest& req = read_reqs[read_req_idx++];
    Status s = req.status;
    if (s.ok()) {
      if (req.result.size() != handle.size() + kBlockTrailerSize) {
        s = Status::Corruption("truncated block read from " +
                               rep_->file->file_name() + " offset " +
                               ToString(handle.offset()) + ", expected " +
                               ToString(handle.size() + kBlockTrailerSize) +
                               " bytes, got " + ToString(req.result.size()));
      }
    }

    BlockContents raw_block_contents;
    if (s.ok()) {
      if (scratch == nullptr) {
        // We allocated a buffer for this block. Give ownership of it to
        // BlockContents so it can free the memory
        assert(req.result.data() == req.scratch);
        std::unique_ptr<char[]> raw_block(req.scratch);
        raw_block_contents = BlockContents(std::move(raw_block),
                                 handle.size());
      } else {
        // We used the scratch buffer, so no need to free anything
        raw_block_contents = BlockContents(Slice(req.scratch,
                                 handle.size()));
      }
#ifndef NDEBUG
      raw_block_contents.is_raw_block = true;
#endif
      if (options.verify_checksums) {
        PERF_TIMER_GUARD(block_checksum_time);
        const char* data = req.result.data();
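        // The stored checksum covers the block contents plus the 1-byte
        // compression type, hence the handle.size() + 1 below; the expected
        // value sits in the last four bytes of the trailer.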
        uint32_t expected = DecodeFixed32(data + handle.size() + 1);
        s = rocksdb::VerifyChecksum(footer.checksum(), req.result.data(),
                                    handle.size() + 1, expected);
      }
    }
    if (s.ok()) {
      if (options.fill_cache) {
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet);
        CachableEntry<Block>* block_entry = &(*results)[idx_in_batch];
        // MaybeReadBlockAndLoadToCache will insert into the block caches if
        // necessary. Since we're passing the raw block contents, it will
        // avoid looking up the block cache
        s = MaybeReadBlockAndLoadToCache(nullptr, options, handle,
              uncompression_dict, block_entry, BlockType::kData,
              mget_iter->get_context, &lookup_data_block_context,
              &raw_block_contents);
      } else {
        CompressionType compression_type =
                raw_block_contents.get_compression_type();
        BlockContents contents;
        if (compression_type != kNoCompression) {
          UncompressionContext context(compression_type);
          UncompressionInfo info(context, uncompression_dict, compression_type);
          s = UncompressBlockContents(info, req.result.data(), handle.size(),
                    &contents, footer.version(), rep_->ioptions,
                    memory_allocator);
        } else {
          if (scratch != nullptr) {
            // If we used the scratch buffer, then the contents need to be
            // copied to heap
            Slice raw = Slice(req.result.data(), handle.size());
            contents = BlockContents(CopyBufferToHeap(
                  GetMemoryAllocator(rep_->table_options), raw),
                  handle.size());
          } else {
            contents = std::move(raw_block_contents);
          }
        }
        if (s.ok()) {
          (*results)[idx_in_batch].SetOwnedValue(new Block(std::move(contents),
                global_seqno, read_amp_bytes_per_bit, ioptions.statistics));
        }
      }
    }
    (*statuses)[idx_in_batch] = s;
  }
}

template <typename TBlocklike>
Status BlockBasedTable::RetrieveBlock(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<TBlocklike>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction) const {
  assert(block_entry);
  assert(block_entry->IsEmpty());

  Status s;
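  // Consult the block cache only for block types that are configured to live
  // there: data and range-deletion blocks always, and filter, index, and
  // compression-dictionary blocks only when cache_index_and_filter_blocks is
  // set.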
  if (rep_->table_options.cache_index_and_filter_blocks ||
      (block_type != BlockType::kFilter &&
       block_type != BlockType::kCompressionDictionary &&
       block_type != BlockType::kIndex)) {
    s = MaybeReadBlockAndLoadToCache(prefetch_buffer, ro, handle,
                                     uncompression_dict, block_entry,
                                     block_type, get_context, lookup_context,
                                     /*contents=*/nullptr);

    if (!s.ok()) {
      return s;
    }

    if (block_entry->GetValue() != nullptr) {
      assert(s.ok());
      return s;
    }
  }

  assert(block_entry->IsEmpty());

  const bool no_io = ro.read_tier == kBlockCacheTier;
  if (no_io) {
    return Status::Incomplete("no blocking io");
  }

  const bool maybe_compressed =
      block_type != BlockType::kFilter &&
      block_type != BlockType::kCompressionDictionary &&
      rep_->blocks_maybe_compressed;
  const bool do_uncompress = maybe_compressed;
  std::unique_ptr<TBlocklike> block;

  {
    StopWatch sw(rep_->ioptions.env, rep_->ioptions.statistics,
                 READ_BLOCK_GET_MICROS);
    s = ReadBlockFromFile(
        rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle, &block,
        rep_->ioptions, do_uncompress, maybe_compressed, block_type,
        uncompression_dict, rep_->persistent_cache_options,
        rep_->get_global_seqno(block_type),
        block_type == BlockType::kData
            ? rep_->table_options.read_amp_bytes_per_bit
            : 0,
        GetMemoryAllocator(rep_->table_options), for_compaction);
  }

  if (!s.ok()) {
    return s;
  }

  block_entry->SetOwnedValue(block.release());

  assert(s.ok());
  return s;
}

// Explicitly instantiate templates for both "blocklike" types we use.
// This makes it possible to keep the template definitions in the .cc file.
template Status BlockBasedTable::RetrieveBlock<BlockContents>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<BlockContents>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction) const;

template Status BlockBasedTable::RetrieveBlock<Block>(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction) const;

BlockBasedTable::PartitionedIndexIteratorState::PartitionedIndexIteratorState(
    const BlockBasedTable* table,
    std::unordered_map<uint64_t, CachableEntry<Block>>* block_map)
    : table_(table), block_map_(block_map) {}

InternalIteratorBase<IndexValue>*
BlockBasedTable::PartitionedIndexIteratorState::NewSecondaryIterator(
    const BlockHandle& handle) {
  // Return a block iterator on the index partition
  auto block = block_map_->find(handle.offset());
  // This is a possible scenario since block cache might not have had space
  // for the partition
  if (block != block_map_->end()) {
    const Rep* rep = table_->get_rep();
    assert(rep);

    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    return block->second.GetValue()->NewIndexIterator(
        &rep->internal_comparator, rep->internal_comparator.user_comparator(),
        nullptr, kNullStats, true, rep->index_has_first_key,
        rep->index_key_includes_seq, rep->index_value_is_full);
  }
  // Create an empty iterator
  return new IndexBlockIter();
}

// This will be broken if the user specifies an unusual implementation
// of Options.comparator, or if the user specifies an unusual
// definition of prefixes in BlockBasedTableOptions.filter_policy.
// In particular, we require the following three properties:
//
// 1) key.starts_with(prefix(key))
// 2) Compare(prefix(key), key) <= 0.
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
//
// Otherwise, this method guarantees no I/O will be incurred.
//
// REQUIRES: this method shouldn't be called while the DB lock is held.
bool BlockBasedTable::PrefixMayMatch(
    const Slice& internal_key, const ReadOptions& read_options,
    const SliceTransform* options_prefix_extractor,
    const bool need_upper_bound_check,
    BlockCacheLookupContext* lookup_context) const {
  if (!rep_->filter_policy) {
    return true;
  }

  const SliceTransform* prefix_extractor;

  if (rep_->table_prefix_extractor == nullptr) {
    if (need_upper_bound_check) {
      return true;
    }
    prefix_extractor = options_prefix_extractor;
  } else {
    prefix_extractor = rep_->table_prefix_extractor.get();
  }
  auto user_key = ExtractUserKey(internal_key);
  if (!prefix_extractor->InDomain(user_key)) {
    return true;
  }

  bool may_match = true;
  Status s;

  // First, try check with full filter
  FilterBlockReader* const filter = rep_->filter.get();
  bool filter_checked = true;
  if (filter != nullptr) {
    if (!filter->IsBlockBased()) {
      const Slice* const const_ikey_ptr = &internal_key;
      may_match = filter->RangeMayExist(
          read_options.iterate_upper_bound, user_key, prefix_extractor,
          rep_->internal_comparator.user_comparator(), const_ikey_ptr,
          &filter_checked, need_upper_bound_check, lookup_context);
    } else {
      // if prefix_extractor changed for block based filter, skip filter
      if (need_upper_bound_check) {
        return true;
      }
      auto prefix = prefix_extractor->Transform(user_key);
      InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
      auto internal_prefix = internal_key_prefix.Encode();

      // To prevent any io operation in this method, we set `read_tier` to make
      // sure we always read index or filter only when they have already been
      // loaded to memory.
      ReadOptions no_io_read_options;
      no_io_read_options.read_tier = kBlockCacheTier;

      // Then, try to find it within each block
      // we already know prefix_extractor and prefix_extractor_name must match
      // because `CheckPrefixMayMatch` first checks `check_filter_ == true`
      std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
          no_io_read_options,
          /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
          /*get_context=*/nullptr, lookup_context));
      iiter->Seek(internal_prefix);

      if (!iiter->Valid()) {
        // we're past end of file
        // if it's incomplete, it means that we avoided I/O
        // and we're not really sure that we're past the end
        // of the file
        may_match = iiter->status().IsIncomplete();
      } else if ((rep_->index_key_includes_seq ? ExtractUserKey(iiter->key())
                                               : iiter->key())
                     .starts_with(ExtractUserKey(internal_prefix))) {
        // we need to check for this subtle case because our only
        // guarantee is that "the key is a string >= last key in that data
        // block" according to the doc/table_format.txt spec.
        //
        // Suppose iiter->key() starts with the desired prefix; it is not
        // necessarily the case that the corresponding data block will
        // contain the prefix, since iiter->key() need not be in the
        // block.  However, the next data block may contain the prefix, so
        // we return true to play it safe.
        may_match = true;
      } else if (filter->IsBlockBased()) {
        // iiter->key() does NOT start with the desired prefix.  Because
        // Seek() finds the first key that is >= the seek target, this
        // means that iiter->key() > prefix.  Thus, any data blocks coming
        // after the data block corresponding to iiter->key() cannot
        // possibly contain keys with the prefix.  Thus, the data block
        // corresponding to iiter->key() is the only one that could
        // potentially contain the prefix.
        BlockHandle handle = iiter->value().handle;
        may_match = filter->PrefixMayMatch(
            prefix, prefix_extractor, handle.offset(), /*no_io=*/false,
            /*const_key_ptr=*/nullptr, /*get_context=*/nullptr, lookup_context);
      }
    }
  }

  if (filter_checked) {
    Statistics* statistics = rep_->ioptions.statistics;
    RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
    if (!may_match) {
      RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
    }
  }

  return may_match;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Seek(const Slice& target) {
  SeekImpl(&target);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToFirst() {
  SeekImpl(nullptr);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekImpl(
    const Slice* target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  if (target && !CheckPrefixMayMatch(*target)) {
    ResetDataIter();
    return;
  }

  bool need_seek_index = true;
  if (block_iter_points_to_real_block_ && block_iter_.Valid()) {
    // Reseek.
    prev_block_offset_ = index_iter_->value().handle.offset();

    if (target) {
      // We can avoid an index seek if:
      // 1. The new seek key is larger than the current key
      // 2. The new seek key is within the upper bound of the block
      // Since we don't necessarily know the internal key for either
      // the current key or the upper bound, we check user keys and
      // exclude the equality case. Considering internal keys could improve
      // the handling of boundary cases, but it would complicate the code.
      if (user_comparator_.Compare(ExtractUserKey(*target),
                                   block_iter_.user_key()) > 0 &&
          user_comparator_.Compare(ExtractUserKey(*target),
                                   index_iter_->user_key()) < 0) {
        need_seek_index = false;
      }
    }
  }

  if (need_seek_index) {
    if (target) {
      index_iter_->Seek(*target);
    } else {
      index_iter_->SeekToFirst();
    }

    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  IndexValue v = index_iter_->value();
  const bool same_block = block_iter_points_to_real_block_ &&
                          v.handle.offset() == prev_block_offset_;

  // TODO(kolmike): Remove the != kBlockCacheTier condition.
  if (!v.first_internal_key.empty() && !same_block &&
      (!target || icomp_.Compare(*target, v.first_internal_key) <= 0) &&
      read_options_.read_tier != kBlockCacheTier) {
    // Index contains the first key of the block, and it's >= target.
    // We can defer reading the block.
    is_at_first_key_from_index_ = true;
    ResetDataIter();
  } else {
    // Need to use the data block.
    if (!same_block) {
      InitDataBlock();
    }

    if (target) {
      block_iter_.Seek(*target);
    } else {
      block_iter_.SeekToFirst();
    }
    FindKeyForward();
  }

  CheckDataBlockWithinUpperBound();
  CheckOutOfBound();

  if (target) {
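    // Sanity check: if the iterator is valid after the seek, the key it
    // points at must not be smaller than the target. User keys are compared
    // when this iterator walks index blocks whose keys omit sequence numbers.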
    assert(!Valid() || ((block_type_ == BlockType::kIndex &&
                         !table_->get_rep()->index_key_includes_seq)
                            ? (user_comparator_.Compare(ExtractUserKey(*target),
                                                        key()) <= 0)
                            : (icomp_.Compare(*target, key()) <= 0)));
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekForPrev(
    const Slice& target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  if (!CheckPrefixMayMatch(target)) {
    ResetDataIter();
    return;
  }

  SavePrevIndexValue();

  // Call Seek() rather than SeekForPrev() in the index block, because the
  // target data block will likely contain the position for `target`, the
  // same as Seek(), rather than the block before it.
  // For example, if we have three data blocks, each containing two keys:
  //   [2, 4]  [6, 8] [10, 12]
  //  (the keys in the index block would be [4, 8, 12])
  // and the user calls SeekForPrev(7), we need to go to the second block,
  // just like if they call Seek(7).
  // The only case where the block is different is when they seek to a position
  // on the boundary. For example, if they SeekForPrev(5), we should go to the
  // first block, rather than the second. However, we don't have the information
  // to distinguish the two unless we read the second block. In this case, we'll
  // end up with reading two blocks.
  index_iter_->Seek(target);

  if (!index_iter_->Valid()) {
    if (!index_iter_->status().ok()) {
      ResetDataIter();
      return;
    }

    index_iter_->SeekToLast();
    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  InitDataBlock();

  block_iter_.SeekForPrev(target);

  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
  assert(!block_iter_.Valid() ||
         icomp_.Compare(target, block_iter_.key()) >= 0);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToLast() {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  SavePrevIndexValue();
  index_iter_->SeekToLast();
  if (!index_iter_->Valid()) {
    ResetDataIter();
    return;
  }
  InitDataBlock();
  block_iter_.SeekToLast();
  FindKeyBackward();
  CheckDataBlockWithinUpperBound();
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Next() {
  if (is_at_first_key_from_index_ && !MaterializeCurrentBlock()) {
    return;
  }
  assert(block_iter_points_to_real_block_);
  block_iter_.Next();
  FindKeyForward();
  CheckOutOfBound();
}

template <class TBlockIter, typename TValue>
bool BlockBasedTableIterator<TBlockIter, TValue>::NextAndGetResult(
    IterateResult* result) {
  Next();
  bool is_valid = Valid();
  if (is_valid) {
    result->key = key();
    result->may_be_out_of_upper_bound = MayBeOutOfUpperBound();
  }
  return is_valid;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Prev() {
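  // If the iterator was lazily positioned at the first key of the current
  // block (without reading the block), moving backwards means the previous
  // entry lives in the preceding data block: step the index iterator back
  // and seek to the last key of that block.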
  if (is_at_first_key_from_index_) {
    is_at_first_key_from_index_ = false;

    index_iter_->Prev();
    if (!index_iter_->Valid()) {
      return;
    }

    InitDataBlock();
    block_iter_.SeekToLast();
  } else {
    assert(block_iter_points_to_real_block_);
    block_iter_.Prev();
  }

  FindKeyBackward();
}

// Found that 256 KB readahead size provides the best performance, based on
// experiments, for auto readahead. Experiment data is in PR #3282.
template <class TBlockIter, typename TValue>
const size_t
    BlockBasedTableIterator<TBlockIter, TValue>::kMaxAutoReadaheadSize =
        256 * 1024;

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::InitDataBlock() {
  BlockHandle data_block_handle = index_iter_->value().handle;
  if (!block_iter_points_to_real_block_ ||
      data_block_handle.offset() != prev_block_offset_ ||
      // if previous attempt of reading the block missed cache, try again
      block_iter_.status().IsIncomplete()) {
    if (block_iter_points_to_real_block_) {
      ResetDataIter();
    }
    auto* rep = table_->get_rep();

    // Prefetch additional data for range scans (iterators). Enabled only for
    // user reads.
    // Implicit auto readahead:
    //   Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
    // Explicit user requested readahead:
    //   Enabled from the very first IO when ReadOptions.readahead_size is set.
    if (lookup_context_.caller != TableReaderCaller::kCompaction) {
      if (read_options_.readahead_size == 0) {
        // Implicit auto readahead
        num_file_reads_++;
        if (num_file_reads_ > kMinNumFileReadsToStartAutoReadahead) {
          if (!rep->file->use_direct_io() &&
              (data_block_handle.offset() +
                   static_cast<size_t>(data_block_handle.size()) +
                   kBlockTrailerSize >
               readahead_limit_)) {
            // Buffered I/O
            // Discarding the return status of Prefetch calls intentionally, as
            // we can fallback to reading from disk if Prefetch fails.
            rep->file->Prefetch(data_block_handle.offset(), readahead_size_);
            readahead_limit_ = static_cast<size_t>(data_block_handle.offset() +
                                                   readahead_size_);
            // Keep exponentially increasing readahead size until
            // kMaxAutoReadaheadSize.
            readahead_size_ =
                std::min(kMaxAutoReadaheadSize, readahead_size_ * 2);
          } else if (rep->file->use_direct_io() && !prefetch_buffer_) {
            // Direct I/O
            // Let FilePrefetchBuffer take care of the readahead.
            prefetch_buffer_.reset(
                new FilePrefetchBuffer(rep->file.get(), kInitAutoReadaheadSize,
                                       kMaxAutoReadaheadSize));
          }
        }
      } else if (!prefetch_buffer_) {
        // Explicit user requested readahead
        // The actual condition is:
        // if (read_options_.readahead_size != 0 && !prefetch_buffer_)
        prefetch_buffer_.reset(new FilePrefetchBuffer(
            rep->file.get(), read_options_.readahead_size,
            read_options_.readahead_size));
      }
    } else if (!prefetch_buffer_) {
      prefetch_buffer_.reset(
          new FilePrefetchBuffer(rep->file.get(), compaction_readahead_size_,
                                 compaction_readahead_size_));
    }

    Status s;
    table_->NewDataBlockIterator<TBlockIter>(
        read_options_, data_block_handle, &block_iter_, block_type_,
        /*get_context=*/nullptr, &lookup_context_, s, prefetch_buffer_.get(),
        /*for_compaction=*/lookup_context_.caller ==
            TableReaderCaller::kCompaction);
    block_iter_points_to_real_block_ = true;
    CheckDataBlockWithinUpperBound();
  }
}

template <class TBlockIter, typename TValue>
bool BlockBasedTableIterator<TBlockIter, TValue>::MaterializeCurrentBlock() {
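  // The iterator previously deferred reading this block because the index
  // already supplied its first key. Read the block now and verify that its
  // actual first key matches the one recorded in the index; a mismatch is
  // treated as corruption below.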
  assert(is_at_first_key_from_index_);
  assert(!block_iter_points_to_real_block_);
  assert(index_iter_->Valid());

  is_at_first_key_from_index_ = false;
  InitDataBlock();
  assert(block_iter_points_to_real_block_);
  block_iter_.SeekToFirst();

  if (!block_iter_.Valid() ||
      icomp_.Compare(block_iter_.key(),
                     index_iter_->value().first_internal_key) != 0) {
    // Uh oh.
    block_iter_.Invalidate(Status::Corruption(
        "first key in index doesn't match first key in block"));
    return false;
  }

  return true;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyForward() {
  // This method's code is kept short to make it likely to be inlined.

  assert(!is_out_of_bound_);
  assert(block_iter_points_to_real_block_);

  if (!block_iter_.Valid()) {
    // This is the only call site of FindBlockForward(), but it's extracted into
    // a separate method to keep FindKeyForward() short and likely to be
    // inlined. When transitioning to a different block, we call
    // FindBlockForward(), which is much longer and is probably not inlined.
    FindBlockForward();
  } else {
    // This is the fast path that avoids a function call.
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindBlockForward() {
  // TODO: the while loop is inherited from two-level-iterator. We don't know
  // whether a data block can be empty, so it is unclear whether it can be
  // replaced by an "if".
  do {
    if (!block_iter_.status().ok()) {
      return;
    }
    // Whether the next data block is out of the upper bound, if there is one.
    const bool next_block_is_out_of_bound =
        read_options_.iterate_upper_bound != nullptr &&
        block_iter_points_to_real_block_ && !data_block_within_upper_bound_;
    assert(!next_block_is_out_of_bound ||
           user_comparator_.Compare(*read_options_.iterate_upper_bound,
2981
                                    index_iter_->user_key()) <= 0);
2982 2983
    ResetDataIter();
    index_iter_->Next();
2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994
    if (next_block_is_out_of_bound) {
      // The next block is out of bound. No need to read it.
      TEST_SYNC_POINT_CALLBACK("BlockBasedTableIterator:out_of_bound", nullptr);
      // We need to make sure this is not the last data block before setting
      // is_out_of_bound_, since the index key for the last data block can be
      // larger than smallest key of the next file on the same level.
      if (index_iter_->Valid()) {
        is_out_of_bound_ = true;
      }
      return;
    }
2995

2996
    if (!index_iter_->Valid()) {
2997 2998
      return;
    }
2999

3000
    IndexValue v = index_iter_->value();
3001

3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012
    // TODO(kolmike): Remove the != kBlockCacheTier condition.
    if (!v.first_internal_key.empty() &&
        read_options_.read_tier != kBlockCacheTier) {
      // Index contains the first key of the block. Defer reading the block.
      is_at_first_key_from_index_ = true;
      return;
    }

    InitDataBlock();
    block_iter_.SeekToFirst();
  } while (!block_iter_.Valid());
3013 3014
}

3015 3016
template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyBackward() {
M
    if (!block_iter_.status().ok()) {
3019 3020 3021 3022 3023 3024 3025 3026
      return;
    }

    ResetDataIter();
    index_iter_->Prev();

    if (index_iter_->Valid()) {
      InitDataBlock();
M
3028 3029 3030 3031 3032 3033 3034 3035 3036
    } else {
      return;
    }
  }

  // We could have checked the lower bound here too, but we opt not to for
  // code simplicity.
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::CheckOutOfBound() {
  if (read_options_.iterate_upper_bound != nullptr && Valid()) {
    is_out_of_bound_ = user_comparator_.Compare(
                           *read_options_.iterate_upper_bound, user_key()) <= 0;
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter,
                             TValue>::CheckDataBlockWithinUpperBound() {
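  // data_block_within_upper_bound_ becomes true iff the current data block
  // lies entirely below read_options_.iterate_upper_bound: the index key is
  // an upper bound for every key in its block, so comparing it against the
  // iterate_upper_bound is sufficient.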
  if (read_options_.iterate_upper_bound != nullptr &&
      block_iter_points_to_real_block_) {
    data_block_within_upper_bound_ =
        (user_comparator_.Compare(*read_options_.iterate_upper_bound,
                                  index_iter_->user_key()) > 0);
  }
}

InternalIterator* BlockBasedTable::NewIterator(
    const ReadOptions& read_options, const SliceTransform* prefix_extractor,
    Arena* arena, bool skip_filters, TableReaderCaller caller,
    size_t compaction_readahead_size) {
  BlockCacheLookupContext lookup_context{caller};
  bool need_upper_bound_check =
      PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
  if (arena == nullptr) {
    return new BlockBasedTableIterator<DataBlockIter>(
        this, read_options, rep_->internal_comparator,
        NewIndexIterator(
            read_options,
            need_upper_bound_check &&
                rep_->index_type == BlockBasedTableOptions::kHashSearch,
            /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
        compaction_readahead_size);
  } else {
    auto* mem =
        arena->AllocateAligned(sizeof(BlockBasedTableIterator<DataBlockIter>));
    return new (mem) BlockBasedTableIterator<DataBlockIter>(
        this, read_options, rep_->internal_comparator,
        NewIndexIterator(read_options, need_upper_bound_check,
                         /*input_iter=*/nullptr, /*get_context=*/nullptr,
                         &lookup_context),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, BlockType::kData, caller,
        compaction_readahead_size);
  }
}

FragmentedRangeTombstoneIterator* BlockBasedTable::NewRangeTombstoneIterator(
    const ReadOptions& read_options) {
  if (rep_->fragmented_range_dels == nullptr) {
    return nullptr;
  }
  SequenceNumber snapshot = kMaxSequenceNumber;
  if (read_options.snapshot != nullptr) {
    snapshot = read_options.snapshot->GetSequenceNumber();
  }
  return new FragmentedRangeTombstoneIterator(
      rep_->fragmented_range_dels, rep_->internal_comparator, snapshot);
}

bool BlockBasedTable::FullFilterKeyMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    const Slice& internal_key, const bool no_io,
    const SliceTransform* prefix_extractor, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return true;
  }
  Slice user_key = ExtractUserKey(internal_key);
  const Slice* const const_ikey_ptr = &internal_key;
  bool may_match = true;
  if (rep_->whole_key_filtering) {
    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    Slice user_key_without_ts = StripTimestampFromUserKey(user_key, ts_sz);
    may_match =
        filter->KeyMayMatch(user_key_without_ts, prefix_extractor, kNotValid,
                            no_io, const_ikey_ptr, get_context, lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0 &&
             prefix_extractor->InDomain(user_key) &&
             !filter->PrefixMayMatch(prefix_extractor->Transform(user_key),
                                     prefix_extractor, kNotValid, no_io,
                                     const_ikey_ptr, get_context,
                                     lookup_context)) {
    may_match = false;
  }
  if (may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, rep_->level);
  }
  return may_match;
}

void BlockBasedTable::FullFilterKeysMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    MultiGetRange* range, const bool no_io,
    const SliceTransform* prefix_extractor,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return;
  }
  if (rep_->whole_key_filtering) {
    filter->KeysMayMatch(range, prefix_extractor, kNotValid, no_io,
                         lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0) {
    for (auto iter = range->begin(); iter != range->end(); ++iter) {
      Slice user_key = iter->lkey->user_key();

      if (!prefix_extractor->InDomain(user_key)) {
        range->SkipKey(iter);
      }
    }
    filter->PrefixesMayMatch(range, prefix_extractor, kNotValid, false,
                             lookup_context);
  }
}

Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
                            GetContext* get_context,
                            const SliceTransform* prefix_extractor,
                            bool skip_filters) {
  assert(key.size() >= 8);  // key must be internal key
  assert(get_context != nullptr);
  Status s;
  const bool no_io = read_options.read_tier == kBlockCacheTier;

  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;

  // First check the full filter
  // If full filter not useful, then go into each block
  uint64_t tracing_get_id = get_context->get_tracing_get_id();
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserGet, tracing_get_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
    // Trace the key since it contains both user key and sequence number.
    lookup_context.referenced_key = key.ToString();
    lookup_context.get_from_user_specified_snapshot =
        read_options.snapshot != nullptr;
  }
  const bool may_match =
      FullFilterKeyMayMatch(read_options, filter, key, no_io, prefix_extractor,
                            get_context, &lookup_context);
  if (!may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
  } else {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    bool matched = false;  // if such user key matched a key in SST
    bool done = false;
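    // A single user key may span multiple data blocks (e.g. many versions or
    // merge operands), so keep walking index entries until the get_context
    // reports that it is done.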
    for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
      IndexValue v = iiter->value();

      bool not_exist_in_filter =
          filter != nullptr && filter->IsBlockBased() == true &&
          !filter->KeyMayMatch(ExtractUserKeyAndStripTimestamp(key, ts_sz),
                               prefix_extractor, v.handle.offset(), no_io,
                               /*const_ikey_ptr=*/nullptr, get_context,
                               &lookup_context);

      if (not_exist_in_filter) {
        // Not found
        // TODO: think about interaction with Merge. If a user key cannot
        // cross one data block, we should be fine.
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
        break;
      }

      if (!v.first_internal_key.empty() && !skip_filters &&
          UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                  .Compare(ExtractUserKey(key),
                           ExtractUserKey(v.first_internal_key)) < 0) {
        // The requested key falls between highest key in previous block and
        // lowest key in current block.
        break;
      }

      BlockCacheLookupContext lookup_data_block_context{
          TableReaderCaller::kUserGet, tracing_get_id,
          /*get_from_user_specified_snapshot=*/read_options.snapshot !=
              nullptr};
      bool does_referenced_key_exist = false;
      DataBlockIter biter;
      uint64_t referenced_data_size = 0;
      NewDataBlockIterator<DataBlockIter>(
          read_options, v.handle, &biter, BlockType::kData, get_context,
          &lookup_data_block_context,
          /*s=*/Status(), /*prefetch_buffer*/ nullptr);

      if (no_io && biter.status().IsIncomplete()) {
        // couldn't get block from block_cache
        // Update Saver.state to Found because we are only looking for
        // whether we can guarantee the key is not there when "no_io" is set
        get_context->MarkKeyMayExist();
        break;
      }
      if (!biter.status().ok()) {
        s = biter.status();
        break;
      }

      bool may_exist = biter.SeekForGet(key);
      // If user-specified timestamp is supported, we cannot end the search
      // just because hash index lookup indicates the key+ts does not exist.
      if (!may_exist && ts_sz == 0) {
      // HashSeek cannot find the key in this block and the iter is not at
      // the end of the block, i.e. the key cannot be in the following blocks
        // either. In this case, the seek_key cannot be found, so we break
        // from the top level for-loop.
        done = true;
      } else {
        // Call the *saver function on each entry/block until it returns false
        for (; biter.Valid(); biter.Next()) {
          ParsedInternalKey parsed_key;
          if (!ParseInternalKey(biter.key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }

          if (!get_context->SaveValue(
                  parsed_key, biter.value(), &matched,
                  biter.IsValuePinned() ? &biter : nullptr)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size = biter.key().size() + biter.value().size();
            }
            done = true;
            break;
          }
        }
        s = biter.status();
      }
      // Write the block cache access record.
      if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
        // Avoid making copy of block_key, cf_name, and referenced_key when
        // constructing the access record.
        Slice referenced_key;
        if (does_referenced_key_exist) {
          referenced_key = biter.key();
        } else {
          referenced_key = key;
        }
        BlockCacheTraceRecord access_record(
            rep_->ioptions.env->NowMicros(),
            /*block_key=*/"", lookup_data_block_context.block_type,
            lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
            /*cf_name=*/"", rep_->level_for_tracing(),
            rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
            lookup_data_block_context.is_cache_hit,
            lookup_data_block_context.no_insert,
3313
            lookup_data_block_context.get_id,
3314
            lookup_data_block_context.get_from_user_specified_snapshot,
3315 3316 3317 3318 3319
            /*referenced_key=*/"", referenced_data_size,
            lookup_data_block_context.num_keys_in_block,
            does_referenced_key_exist);
        block_cache_tracer_->WriteBlockAccess(
            access_record, lookup_data_block_context.block_key,
3320
            rep_->cf_name_for_tracing(), referenced_key);
S
3322

M
        // Avoid the extra Next which is expensive in two-level indexes
        break;
      }
    }
    if (matched && filter != nullptr && !filter->IsBlockBased()) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                rep_->level);
    }
    if (s.ok()) {
      s = iiter->status();
    }
  }

  return s;
}

using MultiGetRange = MultiGetContext::Range;
void BlockBasedTable::MultiGet(const ReadOptions& read_options,
                               const MultiGetRange* mget_range,
                               const SliceTransform* prefix_extractor,
                               bool skip_filters) {
  FilterBlockReader* const filter =
      !skip_filters ? rep_->filter.get() : nullptr;
  MultiGetRange sst_file_range(*mget_range, mget_range->begin(),
                               mget_range->end());

  // First check the full filter
  // If full filter not useful, then go into each block
  const bool no_io = read_options.read_tier == kBlockCacheTier;
  uint64_t tracing_mget_id = BlockCacheTraceHelper::kReservedGetId;
  if (!sst_file_range.empty() && sst_file_range.begin()->get_context) {
    tracing_mget_id = sst_file_range.begin()->get_context->get_tracing_get_id();
  }
  BlockCacheLookupContext lookup_context{
      TableReaderCaller::kUserMultiGet, tracing_mget_id,
      /*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
  FullFilterKeysMayMatch(read_options, filter, &sst_file_range, no_io,
                         prefix_extractor, &lookup_context);

  if (skip_filters || !sst_file_range.empty()) {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         sst_file_range.begin()->get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    uint64_t offset = std::numeric_limits<uint64_t>::max();
    autovector<BlockHandle, MultiGetContext::MAX_BATCH_SIZE> block_handles;
    autovector<CachableEntry<Block>, MultiGetContext::MAX_BATCH_SIZE> results;
    autovector<Status, MultiGetContext::MAX_BATCH_SIZE> statuses;
    static const size_t kMultiGetReadStackBufSize = 8192;
    char stack_buf[kMultiGetReadStackBufSize];
    std::unique_ptr<char[]> block_buf;
    {
      MultiGetRange data_block_range(sst_file_range, sst_file_range.begin(),
                                     sst_file_range.end());

      UncompressionDict uncompression_dict;
      Status uncompression_dict_status;
      if (rep_->uncompression_dict_reader) {
        uncompression_dict_status =
            rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
                nullptr /* prefetch_buffer */, no_io,
                sst_file_range.begin()->get_context, &lookup_context,
                &uncompression_dict);
      }

      size_t total_len = 0;
      ReadOptions ro = read_options;
      ro.read_tier = kBlockCacheTier;
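      // First pass over the batch: locate each key's data block through the
      // index and probe the block cache only (kBlockCacheTier). Blocks that
      // miss the cache are queued in block_handles so they can be read
      // together afterwards.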

      for (auto miter = data_block_range.begin();
            miter != data_block_range.end(); ++miter) {
        const Slice& key = miter->ikey;
        iiter->Seek(miter->ikey);

        IndexValue v;
        if (iiter->Valid()) {
          v = iiter->value();
        }
        if (!iiter->Valid() ||
            (!v.first_internal_key.empty() && !skip_filters &&
            UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                    .Compare(ExtractUserKey(key),
                             ExtractUserKey(v.first_internal_key)) < 0)) {
          // The requested key falls between highest key in previous block and
          // lowest key in current block.
          *(miter->s) = iiter->status();
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }

        if (!uncompression_dict_status.ok()) {
          *(miter->s) = uncompression_dict_status;
          data_block_range.SkipKey(miter);
          sst_file_range.SkipKey(miter);
          continue;
        }

        statuses.emplace_back();
        results.emplace_back();
        if (v.handle.offset() == offset) {
          // We're going to reuse the block for this key later on. No need to
          // look it up now. Place a null handle
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
          continue;
        }
        offset = v.handle.offset();
        BlockHandle handle = v.handle;
        Status s = GetDataBlockFromCache(ro, handle, uncompression_dict,
              &(results.back()), BlockType::kData, miter->get_context);
        if (s.ok() && !results.back().IsEmpty()) {
          // Found it in the cache. Add NULL handle to indicate there is
          // nothing to read from disk
          block_handles.emplace_back(BlockHandle::NullBlockHandle());
        } else {
          block_handles.emplace_back(handle);
          total_len += handle.size();
        }
      }

      if (total_len) {
        char* scratch = nullptr;
        // If the blocks need to be uncompressed and we don't need the
        // compressed blocks, then we can use a contiguous block of
        // memory to read in all the blocks as it will be temporary
        // storage
        // 1. If blocks are compressed and compressed block cache is there,
        //    alloc heap bufs
        // 2. If blocks are uncompressed, alloc heap bufs
        // 3. If blocks are compressed and no compressed block cache, use
        //    stack buf
        if (rep_->table_options.block_cache_compressed == nullptr &&
            rep_->blocks_maybe_compressed) {
          if (total_len <= kMultiGetReadStackBufSize) {
            scratch = stack_buf;
          } else {
            scratch = new char[total_len];
            block_buf.reset(scratch);
          }
        }
        MaybeLoadBlocksToCache(read_options,
            &data_block_range, &block_handles, &statuses, &results,
            scratch, uncompression_dict);
      }
    }

    DataBlockIter first_biter;
    DataBlockIter next_biter;
    size_t idx_in_batch = 0;
    for (auto miter = sst_file_range.begin(); miter != sst_file_range.end();
         ++miter) {
      Status s;
      GetContext* get_context = miter->get_context;
      const Slice& key = miter->ikey;
      bool matched = false;  // if such user key matched a key in SST
      bool done = false;
      bool first_block = true;
      do {
        DataBlockIter* biter = nullptr;
        bool reusing_block = true;
        uint64_t referenced_data_size = 0;
        bool does_referenced_key_exist = false;
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet, tracing_mget_id,
            /*get_from_user_specified_snapshot=*/read_options.snapshot !=
                nullptr);
        if (first_block) {
          if (!block_handles[idx_in_batch].IsNull() ||
              !results[idx_in_batch].IsEmpty()) {
            first_biter.Invalidate(Status::OK());
            NewDataBlockIterator<DataBlockIter>(
                read_options, results[idx_in_batch], &first_biter,
                statuses[idx_in_batch]);
            reusing_block = false;
          }
          biter = &first_biter;
          idx_in_batch++;
        } else {
          IndexValue v = iiter->value();
          if (!v.first_internal_key.empty() && !skip_filters &&
              UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                      .Compare(ExtractUserKey(key),
                               ExtractUserKey(v.first_internal_key)) < 0) {
            // The requested key falls between highest key in previous block and
            // lowest key in current block.
            break;
          }

          next_biter.Invalidate(Status::OK());
          NewDataBlockIterator<DataBlockIter>(
              read_options, iiter->value().handle, &next_biter,
              BlockType::kData, get_context, &lookup_data_block_context,
              Status(), nullptr);
          biter = &next_biter;
          reusing_block = false;
        }

        if (read_options.read_tier == kBlockCacheTier &&
            biter->status().IsIncomplete()) {
          // couldn't get block from block_cache
          // Update Saver.state to Found because we are only looking for
          // whether we can guarantee the key is not there when "no_io" is set
          get_context->MarkKeyMayExist();
          break;
        }
        if (!biter->status().ok()) {
          s = biter->status();
          break;
        }

        bool may_exist = biter->SeekForGet(key);
        if (!may_exist) {
          // HashSeek cannot find the key in this block and the iter is not at
          // the end of the block, i.e. the key cannot be in the following blocks
          // either. In this case, the seek_key cannot be found, so we break
          // from the top level for-loop.
          break;
        }

        // Call the *saver function on each entry/block until it returns false
        for (; biter->Valid(); biter->Next()) {
          ParsedInternalKey parsed_key;
          Cleanable dummy;
          Cleanable* value_pinner = nullptr;
          if (!ParseInternalKey(biter->key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }
          if (biter->IsValuePinned()) {
            if (reusing_block) {
              Cache* block_cache = rep_->table_options.block_cache.get();
              assert(biter->cache_handle() != nullptr);
              block_cache->Ref(biter->cache_handle());
              dummy.RegisterCleanup(&ReleaseCachedEntry, block_cache,
                                    biter->cache_handle());
              value_pinner = &dummy;
            } else {
              value_pinner = biter;
            }
          }
          if (!get_context->SaveValue(parsed_key, biter->value(), &matched,
                                      value_pinner)) {
            if (get_context->State() == GetContext::GetState::kFound) {
              does_referenced_key_exist = true;
              referenced_data_size =
                  biter->key().size() + biter->value().size();
            }
            done = true;
            break;
          }
          s = biter->status();
        }
        // Write the block cache access.
        if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
          // Avoid making copy of block_key, cf_name, and referenced_key when
          // constructing the access record.
          Slice referenced_key;
          if (does_referenced_key_exist) {
            referenced_key = biter->key();
          } else {
            referenced_key = key;
          }
          BlockCacheTraceRecord access_record(
              rep_->ioptions.env->NowMicros(),
              /*block_key=*/"", lookup_data_block_context.block_type,
              lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
              /*cf_name=*/"", rep_->level_for_tracing(),
              rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
              lookup_data_block_context.is_cache_hit,
              lookup_data_block_context.no_insert,
              lookup_data_block_context.get_id,
              lookup_data_block_context.get_from_user_specified_snapshot,
              /*referenced_key=*/"", referenced_data_size,
              lookup_data_block_context.num_keys_in_block,
              does_referenced_key_exist);
          block_cache_tracer_->WriteBlockAccess(
              access_record, lookup_data_block_context.block_key,
              rep_->cf_name_for_tracing(), referenced_key);
        }
        s = biter->status();
        if (done) {
          // Avoid the extra Next which is expensive in two-level indexes
          break;
        }
        if (first_block) {
          iiter->Seek(key);
        }
        first_block = false;
        iiter->Next();
      } while (iiter->Valid());

      if (matched && filter != nullptr && !filter->IsBlockBased()) {
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                  rep_->level);
      }
      if (s.ok()) {
        s = iiter->status();
      }
      *(miter->s) = s;
    }
  }
}

Status BlockBasedTable::Prefetch(const Slice* const begin,
                                 const Slice* const end) {
  auto& comparator = rep_->internal_comparator;
  UserComparatorWrapper user_comparator(comparator.user_comparator());
  // pre-condition
  if (begin && end && comparator.Compare(*begin, *end) > 0) {
    return Status::InvalidArgument(*begin, *end);
  }
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  IndexBlockIter iiter_on_stack;
  auto iiter = NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                                &iiter_on_stack, /*get_context=*/nullptr,
                                &lookup_context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }

  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }

  // indicates if we are on the last page that needs to be pre-fetched
  bool prefetching_boundary_page = false;

  for (begin ? iiter->Seek(*begin) : iiter->SeekToFirst(); iiter->Valid();
       iiter->Next()) {
    BlockHandle block_handle = iiter->value().handle;
    const bool is_user_key = !rep_->index_key_includes_seq;
    if (end &&
        ((!is_user_key && comparator.Compare(iiter->key(), *end) >= 0) ||
         (is_user_key &&
          user_comparator.Compare(iiter->key(), ExtractUserKey(*end)) >= 0))) {
      if (prefetching_boundary_page) {
        break;
      }

      // The index entry represents the last key in the data block.
      // We should load this page into memory as well, but no more
      prefetching_boundary_page = true;
    }

    // Load the block specified by the block_handle into the block cache
    DataBlockIter biter;

    NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), block_handle, &biter, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, &lookup_context, Status(),
        /*prefetch_buffer=*/nullptr);

    if (!biter.status().ok()) {
      // there was an unexpected error while pre-fetching
      return biter.status();
    }
  }

  return Status::OK();
}

Status BlockBasedTable::VerifyChecksum(TableReaderCaller caller) {
  Status s;
  // Check Meta blocks
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  s = ReadMetaBlock(nullptr /* prefetch buffer */, &meta, &meta_iter);
  if (s.ok()) {
    s = VerifyChecksumInMetaBlocks(meta_iter.get());
    if (!s.ok()) {
      return s;
    }
  } else {
    return s;
  }
  // Check Data blocks
  IndexBlockIter iiter_on_stack;
  BlockCacheLookupContext context{caller};
  InternalIteratorBase<IndexValue>* iiter = NewIndexIterator(
      ReadOptions(), /*need_upper_bound_check=*/false, &iiter_on_stack,
      /*get_context=*/nullptr, &context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }
  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }
  s = VerifyChecksumInBlocks(iiter);
  return s;
}

Status BlockBasedTable::VerifyChecksumInBlocks(
    InternalIteratorBase<IndexValue>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle = index_iter->value().handle;
    BlockContents contents;
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/, BlockType::kData,
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

BlockType BlockBasedTable::GetBlockTypeForMetaBlockByName(
    const Slice& meta_block_name) {
  if (meta_block_name.starts_with(kFilterBlockPrefix) ||
      meta_block_name.starts_with(kFullFilterBlockPrefix) ||
      meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
    return BlockType::kFilter;
  }

  if (meta_block_name == kPropertiesBlock) {
    return BlockType::kProperties;
  }

  if (meta_block_name == kCompressionDictBlock) {
    return BlockType::kCompressionDictionary;
  }

  if (meta_block_name == kRangeDelBlock) {
    return BlockType::kRangeDeletion;
  }

  if (meta_block_name == kHashIndexPrefixesBlock) {
    return BlockType::kHashIndexPrefixes;
  }

  if (meta_block_name == kHashIndexPrefixesMetadataBlock) {
    return BlockType::kHashIndexMetadata;
  }

  assert(false);
  return BlockType::kInvalid;
}

Status BlockBasedTable::VerifyChecksumInMetaBlocks(
    InternalIteratorBase<Slice>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle;
    Slice input = index_iter->value();
    s = handle.DecodeFrom(&input);
    BlockContents contents;
    const Slice meta_block_name = index_iter->key();
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/,
        GetBlockTypeForMetaBlockByName(meta_block_name),
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
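    // The properties block can legitimately fail the raw checksum check when
    // an ingested file had its global sequence number rewritten in place, so
    // retry through the global-seqno-aware properties reader before treating
    // the mismatch as corruption.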
    if (s.IsCorruption() && meta_block_name == kPropertiesBlock) {
      TableProperties* table_properties;
      s = TryReadPropertiesWithGlobalSeqno(nullptr /* prefetch_buffer */,
                                           index_iter->value(),
                                           &table_properties);
      delete table_properties;
    }
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

bool BlockBasedTable::TEST_BlockInCache(const BlockHandle& handle) const {
  assert(rep_ != nullptr);

  Cache* const cache = rep_->table_options.block_cache.get();
  if (cache == nullptr) {
    return false;
  }

  char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice cache_key =
      GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, handle,
                  cache_key_storage);

  Cache::Handle* const cache_handle = cache->Lookup(cache_key);
  if (cache_handle == nullptr) {
    return false;
  }

  cache->Release(cache_handle);

  return true;
}

bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
                                      const Slice& key) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
      options, /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
      /*get_context=*/nullptr, /*lookup_context=*/nullptr));
  iiter->Seek(key);
  assert(iiter->Valid());

  return TEST_BlockInCache(iiter->value().handle);
}

// REQUIRES: The following fields of rep_ should have already been populated:
//  1. file
//  2. index_handle,
//  3. options
//  4. internal_comparator
//  5. index_type
Status BlockBasedTable::CreateIndexReader(
    FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* preloaded_meta_index_iter, bool use_cache, bool prefetch,
    bool pin, BlockCacheLookupContext* lookup_context,
    std::unique_ptr<IndexReader>* index_reader) {
  // kHashSearch requires non-empty prefix_extractor but bypass checking
  // prefix_extractor here since we have no access to MutableCFOptions.
  // Add need_upper_bound_check flag in BlockBasedTable::NewIndexIterator.
  // If prefix_extractor does not match prefix_extractor_name from table
  // properties, turn off Hash Index by setting total_order_seek to true

  switch (rep_->index_type) {
    case BlockBasedTableOptions::kTwoLevelIndexSearch: {
      return PartitionIndexReader::Create(this, prefetch_buffer, use_cache,
                                          prefetch, pin, lookup_context,
                                          index_reader);
    }
    case BlockBasedTableOptions::kBinarySearch:
    case BlockBasedTableOptions::kBinarySearchWithFirstKey: {
      return BinarySearchIndexReader::Create(this, prefetch_buffer, use_cache,
                                             prefetch, pin, lookup_context,
                                             index_reader);
    }
    case BlockBasedTableOptions::kHashSearch: {
      std::unique_ptr<Block> meta_guard;
      std::unique_ptr<InternalIterator> meta_iter_guard;
      auto meta_index_iter = preloaded_meta_index_iter;
      if (meta_index_iter == nullptr) {
        auto s = ReadMetaBlock(prefetch_buffer, &meta_guard, &meta_iter_guard);
        if (!s.ok()) {
          // we simply fall back to binary search in case there is any
          // problem with prefix hash index loading.
          ROCKS_LOG_WARN(rep_->ioptions.info_log,
                         "Unable to read the metaindex block."
                         " Fall back to binary search index.");
          return BinarySearchIndexReader::Create(this, prefetch_buffer,
                                                 use_cache, prefetch, pin,
                                                 lookup_context, index_reader);
        }
        meta_index_iter = meta_iter_guard.get();
      }

      return HashIndexReader::Create(this, prefetch_buffer, meta_index_iter,
                                     use_cache, prefetch, pin, lookup_context,
                                     index_reader);
    }
    default: {
      std::string error_message =
          "Unrecognized index type: " + ToString(rep_->index_type);
      return Status::InvalidArgument(error_message.c_str());
    }
  }
}

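// Returns an approximation of the file offset at which the data for `key`
// starts, based on the index. If `key` is past the last key in the file, the
// approximation is the size of the data section (table_properties->data_size
// if available, otherwise the offset of the metaindex block).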
uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key,
                                              TableReaderCaller caller) {
  BlockCacheLookupContext context(caller);
  IndexBlockIter iiter_on_stack;
  auto index_iter =
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/&iiter_on_stack, /*get_context=*/nullptr,
                       /*lookup_context=*/&context);

  index_iter->Seek(key);
  uint64_t result;
  if (index_iter->Valid()) {
    BlockHandle handle = index_iter->value().handle;
    result = handle.offset();
  } else {
    // key is past the last key in the file. If table_properties is not
    // available, approximate the offset by returning the offset of the
    // metaindex block (which is right near the end of the file).
    result = 0;
    if (rep_->table_properties) {
      result = rep_->table_properties->data_size;
    }
    // table_properties is not present in the table.
    if (result == 0) {
      result = rep_->footer.metaindex_handle().offset();
    }
  }

  if (index_iter != &iiter_on_stack) {
    delete index_iter;
  }

  return result;
}

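// Test helper: reports whether this table's filter block is in the block
// cache.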
bool BlockBasedTable::TEST_FilterBlockInCache() const {
  assert(rep_ != nullptr);
  return TEST_BlockInCache(rep_->filter_handle);
}

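// Test helper: reports whether this table's index block is in the block
// cache.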
bool BlockBasedTable::TEST_IndexBlockInCache() const {
  assert(rep_ != nullptr);

  return TEST_BlockInCache(rep_->footer.index_handle());
}

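// Iterates over all data blocks via the index and copies every key/value
// pair into `kv_pair_blocks`, one KVPairBlock per data block. Blocks that
// fail to read are skipped instead of failing the whole scan.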
Status BlockBasedTable::GetKVPairsFromDataBlocks(
    std::vector<KVPairBlock>* kv_pair_blocks) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));

  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    // Cannot read Index Block
    return s;
  }

  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();

    if (!s.ok()) {
      break;
    }

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      // Error reading the block - Skipped
      continue;
    }

    KVPairBlock kv_pair_block;
    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        // Error reading the block - Skipped
        break;
      }
      const Slice& key = datablock_iter->key();
      const Slice& value = datablock_iter->value();
      std::string key_copy = std::string(key.data(), key.size());
      std::string value_copy = std::string(value.data(), value.size());

      kv_pair_block.push_back(
          std::make_pair(std::move(key_copy), std::move(value_copy)));
    }
    kv_pair_blocks->push_back(std::move(kv_pair_block));
  }
  return Status::OK();
}

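// Writes a human-readable dump of the table to `out_file`: footer, metaindex
// contents, table properties, filter details, index block, compression
// dictionary, range deletions, and finally every data block.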
Status BlockBasedTable::DumpTable(WritableFile* out_file) {
  // Output Footer
  out_file->Append(
      "Footer Details:\n"
      "--------------------------------------\n"
      "  ");
  out_file->Append(rep_->footer.ToString().c_str());
  out_file->Append("\n");

  // Output MetaIndex
  out_file->Append(
      "Metaindex Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  Status s = ReadMetaBlock(nullptr /* prefetch_buffer */, &meta, &meta_iter);
  if (s.ok()) {
    for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) {
      s = meta_iter->status();
      if (!s.ok()) {
        return s;
      }
      if (meta_iter->key() == rocksdb::kPropertiesBlock) {
        out_file->Append("  Properties block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (meta_iter->key() == rocksdb::kCompressionDictBlock) {
        out_file->Append("  Compression dictionary block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (strstr(meta_iter->key().ToString().c_str(),
                        "filter.rocksdb.") != nullptr) {
        out_file->Append("  Filter block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      } else if (meta_iter->key() == rocksdb::kRangeDelBlock) {
        out_file->Append("  Range deletion block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
      }
    }
    out_file->Append("\n");
  } else {
    return s;
  }

  // Output TableProperties
  const rocksdb::TableProperties* table_properties;
  table_properties = rep_->table_properties.get();

  if (table_properties != nullptr) {
    out_file->Append(
        "Table Properties:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(table_properties->ToString("\n  ", ": ").c_str());
    out_file->Append("\n");
  }

  if (rep_->filter) {
    out_file->Append(
        "Filter Details:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(rep_->filter->ToString().c_str());
    out_file->Append("\n");
  }

  // Output Index block
  s = DumpIndexBlock(out_file);
  if (!s.ok()) {
    return s;
  }

  // Output compression dictionary
  if (rep_->uncompression_dict_reader) {
    UncompressionDict uncompression_dict;
    s = rep_->uncompression_dict_reader->GetOrReadUncompressionDictionary(
        nullptr /* prefetch_buffer */, false /* no_io */,
        nullptr /* get_context */, nullptr /* lookup_context */,
        &uncompression_dict);
    if (!s.ok()) {
      return s;
    }

    const Slice& raw_dict = uncompression_dict.GetRawDict();
    out_file->Append(
        "Compression Dictionary:\n"
        "--------------------------------------\n");
    out_file->Append("  size (bytes): ");
    out_file->Append(rocksdb::ToString(raw_dict.size()));
    out_file->Append("\n\n");
    out_file->Append("  HEX    ");
    out_file->Append(raw_dict.ToString(true).c_str());
    out_file->Append("\n\n");
  }

  // Output range deletions block
  auto* range_del_iter = NewRangeTombstoneIterator(ReadOptions());
  if (range_del_iter != nullptr) {
    range_del_iter->SeekToFirst();
    if (range_del_iter->Valid()) {
      out_file->Append(
          "Range deletions:\n"
          "--------------------------------------\n"
          "  ");
      for (; range_del_iter->Valid(); range_del_iter->Next()) {
        DumpKeyValue(range_del_iter->key(), range_del_iter->value(), out_file);
      }
      out_file->Append("\n");
    }
    delete range_del_iter;
  }
  // Output Data blocks
  s = DumpDataBlocks(out_file);

  return s;
}

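// Dumps each index entry as "block key (hex): data block handle", followed by
// a spaced-out ASCII rendering of the block key.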
Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
  out_file->Append(
      "Index Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Cannot read Index Block\n\n");
    return s;
  }

  out_file->Append("  Block key hex dump: Data block handle\n");
  out_file->Append("  Block key ascii\n\n");
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }
    Slice key = blockhandles_iter->key();
    Slice user_key;
    InternalKey ikey;
    if (!rep_->index_key_includes_seq) {
      user_key = key;
    } else {
      ikey.DecodeFrom(key);
      user_key = ikey.user_key();
    }

    out_file->Append("  HEX    ");
    out_file->Append(user_key.ToString(true).c_str());
    out_file->Append(": ");
    out_file->Append(blockhandles_iter->value()
                         .ToString(true, rep_->index_has_first_key)
                         .c_str());
    out_file->Append("\n");

    std::string str_key = user_key.ToString();
    std::string res_key("");
    char cspace = ' ';
    for (size_t i = 0; i < str_key.size(); i++) {
      res_key.append(&str_key[i], 1);
      res_key.append(1, cspace);
    }
    out_file->Append("  ASCII  ");
    out_file->Append(res_key.c_str());
    out_file->Append("\n  ------\n");
  }
  out_file->Append("\n");
  return Status::OK();
}

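// Dumps the contents of every data block, followed by a summary with the
// block count and the min/max/avg data block sizes.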
Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Cannot read Index Block\n\n");
    return s;
  }

  uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
  uint64_t datablock_size_max = 0;
  uint64_t datablock_size_sum = 0;

  size_t block_id = 1;
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       block_id++, blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }

    BlockHandle bh = blockhandles_iter->value().handle;
    uint64_t datablock_size = bh.size();
    datablock_size_min = std::min(datablock_size_min, datablock_size);
    datablock_size_max = std::max(datablock_size_max, datablock_size);
    datablock_size_sum += datablock_size;

    out_file->Append("Data Block # ");
    out_file->Append(rocksdb::ToString(block_id));
    out_file->Append(" @ ");
    out_file->Append(blockhandles_iter->value().handle.ToString(true).c_str());
    out_file->Append("\n");
    out_file->Append("--------------------------------------\n");

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      out_file->Append("Error reading the block - Skipped \n\n");
      continue;
    }

    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        out_file->Append("Error reading the block - Skipped \n");
        break;
      }
      DumpKeyValue(datablock_iter->key(), datablock_iter->value(), out_file);
    }
    out_file->Append("\n");
  }

  uint64_t num_datablocks = block_id - 1;
  if (num_datablocks) {
    double datablock_size_avg =
        static_cast<double>(datablock_size_sum) / num_datablocks;
    out_file->Append("Data Block Summary:\n");
    out_file->Append("--------------------------------------");
    out_file->Append("\n  # data blocks: ");
    out_file->Append(rocksdb::ToString(num_datablocks));
    out_file->Append("\n  min data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_min));
    out_file->Append("\n  max data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_max));
    out_file->Append("\n  avg data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_avg));
    out_file->Append("\n");
  }

  return Status::OK();
}

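// Writes a single key/value pair to `out_file`, first as hex and then as
// ASCII with embedded NUL bytes escaped as "\0".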
void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,
                                   WritableFile* out_file) {
  InternalKey ikey;
  ikey.DecodeFrom(key);

  out_file->Append("  HEX    ");
  out_file->Append(ikey.user_key().ToString(true).c_str());
  out_file->Append(": ");
  out_file->Append(value.ToString(true).c_str());
  out_file->Append("\n");

  std::string str_key = ikey.user_key().ToString();
  std::string str_value = value.ToString();
  std::string res_key(""), res_value("");
  char cspace = ' ';
  for (size_t i = 0; i < str_key.size(); i++) {
    if (str_key[i] == '\0') {
      res_key.append("\\0", 2);
    } else {
      res_key.append(&str_key[i], 1);
    }
    res_key.append(1, cspace);
  }
  for (size_t i = 0; i < str_value.size(); i++) {
    if (str_value[i] == '\0') {
      res_value.append("\\0", 2);
    } else {
      res_value.append(&str_value[i], 1);
    }
    res_value.append(1, cspace);
  }

  out_file->Append("  ASCII  ");
  out_file->Append(res_key.c_str());
  out_file->Append(": ");
  out_file->Append(res_value.c_str());
  out_file->Append("\n  ------\n");
}

}  // namespace rocksdb