//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/block_based/block_based_table_reader.h"

#include <algorithm>
#include <array>
#include <limits>
#include <string>
#include <utility>
#include <vector>

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"

#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/table_properties.h"

#include "table/block_based/block.h"
#include "table/block_based/block_based_filter_block.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_prefix_index.h"
#include "table/block_based/filter_block.h"
#include "table/block_based/full_filter_block.h"
#include "table/block_based/partitioned_filter_block.h"
#include "table/block_fetcher.h"
#include "table/format.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/multiget_context.h"
#include "table/persistent_cache_helper.h"
#include "table/sst_file_writer_collectors.h"
#include "table/two_level_iterator.h"

#include "monitoring/perf_context_imp.h"
#include "test_util/sync_point.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/file_reader_writer.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace rocksdb {

extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

typedef BlockBasedTable::IndexReader IndexReader;

BlockBasedTable::~BlockBasedTable() {
  Close();
  delete rep_;
}

std::atomic<uint64_t> BlockBasedTable::next_cache_key_id_(0);

namespace {
// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result
// @param uncompression_dict Data for presetting the compression library's
//    dictionary.
Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<Block>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed, BlockType block_type,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options, SequenceNumber global_seqno,
    size_t read_amp_bytes_per_bit, MemoryAllocator* memory_allocator,
    bool for_compaction = false) {
  BlockContents contents;
  BlockFetcher block_fetcher(
      file, prefetch_buffer, footer, options, handle, &contents, ioptions,
      do_uncompress, maybe_compressed, block_type, uncompression_dict,
      cache_options, memory_allocator, nullptr, for_compaction);
  Status s = block_fetcher.ReadBlockContents();
  if (s.ok()) {
    result->reset(new Block(std::move(contents), global_seqno,
                            read_amp_bytes_per_bit, ioptions.statistics));
  }

  return s;
}

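// Returns the memory allocator configured on the block cache (if any) so
// that block buffers can be allocated through it; nullptr when no block
// cache or no custom allocator is set.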
inline MemoryAllocator* GetMemoryAllocator(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache.get()
             ? table_options.block_cache->memory_allocator()
             : nullptr;
}

inline MemoryAllocator* GetMemoryAllocatorForCompressedBlock(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache_compressed.get()
             ? table_options.block_cache_compressed->memory_allocator()
             : nullptr;
}

// Delete the entry resided in the cache.
template <class Entry>
void DeleteCachedEntry(const Slice& /*key*/, void* value) {
  auto entry = reinterpret_cast<Entry*>(value);
  delete entry;
}

void DeleteCachedFilterEntry(const Slice& key, void* value);
void DeleteCachedUncompressionDictEntry(const Slice& key, void* value);

// Release the cached entry and decrement its ref count.
void ForceReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, true /* force_erase */);
}

// Release the cached entry and decrement its ref count.
// Do not force erase
void ReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, false /* force_erase */);
}

// For hash based index, return true if prefix_extractor and
// prefix_extractor_block mismatch, false otherwise. This flag will be used
// as total_order_seek via NewIndexIterator
bool PrefixExtractorChanged(const TableProperties* table_properties,
                            const SliceTransform* prefix_extractor) {
  // BlockBasedTableOptions::kHashSearch requires prefix_extractor to be set.
  // Turn off hash index if prefix_extractor is not set; if prefix_extractor
  // is set but prefix_extractor_block is not set, also disable hash index
  if (prefix_extractor == nullptr || table_properties == nullptr ||
      table_properties->prefix_extractor_name.empty()) {
    return true;
  }

  // prefix_extractor and prefix_extractor_block are both non-empty
  if (table_properties->prefix_extractor_name.compare(
          prefix_extractor->Name()) != 0) {
    return true;
  } else {
    return false;
  }
}

}  // namespace

// Encapsulates common functionality for the various index reader
// implementations. Provides access to the index block regardless of whether
// it is owned by the reader or stored in the cache, or whether it is pinned
// in the cache or not.
class BlockBasedTable::IndexReaderCommon : public BlockBasedTable::IndexReader {
 public:
  IndexReaderCommon(const BlockBasedTable* t,
                    CachableEntry<Block>&& index_block)
      : table_(t), index_block_(std::move(index_block)) {
    assert(table_ != nullptr);
  }

 protected:
  static Status ReadIndexBlock(const BlockBasedTable* table,
                               FilePrefetchBuffer* prefetch_buffer,
                               const ReadOptions& read_options,
                               GetContext* get_context,
                               BlockCacheLookupContext* lookup_context,
                               CachableEntry<Block>* index_block);

  const BlockBasedTable* table() const { return table_; }

  const InternalKeyComparator* internal_comparator() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);

    return &table_->get_rep()->internal_comparator;
  }

  bool index_has_first_key() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_has_first_key;
  }

  bool index_key_includes_seq() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
    return table_->get_rep()->index_key_includes_seq;
  }

  bool index_value_is_full() const {
    assert(table_ != nullptr);
    assert(table_->get_rep() != nullptr);
209
    return table_->get_rep()->index_value_is_full;
210 211
  }

212
  Status GetOrReadIndexBlock(bool no_io, GetContext* get_context,
213
                             BlockCacheLookupContext* lookup_context,
214 215 216 217
                             CachableEntry<Block>* index_block) const;

  size_t ApproximateIndexBlockMemoryUsage() const {
    assert(!index_block_.GetOwnValue() || index_block_.GetValue() != nullptr);
218 219 220
    return index_block_.GetOwnValue()
               ? index_block_.GetValue()->ApproximateMemoryUsage()
               : 0;
221 222
  }

223
 private:
224
  const BlockBasedTable* table_;
225 226 227 228
  CachableEntry<Block> index_block_;
};

Status BlockBasedTable::IndexReaderCommon::ReadIndexBlock(
229
    const BlockBasedTable* table, FilePrefetchBuffer* prefetch_buffer,
230
    const ReadOptions& read_options, GetContext* get_context,
231
    BlockCacheLookupContext* lookup_context,
232
    CachableEntry<Block>* index_block) {
233 234 235 236 237 238 239 240 241
  PERF_TIMER_GUARD(read_index_block_nanos);

  assert(table != nullptr);
  assert(index_block != nullptr);
  assert(index_block->IsEmpty());

  const Rep* const rep = table->get_rep();
  assert(rep != nullptr);

242 243
  const Status s = table->RetrieveBlock(
      prefetch_buffer, read_options, rep->footer.index_handle(),
244
      UncompressionDict::GetEmptyDict(), index_block, BlockType::kIndex,
245
      get_context, lookup_context);
246 247 248 249 250

  return s;
}

Status BlockBasedTable::IndexReaderCommon::GetOrReadIndexBlock(
251
    bool no_io, GetContext* get_context,
252
    BlockCacheLookupContext* lookup_context,
253
    CachableEntry<Block>* index_block) const {
254 255 256
  assert(index_block != nullptr);

  if (!index_block_.IsEmpty()) {
257
    index_block->SetUnownedValue(index_block_.GetValue());
258 259 260
    return Status::OK();
  }

261 262 263 264 265
  ReadOptions read_options;
  if (no_io) {
    read_options.read_tier = kBlockCacheTier;
  }

266 267
  return ReadIndexBlock(table_, /*prefetch_buffer=*/nullptr, read_options,
                        get_context, lookup_context, index_block);
268 269
}

M
270
// Index that allows binary search lookup in a two-level index structure.
271
class PartitionIndexReader : public BlockBasedTable::IndexReaderCommon {
M
272 273 274 275 276
 public:
  // Read the partition index from the file and create an instance for
  // `PartitionIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
277
  static Status Create(const BlockBasedTable* table,
278
                       FilePrefetchBuffer* prefetch_buffer, bool use_cache,
279 280
                       bool prefetch, bool pin, IndexReader** index_reader,
                       BlockCacheLookupContext* lookup_context) {
281 282 283 284 285 286 287
    assert(table != nullptr);
    assert(table->get_rep());
    assert(!pin || prefetch);
    assert(index_reader != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
288 289 290
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(),
                         /*get_context=*/nullptr, lookup_context, &index_block);
291 292 293
      if (!s.ok()) {
        return s;
      }
M
294

295 296 297
      if (use_cache && !pin) {
        index_block.Reset();
      }
M
298 299
    }

300 301 302
    *index_reader = new PartitionIndexReader(table, std::move(index_block));

    return Status::OK();
M
303 304 305
  }

  // return a two-level iterator: first level is on the partition index
306
  InternalIteratorBase<IndexValue>* NewIterator(
307
      const ReadOptions& read_options, bool /* disable_prefix_seek */,
308 309
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
310
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
311
    CachableEntry<Block> index_block;
312 313
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
314 315 316 317 318 319
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

320
      return NewErrorInternalIterator<IndexValue>(s);
321 322
    }

323
    InternalIteratorBase<IndexValue>* it = nullptr;
324

M
325
    Statistics* kNullStats = nullptr;
M
326
    // Filters are already checked before seeking the index
327
    if (!partition_map_.empty()) {
328
      // We don't return pinned data from index blocks, so no need
329
      // to set `block_contents_pinned`.
330
      it = NewTwoLevelIterator(
331 332 333
          new BlockBasedTable::PartitionedIndexIteratorState(table(),
                                                             &partition_map_),
          index_block.GetValue()->NewIndexIterator(
334
              internal_comparator(), internal_comparator()->user_comparator(),
335 336
              nullptr, kNullStats, true, index_has_first_key(),
              index_key_includes_seq(), index_value_is_full()));
337
    } else {
338 339 340
      ReadOptions ro;
      ro.fill_cache = read_options.fill_cache;
      // We don't return pinned data from index blocks, so no need
341
      // to set `block_contents_pinned`.
342
      it = new BlockBasedTableIterator<IndexBlockIter, IndexValue>(
343
          table(), ro, *internal_comparator(),
344
          index_block.GetValue()->NewIndexIterator(
345
              internal_comparator(), internal_comparator()->user_comparator(),
346 347
              nullptr, kNullStats, true, index_has_first_key(),
              index_key_includes_seq(), index_value_is_full()),
348
          false, true, /* prefix_extractor */ nullptr, BlockType::kIndex,
349 350
          lookup_context ? lookup_context->caller
                         : TableReaderCaller::kUncategorized);
351
    }
352 353 354 355 356 357

    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;

M
358
    // TODO(myabandeh): Update TwoLevelIterator to be able to make use of
M
359 360 361 362 363
    // on-stack BlockIter while the state is on heap. Currentlly it assumes
    // the first level iter is always on heap and will attempt to delete it
    // in its destructor.
  }

364
  void CacheDependencies(bool pin) override {
M
365
    // Before read partitions, prefetch them to avoid lots of IOs
366
    BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
367
    const BlockBasedTable::Rep* rep = table()->rep_;
M
368
    IndexBlockIter biter;
M
369
    BlockHandle handle;
M
370
    Statistics* kNullStats = nullptr;
371 372

    CachableEntry<Block> index_block;
373
    Status s = GetOrReadIndexBlock(false /* no_io */, nullptr /* get_context */,
374
                                   &lookup_context, &index_block);
375 376 377
    if (!s.ok()) {
      ROCKS_LOG_WARN(rep->ioptions.info_log,
                     "Error retrieving top-level index block while trying to "
378 379
                     "cache index partitions: %s",
                     s.ToString().c_str());
380 381 382 383
      return;
    }

    // We don't return pinned data from index blocks, so no need
384
    // to set `block_contents_pinned`.
385
    index_block.GetValue()->NewIndexIterator(
386
        internal_comparator(), internal_comparator()->user_comparator(), &biter,
387 388
        kNullStats, true, index_has_first_key(), index_key_includes_seq(),
        index_value_is_full());
M
389 390 391
    // Index partitions are assumed to be consecuitive. Prefetch them all.
    // Read the first block offset
    biter.SeekToFirst();
392 393 394 395
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
396
    handle = biter.value().handle;
M
397 398 399 400
    uint64_t prefetch_off = handle.offset();

    // Read the last block's offset
    biter.SeekToLast();
401 402 403 404
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
405
    handle = biter.value().handle;
M
406 407 408
    uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize;
    uint64_t prefetch_len = last_off - prefetch_off;
    std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
409
    auto& file = rep->file;
M
410
    prefetch_buffer.reset(new FilePrefetchBuffer());
411 412
    s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
                                  static_cast<size_t>(prefetch_len));
M
413 414 415 416 417

    // After prefetch, read the partitions one by one
    biter.SeekToFirst();
    auto ro = ReadOptions();
    for (; biter.Valid(); biter.Next()) {
418
      handle = biter.value().handle;
419
      CachableEntry<Block> block;
420 421
      // TODO: Support counter batch update for partitioned index and
      // filter blocks
422 423
      s = table()->MaybeReadBlockAndLoadToCache(
          prefetch_buffer.get(), ro, handle, UncompressionDict::GetEmptyDict(),
424
          &block, BlockType::kIndex, /*get_context=*/nullptr, &lookup_context);
M
425

426 427 428
      assert(s.ok() || block.GetValue() == nullptr);
      if (s.ok() && block.GetValue() != nullptr) {
        if (block.IsCached()) {
429
          if (pin) {
430
            partition_map_[handle.offset()] = std::move(block);
431
          }
M
432 433 434
        }
      }
    }
M
435 436
  }

437
  size_t ApproximateMemoryUsage() const override {
438
    size_t usage = ApproximateIndexBlockMemoryUsage();
439 440 441 442 443 444 445
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size((void*)this);
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    // TODO(myabandeh): more accurate estimate of partition_map_ mem usage
    return usage;
M
446 447 448
  }

 private:
449 450
  PartitionIndexReader(const BlockBasedTable* t,
                       CachableEntry<Block>&& index_block)
451
      : IndexReaderCommon(t, std::move(index_block)) {}
452

453
  std::unordered_map<uint64_t, CachableEntry<Block>> partition_map_;
M
454 455
};

456 457 458
// Index that allows binary search lookup for the first key of each block.
// This class can be viewed as a thin wrapper for `Block` class which already
// supports binary search.
459
class BinarySearchIndexReader : public BlockBasedTable::IndexReaderCommon {
460 461 462
 public:
  // Read index from the file and create an intance for
  // `BinarySearchIndexReader`.
463 464
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
465
  static Status Create(const BlockBasedTable* table,
466
                       FilePrefetchBuffer* prefetch_buffer, bool use_cache,
467 468
                       bool prefetch, bool pin, IndexReader** index_reader,
                       BlockCacheLookupContext* lookup_context) {
469 470 471 472 473 474 475
    assert(table != nullptr);
    assert(table->get_rep());
    assert(!pin || prefetch);
    assert(index_reader != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
476 477 478
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(),
                         /*get_context=*/nullptr, lookup_context, &index_block);
479 480 481
      if (!s.ok()) {
        return s;
      }
482

483 484 485
      if (use_cache && !pin) {
        index_block.Reset();
      }
486 487
    }

488 489 490
    *index_reader = new BinarySearchIndexReader(table, std::move(index_block));

    return Status::OK();
491 492
  }

493
  InternalIteratorBase<IndexValue>* NewIterator(
494
      const ReadOptions& read_options, bool /* disable_prefix_seek */,
495 496
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
497
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
498
    CachableEntry<Block> index_block;
499 500
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
501 502 503 504 505 506
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

507
      return NewErrorInternalIterator<IndexValue>(s);
508 509
    }

M
510
    Statistics* kNullStats = nullptr;
511
    // We don't return pinned data from index blocks, so no need
512
    // to set `block_contents_pinned`.
513
    auto it = index_block.GetValue()->NewIndexIterator(
514
        internal_comparator(), internal_comparator()->user_comparator(), iter,
515 516
        kNullStats, true, index_has_first_key(), index_key_includes_seq(),
        index_value_is_full());
517

518 519 520 521 522
    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;
  }

  size_t ApproximateMemoryUsage() const override {
    size_t usage = ApproximateIndexBlockMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size((void*)this);
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
532 533
  }

534
 private:
535
  BinarySearchIndexReader(const BlockBasedTable* t,
536
                          CachableEntry<Block>&& index_block)
537
      : IndexReaderCommon(t, std::move(index_block)) {}
538 539 540 541
};

// Index that leverages an internal hash table to quicken the lookup for a given
// key.
542
class HashIndexReader : public BlockBasedTable::IndexReaderCommon {
543
 public:
544
  static Status Create(const BlockBasedTable* table,
545 546
                       FilePrefetchBuffer* prefetch_buffer,
                       InternalIterator* meta_index_iter, bool use_cache,
547 548
                       bool prefetch, bool pin, IndexReader** index_reader,
                       BlockCacheLookupContext* lookup_context) {
549 550 551 552
    assert(table != nullptr);
    assert(index_reader != nullptr);
    assert(!pin || prefetch);

553
    const BlockBasedTable::Rep* rep = table->get_rep();
554 555 556 557
    assert(rep != nullptr);

    CachableEntry<Block> index_block;
    if (prefetch || !use_cache) {
558 559 560
      const Status s =
          ReadIndexBlock(table, prefetch_buffer, ReadOptions(),
                         /*get_context=*/nullptr, lookup_context, &index_block);
561 562 563
      if (!s.ok()) {
        return s;
      }
564

565 566 567
      if (use_cache && !pin) {
        index_block.Reset();
      }
568 569
    }

570 571 572 573
    // Note, failure to create prefix hash index does not need to be a
    // hard error. We can still fall back to the original binary search index.
    // So, Create will succeed regardless, from this point on.

574
    auto new_index_reader = new HashIndexReader(table, std::move(index_block));
575 576
    *index_reader = new_index_reader;

K
577 578
    // Get prefixes block
    BlockHandle prefixes_handle;
579 580
    Status s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock,
                             &prefixes_handle);
K
581
    if (!s.ok()) {
582 583
      // TODO: log error
      return Status::OK();
K
584 585 586 587 588 589 590
    }

    // Get index metadata block
    BlockHandle prefixes_meta_handle;
    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock,
                      &prefixes_meta_handle);
    if (!s.ok()) {
591 592
      // TODO: log error
      return Status::OK();
K
593 594
    }

595 596 597 598 599
    RandomAccessFileReader* const file = rep->file.get();
    const Footer& footer = rep->footer;
    const ImmutableCFOptions& ioptions = rep->ioptions;
    const PersistentCacheOptions& cache_options = rep->persistent_cache_options;
    MemoryAllocator* const memory_allocator =
600
        GetMemoryAllocator(rep->table_options);
601

K
602 603
    // Read contents for the blocks
    BlockContents prefixes_contents;
S
604 605
    BlockFetcher prefixes_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_handle,
606
        &prefixes_contents, ioptions, true /*decompress*/,
607 608
        true /*maybe_compressed*/, BlockType::kHashIndexPrefixes,
        UncompressionDict::GetEmptyDict(), cache_options, memory_allocator);
S
609
    s = prefixes_block_fetcher.ReadBlockContents();
K
610 611 612 613
    if (!s.ok()) {
      return s;
    }
    BlockContents prefixes_meta_contents;
S
614 615
    BlockFetcher prefixes_meta_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_meta_handle,
616
        &prefixes_meta_contents, ioptions, true /*decompress*/,
617 618
        true /*maybe_compressed*/, BlockType::kHashIndexMetadata,
        UncompressionDict::GetEmptyDict(), cache_options, memory_allocator);
619
    s = prefixes_meta_block_fetcher.ReadBlockContents();
K
620
    if (!s.ok()) {
621 622
      // TODO: log error
      return Status::OK();
K
623 624
    }

625
    BlockPrefixIndex* prefix_index = nullptr;
626 627
    s = BlockPrefixIndex::Create(rep->internal_prefix_transform.get(),
                                 prefixes_contents.data,
628 629 630
                                 prefixes_meta_contents.data, &prefix_index);
    // TODO: log error
    if (s.ok()) {
M
631
      new_index_reader->prefix_index_.reset(prefix_index);
K
632 633
    }

634
    return Status::OK();
635 636
  }

637
  InternalIteratorBase<IndexValue>* NewIterator(
638
      const ReadOptions& read_options, bool disable_prefix_seek,
639 640
      IndexBlockIter* iter, GetContext* get_context,
      BlockCacheLookupContext* lookup_context) override {
641
    const bool no_io = (read_options.read_tier == kBlockCacheTier);
642
    CachableEntry<Block> index_block;
643 644
    const Status s =
        GetOrReadIndexBlock(no_io, get_context, lookup_context, &index_block);
645 646 647 648 649 650
    if (!s.ok()) {
      if (iter != nullptr) {
        iter->Invalidate(s);
        return iter;
      }

651
      return NewErrorInternalIterator<IndexValue>(s);
652 653
    }

M
654
    Statistics* kNullStats = nullptr;
655 656
    const bool total_order_seek =
        read_options.total_order_seek || disable_prefix_seek;
657
    // We don't return pinned data from index blocks, so no need
658
    // to set `block_contents_pinned`.
659
    auto it = index_block.GetValue()->NewIndexIterator(
660
        internal_comparator(), internal_comparator()->user_comparator(), iter,
661 662 663
        kNullStats, total_order_seek, index_has_first_key(),
        index_key_includes_seq(), index_value_is_full(),
        false /* block_contents_pinned */, prefix_index_.get());
664

665 666 667 668 669
    assert(it != nullptr);
    index_block.TransferTo(it);

    return it;
  }
670

671
  size_t ApproximateMemoryUsage() const override {
672
    size_t usage = ApproximateIndexBlockMemoryUsage();
673 674 675
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size((void*)this);
#else
M
676 677 678
    if (prefix_index_) {
      usage += prefix_index_->ApproximateMemoryUsage();
    }
679 680 681
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
682 683
  }

684
 private:
685
  HashIndexReader(const BlockBasedTable* t, CachableEntry<Block>&& index_block)
686
      : IndexReaderCommon(t, std::move(index_block)) {}
K
687

M
688
  std::unique_ptr<BlockPrefixIndex> prefix_index_;
689 690
};

691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862
void BlockBasedTable::UpdateCacheHitMetrics(BlockType block_type,
                                            GetContext* get_context,
                                            size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  PERF_COUNTER_ADD(block_cache_hit_count, 1);
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_hit;
    get_context->get_context_stats_.num_cache_bytes_read += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_HIT);
    RecordTick(statistics, BLOCK_CACHE_BYTES_READ, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_HIT);
      }
      break;

    case BlockType::kCompressionDictionary:
      // TODO: introduce perf counter for compression dictionary hit count
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_HIT);
      }
      break;

    case BlockType::kIndex:
      PERF_COUNTER_ADD(block_cache_index_hit_count, 1);

      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_HIT);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_hit;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_HIT);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheMissMetrics(BlockType block_type,
                                             GetContext* get_context) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce aggregate (not per-level) block cache miss count
  PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 1,
                            static_cast<uint32_t>(rep_->level));

  if (get_context) {
    ++get_context->get_context_stats_.num_cache_miss;
  } else {
    RecordTick(statistics, BLOCK_CACHE_MISS);
  }

  // TODO: introduce perf counters for misses per block type
  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_MISS);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_MISS);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_MISS);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_miss;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_MISS);
      }
      break;
  }
}

void BlockBasedTable::UpdateCacheInsertionMetrics(BlockType block_type,
                                                  GetContext* get_context,
                                                  size_t usage) const {
  Statistics* const statistics = rep_->ioptions.statistics;

  // TODO: introduce perf counters for block cache insertions
  if (get_context) {
    ++get_context->get_context_stats_.num_cache_add;
    get_context->get_context_stats_.num_cache_bytes_write += usage;
  } else {
    RecordTick(statistics, BLOCK_CACHE_ADD);
    RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usage);
  }

  switch (block_type) {
    case BlockType::kFilter:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_filter_add;
        get_context->get_context_stats_.num_cache_filter_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
        RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
      }
      break;

    case BlockType::kCompressionDictionary:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_compression_dict_add;
        get_context->get_context_stats_
            .num_cache_compression_dict_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD);
        RecordTick(statistics, BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT,
                   usage);
      }
      break;

    case BlockType::kIndex:
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_index_add;
        get_context->get_context_stats_.num_cache_index_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
        RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usage);
      }
      break;

    default:
      // TODO: introduce dedicated tickers/statistics/counters
      // for range tombstones
      if (get_context) {
        ++get_context->get_context_stats_.num_cache_data_add;
        get_context->get_context_stats_.num_cache_data_bytes_insert += usage;
      } else {
        RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
        RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, usage);
      }
      break;
  }
}

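// Looks up `key` in the block cache and records a hit or miss for
// `block_type`; returns the cache handle, or nullptr if the block is not
// cached.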
Cache::Handle* BlockBasedTable::GetEntryFromCache(
    Cache* block_cache, const Slice& key, BlockType block_type,
    GetContext* get_context) const {
  auto cache_handle = block_cache->Lookup(key, rep_->ioptions.statistics);

  if (cache_handle != nullptr) {
    UpdateCacheHitMetrics(block_type, get_context,
                          block_cache->GetUsage(cache_handle));
  } else {
    UpdateCacheMissMetrics(block_type, get_context);
  }

  return cache_handle;
}

// Helper function to setup the cache key's prefix for the Table.
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  rep->compressed_cache_key_prefix_size = 0;
  if (rep->table_options.block_cache != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file->file(),
                        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
  }
  if (rep->table_options.persistent_cache != nullptr) {
    GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(),
                        &rep->persistent_cache_key_prefix[0],
                        &rep->persistent_cache_key_prefix_size);
  }
  if (rep->table_options.block_cache_compressed != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
                        rep->file->file(), &rep->compressed_cache_key_prefix[0],
                        &rep->compressed_cache_key_prefix_size);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, RandomAccessFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (cc && *size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, WritableFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (*size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

namespace {
// Return true if table_properties has `user_prop_name` with a `true` value
// or if it doesn't contain this property (for backward compatibility).
bool IsFeatureSupported(const TableProperties& table_properties,
                        const std::string& user_prop_name, Logger* info_log) {
  auto& props = table_properties.user_collected_properties;
  auto pos = props.find(user_prop_name);
  // Older version doesn't have this value set. Skip this check.
  if (pos != props.end()) {
    if (pos->second == kPropFalse) {
      return false;
    } else if (pos->second != kPropTrue) {
      ROCKS_LOG_WARN(info_log, "Property %s has an invalid value %s",
                     user_prop_name.c_str(), pos->second.c_str());
    }
  }
  return true;
}

// Caller has to ensure seqno is not nullptr.
Status GetGlobalSequenceNumber(const TableProperties& table_properties,
                               SequenceNumber largest_seqno,
                               SequenceNumber* seqno) {
  const auto& props = table_properties.user_collected_properties;
  const auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
  const auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);

  *seqno = kDisableGlobalSequenceNumber;
  if (version_pos == props.end()) {
    if (seqno_pos != props.end()) {
      std::array<char, 200> msg_buf;
      // This is not an external sst file, global_seqno is not supported.
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "A non-external sst file has a global seqno property with value %s",
          seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  uint32_t version = DecodeFixed32(version_pos->second.c_str());
  if (version < 2) {
    if (seqno_pos != props.end() || version != 1) {
      std::array<char, 200> msg_buf;
      // This is a v1 external sst file, global_seqno is not supported.
      snprintf(msg_buf.data(), msg_buf.max_size(),
               "An external sst file with version %u has a global seqno "
               "property with value %s",
               version, seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  // Since we have a plan to deprecate global_seqno, we do not return failure
  // if seqno_pos == props.end(). We rely on version_pos to detect whether the
  // SST is external.
  SequenceNumber global_seqno(0);
  if (seqno_pos != props.end()) {
    global_seqno = DecodeFixed64(seqno_pos->second.c_str());
  }
  // SstTableReader opens the table reader with kMaxSequenceNumber as
  // largest_seqno to denote that it is unknown.
  if (largest_seqno < kMaxSequenceNumber) {
    if (global_seqno == 0) {
      global_seqno = largest_seqno;
    }
    if (global_seqno != largest_seqno) {
      std::array<char, 200> msg_buf;
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "An external sst file with version %u has a global seqno property "
          "with value %s, while largest seqno in the file is %llu",
          version, seqno_pos->second.c_str(),
          static_cast<unsigned long long>(largest_seqno));
      return Status::Corruption(msg_buf.data());
    }
  }
  *seqno = global_seqno;

  if (global_seqno > kMaxSequenceNumber) {
    std::array<char, 200> msg_buf;
    snprintf(msg_buf.data(), msg_buf.max_size(),
             "An external sst file with version %u has a global seqno property "
             "with value %llu, which is greater than kMaxSequenceNumber",
             version, static_cast<unsigned long long>(global_seqno));
    return Status::Corruption(msg_buf.data());
  }

  return Status::OK();
}
}  // namespace

Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
                                   size_t cache_key_prefix_size,
                                   const BlockHandle& handle, char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}

Status BlockBasedTable::Open(
    const ImmutableCFOptions& ioptions, const EnvOptions& env_options,
    const BlockBasedTableOptions& table_options,
    const InternalKeyComparator& internal_comparator,
    std::unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
    std::unique_ptr<TableReader>* table_reader,
    const SliceTransform* prefix_extractor,
    const bool prefetch_index_and_filter_in_cache, const bool skip_filters,
    const int level, const bool immortal_table,
    const SequenceNumber largest_seqno, TailPrefetchStats* tail_prefetch_stats,
    BlockCacheTracer* const block_cache_tracer) {
  table_reader->reset();

  Status s;
  Footer footer;
  std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;

  // prefetch both index and filters, down to all partitions
  const bool prefetch_all = prefetch_index_and_filter_in_cache || level == 0;
  const bool preload_all = !table_options.cache_index_and_filter_blocks;

  s = PrefetchTail(file.get(), file_size, tail_prefetch_stats, prefetch_all,
                   preload_all, &prefetch_buffer);

  // Read in the following order:
  //    1. Footer
  //    2. [metaindex block]
  //    3. [meta block: properties]
  //    4. [meta block: range deletion tombstone]
  //    5. [meta block: compression dictionary]
  //    6. [meta block: index]
  //    7. [meta block: filter]
  s = ReadFooterFromFile(file.get(), prefetch_buffer.get(), file_size, &footer,
                         kBlockBasedTableMagicNumber);
  if (!s.ok()) {
    return s;
  }
  if (!BlockBasedTableSupportedVersion(footer.version())) {
    return Status::Corruption(
        "Unknown Footer version. Maybe this file was created with newer "
        "version of RocksDB?");
  }

  // We've successfully read the footer. We are ready to serve requests.
  // Better not mutate rep_ after creation; e.g., the internal_prefix_transform
  // raw pointer will be used to create HashIndexReader, whose reset may
  // access a dangling pointer.
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
                                      internal_comparator, skip_filters, level,
                                      immortal_table);
  rep->file = std::move(file);
  rep->footer = footer;
  rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
  // We need to wrap data with internal_prefix_transform to make sure it can
  // handle prefix correctly.
  rep->internal_prefix_transform.reset(
      new InternalKeySliceTransform(prefix_extractor));
  SetupCacheKeyPrefix(rep);
  std::unique_ptr<BlockBasedTable> new_table(
      new BlockBasedTable(rep, block_cache_tracer));

  // page cache options
  rep->persistent_cache_options =
      PersistentCacheOptions(rep->table_options.persistent_cache,
                             std::string(rep->persistent_cache_key_prefix,
                                         rep->persistent_cache_key_prefix_size),
                             rep->ioptions.statistics);

  // Meta-blocks are not dictionary compressed. Explicitly set the dictionary
  // handle to null, otherwise it may be seen as uninitialized during the below
  // meta-block reads.
  rep->compression_dict_handle = BlockHandle::NullBlockHandle();

  // Read metaindex
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  s = new_table->ReadMetaBlock(prefetch_buffer.get(), &meta, &meta_iter);
  if (!s.ok()) {
    return s;
  }

  // Populates table_properties and some fields that depend on it,
  // such as index_type.
  s = new_table->ReadPropertiesBlock(prefetch_buffer.get(), meta_iter.get(),
                                     largest_seqno);
  if (!s.ok()) {
    return s;
  }
  s = new_table->ReadRangeDelBlock(prefetch_buffer.get(), meta_iter.get(),
                                   internal_comparator, &lookup_context);
  if (!s.ok()) {
    return s;
  }
  s = new_table->PrefetchIndexAndFilterBlocks(
      prefetch_buffer.get(), meta_iter.get(), new_table.get(), prefetch_all,
      table_options, level, &lookup_context);

  if (s.ok()) {
    // Update tail prefetch stats
    assert(prefetch_buffer.get() != nullptr);
    if (tail_prefetch_stats != nullptr) {
      assert(prefetch_buffer->min_offset_read() < file_size);
      tail_prefetch_stats->RecordEffectiveSize(
          static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
    }

    *table_reader = std::move(new_table);
  }

  return s;
}

Status BlockBasedTable::PrefetchTail(
    RandomAccessFileReader* file, uint64_t file_size,
    TailPrefetchStats* tail_prefetch_stats, const bool prefetch_all,
    const bool preload_all,
    std::unique_ptr<FilePrefetchBuffer>* prefetch_buffer) {
  size_t tail_prefetch_size = 0;
  if (tail_prefetch_stats != nullptr) {
    // Multiple threads may get a 0 (no history) when running in parallel,
    // but it will get cleared after the first of them finishes.
    tail_prefetch_size = tail_prefetch_stats->GetSuggestedPrefetchSize();
  }
  if (tail_prefetch_size == 0) {
    // Before reading the footer, readahead backwards to prefetch data. Do more
    // readahead if we're going to read index/filter.
    // TODO: This may incorrectly select small readahead in case partitioned
    // index/filter is enabled and top-level partition pinning is enabled.
    // That's because we need to issue readahead before we read the properties,
    // at which point we don't yet know the index type.
    tail_prefetch_size = prefetch_all || preload_all ? 512 * 1024 : 4 * 1024;
  }
  size_t prefetch_off;
  size_t prefetch_len;
  if (file_size < tail_prefetch_size) {
    prefetch_off = 0;
    prefetch_len = static_cast<size_t>(file_size);
  } else {
    prefetch_off = static_cast<size_t>(file_size - tail_prefetch_size);
    prefetch_len = tail_prefetch_size;
  }
  TEST_SYNC_POINT_CALLBACK("BlockBasedTable::Open::TailPrefetchLen",
                           &tail_prefetch_size);
  Status s;
  // TODO should not have this special logic in the future.
  if (!file->use_direct_io()) {
    prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, false, true));
    s = file->Prefetch(prefetch_off, prefetch_len);
  } else {
    prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, true, true));
    s = (*prefetch_buffer)->Prefetch(file, prefetch_off, prefetch_len);
  }
  return s;
}

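// Recomputes the checksum of `buf` (`len` bytes) with the given checksum type
// and compares it against `expected` as read from the file; returns a
// Corruption status on mismatch or for an unknown checksum type.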
Status VerifyChecksum(const ChecksumType type, const char* buf, size_t len,
                      uint32_t expected) {
  Status s;
  uint32_t actual = 0;
  switch (type) {
    case kNoChecksum:
      break;
    case kCRC32c:
      expected = crc32c::Unmask(expected);
      actual = crc32c::Value(buf, len);
      break;
    case kxxHash:
      actual = XXH32(buf, static_cast<int>(len), 0);
      break;
    case kxxHash64:
      actual = static_cast<uint32_t>(XXH64(buf, static_cast<int>(len), 0) &
                                     uint64_t{0xffffffff});
      break;
    default:
      s = Status::Corruption("unknown checksum type");
  }
  if (s.ok() && actual != expected) {
    s = Status::Corruption("properties block checksum mismatched");
  }
  return s;
}

Status BlockBasedTable::TryReadPropertiesWithGlobalSeqno(
    FilePrefetchBuffer* prefetch_buffer, const Slice& handle_value,
    TableProperties** table_properties) {
  assert(table_properties != nullptr);
  // If this is an external SST file ingested with write_global_seqno set to
  // true, then we expect the checksum mismatch because checksum was written
  // by SstFileWriter, but its global seqno in the properties block may have
  // been changed during ingestion. In this case, we read the properties
  // block, copy it to a memory buffer, change the global seqno to its
  // original value, i.e. 0, and verify the checksum again.
  BlockHandle props_block_handle;
  CacheAllocationPtr tmp_buf;
  Status s = ReadProperties(handle_value, rep_->file.get(), prefetch_buffer,
                            rep_->footer, rep_->ioptions, table_properties,
                            false /* verify_checksum */, &props_block_handle,
                            &tmp_buf, false /* compression_type_missing */,
                            nullptr /* memory_allocator */);
  if (s.ok() && tmp_buf) {
    const auto seqno_pos_iter =
        (*table_properties)
            ->properties_offsets.find(
                ExternalSstFilePropertyNames::kGlobalSeqno);
1236
    size_t block_size = static_cast<size_t>(props_block_handle.size());
1237 1238 1239 1240 1241 1242
    if (seqno_pos_iter != (*table_properties)->properties_offsets.end()) {
      uint64_t global_seqno_offset = seqno_pos_iter->second;
      EncodeFixed64(
          tmp_buf.get() + global_seqno_offset - props_block_handle.offset(), 0);
    }
    uint32_t value = DecodeFixed32(tmp_buf.get() + block_size + 1);
1243
    s = rocksdb::VerifyChecksum(rep_->footer.checksum(), tmp_buf.get(),
1244 1245 1246 1247 1248
                                block_size + 1, value);
  }
  return s;
}

Status BlockBasedTable::ReadPropertiesBlock(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    const SequenceNumber largest_seqno) {
  bool found_properties_block = true;
  Status s;
  s = SeekToPropertiesBlock(meta_iter, &found_properties_block);

  if (!s.ok()) {
    ROCKS_LOG_WARN(rep_->ioptions.info_log,
                   "Error when seeking to properties block from file: %s",
                   s.ToString().c_str());
  } else if (found_properties_block) {
    s = meta_iter->status();
    TableProperties* table_properties = nullptr;
    if (s.ok()) {
      s = ReadProperties(
          meta_iter->value(), rep_->file.get(), prefetch_buffer, rep_->footer,
          rep_->ioptions, &table_properties, true /* verify_checksum */,
          nullptr /* ret_block_handle */, nullptr /* ret_block_contents */,
          false /* compression_type_missing */, nullptr /* memory_allocator */);
    }

    if (s.IsCorruption()) {
      s = TryReadPropertiesWithGlobalSeqno(prefetch_buffer, meta_iter->value(),
                                           &table_properties);
    }
    std::unique_ptr<TableProperties> props_guard;
    if (table_properties != nullptr) {
      props_guard.reset(table_properties);
    }

    if (!s.ok()) {
      ROCKS_LOG_WARN(rep_->ioptions.info_log,
                     "Encountered error while reading data from properties "
                     "block %s",
                     s.ToString().c_str());
    } else {
      assert(table_properties != nullptr);
      rep_->table_properties.reset(props_guard.release());
      rep_->blocks_maybe_compressed =
          rep_->table_properties->compression_name !=
          CompressionTypeToString(kNoCompression);
      rep_->blocks_definitely_zstd_compressed =
          (rep_->table_properties->compression_name ==
               CompressionTypeToString(kZSTD) ||
           rep_->table_properties->compression_name ==
               CompressionTypeToString(kZSTDNotFinalCompression));
    }
  } else {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Cannot find Properties block from file.");
  }
#ifndef ROCKSDB_LITE
  if (rep_->table_properties) {
    ParseSliceTransform(rep_->table_properties->prefix_extractor_name,
                        &(rep_->table_prefix_extractor));
  }
#endif  // ROCKSDB_LITE

  // Read the table properties, if provided.
  if (rep_->table_properties) {
    rep_->whole_key_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kWholeKeyFiltering,
                           rep_->ioptions.info_log);
    rep_->prefix_filtering &=
        IsFeatureSupported(*(rep_->table_properties),
                           BlockBasedTablePropertyNames::kPrefixFiltering,
                           rep_->ioptions.info_log);

    rep_->index_key_includes_seq =
        rep_->table_properties->index_key_is_user_key == 0;
    rep_->index_value_is_full =
        rep_->table_properties->index_value_is_delta_encoded == 0;

    // Update index_type with the true type.
    // If table properties don't contain index type, we assume that the table
    // is in very old format and has kBinarySearch index type.
    auto& props = rep_->table_properties->user_collected_properties;
    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
    if (pos != props.end()) {
      rep_->index_type = static_cast<BlockBasedTableOptions::IndexType>(
          DecodeFixed32(pos->second.c_str()));
    }

    rep_->index_has_first_key =
        rep_->index_type == BlockBasedTableOptions::kBinarySearchWithFirstKey;
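    // kBinarySearchWithFirstKey means the index also stores each data block's
    // first key, which lets the iterator defer reading a data block on Seek()
    // until it is actually needed (see SeekImpl() below).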

    s = GetGlobalSequenceNumber(*(rep_->table_properties), largest_seqno,
                                &(rep_->global_seqno));
    if (!s.ok()) {
      ROCKS_LOG_ERROR(rep_->ioptions.info_log, "%s", s.ToString().c_str());
    }
  }
  return s;
}

Status BlockBasedTable::ReadRangeDelBlock(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    const InternalKeyComparator& internal_comparator,
    BlockCacheLookupContext* lookup_context) {
  Status s;
  bool found_range_del_block;
  BlockHandle range_del_handle;
  s = SeekToRangeDelBlock(meta_iter, &found_range_del_block, &range_del_handle);
  if (!s.ok()) {
    ROCKS_LOG_WARN(
        rep_->ioptions.info_log,
        "Error when seeking to range delete tombstones block from file: %s",
        s.ToString().c_str());
  } else if (found_range_del_block && !range_del_handle.IsNull()) {
    ReadOptions read_options;
    std::unique_ptr<InternalIterator> iter(NewDataBlockIterator<DataBlockIter>(
        read_options, range_del_handle,
        /*input_iter=*/nullptr, BlockType::kRangeDeletion,
        /*get_context=*/nullptr, lookup_context, Status(), prefetch_buffer));
    assert(iter != nullptr);
    s = iter->status();
    if (!s.ok()) {
      ROCKS_LOG_WARN(
          rep_->ioptions.info_log,
          "Encountered error while reading data from range del block %s",
          s.ToString().c_str());
    } else {
      rep_->fragmented_range_dels =
          std::make_shared<FragmentedRangeTombstoneList>(std::move(iter),
                                                         internal_comparator);
    }
  }
  return s;
}

Status BlockBasedTable::ReadCompressionDictBlock(
    FilePrefetchBuffer* prefetch_buffer,
    std::unique_ptr<const BlockContents>* compression_dict_block) const {
  assert(compression_dict_block != nullptr);
  Status s;
  if (!rep_->compression_dict_handle.IsNull()) {
    std::unique_ptr<BlockContents> compression_dict_cont{new BlockContents()};
    PersistentCacheOptions cache_options;
    ReadOptions read_options;
    read_options.verify_checksums = true;
    BlockFetcher compression_block_fetcher(
        rep_->file.get(), prefetch_buffer, rep_->footer, read_options,
        rep_->compression_dict_handle, compression_dict_cont.get(),
        rep_->ioptions, false /* decompress */, false /*maybe_compressed*/,
        BlockType::kCompressionDictionary, UncompressionDict::GetEmptyDict(),
        cache_options);
    s = compression_block_fetcher.ReadBlockContents();

    if (!s.ok()) {
      ROCKS_LOG_WARN(
          rep_->ioptions.info_log,
          "Encountered error while reading data from compression dictionary "
          "block %s",
          s.ToString().c_str());
    } else {
      *compression_dict_block = std::move(compression_dict_cont);
    }
  }
  return s;
}

Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
    FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
    BlockBasedTable* new_table, bool prefetch_all,
    const BlockBasedTableOptions& table_options, const int level,
    BlockCacheLookupContext* lookup_context) {
  Status s;

  // Find filter handle and filter type
  if (rep_->filter_policy) {
    for (auto filter_type :
         {Rep::FilterType::kFullFilter, Rep::FilterType::kPartitionedFilter,
          Rep::FilterType::kBlockFilter}) {
      std::string prefix;
      switch (filter_type) {
        case Rep::FilterType::kFullFilter:
          prefix = kFullFilterBlockPrefix;
          break;
        case Rep::FilterType::kPartitionedFilter:
          prefix = kPartitionedFilterBlockPrefix;
          break;
        case Rep::FilterType::kBlockFilter:
          prefix = kFilterBlockPrefix;
          break;
        default:
          assert(0);
      }
      std::string filter_block_key = prefix;
      filter_block_key.append(rep_->filter_policy->Name());
      if (FindMetaBlock(meta_iter, filter_block_key, &rep_->filter_handle)
              .ok()) {
        rep_->filter_type = filter_type;
        break;
      }
    }
  }
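  // Note: the filter meta-block is located above by looking up a meta-index
  // key of the form "<prefix><filter policy name>", e.g.
  // "fullfilter.rocksdb.BuiltinBloomFilter" for a full filter built with the
  // default Bloom policy (name shown for illustration; the actual suffix
  // comes from FilterPolicy::Name()).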

  {
    // Find compression dictionary handle
    bool found_compression_dict;
    s = SeekToCompressionDictBlock(meta_iter, &found_compression_dict,
                                   &rep_->compression_dict_handle);
  }

  BlockBasedTableOptions::IndexType index_type = rep_->index_type;

  const bool use_cache = table_options.cache_index_and_filter_blocks;

  // prefetch the first level of index
  const bool prefetch_index =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
  // prefetch the first level of filter
  const bool prefetch_filter =
      prefetch_all ||
      (table_options.pin_top_level_index_and_filter &&
       rep_->filter_type == Rep::FilterType::kPartitionedFilter);
  // Partition filters cannot be enabled without partition indexes
  assert(!prefetch_filter || prefetch_index);
  // pin both index and filters, down to all partitions
  const bool pin_all =
      rep_->table_options.pin_l0_filter_and_index_blocks_in_cache && level == 0;
  // pin the first level of index
  const bool pin_index =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
  // pin the first level of filter
  const bool pin_filter =
      pin_all || (table_options.pin_top_level_index_and_filter &&
                  rep_->filter_type == Rep::FilterType::kPartitionedFilter);

  IndexReader* index_reader = nullptr;
  if (s.ok()) {
    s = new_table->CreateIndexReader(prefetch_buffer, meta_iter, use_cache,
                                     prefetch_index, pin_index, &index_reader,
                                     lookup_context);
    if (s.ok()) {
      assert(index_reader != nullptr);
      rep_->index_reader.reset(index_reader);
      // The partitions of a partitioned index are always stored in the block
      // cache. They therefore follow the pin and prefetch configuration
      // regardless of the value of cache_index_and_filter_blocks.
      if (prefetch_all) {
        rep_->index_reader->CacheDependencies(pin_all);
      }
    } else {
      delete index_reader;
      index_reader = nullptr;
    }
  }

  // pre-fetching of blocks is turned on
  // Will use block cache for meta-blocks access
  // Always prefetch index and filter for level 0
  // TODO(ajkr): also prefetch compression dictionary block
  // TODO(ajkr): also pin compression dictionary block when
  // `pin_l0_filter_and_index_blocks_in_cache == true`.
  if (table_options.cache_index_and_filter_blocks) {
    assert(table_options.block_cache != nullptr);
    if (s.ok() && prefetch_filter) {
      // Hack: Call GetFilter() to implicitly add filter to the block_cache
      auto filter_entry =
          new_table->GetFilter(rep_->table_prefix_extractor.get(),
                               /*prefetch_buffer=*/nullptr, /*no_io=*/false,
                               /*get_context=*/nullptr, lookup_context);
      if (filter_entry.GetValue() != nullptr && prefetch_all) {
        filter_entry.GetValue()->CacheDependencies(
            pin_all, rep_->table_prefix_extractor.get());
      }
      // if pin_filter is true then save it in rep_->filter_entry; it will be
      // released in the destructor only, hence it will be pinned in the
      // cache while this reader is alive
      if (pin_filter) {
        rep_->filter_entry = std::move(filter_entry);
      }
    }
  } else {
    std::unique_ptr<const BlockContents> compression_dict_block;
    if (s.ok()) {
      // Set filter block
      if (rep_->filter_policy) {
        const bool is_a_filter_partition = true;
        auto filter = new_table->ReadFilter(
            prefetch_buffer, rep_->filter_handle, !is_a_filter_partition,
            rep_->table_prefix_extractor.get());
        rep_->filter.reset(filter);
        // Refer to the comment above about partitioned indexes always being
        // cached
        if (filter && prefetch_all) {
          filter->CacheDependencies(pin_all,
                                    rep_->table_prefix_extractor.get());
        }
      }
      s = ReadCompressionDictBlock(prefetch_buffer, &compression_dict_block);
    }
    if (s.ok() && !rep_->compression_dict_handle.IsNull()) {
      assert(compression_dict_block != nullptr);
      // TODO(ajkr): find a way to avoid the `compression_dict_block` data copy
      rep_->uncompression_dict.reset(new UncompressionDict(
          compression_dict_block->data.ToString(),
          rep_->blocks_definitely_zstd_compressed, rep_->ioptions.statistics));
    }
  }
  return s;
}

void BlockBasedTable::SetupForCompaction() {
  switch (rep_->ioptions.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->file()->Hint(RandomAccessFile::NORMAL);
      break;
    case Options::SEQUENTIAL:
      rep_->file->file()->Hint(RandomAccessFile::SEQUENTIAL);
      break;
    case Options::WILLNEED:
      rep_->file->file()->Hint(RandomAccessFile::WILLNEED);
      break;
    default:
      assert(false);
  }
}

std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
    const {
  return rep_->table_properties;
}

size_t BlockBasedTable::ApproximateMemoryUsage() const {
  size_t usage = 0;
  if (rep_->filter) {
    usage += rep_->filter->ApproximateMemoryUsage();
  }
  if (rep_->index_reader) {
    usage += rep_->index_reader->ApproximateMemoryUsage();
  }
  if (rep_->uncompression_dict) {
    usage += rep_->uncompression_dict->ApproximateMemoryUsage();
  }
  return usage;
}

// Load the meta-block from the file. On success, return the loaded meta block
// and its iterator.
Status BlockBasedTable::ReadMetaBlock(FilePrefetchBuffer* prefetch_buffer,
                                      std::unique_ptr<Block>* meta_block,
                                      std::unique_ptr<InternalIterator>* iter) {
  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
  // it is an empty block.
  std::unique_ptr<Block> meta;
  Status s = ReadBlockFromFile(
      rep_->file.get(), prefetch_buffer, rep_->footer, ReadOptions(),
      rep_->footer.metaindex_handle(), &meta, rep_->ioptions,
      true /* decompress */, true /*maybe_compressed*/, BlockType::kMetaIndex,
      UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options,
      kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */,
      GetMemoryAllocator(rep_->table_options));

  if (!s.ok()) {
    ROCKS_LOG_ERROR(rep_->ioptions.info_log,
                    "Encountered error while reading data from properties"
                    " block %s",
                    s.ToString().c_str());
    return s;
  }

  *meta_block = std::move(meta);
  // meta block uses bytewise comparator.
  iter->reset(meta_block->get()->NewDataIterator(BytewiseComparator(),
                                                 BytewiseComparator()));
  return Status::OK();
}

Status BlockBasedTable::GetDataBlockFromCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    const ReadOptions& read_options, CachableEntry<Block>* block,
    const UncompressionDict& uncompression_dict, BlockType block_type,
    GetContext* get_context) const {
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  assert(block);
  assert(block->IsEmpty());

  Status s;
  BlockContents* compressed_block = nullptr;
  Cache::Handle* block_cache_compressed_handle = nullptr;

  // Lookup uncompressed cache first
  if (block_cache != nullptr) {
    auto cache_handle = GetEntryFromCache(block_cache, block_cache_key,
                                          block_type, get_context);
    if (cache_handle != nullptr) {
      block->SetCachedValue(
          reinterpret_cast<Block*>(block_cache->Value(cache_handle)),
          block_cache, cache_handle);
      return s;
    }
  }

  // If not found, search from the compressed block cache.
  assert(block->IsEmpty());

  if (block_cache_compressed == nullptr) {
    return s;
  }

  assert(!compressed_block_cache_key.empty());
  block_cache_compressed_handle =
      block_cache_compressed->Lookup(compressed_block_cache_key);

  Statistics* statistics = rep_->ioptions.statistics;

  // if we found in the compressed cache, then uncompress and insert into
  // uncompressed cache
  if (block_cache_compressed_handle == nullptr) {
    RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
    return s;
  }

  // found compressed block
  RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
  compressed_block = reinterpret_cast<BlockContents*>(
      block_cache_compressed->Value(block_cache_compressed_handle));
  CompressionType compression_type = compressed_block->get_compression_type();
  assert(compression_type != kNoCompression);

  // Retrieve the uncompressed contents into a new buffer
  BlockContents contents;
  UncompressionContext context(compression_type);
  UncompressionInfo info(context, uncompression_dict, compression_type);
  s = UncompressBlockContents(
      info, compressed_block->data.data(), compressed_block->data.size(),
      &contents, rep_->table_options.format_version, rep_->ioptions,
      GetMemoryAllocator(rep_->table_options));

  // Insert uncompressed block into block cache
  if (s.ok()) {
    std::unique_ptr<Block> block_holder(
        new Block(std::move(contents), rep_->get_global_seqno(block_type),
                  read_amp_bytes_per_bit, statistics));  // uncompressed block

    if (block_cache != nullptr && block_holder->own_bytes() &&
        read_options.fill_cache) {
      size_t charge = block_holder->ApproximateMemoryUsage();
      Cache::Handle* cache_handle = nullptr;
      s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                              &DeleteCachedEntry<Block>, &cache_handle);
#ifndef NDEBUG
      block_cache->TEST_mark_as_data_block(block_cache_key, charge);
#endif  // NDEBUG
      if (s.ok()) {
        assert(cache_handle != nullptr);
        block->SetCachedValue(block_holder.release(), block_cache,
                              cache_handle);

        UpdateCacheInsertionMetrics(block_type, get_context, charge);
      } else {
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
      }
    } else {
      block->SetOwnedValue(block_holder.release());
    }
  }

  // Release hold on compressed cache entry
  block_cache_compressed->Release(block_cache_compressed_handle);
  return s;
}

Status BlockBasedTable::PutDataBlockToCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    CachableEntry<Block>* cached_block, BlockContents* raw_block_contents,
    CompressionType raw_block_comp_type,
    const UncompressionDict& uncompression_dict, SequenceNumber seq_no,
    MemoryAllocator* memory_allocator, BlockType block_type,
    GetContext* get_context) const {
  const ImmutableCFOptions& ioptions = rep_->ioptions;
  const uint32_t format_version = rep_->table_options.format_version;
  const size_t read_amp_bytes_per_bit =
      block_type == BlockType::kData
          ? rep_->table_options.read_amp_bytes_per_bit
          : 0;
  const Cache::Priority priority =
      rep_->table_options.cache_index_and_filter_blocks_with_high_priority &&
              (block_type == BlockType::kFilter ||
               block_type == BlockType::kCompressionDictionary ||
               block_type == BlockType::kIndex)
          ? Cache::Priority::HIGH
          : Cache::Priority::LOW;
  assert(cached_block);
  assert(cached_block->IsEmpty());
  assert(raw_block_comp_type == kNoCompression ||
         block_cache_compressed != nullptr);

  Status s;
  Statistics* statistics = ioptions.statistics;

  std::unique_ptr<Block> block_holder;
  if (raw_block_comp_type != kNoCompression) {
    // Retrieve the uncompressed contents into a new buffer
    BlockContents uncompressed_block_contents;
    UncompressionContext context(raw_block_comp_type);
    UncompressionInfo info(context, uncompression_dict, raw_block_comp_type);
    s = UncompressBlockContents(info, raw_block_contents->data.data(),
                                raw_block_contents->data.size(),
                                &uncompressed_block_contents, format_version,
                                ioptions, memory_allocator);
    if (!s.ok()) {
      return s;
    }

    block_holder.reset(new Block(std::move(uncompressed_block_contents), seq_no,
                                 read_amp_bytes_per_bit, statistics));
  } else {
    block_holder.reset(new Block(std::move(*raw_block_contents), seq_no,
                                 read_amp_bytes_per_bit, statistics));
  }

  // Insert compressed block into compressed block cache.
  // Release the hold on the compressed cache entry immediately.
  if (block_cache_compressed != nullptr &&
      raw_block_comp_type != kNoCompression && raw_block_contents != nullptr &&
      raw_block_contents->own_bytes()) {
#ifndef NDEBUG
    assert(raw_block_contents->is_raw_block);
#endif  // NDEBUG

    // We cannot directly put raw_block_contents because this could point to
    // an object in the stack.
    BlockContents* block_cont_for_comp_cache =
        new BlockContents(std::move(*raw_block_contents));
    s = block_cache_compressed->Insert(
        compressed_block_cache_key, block_cont_for_comp_cache,
        block_cont_for_comp_cache->ApproximateMemoryUsage(),
        &DeleteCachedEntry<BlockContents>);
    if (s.ok()) {
      // Prevent the code below from deleting this cached block.
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
    } else {
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
      delete block_cont_for_comp_cache;
    }
  }

  // insert into uncompressed block cache
  if (block_cache != nullptr && block_holder->own_bytes()) {
    size_t charge = block_holder->ApproximateMemoryUsage();
    Cache::Handle* cache_handle = nullptr;
    s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
                            &DeleteCachedEntry<Block>, &cache_handle, priority);
#ifndef NDEBUG
    block_cache->TEST_mark_as_data_block(block_cache_key, charge);
#endif  // NDEBUG
    if (s.ok()) {
      assert(cache_handle != nullptr);
      cached_block->SetCachedValue(block_holder.release(), block_cache,
                                   cache_handle);

      UpdateCacheInsertionMetrics(block_type, get_context, charge);
    } else {
      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
    }
  } else {
    cached_block->SetOwnedValue(block_holder.release());
  }

  return s;
}

FilterBlockReader* BlockBasedTable::ReadFilter(
    FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_handle,
    const bool is_a_filter_partition,
    const SliceTransform* prefix_extractor) const {
  auto& rep = rep_;
  // TODO: We might want to unify with ReadBlockFromFile() if we start
  // requiring checksum verification in Table::Open.
  if (rep->filter_type == Rep::FilterType::kNoFilter) {
    return nullptr;
  }
  BlockContents block;

  BlockFetcher block_fetcher(
      rep->file.get(), prefetch_buffer, rep->footer, ReadOptions(),
      filter_handle, &block, rep->ioptions, false /* decompress */,
      false /*maybe_compressed*/, BlockType::kFilter,
      UncompressionDict::GetEmptyDict(), rep->persistent_cache_options,
      GetMemoryAllocator(rep->table_options));
  Status s = block_fetcher.ReadBlockContents();

  if (!s.ok()) {
    // Error reading the block
    return nullptr;
  }

  assert(rep->filter_policy);

M
Maysam Yabandeh 已提交
1853 1854 1855 1856 1857 1858 1859 1860 1861
  auto filter_type = rep->filter_type;
  if (rep->filter_type == Rep::FilterType::kPartitionedFilter &&
      is_a_filter_partition) {
    filter_type = Rep::FilterType::kFullFilter;
  }

  switch (filter_type) {
    case Rep::FilterType::kPartitionedFilter: {
      return new PartitionedFilterBlockReader(
1862
          rep->prefix_filtering ? prefix_extractor : nullptr,
M
Maysam Yabandeh 已提交
1863
          rep->whole_key_filtering, std::move(block), nullptr,
1864
          rep->ioptions.statistics, rep->internal_comparator, this,
1865
          rep_->index_key_includes_seq, rep_->index_value_is_full);
M
Maysam Yabandeh 已提交
1866 1867 1868 1869
    }

    case Rep::FilterType::kBlockFilter:
      return new BlockBasedFilterBlockReader(
1870
          rep->prefix_filtering ? prefix_extractor : nullptr,
M
Maysam Yabandeh 已提交
1871 1872 1873 1874 1875 1876 1877
          rep->table_options, rep->whole_key_filtering, std::move(block),
          rep->ioptions.statistics);

    case Rep::FilterType::kFullFilter: {
      auto filter_bits_reader =
          rep->filter_policy->GetFilterBitsReader(block.data);
      assert(filter_bits_reader != nullptr);
I
Igor Canadi 已提交
1878
      return new FullFilterBlockReader(
1879
          rep->prefix_filtering ? prefix_extractor : nullptr,
1880 1881
          rep->whole_key_filtering, std::move(block), filter_bits_reader,
          rep->ioptions.statistics);
1882
    }
I
Igor Canadi 已提交
1883

M
Maysam Yabandeh 已提交
1884 1885 1886 1887 1888 1889
    default:
      // filter_type is either kNoFilter (exited the function at the first if),
      // or it must be covered in this switch block
      assert(false);
      return nullptr;
  }
S
Sanjay Ghemawat 已提交
1890 1891
}

1892
CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
1893
    const SliceTransform* prefix_extractor, FilePrefetchBuffer* prefetch_buffer,
1894 1895
    bool no_io, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
M
Maysam Yabandeh 已提交
1896 1897
  const BlockHandle& filter_blk_handle = rep_->filter_handle;
  const bool is_a_filter_partition = true;
1898
  return GetFilter(prefetch_buffer, filter_blk_handle, !is_a_filter_partition,
1899
                   no_io, get_context, lookup_context, prefix_extractor);
M
Maysam Yabandeh 已提交
1900 1901
}

1902
CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
1903
    FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_blk_handle,
1904
    const bool is_a_filter_partition, bool no_io, GetContext* get_context,
1905
    BlockCacheLookupContext* lookup_context,
1906
    const SliceTransform* prefix_extractor) const {
1907 1908 1909 1910
  // If cache_index_and_filter_blocks is false, filter should be pre-populated.
  // We will return rep_->filter anyway. rep_->filter can be nullptr if filter
  // read fails at Open() time. We don't want to reload again since it will
  // most probably fail again.
M
Maysam Yabandeh 已提交
1911 1912
  if (!is_a_filter_partition &&
      !rep_->table_options.cache_index_and_filter_blocks) {
1913 1914
    return {rep_->filter.get(), /*cache=*/nullptr, /*cache_handle=*/nullptr,
            /*own_value=*/false};
1915 1916
  }

1917 1918 1919
  Cache* block_cache = rep_->table_options.block_cache.get();
  if (rep_->filter_policy == nullptr /* do not use filter */ ||
      block_cache == nullptr /* no block cache at all */) {
1920
    return CachableEntry<FilterBlockReader>();
K
Kai Liu 已提交
1921 1922
  }

1923
  if (!is_a_filter_partition && rep_->filter_entry.IsCached()) {
1924
    return {rep_->filter_entry.GetValue(), /*cache=*/nullptr,
1925
            /*cache_handle=*/nullptr, /*own_value=*/false};
1926 1927 1928 1929
  }

  PERF_TIMER_GUARD(read_filter_block_nanos);

K
Kai Liu 已提交
1930 1931
  // Fetching from the cache
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
1932
  auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
M
Maysam Yabandeh 已提交
1933
                         filter_blk_handle, cache_key);
K
Kai Liu 已提交
1934

1935 1936
  Cache::Handle* cache_handle =
      GetEntryFromCache(block_cache, key, BlockType::kFilter, get_context);
K
Kai Liu 已提交
1937 1938

  FilterBlockReader* filter = nullptr;
1939 1940 1941
  size_t usage = 0;
  bool is_cache_hit = false;
  bool return_empty_reader = false;
K
Kai Liu 已提交
1942
  if (cache_handle != nullptr) {
1943 1944
    filter =
        reinterpret_cast<FilterBlockReader*>(block_cache->Value(cache_handle));
1945 1946
    usage = filter->ApproximateMemoryUsage();
    is_cache_hit = true;
K
Kai Liu 已提交
1947 1948
  } else if (no_io) {
    // Do not invoke any io.
1949
    return_empty_reader = true;
K
Kai Liu 已提交
1950
  } else {
1951 1952
    filter = ReadFilter(prefetch_buffer, filter_blk_handle,
                        is_a_filter_partition, prefix_extractor);
I
Igor Canadi 已提交
1953
    if (filter != nullptr) {
1954
      usage = filter->ApproximateMemoryUsage();
1955
      Status s = block_cache->Insert(
1956
          key, filter, usage, &DeleteCachedFilterEntry, &cache_handle,
1957 1958 1959
          rep_->table_options.cache_index_and_filter_blocks_with_high_priority
              ? Cache::Priority::HIGH
              : Cache::Priority::LOW);
1960
      if (s.ok()) {
1961
        UpdateCacheInsertionMetrics(BlockType::kFilter, get_context, usage);
1962
      } else {
1963
        RecordTick(rep_->ioptions.statistics, BLOCK_CACHE_ADD_FAILURES);
1964
        delete filter;
1965
        return_empty_reader = true;
1966
      }
K
Kai Liu 已提交
1967 1968 1969
    }
  }

1970 1971
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled() &&
      lookup_context) {
1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988
    // Avoid making copy of block_key and cf_name when constructing the access
    // record.
    BlockCacheTraceRecord access_record(
        rep_->ioptions.env->NowMicros(),
        /*block_key=*/"", TraceType::kBlockTraceFilterBlock,
        /*block_size=*/usage, rep_->cf_id_for_tracing(),
        /*cf_name=*/"", rep_->level_for_tracing(),
        rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
        /*no_insert=*/no_io);
    block_cache_tracer_->WriteBlockAccess(access_record, key,
                                          rep_->cf_name_for_tracing(),
                                          /*referenced_key=*/nullptr);
  }

  if (return_empty_reader) {
    return CachableEntry<FilterBlockReader>();
  }
  return {filter, cache_handle ? block_cache : nullptr, cache_handle,
          /*own_value=*/false};
}

CachableEntry<UncompressionDict> BlockBasedTable::GetUncompressionDict(
    FilePrefetchBuffer* prefetch_buffer, bool no_io, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  if (!rep_->table_options.cache_index_and_filter_blocks) {
    // block cache is either disabled or not used for meta-blocks. In either
    // case, BlockBasedTableReader is the owner of the uncompression dictionary.
    return {rep_->uncompression_dict.get(), nullptr /* cache */,
            nullptr /* cache_handle */, false /* own_value */};
  }
  if (rep_->compression_dict_handle.IsNull()) {
    return CachableEntry<UncompressionDict>();
  }
  char cache_key_buf[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  auto cache_key =
      GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                  rep_->compression_dict_handle, cache_key_buf);
  auto cache_handle =
      GetEntryFromCache(rep_->table_options.block_cache.get(), cache_key,
                        BlockType::kCompressionDictionary, get_context);
  UncompressionDict* dict = nullptr;
  bool is_cache_hit = false;
  size_t usage = 0;
  if (cache_handle != nullptr) {
    dict = reinterpret_cast<UncompressionDict*>(
        rep_->table_options.block_cache->Value(cache_handle));
    is_cache_hit = true;
    usage = dict->ApproximateMemoryUsage();
  } else if (no_io) {
    // Do not invoke any io.
  } else {
    std::unique_ptr<const BlockContents> compression_dict_block;
    Status s =
        ReadCompressionDictBlock(prefetch_buffer, &compression_dict_block);
    if (s.ok()) {
      assert(compression_dict_block != nullptr);
      // TODO(ajkr): find a way to avoid the `compression_dict_block` data copy
      std::unique_ptr<UncompressionDict> uncompression_dict(
          new UncompressionDict(compression_dict_block->data.ToString(),
                                rep_->blocks_definitely_zstd_compressed,
                                rep_->ioptions.statistics));
      usage = uncompression_dict->ApproximateMemoryUsage();
      s = rep_->table_options.block_cache->Insert(
          cache_key, uncompression_dict.get(), usage,
          &DeleteCachedUncompressionDictEntry, &cache_handle,
          rep_->table_options.cache_index_and_filter_blocks_with_high_priority
              ? Cache::Priority::HIGH
              : Cache::Priority::LOW);

      if (s.ok()) {
        UpdateCacheInsertionMetrics(BlockType::kCompressionDictionary,
                                    get_context, usage);
        dict = uncompression_dict.release();
      } else {
        RecordTick(rep_->ioptions.statistics, BLOCK_CACHE_ADD_FAILURES);
        assert(dict == nullptr);
        assert(cache_handle == nullptr);
      }
    }
  }
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled() &&
      lookup_context) {
    // Avoid making copy of block_key and cf_name when constructing the access
    // record.
    BlockCacheTraceRecord access_record(
        rep_->ioptions.env->NowMicros(),
        /*block_key=*/"", TraceType::kBlockTraceUncompressionDictBlock,
        /*block_size=*/usage, rep_->cf_id_for_tracing(),
        /*cf_name=*/"", rep_->level_for_tracing(),
        rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
        /*no_insert=*/no_io);
    block_cache_tracer_->WriteBlockAccess(access_record, cache_key,
                                          rep_->cf_name_for_tracing(),
                                          /*referenced_key=*/nullptr);
  }
  return {dict, cache_handle ? rep_->table_options.block_cache.get() : nullptr,
          cache_handle, false /* own_value */};
}

// disable_prefix_seek should be set to true when prefix_extractor found in SST
// differs from the one in mutable_cf_options and index type is HashBasedIndex
InternalIteratorBase<IndexValue>* BlockBasedTable::NewIndexIterator(
    const ReadOptions& read_options, bool disable_prefix_seek,
    IndexBlockIter* input_iter, GetContext* get_context,
    BlockCacheLookupContext* lookup_context) const {
  assert(rep_ != nullptr);
  assert(rep_->index_reader != nullptr);

  // We don't return pinned data from index blocks, so no need
  // to set `block_contents_pinned`.
  return rep_->index_reader->NewIterator(read_options, disable_prefix_seek,
                                         input_iter, get_context,
                                         lookup_context);
}

// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
// If input_iter is null, a new iterator is created and returned.
// If input_iter is not null, this iterator is updated and returned.
template <typename TBlockIter>
TBlockIter* BlockBasedTable::NewDataBlockIterator(
    const ReadOptions& ro, const BlockHandle& handle, TBlockIter* input_iter,
    BlockType block_type, GetContext* get_context,
    BlockCacheLookupContext* lookup_context, Status s,
    FilePrefetchBuffer* prefetch_buffer, bool for_compaction) const {
  PERF_TIMER_GUARD(new_table_block_iter_nanos);

  TBlockIter* iter = input_iter != nullptr ? input_iter : new TBlockIter;
  if (!s.ok()) {
    iter->Invalidate(s);
    return iter;
  }

  const bool no_io = (ro.read_tier == kBlockCacheTier);
  auto uncompression_dict_storage =
      GetUncompressionDict(prefetch_buffer, no_io, get_context, lookup_context);
  const UncompressionDict& uncompression_dict =
      uncompression_dict_storage.GetValue() == nullptr
          ? UncompressionDict::GetEmptyDict()
          : *uncompression_dict_storage.GetValue();

  CachableEntry<Block> block;
  s = RetrieveBlock(prefetch_buffer, ro, handle, uncompression_dict, &block,
                    block_type, get_context, lookup_context, for_compaction);

  if (!s.ok()) {
    assert(block.IsEmpty());
    iter->Invalidate(s);
    return iter;
  }

  assert(block.GetValue() != nullptr);

  // Block contents are pinned and it is still pinned after the iterator
  // is destroyed as long as cleanup functions are moved to another object,
  // when:
  // 1. block cache handle is set to be released in cleanup function, or
  // 2. it's pointing to immortal source. If own_bytes is true then we are
  //    not reading data from the original source, whether immortal or not.
  //    Otherwise, the block is pinned iff the source is immortal.
  const bool block_contents_pinned =
      block.IsCached() ||
      (!block.GetValue()->own_bytes() && rep_->immortal_table);
  iter = InitBlockIterator<TBlockIter>(rep_, block.GetValue(), iter,
                                       block_contents_pinned);

  if (!block.IsCached()) {
    if (!ro.fill_cache && rep_->cache_key_prefix_size != 0) {
      // insert a dummy record to block cache to track the memory usage
      Cache* const block_cache = rep_->table_options.block_cache.get();
      Cache::Handle* cache_handle = nullptr;
      // There are two other types of cache keys: 1) SST cache key added in
      // `MaybeReadBlockAndLoadToCache` 2) dummy cache key added in
      // `write_buffer_manager`. Use longer prefix (41 bytes) to differentiate
      // from SST cache key(31 bytes), and use non-zero prefix to
      // differentiate from `write_buffer_manager`
      const size_t kExtraCacheKeyPrefix = kMaxVarint64Length * 4 + 1;
      char cache_key[kExtraCacheKeyPrefix + kMaxVarint64Length];
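      // Layout note: with kMaxVarint64Length == 10, kExtraCacheKeyPrefix is
      // 41 bytes, which is how these dummy keys stay distinct from the
      // 31-byte SST cache keys mentioned above.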
      // Prefix: use rep_->cache_key_prefix padded by 0s
      memset(cache_key, 0, kExtraCacheKeyPrefix + kMaxVarint64Length);
      assert(rep_->cache_key_prefix_size != 0);
      assert(rep_->cache_key_prefix_size <= kExtraCacheKeyPrefix);
      memcpy(cache_key, rep_->cache_key_prefix, rep_->cache_key_prefix_size);
      char* end = EncodeVarint64(cache_key + kExtraCacheKeyPrefix,
                                 next_cache_key_id_++);
      assert(end - cache_key <=
             static_cast<int>(kExtraCacheKeyPrefix + kMaxVarint64Length));
      const Slice unique_key(cache_key, static_cast<size_t>(end - cache_key));
      s = block_cache->Insert(unique_key, nullptr,
                              block.GetValue()->ApproximateMemoryUsage(),
                              nullptr, &cache_handle);

      if (s.ok()) {
        assert(cache_handle != nullptr);
        iter->RegisterCleanup(&ForceReleaseCachedEntry, block_cache,
                              cache_handle);
      }
    }
  } else {
    iter->SetCacheHandle(block.GetCacheHandle());
  }

  block.TransferTo(iter);
  return iter;
}

template <>
DataBlockIter* BlockBasedTable::InitBlockIterator<DataBlockIter>(
    const Rep* rep, Block* block, DataBlockIter* input_iter,
    bool block_contents_pinned) {
  return block->NewDataIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      input_iter, rep->ioptions.statistics, block_contents_pinned);
}

template <>
IndexBlockIter* BlockBasedTable::InitBlockIterator<IndexBlockIter>(
    const Rep* rep, Block* block, IndexBlockIter* input_iter,
    bool block_contents_pinned) {
  return block->NewIndexIterator(
      &rep->internal_comparator, rep->internal_comparator.user_comparator(),
      input_iter, rep->ioptions.statistics, /* total_order_seek */ true,
      rep->index_has_first_key, rep->index_key_includes_seq,
      rep->index_value_is_full, block_contents_pinned);
}

Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context) const {
  assert(block_entry != nullptr);
  const bool no_io = (ro.read_tier == kBlockCacheTier);
  Cache* block_cache = rep_->table_options.block_cache.get();
  // No point in caching compressed blocks if the table never goes away.
  Cache* block_cache_compressed =
      rep_->immortal_table ? nullptr
                           : rep_->table_options.block_cache_compressed.get();

  // First, try to get the block from the cache
  //
  // If either block cache is enabled, we'll try to read from it.
  Status s;
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice key /* key to the block cache */;
  Slice ckey /* key to the compressed block cache */;
  bool is_cache_hit = false;
  bool no_insert = true;
  if (block_cache != nullptr || block_cache_compressed != nullptr) {
    // create key for block cache
    if (block_cache != nullptr) {
      key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                        handle, cache_key);
    }

    if (block_cache_compressed != nullptr) {
      ckey = GetCacheKey(rep_->compressed_cache_key_prefix,
                         rep_->compressed_cache_key_prefix_size, handle,
                         compressed_cache_key);
    }

    s = GetDataBlockFromCache(key, ckey, block_cache, block_cache_compressed,
                              ro, block_entry, uncompression_dict, block_type,
                              get_context);
    if (block_entry->GetValue()) {
      // TODO(haoyu): Differentiate cache hit on uncompressed block cache and
      // compressed block cache.
      is_cache_hit = true;
    }
    // Can't find the block from the cache. If I/O is allowed, read from the
    // file.
    if (block_entry->GetValue() == nullptr && !no_io && ro.fill_cache) {
      no_insert = false;
      Statistics* statistics = rep_->ioptions.statistics;
      bool do_decompress =
          block_cache_compressed == nullptr && rep_->blocks_maybe_compressed;
      CompressionType raw_block_comp_type;
      BlockContents raw_block_contents;
      {
        StopWatch sw(rep_->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
        BlockFetcher block_fetcher(
            rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle,
            &raw_block_contents, rep_->ioptions,
            do_decompress /* do uncompress */, rep_->blocks_maybe_compressed,
            block_type, uncompression_dict, rep_->persistent_cache_options,
            GetMemoryAllocator(rep_->table_options),
            GetMemoryAllocatorForCompressedBlock(rep_->table_options));
        s = block_fetcher.ReadBlockContents();
        raw_block_comp_type = block_fetcher.get_compression_type();
      }

      if (s.ok()) {
        SequenceNumber seq_no = rep_->get_global_seqno(block_type);
        // If filling cache is allowed and a cache is configured, try to put the
        // block to the cache.
        s = PutDataBlockToCache(key, ckey, block_cache, block_cache_compressed,
                                block_entry, &raw_block_contents,
                                raw_block_comp_type, uncompression_dict, seq_no,
                                GetMemoryAllocator(rep_->table_options),
                                block_type, get_context);
      }
    }
  }

  // Fill lookup_context.
  if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled() &&
      lookup_context) {
    size_t usage = 0;
    uint64_t nkeys = 0;
    if (block_entry->GetValue()) {
      // Approximate the number of keys in the block using restarts.
      nkeys = rep_->table_options.block_restart_interval *
              block_entry->GetValue()->NumRestarts();
      usage = block_entry->GetValue()->ApproximateMemoryUsage();
    }
    TraceType trace_block_type = TraceType::kTraceMax;
    switch (block_type) {
      case BlockType::kIndex:
        trace_block_type = TraceType::kBlockTraceIndexBlock;
        break;
      case BlockType::kData:
        trace_block_type = TraceType::kBlockTraceDataBlock;
        break;
      case BlockType::kRangeDeletion:
        trace_block_type = TraceType::kBlockTraceRangeDeletionBlock;
        break;
      default:
        // This cannot happen.
        assert(false);
        break;
    }
    if (BlockCacheTraceHelper::ShouldTraceReferencedKey(
            trace_block_type, lookup_context->caller)) {
      // Defer logging the access to Get() and MultiGet() to trace additional
      // information, e.g., the referenced key,
      // referenced_key_exist_in_block.

      // Make a copy of the block key here since it will be logged later.
      lookup_context->FillLookupContext(
          is_cache_hit, no_insert, trace_block_type,
          /*block_size=*/usage, /*block_key=*/key.ToString(), nkeys);
    } else {
      // Avoid making copy of block_key and cf_name when constructing the access
      // record.
      BlockCacheTraceRecord access_record(
          rep_->ioptions.env->NowMicros(),
          /*block_key=*/"", trace_block_type,
          /*block_size=*/usage, rep_->cf_id_for_tracing(),
          /*cf_name=*/"", rep_->level_for_tracing(),
          rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
          no_insert);
      block_cache_tracer_->WriteBlockAccess(access_record, key,
                                            rep_->cf_name_for_tracing(),
                                            /*referenced_key=*/nullptr);
    }
  }

  assert(s.ok() || block_entry->GetValue() == nullptr);
  return s;
}

Status BlockBasedTable::RetrieveBlock(
    FilePrefetchBuffer* prefetch_buffer, const ReadOptions& ro,
    const BlockHandle& handle, const UncompressionDict& uncompression_dict,
    CachableEntry<Block>* block_entry, BlockType block_type,
    GetContext* get_context, BlockCacheLookupContext* lookup_context,
    bool for_compaction) const {
  assert(block_entry);
  assert(block_entry->IsEmpty());

  Status s;
  if (rep_->table_options.cache_index_and_filter_blocks ||
      (block_type != BlockType::kFilter &&
       block_type != BlockType::kCompressionDictionary &&
       block_type != BlockType::kIndex)) {
    s = MaybeReadBlockAndLoadToCache(prefetch_buffer, ro, handle,
                                     uncompression_dict, block_entry,
                                     block_type, get_context, lookup_context);

    if (!s.ok()) {
      return s;
    }

    if (block_entry->GetValue() != nullptr) {
      assert(s.ok());
      return s;
    }
  }

  assert(block_entry->IsEmpty());

  const bool no_io = ro.read_tier == kBlockCacheTier;
  if (no_io) {
    return Status::Incomplete("no blocking io");
  }

  std::unique_ptr<Block> block;

  {
    StopWatch sw(rep_->ioptions.env, rep_->ioptions.statistics,
                 READ_BLOCK_GET_MICROS);
    s = ReadBlockFromFile(
        rep_->file.get(), prefetch_buffer, rep_->footer, ro, handle, &block,
        rep_->ioptions, rep_->blocks_maybe_compressed,
        rep_->blocks_maybe_compressed, block_type, uncompression_dict,
        rep_->persistent_cache_options, rep_->get_global_seqno(block_type),
        block_type == BlockType::kData
            ? rep_->table_options.read_amp_bytes_per_bit
            : 0,
        GetMemoryAllocator(rep_->table_options), for_compaction);
  }

  if (!s.ok()) {
    return s;
  }

  block_entry->SetOwnedValue(block.release());

  assert(s.ok());
  return s;
}

BlockBasedTable::PartitionedIndexIteratorState::PartitionedIndexIteratorState(
    const BlockBasedTable* table,
    std::unordered_map<uint64_t, CachableEntry<Block>>* block_map)
    : table_(table), block_map_(block_map) {}

InternalIteratorBase<IndexValue>*
BlockBasedTable::PartitionedIndexIteratorState::NewSecondaryIterator(
    const BlockHandle& handle) {
  // Return a block iterator on the index partition
  auto block = block_map_->find(handle.offset());
  // This is a possible scenario since block cache might not have had space
  // for the partition
  if (block != block_map_->end()) {
    const Rep* rep = table_->get_rep();
    assert(rep);

    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    return block->second.GetValue()->NewIndexIterator(
        &rep->internal_comparator, rep->internal_comparator.user_comparator(),
        nullptr, kNullStats, true, rep->index_has_first_key,
        rep->index_key_includes_seq, rep->index_value_is_full);
  }
  // Create an empty iterator
  return new IndexBlockIter();
}

// This will be broken if the user specifies an unusual implementation
// of Options.comparator, or if the user specifies an unusual
// definition of prefixes in BlockBasedTableOptions.filter_policy.
// In particular, we require the following three properties:
//
// 1) key.starts_with(prefix(key))
// 2) Compare(prefix(key), key) <= 0.
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
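// For example (illustrative only): a fixed-length prefix extractor that maps
// "abcdef" to "abc" satisfies all three properties, whereas a transform that
// hashed the key would violate (1) and (3).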
//
// Otherwise, this method guarantees no I/O will be incurred.
//
// REQUIRES: this method shouldn't be called while the DB lock is held.
bool BlockBasedTable::PrefixMayMatch(
    const Slice& internal_key, const ReadOptions& read_options,
    const SliceTransform* options_prefix_extractor,
    const bool need_upper_bound_check,
    BlockCacheLookupContext* lookup_context) const {
  if (!rep_->filter_policy) {
    return true;
  }

  const SliceTransform* prefix_extractor;

  if (rep_->table_prefix_extractor == nullptr) {
    if (need_upper_bound_check) {
      return true;
    }
    prefix_extractor = options_prefix_extractor;
  } else {
    prefix_extractor = rep_->table_prefix_extractor.get();
  }
  auto user_key = ExtractUserKey(internal_key);
  if (!prefix_extractor->InDomain(user_key)) {
    return true;
  }

  bool may_match = true;
  Status s;

  // First, try to check with the full filter
  auto filter_entry =
      GetFilter(prefix_extractor, /*prefetch_buffer=*/nullptr, /*no_io=*/false,
                /*get_context=*/nullptr, lookup_context);
  FilterBlockReader* filter = filter_entry.GetValue();
  bool filter_checked = true;
  if (filter != nullptr) {
    if (!filter->IsBlockBased()) {
      const Slice* const const_ikey_ptr = &internal_key;
      may_match = filter->RangeMayExist(
          read_options.iterate_upper_bound, user_key, prefix_extractor,
          rep_->internal_comparator.user_comparator(), const_ikey_ptr,
          &filter_checked, need_upper_bound_check, lookup_context);
    } else {
      // if prefix_extractor changed for block based filter, skip filter
      if (need_upper_bound_check) {
        return true;
      }
      auto prefix = prefix_extractor->Transform(user_key);
      InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
      auto internal_prefix = internal_key_prefix.Encode();

      // To prevent any io operation in this method, we set `read_tier` to make
      // sure we always read index or filter only when they have already been
      // loaded to memory.
      ReadOptions no_io_read_options;
      no_io_read_options.read_tier = kBlockCacheTier;

      // Then, try to find it within each block
      // we already know prefix_extractor and prefix_extractor_name must match
      // because `CheckPrefixMayMatch` first checks `check_filter_ == true`
      std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
          no_io_read_options,
          /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
          /*get_context=*/nullptr, lookup_context));
      iiter->Seek(internal_prefix);

      if (!iiter->Valid()) {
        // we're past end of file
        // if it's incomplete, it means that we avoided I/O
        // and we're not really sure that we're past the end
        // of the file
        may_match = iiter->status().IsIncomplete();
      } else if ((rep_->index_key_includes_seq ? ExtractUserKey(iiter->key())
                                               : iiter->key())
                     .starts_with(ExtractUserKey(internal_prefix))) {
        // we need to check for this subtle case because our only
        // guarantee is that "the key is a string >= last key in that data
        // block" according to the doc/table_format.txt spec.
        //
        // Suppose iiter->key() starts with the desired prefix; it is not
        // necessarily the case that the corresponding data block will
        // contain the prefix, since iiter->key() need not be in the
        // block.  However, the next data block may contain the prefix, so
        // we return true to play it safe.
        may_match = true;
      } else if (filter->IsBlockBased()) {
        // iiter->key() does NOT start with the desired prefix.  Because
        // Seek() finds the first key that is >= the seek target, this
        // means that iiter->key() > prefix.  Thus, any data blocks coming
        // after the data block corresponding to iiter->key() cannot
        // possibly contain the key.  Thus, the corresponding data block
        // is the only one that could potentially contain the prefix.
        BlockHandle handle = iiter->value().handle;
        may_match = filter->PrefixMayMatch(
            prefix, prefix_extractor, handle.offset(), /*no_io=*/false,
            /*const_key_ptr=*/nullptr, lookup_context);
      }
    }
  }

  if (filter_checked) {
    Statistics* statistics = rep_->ioptions.statistics;
    RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
    if (!may_match) {
      RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
    }
  }

  return may_match;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Seek(const Slice& target) {
  SeekImpl(&target);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToFirst() {
  SeekImpl(nullptr);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekImpl(
    const Slice* target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  if (target && !CheckPrefixMayMatch(*target)) {
    ResetDataIter();
    return;
  }

  bool need_seek_index = true;
  if (block_iter_points_to_real_block_ && block_iter_.Valid()) {
    // Reseek.
    prev_block_offset_ = index_iter_->value().handle.offset();

    if (target) {
      // We can avoid an index seek if:
      // 1. The new seek key is larger than the current key
      // 2. The new seek key is within the upper bound of the block
      // Since we don't necessarily know the internal key for either
      // the current key or the upper bound, we check user keys and
      // exclude the equality case. Considering internal keys can
      // improve for the boundary cases, but it would complicate the
      // code.
      if (user_comparator_.Compare(ExtractUserKey(*target),
                                   block_iter_.user_key()) > 0 &&
          user_comparator_.Compare(ExtractUserKey(*target),
                                   index_iter_->user_key()) < 0) {
        need_seek_index = false;
      }
    }
  }

  if (need_seek_index) {
    if (target) {
      index_iter_->Seek(*target);
    } else {
      index_iter_->SeekToFirst();
    }

2594 2595 2596 2597 2598
    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  IndexValue v = index_iter_->value();
  const bool same_block = block_iter_points_to_real_block_ &&
                          v.handle.offset() == prev_block_offset_;

  // TODO(kolmike): Remove the != kBlockCacheTier condition.
  if (!v.first_internal_key.empty() && !same_block &&
      (!target || icomp_.Compare(*target, v.first_internal_key) <= 0) &&
      read_options_.read_tier != kBlockCacheTier) {
    // Index contains the first key of the block, and it's >= target.
    // We can defer reading the block.
    is_at_first_key_from_index_ = true;
    ResetDataIter();
  } else {
    // Need to use the data block.
    if (!same_block) {
      InitDataBlock();
    }

    if (target) {
      block_iter_.Seek(*target);
    } else {
      block_iter_.SeekToFirst();
    }
    FindKeyForward();
  }

  CheckOutOfBound();

  if (target) {
    assert(
        !Valid() ||
        ((block_type_ == BlockType::kIndex &&
          !table_->get_rep()->index_key_includes_seq)
             ? (user_comparator_.Compare(ExtractUserKey(*target), key()) <= 0)
             : (icomp_.Compare(*target, key()) <= 0)));
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekForPrev(
    const Slice& target) {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  if (!CheckPrefixMayMatch(target)) {
    ResetDataIter();
    return;
  }

  SavePrevIndexValue();

  // Call Seek() rather than SeekForPrev() in the index block, because the
  // target data block will likely contain the position for `target`, the
  // same as Seek(), rather than the position before it.
  // For example, if we have three data blocks, each containing two keys:
  //   [2, 4]  [6, 8] [10, 12]
  //  (the keys in the index block would be [4, 8, 12])
  // and the user calls SeekForPrev(7), we need to go to the second block,
  // just like if they call Seek(7).
  // The only case where the block is different is when they seek to a
  // position on the boundary. For example, if they call SeekForPrev(5), we
  // should go to the first block, rather than the second. However, we don't
  // have the information to distinguish the two cases unless we read the
  // second block. In this case, we'll end up reading two blocks.
  index_iter_->Seek(target);

  if (!index_iter_->Valid()) {
    if (!index_iter_->status().ok()) {
      ResetDataIter();
      return;
    }

    index_iter_->SeekToLast();
    if (!index_iter_->Valid()) {
      ResetDataIter();
      return;
    }
  }

  InitDataBlock();

  block_iter_.SeekForPrev(target);

  FindKeyBackward();
  assert(!block_iter_.Valid() ||
         icomp_.Compare(target, block_iter_.key()) >= 0);
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToLast() {
  is_out_of_bound_ = false;
  is_at_first_key_from_index_ = false;
  SavePrevIndexValue();
  index_iter_->SeekToLast();
  if (!index_iter_->Valid()) {
    ResetDataIter();
    return;
  }
  InitDataBlock();
  block_iter_.SeekToLast();
  FindKeyBackward();
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Next() {
  if (is_at_first_key_from_index_ && !MaterializeCurrentBlock()) {
    return;
  }
  assert(block_iter_points_to_real_block_);
  block_iter_.Next();
  FindKeyForward();
  CheckOutOfBound();
}

template <class TBlockIter, typename TValue>
bool BlockBasedTableIterator<TBlockIter, TValue>::NextAndGetResult(
    Slice* ret_key) {
  Next();
  bool is_valid = Valid();
  if (is_valid) {
    *ret_key = key();
  }
  return is_valid;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::Prev() {
  if (is_at_first_key_from_index_) {
    is_at_first_key_from_index_ = false;

    index_iter_->Prev();
    if (!index_iter_->Valid()) {
      return;
    }

    InitDataBlock();
    block_iter_.SeekToLast();
  } else {
    assert(block_iter_points_to_real_block_);
    block_iter_.Prev();
  }

  FindKeyBackward();
}

// Found that 256 KB readahead size provides the best performance, based on
// experiments, for auto readahead. Experiment data is in PR #3282.
template <class TBlockIter, typename TValue>
const size_t
    BlockBasedTableIterator<TBlockIter, TValue>::kMaxAutoReadaheadSize =
        256 * 1024;

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::InitDataBlock() {
  BlockHandle data_block_handle = index_iter_->value().handle;
  if (!block_iter_points_to_real_block_ ||
      data_block_handle.offset() != prev_block_offset_ ||
      // if previous attempt of reading the block missed cache, try again
      block_iter_.status().IsIncomplete()) {
    if (block_iter_points_to_real_block_) {
      ResetDataIter();
    }
    auto* rep = table_->get_rep();

    // Prefetch additional data for range scans (iterators). Enabled only for
    // user reads.
    // Implicit auto readahead:
    //   Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
    // Explicit user requested readahead:
    //   Enabled from the very first IO when ReadOptions.readahead_size is set.
    if (lookup_context_.caller != TableReaderCaller::kCompaction) {
      if (read_options_.readahead_size == 0) {
        // Implicit auto readahead
        num_file_reads_++;
        if (num_file_reads_ > kMinNumFileReadsToStartAutoReadahead) {
          if (!rep->file->use_direct_io() &&
              (data_block_handle.offset() +
                   static_cast<size_t>(data_block_handle.size()) +
                   kBlockTrailerSize >
               readahead_limit_)) {
            // Buffered I/O
            // Discarding the return status of Prefetch calls intentionally, as
            // we can fallback to reading from disk if Prefetch fails.
            rep->file->Prefetch(data_block_handle.offset(), readahead_size_);
            readahead_limit_ = static_cast<size_t>(data_block_handle.offset() +
                                                   readahead_size_);
            // Keep exponentially increasing readahead size until
            // kMaxAutoReadaheadSize.
            readahead_size_ =
                std::min(kMaxAutoReadaheadSize, readahead_size_ * 2);
          } else if (rep->file->use_direct_io() && !prefetch_buffer_) {
            // Direct I/O
            // Let FilePrefetchBuffer take care of the readahead.
            prefetch_buffer_.reset(
                new FilePrefetchBuffer(rep->file.get(), kInitAutoReadaheadSize,
                                       kMaxAutoReadaheadSize));
          }
        }
      } else if (!prefetch_buffer_) {
        // Explicit user requested readahead
        // The actual condition is:
        // if (read_options_.readahead_size != 0 && !prefetch_buffer_)
        prefetch_buffer_.reset(new FilePrefetchBuffer(
            rep->file.get(), read_options_.readahead_size,
            read_options_.readahead_size));
      }
    } else if (!prefetch_buffer_) {
      prefetch_buffer_.reset(
          new FilePrefetchBuffer(rep->file.get(), compaction_readahead_size_,
                                 compaction_readahead_size_));
    }

    Status s;
    table_->NewDataBlockIterator<TBlockIter>(
        read_options_, data_block_handle, &block_iter_, block_type_,
        /*get_context=*/nullptr, &lookup_context_, s, prefetch_buffer_.get(),
        /*for_compaction=*/lookup_context_.caller ==
            TableReaderCaller::kCompaction);
    block_iter_points_to_real_block_ = true;
  }
}

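// Loads the data block whose read was deferred because the index already
// supplied its first key, then verifies that the block's actual first key
// matches the one recorded in the index entry.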
template <class TBlockIter, typename TValue>
bool BlockBasedTableIterator<TBlockIter, TValue>::MaterializeCurrentBlock() {
  assert(is_at_first_key_from_index_);
  assert(!block_iter_points_to_real_block_);
  assert(index_iter_->Valid());

  is_at_first_key_from_index_ = false;
  InitDataBlock();
  assert(block_iter_points_to_real_block_);
  block_iter_.SeekToFirst();

  if (!block_iter_.Valid() ||
      icomp_.Compare(block_iter_.key(),
                     index_iter_->value().first_internal_key) != 0) {
    // Uh oh.
    block_iter_.Invalidate(Status::Corruption(
        "first key in index doesn't match first key in block"));
    return false;
  }

  return true;
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyForward() {
  // This method's code is kept short to make it likely to be inlined.

  assert(!is_out_of_bound_);
  assert(block_iter_points_to_real_block_);

  if (!block_iter_.Valid()) {
    // This is the only call site of FindBlockForward(), but it's extracted into
    // a separate method to keep FindKeyForward() short and likely to be
    // inlined. When transitioning to a different block, we call
    // FindBlockForward(), which is much longer and is probably not inlined.
    FindBlockForward();
  } else {
    // This is the fast path that avoids a function call.
  }
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindBlockForward() {
  // TODO: the while loop inherits from two-level-iterator. We don't know
  // whether a data block can be empty; if it cannot, the loop could be
  // replaced by an "if".
  do {
    if (!block_iter_.status().ok()) {
      return;
    }
    // Whether the next data block is out of the upper bound, if there is one.
    bool next_block_is_out_of_bound = false;
    if (read_options_.iterate_upper_bound != nullptr &&
        block_iter_points_to_real_block_) {
      next_block_is_out_of_bound =
          (user_comparator_.Compare(*read_options_.iterate_upper_bound,
                                    index_iter_->user_key()) <= 0);
    }
    ResetDataIter();
    index_iter_->Next();
    if (next_block_is_out_of_bound) {
      // The next block is out of bound. No need to read it.
      TEST_SYNC_POINT_CALLBACK("BlockBasedTableIterator:out_of_bound", nullptr);
      // We need to make sure this is not the last data block before setting
      // is_out_of_bound_, since the index key for the last data block can be
      // larger than smallest key of the next file on the same level.
      if (index_iter_->Valid()) {
        is_out_of_bound_ = true;
      }
      return;
    }

    if (!index_iter_->Valid()) {
      return;
    }

    IndexValue v = index_iter_->value();

    // TODO(kolmike): Remove the != kBlockCacheTier condition.
    if (!v.first_internal_key.empty() &&
        read_options_.read_tier != kBlockCacheTier) {
      // Index contains the first key of the block. Defer reading the block.
      is_at_first_key_from_index_ = true;
      return;
    }

    InitDataBlock();
    block_iter_.SeekToFirst();
  } while (!block_iter_.Valid());
}

template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyBackward() {
  while (!block_iter_.Valid()) {
    if (!block_iter_.status().ok()) {
      return;
    }

    ResetDataIter();
    index_iter_->Prev();

    if (index_iter_->Valid()) {
      InitDataBlock();
      block_iter_.SeekToLast();
    } else {
      return;
    }
  }

  // We could have checked the lower bound here too, but we opt not to, for
  // code simplicity.
}

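// Marks the iterator as out of bound when the current user key is at or past
// ReadOptions::iterate_upper_bound.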
template <class TBlockIter, typename TValue>
void BlockBasedTableIterator<TBlockIter, TValue>::CheckOutOfBound() {
  if (read_options_.iterate_upper_bound != nullptr && Valid()) {
    is_out_of_bound_ = user_comparator_.Compare(
                           *read_options_.iterate_upper_bound, user_key()) <= 0;
  }
}

InternalIterator* BlockBasedTable::NewIterator(
    const ReadOptions& read_options, const SliceTransform* prefix_extractor,
    Arena* arena, bool skip_filters, TableReaderCaller caller,
    size_t compaction_readahead_size) {
  BlockCacheLookupContext lookup_context{caller};
  bool need_upper_bound_check =
      PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
  if (arena == nullptr) {
    return new BlockBasedTableIterator<DataBlockIter>(
        this, read_options, rep_->internal_comparator,
        NewIndexIterator(
            read_options,
            need_upper_bound_check &&
                rep_->index_type == BlockBasedTableOptions::kHashSearch,
            /*input_iter=*/nullptr, /*get_context=*/nullptr, &lookup_context),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, BlockType::kData,
        caller, compaction_readahead_size);
  } else {
    auto* mem =
        arena->AllocateAligned(sizeof(BlockBasedTableIterator<DataBlockIter>));
    return new (mem) BlockBasedTableIterator<DataBlockIter>(
        this, read_options, rep_->internal_comparator,
        NewIndexIterator(read_options, need_upper_bound_check,
                         /*input_iter=*/nullptr, /*get_context=*/nullptr,
                         &lookup_context),
        !skip_filters && !read_options.total_order_seek &&
            prefix_extractor != nullptr,
        need_upper_bound_check, prefix_extractor, BlockType::kData,
        caller, compaction_readahead_size);
  }
}

FragmentedRangeTombstoneIterator* BlockBasedTable::NewRangeTombstoneIterator(
    const ReadOptions& read_options) {
  if (rep_->fragmented_range_dels == nullptr) {
    return nullptr;
  }
  SequenceNumber snapshot = kMaxSequenceNumber;
  if (read_options.snapshot != nullptr) {
    snapshot = read_options.snapshot->GetSequenceNumber();
  }
  return new FragmentedRangeTombstoneIterator(
      rep_->fragmented_range_dels, rep_->internal_comparator, snapshot);
}

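// Probes the full (whole-key or prefix) Bloom filter for a single lookup key.
// Returns true when the key may exist; block-based filters are not handled
// here and always return true.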
bool BlockBasedTable::FullFilterKeyMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    const Slice& internal_key, const bool no_io,
    const SliceTransform* prefix_extractor,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return true;
  }
  Slice user_key = ExtractUserKey(internal_key);
  const Slice* const const_ikey_ptr = &internal_key;
  bool may_match = true;
  if (filter->whole_key_filtering()) {
    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    Slice user_key_without_ts = StripTimestampFromUserKey(user_key, ts_sz);
    may_match =
        filter->KeyMayMatch(user_key_without_ts, prefix_extractor, kNotValid,
                            no_io, const_ikey_ptr, lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0 &&
             prefix_extractor->InDomain(user_key) &&
             !filter->PrefixMayMatch(prefix_extractor->Transform(user_key),
                                     prefix_extractor, kNotValid, false,
                                     const_ikey_ptr, lookup_context)) {
    may_match = false;
  }
  if (may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, rep_->level);
  }
  return may_match;
}

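// MultiGet counterpart of FullFilterKeyMayMatch: probes the full filter for
// every key in the range so that keys which definitely do not exist are
// dropped from the range before any data block is read.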
void BlockBasedTable::FullFilterKeysMayMatch(
    const ReadOptions& read_options, FilterBlockReader* filter,
    MultiGetRange* range, const bool no_io,
    const SliceTransform* prefix_extractor,
    BlockCacheLookupContext* lookup_context) const {
  if (filter == nullptr || filter->IsBlockBased()) {
    return;
  }
  if (filter->whole_key_filtering()) {
    filter->KeysMayMatch(range, prefix_extractor, kNotValid, no_io,
                         lookup_context);
  } else if (!read_options.total_order_seek && prefix_extractor &&
             rep_->table_properties->prefix_extractor_name.compare(
                 prefix_extractor->Name()) == 0) {
    for (auto iter = range->begin(); iter != range->end(); ++iter) {
      Slice user_key = iter->lkey->user_key();

      if (!prefix_extractor->InDomain(user_key)) {
        range->SkipKey(iter);
      }
    }
    filter->PrefixesMayMatch(range, prefix_extractor, kNotValid, false,
                             lookup_context);
  }
}

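// Point lookup. The full filter is consulted first; if the key may exist, the
// index is walked and each candidate data block is searched until the key is
// resolved or proven absent.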
Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
                            GetContext* get_context,
                            const SliceTransform* prefix_extractor,
                            bool skip_filters) {
  assert(key.size() >= 8);  // key must be internal key
  Status s;
  const bool no_io = read_options.read_tier == kBlockCacheTier;
  CachableEntry<FilterBlockReader> filter_entry;
  bool may_match;
  FilterBlockReader* filter = nullptr;
  BlockCacheLookupContext lookup_context{TableReaderCaller::kUserGet};
  {
    if (!skip_filters) {
      filter_entry = GetFilter(prefix_extractor, /*prefetch_buffer=*/nullptr,
                               read_options.read_tier == kBlockCacheTier,
                               get_context, &lookup_context);
    }
    filter = filter_entry.GetValue();

    // First check the full filter
    // If the full filter is not useful, then go into each block
    may_match = FullFilterKeyMayMatch(read_options, filter, key, no_io,
                                      prefix_extractor, &lookup_context);
  }
  if (!may_match) {
    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
    PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
  } else {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    size_t ts_sz =
        rep_->internal_comparator.user_comparator()->timestamp_size();
    bool matched = false;  // if such user key matched a key in SST
    bool done = false;
    for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
      IndexValue v = iiter->value();

      bool not_exist_in_filter =
          filter != nullptr && filter->IsBlockBased() == true &&
          !filter->KeyMayMatch(ExtractUserKeyAndStripTimestamp(key, ts_sz),
                               prefix_extractor, v.handle.offset(), no_io,
                               /*const_ikey_ptr=*/nullptr, &lookup_context);

      if (not_exist_in_filter) {
        // Not found
        // TODO: think about interaction with Merge. If a user key cannot
        // cross one data block, we should be fine.
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
        break;
      }

      if (!v.first_internal_key.empty() && !skip_filters &&
          UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                  .Compare(ExtractUserKey(key),
                           ExtractUserKey(v.first_internal_key)) < 0) {
        // The requested key falls between highest key in previous block and
        // lowest key in current block.
        break;
      }

      BlockCacheLookupContext lookup_data_block_context{
          TableReaderCaller::kUserGet};
      bool does_referenced_key_exist = false;
      DataBlockIter biter;
      uint64_t referenced_data_size = 0;
      NewDataBlockIterator<DataBlockIter>(
          read_options, v.handle, &biter, BlockType::kData,
          get_context, &lookup_data_block_context,
          /*s=*/Status(), /*prefetch_buffer*/ nullptr);

      if (no_io && biter.status().IsIncomplete()) {
        // couldn't get block from block_cache
        // Update Saver.state to Found because we are only looking for
        // whether we can guarantee the key is not there when "no_io" is set
        get_context->MarkKeyMayExist();
        break;
      }
      if (!biter.status().ok()) {
        s = biter.status();
        break;
      }

      bool may_exist = biter.SeekForGet(key);
      // If user-specified timestamp is supported, we cannot end the search
      // just because hash index lookup indicates the key+ts does not exist.
      if (!may_exist && ts_sz == 0) {
        // HashSeek cannot find the key in this block, and the iter is not at
        // the end of the block, i.e. the key cannot be in the following
        // blocks either. In this case, the seek_key cannot be found, so we
        // break from the top level for-loop.
        done = true;
      } else {
        // Call the *saver function on each entry/block until it returns false
        for (; biter.Valid(); biter.Next()) {
          ParsedInternalKey parsed_key;
          if (!ParseInternalKey(biter.key(), &parsed_key)) {
            s = Status::Corruption(Slice());
          }

          if (!get_context->SaveValue(
                  parsed_key, biter.value(), &matched,
                  biter.IsValuePinned() ? &biter : nullptr)) {
            does_referenced_key_exist = true;
            referenced_data_size = biter.key().size() + biter.value().size();
            done = true;
            break;
3168 3169
          }
        }
        s = biter.status();
      }
      // Write the block cache access record.
      if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
        // Avoid making copy of block_key, cf_name, and referenced_key when
        // constructing the access record.
        BlockCacheTraceRecord access_record(
            rep_->ioptions.env->NowMicros(),
            /*block_key=*/"", lookup_data_block_context.block_type,
            lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
            /*cf_name=*/"", rep_->level_for_tracing(),
            rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
            lookup_data_block_context.is_cache_hit,
            lookup_data_block_context.no_insert,
            /*referenced_key=*/"", referenced_data_size,
            lookup_data_block_context.num_keys_in_block,
            does_referenced_key_exist);
        block_cache_tracer_->WriteBlockAccess(
            access_record, lookup_data_block_context.block_key,
            rep_->cf_name_for_tracing(), key);
      }

      if (done) {
        // Avoid the extra Next which is expensive in two-level indexes
        break;
      }
    }
    if (matched && filter != nullptr && !filter->IsBlockBased()) {
      RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
      PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                rep_->level);
    }
    if (s.ok()) {
      s = iiter->status();
    }
  }

  return s;
}

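// Batched point lookup. All keys in the range share one index iterator, and a
// data block is reused for consecutive keys that map to the same block handle.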
using MultiGetRange = MultiGetContext::Range;
void BlockBasedTable::MultiGet(const ReadOptions& read_options,
                               const MultiGetRange* mget_range,
                               const SliceTransform* prefix_extractor,
                               bool skip_filters) {
  BlockCacheLookupContext lookup_context{TableReaderCaller::kUserMultiGet};
  const bool no_io = read_options.read_tier == kBlockCacheTier;
  CachableEntry<FilterBlockReader> filter_entry;
  FilterBlockReader* filter = nullptr;
  MultiGetRange sst_file_range(*mget_range, mget_range->begin(),
                               mget_range->end());
  {
    if (!skip_filters) {
      // TODO: Figure out where the stats should go
      filter_entry = GetFilter(prefix_extractor, /*prefetch_buffer=*/nullptr,
                               read_options.read_tier == kBlockCacheTier,
                               /*get_context=*/nullptr, &lookup_context);
    }
    filter = filter_entry.GetValue();

    // First check the full filter
    // If the full filter is not useful, then go into each block
    FullFilterKeysMayMatch(read_options, filter, &sst_file_range, no_io,
                           prefix_extractor, &lookup_context);
  }
  if (skip_filters || !sst_file_range.empty()) {
    IndexBlockIter iiter_on_stack;
    // if prefix_extractor found in block differs from options, disable
    // BlockPrefixIndex. Only do this check when index_type is kHashSearch.
    bool need_upper_bound_check = false;
    if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
      need_upper_bound_check = PrefixExtractorChanged(
          rep_->table_properties.get(), prefix_extractor);
    }
    auto iiter =
        NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
                         sst_file_range.begin()->get_context, &lookup_context);
    std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
    if (iiter != &iiter_on_stack) {
      iiter_unique_ptr.reset(iiter);
    }

    DataBlockIter biter;
    uint64_t offset = std::numeric_limits<uint64_t>::max();
    for (auto miter = sst_file_range.begin(); miter != sst_file_range.end();
         ++miter) {
      Status s;
      GetContext* get_context = miter->get_context;
      const Slice& key = miter->ikey;
      bool matched = false;  // if such user key matched a key in SST
      bool done = false;
      for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
        IndexValue v = iiter->value();
        if (!v.first_internal_key.empty() && !skip_filters &&
            UserComparatorWrapper(rep_->internal_comparator.user_comparator())
                    .Compare(ExtractUserKey(key),
                             ExtractUserKey(v.first_internal_key)) < 0) {
          // The requested key falls between highest key in previous block and
          // lowest key in current block.
          break;
        }

        bool reusing_block = true;
        uint64_t referenced_data_size = 0;
        bool does_referenced_key_exist = false;
        BlockCacheLookupContext lookup_data_block_context(
            TableReaderCaller::kUserMultiGet);
        if (iiter->value().handle.offset() != offset) {
          offset = iiter->value().handle.offset();
          biter.Invalidate(Status::OK());
          NewDataBlockIterator<DataBlockIter>(
              read_options, v.handle, &biter, BlockType::kData,
              get_context, &lookup_data_block_context, Status(), nullptr);
          reusing_block = false;
        }

        if (read_options.read_tier == kBlockCacheTier &&
            biter.status().IsIncomplete()) {
          // couldn't get block from block_cache
          // Update Saver.state to Found because we are only looking for
          // whether we can guarantee the key is not there when "no_io" is set
          get_context->MarkKeyMayExist();
          break;
        }
        if (!biter.status().ok()) {
          s = biter.status();
          break;
        }

        bool may_exist = biter.SeekForGet(key);
        if (!may_exist) {
          // HashSeek cannot find the key in this block, and the iter is not
          // at the end of the block, i.e. the key cannot be in the following
          // blocks either. In this case, the seek_key cannot be found, so we
          // break from the top level for-loop.
          done = true;
        } else {
          // Call the *saver function on each entry/block until it returns false
          for (; biter.Valid(); biter.Next()) {
            ParsedInternalKey parsed_key;
            Cleanable dummy;
            Cleanable* value_pinner = nullptr;

            if (!ParseInternalKey(biter.key(), &parsed_key)) {
              s = Status::Corruption(Slice());
            }
            if (biter.IsValuePinned()) {
              if (reusing_block) {
                Cache* block_cache = rep_->table_options.block_cache.get();
                assert(biter.cache_handle() != nullptr);
                block_cache->Ref(biter.cache_handle());
                dummy.RegisterCleanup(&ReleaseCachedEntry, block_cache,
                                      biter.cache_handle());
                value_pinner = &dummy;
              } else {
                value_pinner = &biter;
              }
            }

            if (!get_context->SaveValue(parsed_key, biter.value(), &matched,
                                        value_pinner)) {
              does_referenced_key_exist = true;
              referenced_data_size = biter.key().size() + biter.value().size();
              done = true;
              break;
            }
          }
          s = biter.status();
        }
        // Write the block cache access.
        if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
          // Avoid making copy of block_key, cf_name, and referenced_key when
          // constructing the access record.
          BlockCacheTraceRecord access_record(
              rep_->ioptions.env->NowMicros(),
              /*block_key=*/"", lookup_data_block_context.block_type,
              lookup_data_block_context.block_size, rep_->cf_id_for_tracing(),
              /*cf_name=*/"", rep_->level_for_tracing(),
              rep_->sst_number_for_tracing(), lookup_data_block_context.caller,
              lookup_data_block_context.is_cache_hit,
              lookup_data_block_context.no_insert,
              /*referenced_key=*/"", referenced_data_size,
              lookup_data_block_context.num_keys_in_block,
              does_referenced_key_exist);
          block_cache_tracer_->WriteBlockAccess(
              access_record, lookup_data_block_context.block_key,
              rep_->cf_name_for_tracing(), key);
        }
        if (done) {
          // Avoid the extra Next which is expensive in two-level indexes
          break;
        }
      }
      if (matched && filter != nullptr && !filter->IsBlockBased()) {
        RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
        PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
                                  rep_->level);
      }
      if (s.ok()) {
        s = iiter->status();
      }
      *(miter->s) = s;
    }
  }
}

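// Warms the cache for the key range [begin, end] by walking the index and
// loading every data block that may contain keys in that range.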
Status BlockBasedTable::Prefetch(const Slice* const begin,
                                 const Slice* const end) {
  auto& comparator = rep_->internal_comparator;
  UserComparatorWrapper user_comparator(comparator.user_comparator());
  // pre-condition
  if (begin && end && comparator.Compare(*begin, *end) > 0) {
    return Status::InvalidArgument(*begin, *end);
  }
  BlockCacheLookupContext lookup_context{TableReaderCaller::kPrefetch};
  IndexBlockIter iiter_on_stack;
  auto iiter = NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                                &iiter_on_stack, /*get_context=*/nullptr,
                                &lookup_context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }

  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }

  // indicates if we are on the last page that needs to be pre-fetched
  bool prefetching_boundary_page = false;

  for (begin ? iiter->Seek(*begin) : iiter->SeekToFirst(); iiter->Valid();
       iiter->Next()) {
    BlockHandle block_handle = iiter->value().handle;
    const bool is_user_key = !rep_->index_key_includes_seq;
    if (end &&
        ((!is_user_key && comparator.Compare(iiter->key(), *end) >= 0) ||
         (is_user_key &&
          user_comparator.Compare(iiter->key(), ExtractUserKey(*end)) >= 0))) {
      if (prefetching_boundary_page) {
        break;
      }

      // The index entry represents the last key in the data block.
      // We should load this page into memory as well, but no more
      prefetching_boundary_page = true;
    }

    // Load the block specified by the block_handle into the block cache
    DataBlockIter biter;

    NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), block_handle, &biter, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, &lookup_context, Status(),
        /*prefetch_buffer=*/nullptr);

    if (!biter.status().ok()) {
      // there was an unexpected error while pre-fetching
      return biter.status();
    }
  }

  return Status::OK();
}

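// Verifies block checksums for the whole file: first the meta blocks reached
// through the metaindex, then every data block referenced by the index.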
Status BlockBasedTable::VerifyChecksum(TableReaderCaller caller) {
  Status s;
  // Check Meta blocks
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  s = ReadMetaBlock(nullptr /* prefetch buffer */, &meta, &meta_iter);
  if (s.ok()) {
    s = VerifyChecksumInMetaBlocks(meta_iter.get());
    if (!s.ok()) {
      return s;
    }
  } else {
    return s;
  }
  // Check Data blocks
  IndexBlockIter iiter_on_stack;
  BlockCacheLookupContext context{caller};
  InternalIteratorBase<IndexValue>* iiter = NewIndexIterator(
      ReadOptions(), /*need_upper_bound_check=*/false, &iiter_on_stack,
      /*get_context=*/nullptr, &context);
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter_unique_ptr;
  if (iiter != &iiter_on_stack) {
    iiter_unique_ptr = std::unique_ptr<InternalIteratorBase<IndexValue>>(iiter);
  }
  if (!iiter->status().ok()) {
    // error opening index iterator
    return iiter->status();
  }
  s = VerifyChecksumInBlocks(iiter);
  return s;
}

Status BlockBasedTable::VerifyChecksumInBlocks(
    InternalIteratorBase<IndexValue>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle = index_iter->value().handle;
    BlockContents contents;
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/, BlockType::kData,
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

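// Maps a metaindex entry name to the BlockType used when fetching that meta
// block, e.g. when verifying meta block checksums.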
BlockType BlockBasedTable::GetBlockTypeForMetaBlockByName(
    const Slice& meta_block_name) {
  if (meta_block_name.starts_with(kFilterBlockPrefix) ||
      meta_block_name.starts_with(kFullFilterBlockPrefix) ||
      meta_block_name.starts_with(kPartitionedFilterBlockPrefix)) {
    return BlockType::kFilter;
  }

  if (meta_block_name == kPropertiesBlock) {
    return BlockType::kProperties;
  }

  if (meta_block_name == kCompressionDictBlock) {
    return BlockType::kCompressionDictionary;
  }

  if (meta_block_name == kRangeDelBlock) {
    return BlockType::kRangeDeletion;
  }

  if (meta_block_name == kHashIndexPrefixesBlock) {
    return BlockType::kHashIndexPrefixes;
  }

  if (meta_block_name == kHashIndexPrefixesMetadataBlock) {
    return BlockType::kHashIndexMetadata;
  }

  assert(false);
  return BlockType::kInvalid;
}

Status BlockBasedTable::VerifyChecksumInMetaBlocks(
    InternalIteratorBase<Slice>* index_iter) {
  Status s;
  for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
    s = index_iter->status();
    if (!s.ok()) {
      break;
    }
    BlockHandle handle;
    Slice input = index_iter->value();
    s = handle.DecodeFrom(&input);
    BlockContents contents;
    const Slice meta_block_name = index_iter->key();
    BlockFetcher block_fetcher(
        rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
        ReadOptions(), handle, &contents, rep_->ioptions,
        false /* decompress */, false /*maybe_compressed*/,
        GetBlockTypeForMetaBlockByName(meta_block_name),
        UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
    s = block_fetcher.ReadBlockContents();
    if (s.IsCorruption() && meta_block_name == kPropertiesBlock) {
      TableProperties* table_properties;
      s = TryReadPropertiesWithGlobalSeqno(nullptr /* prefetch_buffer */,
                                           index_iter->value(),
                                           &table_properties);
      delete table_properties;
    }
    if (!s.ok()) {
      break;
    }
  }
  return s;
}

bool BlockBasedTable::TEST_BlockInCache(const BlockHandle& handle) const {
  assert(rep_ != nullptr);

  Cache* const cache = rep_->table_options.block_cache.get();
  if (cache == nullptr) {
    return false;
  }

  char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  Slice cache_key =
      GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, handle,
                  cache_key_storage);

  Cache::Handle* const cache_handle = cache->Lookup(cache_key);
  if (cache_handle == nullptr) {
    return false;
  }

  cache->Release(cache_handle);

  return true;
}

bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
                                      const Slice& key) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> iiter(NewIndexIterator(
      options, /*need_upper_bound_check=*/false, /*input_iter=*/nullptr,
      /*get_context=*/nullptr, /*lookup_contex=*/nullptr));
  iiter->Seek(key);
  assert(iiter->Valid());

  return TEST_BlockInCache(iiter->value().handle);
}

// REQUIRES: The following fields of rep_ should have already been populated:
//  1. file
//  2. index_handle,
//  3. options
//  4. internal_comparator
//  5. index_type
Status BlockBasedTable::CreateIndexReader(
    FilePrefetchBuffer* prefetch_buffer,
    InternalIterator* preloaded_meta_index_iter, bool use_cache, bool prefetch,
    bool pin, IndexReader** index_reader,
    BlockCacheLookupContext* lookup_context) {
  // kHashSearch requires a non-empty prefix_extractor, but we bypass checking
  // prefix_extractor here since we have no access to MutableCFOptions.
  // The need_upper_bound_check flag in BlockBasedTable::NewIndexIterator
  // covers this case: if prefix_extractor does not match
  // prefix_extractor_name from the table properties, Hash Index is turned off
  // by setting total_order_seek to true.

  switch (rep_->index_type) {
    case BlockBasedTableOptions::kTwoLevelIndexSearch: {
      return PartitionIndexReader::Create(this, prefetch_buffer, use_cache,
                                          prefetch, pin, index_reader,
                                          lookup_context);
    }
    case BlockBasedTableOptions::kBinarySearch:
    case BlockBasedTableOptions::kBinarySearchWithFirstKey: {
      return BinarySearchIndexReader::Create(this, prefetch_buffer, use_cache,
                                             prefetch, pin, index_reader,
                                             lookup_context);
    }
    case BlockBasedTableOptions::kHashSearch: {
      std::unique_ptr<Block> meta_guard;
      std::unique_ptr<InternalIterator> meta_iter_guard;
      auto meta_index_iter = preloaded_meta_index_iter;
      if (meta_index_iter == nullptr) {
        auto s = ReadMetaBlock(prefetch_buffer, &meta_guard, &meta_iter_guard);
        if (!s.ok()) {
          // we simply fall back to binary search in case there is any
          // problem with prefix hash index loading.
          ROCKS_LOG_WARN(rep_->ioptions.info_log,
                         "Unable to read the metaindex block."
                         " Fall back to binary search index.");
          return BinarySearchIndexReader::Create(this, prefetch_buffer,
                                                 use_cache, prefetch, pin,
                                                 index_reader, lookup_context);
        }
        meta_index_iter = meta_iter_guard.get();
      }

      return HashIndexReader::Create(this, prefetch_buffer, meta_index_iter,
                                     use_cache, prefetch, pin, index_reader,
                                     lookup_context);
    }
    default: {
      std::string error_message =
          "Unrecognized index type: " + ToString(rep_->index_type);
      return Status::InvalidArgument(error_message.c_str());
    }
  }
}

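// Returns an approximate file offset for `key` by seeking the index; if the
// key is past the last key in the file, the data size (or the metaindex
// offset) is used as the estimate.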
uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key,
                                              TableReaderCaller caller) {
  BlockCacheLookupContext context(caller);
  std::unique_ptr<InternalIteratorBase<IndexValue>> index_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_contex=*/&context));

  index_iter->Seek(key);
  uint64_t result;
  if (index_iter->Valid()) {
    BlockHandle handle = index_iter->value().handle;
    result = handle.offset();
  } else {
    // key is past the last key in the file. If table_properties is not
    // available, approximate the offset by returning the offset of the
    // metaindex block (which is right near the end of the file).
    result = 0;
    if (rep_->table_properties) {
      result = rep_->table_properties->data_size;
    }
    // table_properties is not present in the table.
    if (result == 0) {
      result = rep_->footer.metaindex_handle().offset();
    }
  }
  return result;
}

bool BlockBasedTable::TEST_filter_block_preloaded() const {
  return rep_->filter != nullptr;
}

bool BlockBasedTable::TEST_IndexBlockInCache() const {
  assert(rep_ != nullptr);

  return TEST_BlockInCache(rep_->footer.index_handle());
}

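// Reads every data block in index order and copies its key-value pairs into
// `kv_pair_blocks`, one vector entry per data block.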
Status BlockBasedTable::GetKVPairsFromDataBlocks(
    std::vector<KVPairBlock>* kv_pair_blocks) {
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_contex=*/nullptr));

  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    // Cannot read Index Block
    return s;
  }

  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();

    if (!s.ok()) {
      break;
    }

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      // Error reading the block - Skipped
      continue;
    }

    KVPairBlock kv_pair_block;
    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        // Error reading the block - Skipped
        break;
      }
      const Slice& key = datablock_iter->key();
      const Slice& value = datablock_iter->value();
      std::string key_copy = std::string(key.data(), key.size());
      std::string value_copy = std::string(value.data(), value.size());

      kv_pair_block.push_back(
          std::make_pair(std::move(key_copy), std::move(value_copy)));
    }
    kv_pair_blocks->push_back(std::move(kv_pair_block));
  }
  return Status::OK();
}

Status BlockBasedTable::DumpTable(WritableFile* out_file,
                                  const SliceTransform* prefix_extractor) {
  // Output Footer
  out_file->Append(
      "Footer Details:\n"
      "--------------------------------------\n"
      "  ");
  out_file->Append(rep_->footer.ToString().c_str());
  out_file->Append("\n");

  // Output MetaIndex
  out_file->Append(
      "Metaindex Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  Status s = ReadMetaBlock(nullptr /* prefetch_buffer */, &meta, &meta_iter);
  if (s.ok()) {
    for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) {
      s = meta_iter->status();
      if (!s.ok()) {
        return s;
      }
      if (meta_iter->key() == rocksdb::kPropertiesBlock) {
        out_file->Append("  Properties block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
3772 3773 3774 3775
      } else if (meta_iter->key() == rocksdb::kCompressionDictBlock) {
        out_file->Append("  Compression dictionary block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
3776 3777 3778 3779 3780
      } else if (strstr(meta_iter->key().ToString().c_str(),
                        "filter.rocksdb.") != nullptr) {
        out_file->Append("  Filter block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
3781 3782 3783 3784
      } else if (meta_iter->key() == rocksdb::kRangeDelBlock) {
        out_file->Append("  Range deletion block handle: ");
        out_file->Append(meta_iter->value().ToString(true).c_str());
        out_file->Append("\n");
3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803
      }
    }
    out_file->Append("\n");
  } else {
    return s;
  }

  // Output TableProperties
  const rocksdb::TableProperties* table_properties;
  table_properties = rep_->table_properties.get();

  if (table_properties != nullptr) {
    out_file->Append(
        "Table Properties:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(table_properties->ToString("\n  ", ": ").c_str());
    out_file->Append("\n");

3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818
    // Output Filter blocks
    if (!rep_->filter && !table_properties->filter_policy_name.empty()) {
      // Support only BloomFilter as off now
      rocksdb::BlockBasedTableOptions table_options;
      table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(1));
      if (table_properties->filter_policy_name.compare(
              table_options.filter_policy->Name()) == 0) {
        std::string filter_block_key = kFilterBlockPrefix;
        filter_block_key.append(table_properties->filter_policy_name);
        BlockHandle handle;
        if (FindMetaBlock(meta_iter.get(), filter_block_key, &handle).ok()) {
          BlockContents block;
          BlockFetcher block_fetcher(
              rep_->file.get(), nullptr /* prefetch_buffer */, rep_->footer,
              ReadOptions(), handle, &block, rep_->ioptions,
3819
              false /*decompress*/, false /*maybe_compressed*/,
3820
              BlockType::kFilter, UncompressionDict::GetEmptyDict(),
3821
              rep_->persistent_cache_options);
3822 3823 3824 3825 3826 3827 3828
          s = block_fetcher.ReadBlockContents();
          if (!s.ok()) {
            rep_->filter.reset(new BlockBasedFilterBlockReader(
                prefix_extractor, table_options,
                table_options.whole_key_filtering, std::move(block),
                rep_->ioptions.statistics));
          }
3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846
        }
      }
    }
  }
  if (rep_->filter) {
    out_file->Append(
        "Filter Details:\n"
        "--------------------------------------\n"
        "  ");
    out_file->Append(rep_->filter->ToString().c_str());
    out_file->Append("\n");
  }

  // Output Index block
  s = DumpIndexBlock(out_file);
  if (!s.ok()) {
    return s;
  }

  // Output compression dictionary
  if (!rep_->compression_dict_handle.IsNull()) {
    std::unique_ptr<const BlockContents> compression_dict_block;
    s = ReadCompressionDictBlock(nullptr /* prefetch_buffer */,
                                 &compression_dict_block);
    if (!s.ok()) {
      return s;
    }
    assert(compression_dict_block != nullptr);
    auto compression_dict = compression_dict_block->data;
    out_file->Append(
        "Compression Dictionary:\n"
        "--------------------------------------\n");
    out_file->Append("  size (bytes): ");
    out_file->Append(rocksdb::ToString(compression_dict.size()));
    out_file->Append("\n\n");
    out_file->Append("  HEX    ");
    out_file->Append(compression_dict.ToString(true).c_str());
    out_file->Append("\n\n");
  }

  // Output range deletions block
  auto* range_del_iter = NewRangeTombstoneIterator(ReadOptions());
  if (range_del_iter != nullptr) {
    range_del_iter->SeekToFirst();
    if (range_del_iter->Valid()) {
      out_file->Append(
          "Range deletions:\n"
          "--------------------------------------\n"
          "  ");
      for (; range_del_iter->Valid(); range_del_iter->Next()) {
        DumpKeyValue(range_del_iter->key(), range_del_iter->value(), out_file);
      }
      out_file->Append("\n");
    }
    delete range_del_iter;
  }
  // Output Data blocks
  s = DumpDataBlocks(out_file);

  return s;
}

void BlockBasedTable::Close() {
  if (rep_->closed) {
    return;
  }

  Cache* const cache = rep_->table_options.block_cache.get();

  // cleanup index, filter, and compression dictionary blocks
  // to avoid accessing dangling pointers
  if (!rep_->table_options.no_block_cache) {
    char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
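    // Cache keys are formed from the table's cache key prefix plus the
    // varint-encoded offset of the block handle (see GetCacheKey()).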

    // Get the filter block key
    auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                           rep_->filter_handle, cache_key);
    cache->Erase(key);

    if (!rep_->compression_dict_handle.IsNull()) {
      // Get the compression dictionary block key
      key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                        rep_->compression_dict_handle, cache_key);
      cache->Erase(key);
    }
  }

  rep_->closed = true;
}

Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
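  // Each index entry is printed twice: first the user key in hex together
  // with the data block handle it points to, then the user key again as
  // space-separated ASCII characters.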
  out_file->Append(
      "Index Details:\n"
      "--------------------------------------\n");
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Can not read Index Block \n\n");
    return s;
  }

  out_file->Append("  Block key hex dump: Data block handle\n");
  out_file->Append("  Block key ascii\n\n");
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }
    Slice key = blockhandles_iter->key();
    Slice user_key;
    InternalKey ikey;
    if (!rep_->index_key_includes_seq) {
      user_key = key;
    } else {
      ikey.DecodeFrom(key);
      user_key = ikey.user_key();
    }

    out_file->Append("  HEX    ");
    out_file->Append(user_key.ToString(true).c_str());
    out_file->Append(": ");
    out_file->Append(blockhandles_iter->value()
                         .ToString(true, rep_->index_has_first_key)
                         .c_str());
    out_file->Append("\n");

    std::string str_key = user_key.ToString();
    std::string res_key("");
    char cspace = ' ';
    for (size_t i = 0; i < str_key.size(); i++) {
      res_key.append(&str_key[i], 1);
      res_key.append(1, cspace);
    }
    out_file->Append("  ASCII  ");
    out_file->Append(res_key.c_str());
    out_file->Append("\n  ------\n");
  }
  out_file->Append("\n");
  return Status::OK();
}

Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
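  // Walk the index, dump every data block's key/value pairs via DumpKeyValue,
  // and track the min/max/total data block sizes for the summary emitted at
  // the end.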
  std::unique_ptr<InternalIteratorBase<IndexValue>> blockhandles_iter(
      NewIndexIterator(ReadOptions(), /*need_upper_bound_check=*/false,
                       /*input_iter=*/nullptr, /*get_context=*/nullptr,
                       /*lookup_context=*/nullptr));
  Status s = blockhandles_iter->status();
  if (!s.ok()) {
    out_file->Append("Can not read Index Block \n\n");
    return s;
  }

  uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
  uint64_t datablock_size_max = 0;
  uint64_t datablock_size_sum = 0;

  size_t block_id = 1;
  for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
       block_id++, blockhandles_iter->Next()) {
    s = blockhandles_iter->status();
    if (!s.ok()) {
      break;
    }

    BlockHandle bh = blockhandles_iter->value().handle;
    uint64_t datablock_size = bh.size();
    datablock_size_min = std::min(datablock_size_min, datablock_size);
    datablock_size_max = std::max(datablock_size_max, datablock_size);
    datablock_size_sum += datablock_size;

    out_file->Append("Data Block # ");
    out_file->Append(rocksdb::ToString(block_id));
    out_file->Append(" @ ");
    out_file->Append(blockhandles_iter->value().handle.ToString(true).c_str());
    out_file->Append("\n");
    out_file->Append("--------------------------------------\n");

    std::unique_ptr<InternalIterator> datablock_iter;
    datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
        ReadOptions(), blockhandles_iter->value().handle,
        /*input_iter=*/nullptr, /*type=*/BlockType::kData,
        /*get_context=*/nullptr, /*lookup_context=*/nullptr, Status(),
        /*prefetch_buffer=*/nullptr));
    s = datablock_iter->status();

    if (!s.ok()) {
      out_file->Append("Error reading the block - Skipped \n\n");
      continue;
    }

    for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
         datablock_iter->Next()) {
      s = datablock_iter->status();
      if (!s.ok()) {
        out_file->Append("Error reading the block - Skipped \n");
        break;
      }
      DumpKeyValue(datablock_iter->key(), datablock_iter->value(), out_file);
    }
    out_file->Append("\n");
  }

  uint64_t num_datablocks = block_id - 1;
  if (num_datablocks) {
    double datablock_size_avg =
        static_cast<double>(datablock_size_sum) / num_datablocks;
    out_file->Append("Data Block Summary:\n");
    out_file->Append("--------------------------------------");
    out_file->Append("\n  # data blocks: ");
    out_file->Append(rocksdb::ToString(num_datablocks));
    out_file->Append("\n  min data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_min));
    out_file->Append("\n  max data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_max));
    out_file->Append("\n  avg data block size: ");
    out_file->Append(rocksdb::ToString(datablock_size_avg));
    out_file->Append("\n");
  }

  return Status::OK();
}

void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,
                                   WritableFile* out_file) {
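  // Print the pair twice: once as hex and once as ASCII with a space after
  // every byte and NUL bytes escaped as "\0". For example, user key "k1" with
  // value "v" would be rendered roughly as:
  //   HEX    6B31: 76
  //   ASCII  k 1 : v
  //   ------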
  InternalKey ikey;
  ikey.DecodeFrom(key);

  out_file->Append("  HEX    ");
  out_file->Append(ikey.user_key().ToString(true).c_str());
  out_file->Append(": ");
  out_file->Append(value.ToString(true).c_str());
  out_file->Append("\n");

  std::string str_key = ikey.user_key().ToString();
  std::string str_value = value.ToString();
  std::string res_key(""), res_value("");
  char cspace = ' ';
  for (size_t i = 0; i < str_key.size(); i++) {
    if (str_key[i] == '\0') {
      res_key.append("\\0", 2);
    } else {
      res_key.append(&str_key[i], 1);
    }
    res_key.append(1, cspace);
  }
  for (size_t i = 0; i < str_value.size(); i++) {
    if (str_value[i] == '\0') {
      res_value.append("\\0", 2);
    } else {
      res_value.append(&str_value[i], 1);
    }
    res_value.append(1, cspace);
  }

  out_file->Append("  ASCII  ");
  out_file->Append(res_key.c_str());
  out_file->Append(": ");
  out_file->Append(res_value.c_str());
  out_file->Append("\n  ------\n");
}

namespace {
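// Deleter callbacks registered with the block cache: they record the
// bytes-evicted statistics before freeing the cached filter or uncompression
// dictionary object.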

void DeleteCachedFilterEntry(const Slice& /*key*/, void* value) {
  FilterBlockReader* filter = reinterpret_cast<FilterBlockReader*>(value);
  if (filter->statistics() != nullptr) {
    RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT,
               filter->ApproximateMemoryUsage());
  }
  delete filter;
}

void DeleteCachedUncompressionDictEntry(const Slice& /*key*/, void* value) {
  UncompressionDict* dict = reinterpret_cast<UncompressionDict*>(value);
  RecordTick(dict->statistics(), BLOCK_CACHE_COMPRESSION_DICT_BYTES_EVICT,
             dict->ApproximateMemoryUsage());
  delete dict;
}

}  // anonymous namespace

}  // namespace rocksdb