Commit 601efe3c authored by Peter Dillinger, committed by Facebook GitHub Bot

Misc cleanup of block cache code (#11291)

Summary:
... ahead of a larger change.
* Rename confusingly named `is_in_sec_cache` to `kept_in_sec_cache` (see the caller-side sketch after this list)
* Unify naming of "standalone" block cache entries (was "detached" in clock_cache)
* Remove some unused definitions in clock_cache.h (leftover from a previous revision)
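
For orientation, a caller-side sketch of the renamed out-parameter. This is illustrative only: the `Lookup` signature matches the diff below, but `sec_cache`, `key`, `helper`, and `create_context` are placeholder names, not part of this change.

    // Hypothetical caller; shows how the renamed out-parameter is read.
    bool kept_in_sec_cache = false;
    std::unique_ptr<SecondaryCacheResultHandle> result = sec_cache->Lookup(
        key, helper, create_context, /*wait=*/true,
        /*advise_erase=*/true, kept_in_sec_cache);
    if (result && !kept_in_sec_cache) {
      // The entry was handed to the caller and erased from the secondary
      // cache, so the old name "is_in_sec_cache" read backwards here: the
      // entry was found there but is no longer kept there.
    }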

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11291

Test Plan: usual tests and CI, no behavior changes

Reviewed By: anand1976

Differential Revision: D43984642

Pulled By: pdillinger

fbshipit-source-id: b8bf0c5b90a932a88bcbdb413b2f256834aedf97
Parent 11cb6af6
@@ -364,21 +364,22 @@ inline bool HyperClockTable::ChargeUsageMaybeEvictNonStrict(
return true;
}
-inline HyperClockTable::HandleImpl* HyperClockTable::DetachedInsert(
+inline HyperClockTable::HandleImpl* HyperClockTable::StandaloneInsert(
const ClockHandleBasicData& proto) {
// Heap allocated separate from table
HandleImpl* h = new HandleImpl();
ClockHandleBasicData* h_alias = h;
*h_alias = proto;
-h->SetDetached();
-// Single reference (detached entries only created if returning a refed
+h->SetStandalone();
+// Single reference (standalone entries only created if returning a refed
// Handle back to user)
uint64_t meta = uint64_t{ClockHandle::kStateInvisible}
<< ClockHandle::kStateShift;
meta |= uint64_t{1} << ClockHandle::kAcquireCounterShift;
h->meta.store(meta, std::memory_order_release);
-// Keep track of how much of usage is detached
-detached_usage_.fetch_add(proto.GetTotalCharge(), std::memory_order_relaxed);
+// Keep track of how much of usage is standalone
+standalone_usage_.fetch_add(proto.GetTotalCharge(),
+                            std::memory_order_relaxed);
return h;
}
@@ -396,7 +397,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
// Usage/capacity handling is somewhat different depending on
// strict_capacity_limit, but mostly pessimistic.
-bool use_detached_insert = false;
+bool use_standalone_insert = false;
const size_t total_charge = proto.GetTotalCharge();
if (strict_capacity_limit) {
Status s = ChargeUsageMaybeEvictStrict(total_charge, capacity,
@@ -417,9 +418,9 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
proto.FreeData(allocator_);
return Status::OK();
} else {
-// Need to track usage of fallback detached insert
+// Need to track usage of fallback standalone insert
usage_.fetch_add(total_charge, std::memory_order_relaxed);
-use_detached_insert = true;
+use_standalone_insert = true;
}
}
}
@@ -429,7 +430,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
assert(usage_.load(std::memory_order_relaxed) < SIZE_MAX / 2);
};
-if (!use_detached_insert) {
+if (!use_standalone_insert) {
// Attempt a table insert, but abort if we find an existing entry for the
// key. If we were to overwrite old entries, we would either
// * Have to gain ownership over an existing entry to overwrite it, which
@@ -500,8 +501,8 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
std::memory_order_acq_rel);
// Correct for possible (but rare) overflow
CorrectNearOverflow(old_meta, h->meta);
-// Insert detached instead (only if return handle needed)
-use_detached_insert = true;
+// Insert standalone instead (only if return handle needed)
+use_standalone_insert = true;
return true;
} else {
// Mismatch. Pretend we never took the reference
@@ -539,9 +540,9 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
// That should be infeasible for roughly n >= 256, so if this assertion
// fails, that suggests something is going wrong.
assert(GetTableSize() < 256);
-use_detached_insert = true;
+use_standalone_insert = true;
}
-if (!use_detached_insert) {
+if (!use_standalone_insert) {
// Successfully inserted
if (handle) {
*handle = e;
@@ -551,7 +552,7 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
// Roll back table insertion
Rollback(proto.hashed_key, e);
revert_occupancy_fn();
-// Maybe fall back on detached insert
+// Maybe fall back on standalone insert
if (handle == nullptr) {
revert_usage_fn();
// As if unrefed entry immediately evicted
@@ -560,16 +561,16 @@ Status HyperClockTable::Insert(const ClockHandleBasicData& proto,
}
}
-// Run detached insert
-assert(use_detached_insert);
+// Run standalone insert
+assert(use_standalone_insert);
-*handle = DetachedInsert(proto);
+*handle = StandaloneInsert(proto);
// The OkOverwritten status is used to count "redundant" insertions into
// block cache. This implementation doesn't strictly check for redundant
// insertions, but we instead are probably interested in how many insertions
-// didn't go into the table (instead "detached"), which could be redundant
-// Insert or some other reason (use_detached_insert reasons above).
+// didn't go into the table (instead "standalone"), which could be redundant
+// Insert or some other reason (use_standalone_insert reasons above).
return Status::OkOverwritten();
}
@@ -696,11 +697,11 @@ bool HyperClockTable::Release(HandleImpl* h, bool useful,
std::memory_order_acquire));
// Took ownership
size_t total_charge = h->GetTotalCharge();
-if (UNLIKELY(h->IsDetached())) {
+if (UNLIKELY(h->IsStandalone())) {
h->FreeData(allocator_);
-// Delete detached handle
+// Delete standalone handle
delete h;
-detached_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
+standalone_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
usage_.fetch_sub(total_charge, std::memory_order_relaxed);
} else {
Rollback(h->hashed_key, h);
@@ -1156,8 +1157,8 @@ size_t ClockCacheShard<Table>::GetUsage() const {
}
template <class Table>
-size_t ClockCacheShard<Table>::GetDetachedUsage() const {
-return table_.GetDetachedUsage();
+size_t ClockCacheShard<Table>::GetStandaloneUsage() const {
+return table_.GetStandaloneUsage();
}
template <class Table>
@@ -1191,7 +1192,7 @@ size_t ClockCacheShard<Table>::GetPinnedUsage() const {
},
0, table_.GetTableSize(), true);
-return table_pinned_usage + table_.GetDetachedUsage();
+return table_pinned_usage + table_.GetStandaloneUsage();
}
template <class Table>
@@ -1259,7 +1260,7 @@ namespace {
void AddShardEvaluation(const HyperClockCache::Shard& shard,
std::vector<double>& predicted_load_factors,
size_t& min_recommendation) {
-size_t usage = shard.GetUsage() - shard.GetDetachedUsage();
+size_t usage = shard.GetUsage() - shard.GetStandaloneUsage();
size_t capacity = shard.GetCapacity();
double usage_ratio = 1.0 * usage / capacity;
@@ -145,7 +145,7 @@ class ClockCacheTest;
// (erased by user) but can be read by existing references, and ref count
// changed by Ref and Release.
//
-// A special case is "detached" entries, which are heap-allocated handles
+// A special case is "standalone" entries, which are heap-allocated handles
// not in the table. They are always Invisible and freed on zero refs.
//
// State transitions:
@@ -200,8 +200,8 @@ class ClockCacheTest;
// table occupancy limit has been reached. If strict_capacity_limit=false,
// we must never fail Insert, and if a Handle* is provided, we have to return
// a usable Cache handle on success. The solution to this (typically rare)
-// problem is "detached" handles, which are usable by the caller but not
-// actually available for Lookup in the Cache. Detached handles are allocated
+// problem is "standalone" handles, which are usable by the caller but not
+// actually available for Lookup in the Cache. Standalone handles are allocated
// independently on the heap and specially marked so that they are freed on
// the heap when their last reference is released.
//
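
A condensed, illustrative sketch of the fallback this comment describes, distilled from HyperClockTable::Insert in this diff; `inserted_into_table` is a hypothetical flag standing in for the real control flow, not actual code:

    if (!inserted_into_table) {       // table full, occupancy limit, etc.
      if (handle == nullptr) {
        // No handle requested: behave as if the entry were inserted and
        // then immediately evicted.
        proto.FreeData(allocator_);
        return Status::OK();
      }
      // Caller needs a usable handle: allocate it on the heap, marked
      // standalone so it is freed on its last release rather than ever
      // being findable by Lookup.
      *handle = StandaloneInsert(proto);
    }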
@@ -312,12 +312,6 @@ struct ClockHandleBasicData {
UniqueId64x2 hashed_key = kNullUniqueId64x2;
size_t total_charge = 0;
-// For total_charge_and_flags
-// "Detached" means the handle is allocated separately from hash table.
-static constexpr uint64_t kFlagDetached = uint64_t{1} << 63;
-// Extract just the total charge
-static constexpr uint64_t kTotalChargeMask = kFlagDetached - 1;
inline size_t GetTotalCharge() const { return total_charge; }
// Calls deleter (if non-null) on cache key and value
@@ -398,11 +392,11 @@ class HyperClockTable {
// TODO: ideally this would be packed into some other data field, such
// as upper bits of total_charge, but that incurs a measurable performance
// regression.
-bool detached = false;
+bool standalone = false;
-inline bool IsDetached() const { return detached; }
+inline bool IsStandalone() const { return standalone; }
-inline void SetDetached() { detached = true; }
+inline void SetStandalone() { standalone = true; }
}; // struct HandleImpl
struct Opts {
@@ -444,8 +438,8 @@ class HyperClockTable {
size_t GetUsage() const { return usage_.load(std::memory_order_relaxed); }
-size_t GetDetachedUsage() const {
-return detached_usage_.load(std::memory_order_relaxed);
+size_t GetStandaloneUsage() const {
+return standalone_usage_.load(std::memory_order_relaxed);
}
// Acquire/release N references
@@ -514,10 +508,10 @@ class HyperClockTable {
size_t capacity,
bool need_evict_for_occupancy);
-// Creates a "detached" handle for returning from an Insert operation that
+// Creates a "standalone" handle for returning from an Insert operation that
// cannot be completed by actually inserting into the table.
-// Updates `detached_usage_` but not `usage_` nor `occupancy_`.
-inline HandleImpl* DetachedInsert(const ClockHandleBasicData& proto);
+// Updates `standalone_usage_` but not `usage_` nor `occupancy_`.
+inline HandleImpl* StandaloneInsert(const ClockHandleBasicData& proto);
MemoryAllocator* GetAllocator() const { return allocator_; }
@@ -555,11 +549,11 @@ class HyperClockTable {
// Number of elements in the table.
std::atomic<size_t> occupancy_{};
-// Memory usage by entries tracked by the cache (including detached)
+// Memory usage by entries tracked by the cache (including standalone)
std::atomic<size_t> usage_{};
-// Part of usage by detached entries (not in table)
-std::atomic<size_t> detached_usage_{};
+// Part of usage by standalone entries (not in table)
+std::atomic<size_t> standalone_usage_{};
}; // class HyperClockTable
// A single shard of sharded cache.
@@ -623,7 +617,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
size_t GetUsage() const;
-size_t GetDetachedUsage() const;
+size_t GetStandaloneUsage() const;
size_t GetPinnedUsage() const;
@@ -40,10 +40,10 @@ CompressedSecondaryCache::~CompressedSecondaryCache() { cache_.reset(); }
std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
-bool& is_in_sec_cache) {
+bool& kept_in_sec_cache) {
assert(helper);
std::unique_ptr<SecondaryCacheResultHandle> handle;
-is_in_sec_cache = false;
+kept_in_sec_cache = false;
Cache::Handle* lru_handle = cache_->Lookup(key);
if (lru_handle == nullptr) {
return nullptr;
@@ -109,7 +109,7 @@ std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
/*charge=*/0)
.PermitUncheckedError();
} else {
-is_in_sec_cache = true;
+kept_in_sec_cache = true;
cache_->Release(lru_handle, /*erase_if_last_ref=*/false);
}
handle.reset(new CompressedSecondaryCacheResultHandle(value, charge));
@@ -91,7 +91,7 @@ class CompressedSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool /*wait*/, bool advise_erase,
-bool& is_in_sec_cache) override;
+bool& kept_in_sec_cache) override;
bool SupportForceErase() const override { return true; }
@@ -100,10 +100,10 @@ class CompressedSecondaryCacheTest : public testing::Test,
void BasicTestHelper(std::shared_ptr<SecondaryCache> sec_cache,
bool sec_cache_is_compressed) {
get_perf_context()->Reset();
-bool is_in_sec_cache{true};
+bool kept_in_sec_cache{true};
// Lookup a non-existent key.
std::unique_ptr<SecondaryCacheResultHandle> handle0 = sec_cache->Lookup(
"k0", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
"k0", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_EQ(handle0, nullptr);
Random rnd(301);
@@ -117,7 +117,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
std::unique_ptr<SecondaryCacheResultHandle> handle1_1 = sec_cache->Lookup(
"k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
"k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle1_1, nullptr);
// Insert and Lookup the item k1 for the second time and advise erasing it.
@@ -125,9 +125,9 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1);
std::unique_ptr<SecondaryCacheResultHandle> handle1_2 = sec_cache->Lookup(
"k1", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
"k1", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_NE(handle1_2, nullptr);
ASSERT_FALSE(is_in_sec_cache);
ASSERT_FALSE(kept_in_sec_cache);
if (sec_cache_is_compressed) {
ASSERT_EQ(get_perf_context()->compressed_sec_cache_uncompressed_bytes,
1000);
@@ -145,7 +145,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
// Lookup the item k1 again.
std::unique_ptr<SecondaryCacheResultHandle> handle1_3 = sec_cache->Lookup(
"k1", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
"k1", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_EQ(handle1_3, nullptr);
// Insert and Lookup the item k2.
@@ -154,7 +154,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_dummy_count, 2);
std::unique_ptr<SecondaryCacheResultHandle> handle2_1 = sec_cache->Lookup(
"k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
"k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle2_1, nullptr);
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
@@ -169,7 +169,7 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_EQ(get_perf_context()->compressed_sec_cache_compressed_bytes, 0);
}
std::unique_ptr<SecondaryCacheResultHandle> handle2_2 = sec_cache->Lookup(
"k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
"k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_NE(handle2_2, nullptr);
std::unique_ptr<TestItem> val2 =
std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2_2->Value()));
@@ -247,15 +247,15 @@ class CompressedSecondaryCacheTest : public testing::Test,
TestItem item2(str2.data(), str2.length());
// Insert a dummy handle, k1 is not evicted.
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
-bool is_in_sec_cache{false};
+bool kept_in_sec_cache{false};
std::unique_ptr<SecondaryCacheResultHandle> handle1 = sec_cache->Lookup(
"k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
"k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle1, nullptr);
// Insert k2 and k1 is evicted.
ASSERT_OK(sec_cache->Insert("k2", &item2, &kHelper));
std::unique_ptr<SecondaryCacheResultHandle> handle2 = sec_cache->Lookup(
"k2", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
"k2", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_NE(handle2, nullptr);
std::unique_ptr<TestItem> val2 =
std::unique_ptr<TestItem>(static_cast<TestItem*>(handle2->Value()));
@@ -266,13 +266,13 @@ class CompressedSecondaryCacheTest : public testing::Test,
ASSERT_OK(sec_cache->Insert("k1", &item1, &kHelper));
std::unique_ptr<SecondaryCacheResultHandle> handle1_1 = sec_cache->Lookup(
"k1", &kHelper, this, true, /*advise_erase=*/false, is_in_sec_cache);
"k1", &kHelper, this, true, /*advise_erase=*/false, kept_in_sec_cache);
ASSERT_EQ(handle1_1, nullptr);
// Create Fails.
SetFailCreate(true);
std::unique_ptr<SecondaryCacheResultHandle> handle2_1 = sec_cache->Lookup(
"k2", &kHelper, this, true, /*advise_erase=*/true, is_in_sec_cache);
"k2", &kHelper, this, true, /*advise_erase=*/true, kept_in_sec_cache);
ASSERT_EQ(handle2_1, nullptr);
// Save Fails.
@@ -970,10 +970,10 @@ TEST_P(CompressedSecondaryCacheTestWithCompressionParam, EntryRoles) {
ASSERT_OK(sec_cache->Insert(ith_key, &item, &kHelperByRole[i]));
ASSERT_EQ(get_perf_context()->compressed_sec_cache_insert_real_count, 1U);
-bool is_in_sec_cache{true};
+bool kept_in_sec_cache{true};
std::unique_ptr<SecondaryCacheResultHandle> handle =
sec_cache->Lookup(ith_key, &kHelperByRole[i], this, true,
-/*advise_erase=*/true, is_in_sec_cache);
+/*advise_erase=*/true, kept_in_sec_cache);
ASSERT_NE(handle, nullptr);
// Lookup returns the right data
@@ -555,10 +555,10 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
// again, we erase it from CompressedSecondaryCache and add it into the
// primary cache.
if (!e && secondary_cache_ && helper && helper->create_cb) {
-bool is_in_sec_cache{false};
+bool kept_in_sec_cache{false};
std::unique_ptr<SecondaryCacheResultHandle> secondary_handle =
secondary_cache_->Lookup(key, helper, create_context, wait,
-found_dummy_entry, is_in_sec_cache);
+found_dummy_entry, kept_in_sec_cache);
if (secondary_handle != nullptr) {
e = static_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
@@ -575,7 +575,7 @@ LRUHandle* LRUCacheShard::Lookup(const Slice& key, uint32_t hash,
e->sec_handle = secondary_handle.release();
e->total_charge = 0;
e->Ref();
-e->SetIsInSecondaryCache(is_in_sec_cache);
+e->SetIsInSecondaryCache(kept_in_sec_cache);
e->SetIsStandalone(secondary_cache_->SupportForceErase() &&
!found_dummy_entry);
@@ -934,12 +934,12 @@ class TestSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool /*wait*/,
-bool /*advise_erase*/, bool& is_in_sec_cache) override {
+bool /*advise_erase*/, bool& kept_in_sec_cache) override {
std::string key_str = key.ToString();
TEST_SYNC_POINT_CALLBACK("TestSecondaryCache::Lookup", &key_str);
std::unique_ptr<SecondaryCacheResultHandle> secondary_handle;
-is_in_sec_cache = false;
+kept_in_sec_cache = false;
ResultType type = ResultType::SUCCESS;
auto iter = result_map_.find(key.ToString());
if (iter != result_map_.end()) {
@@ -965,7 +965,7 @@ class TestSecondaryCache : public SecondaryCache {
if (s.ok()) {
secondary_handle.reset(new TestSecondaryCacheResultHandle(
cache_.get(), handle, value, charge, type));
-is_in_sec_cache = true;
+kept_in_sec_cache = true;
} else {
cache_.Release(handle);
}
@@ -1214,12 +1214,12 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_EQ(handle0, nullptr);
// key0's item should be in the secondary cache.
-bool is_in_sec_cache = false;
+bool kept_in_sec_cache = false;
auto sec_handle0 = secondary_cache->Lookup(
key0, &BlobSource::SharedCacheInterface::kFullHelper,
/*context*/ nullptr, true,
-/*advise_erase=*/true, is_in_sec_cache);
-ASSERT_FALSE(is_in_sec_cache);
+/*advise_erase=*/true, kept_in_sec_cache);
+ASSERT_FALSE(kept_in_sec_cache);
ASSERT_NE(sec_handle0, nullptr);
ASSERT_TRUE(sec_handle0->IsReady());
auto value = static_cast<BlobContents*>(sec_handle0->Value());
@@ -1242,12 +1242,12 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_NE(handle1, nullptr);
blob_cache->Release(handle1);
-bool is_in_sec_cache = false;
+bool kept_in_sec_cache = false;
auto sec_handle1 = secondary_cache->Lookup(
key1, &BlobSource::SharedCacheInterface::kFullHelper,
/*context*/ nullptr, true,
-/*advise_erase=*/true, is_in_sec_cache);
-ASSERT_FALSE(is_in_sec_cache);
+/*advise_erase=*/true, kept_in_sec_cache);
+ASSERT_FALSE(kept_in_sec_cache);
ASSERT_EQ(sec_handle1, nullptr);
ASSERT_TRUE(blob_source.TEST_BlobInCache(file_number, file_size,
@@ -99,12 +99,12 @@ class SecondaryCache : public Customizable {
// needs to return true.
// This hint can also be safely ignored.
//
-// is_in_sec_cache is to indicate whether the handle is possibly erased
-// from the secondary cache after the Lookup.
+// kept_in_sec_cache is to indicate whether the entry will be kept in the
+// secondary cache after the Lookup (rather than erased because of Lookup)
virtual std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool wait, bool advise_erase,
-bool& is_in_sec_cache) = 0;
+bool& kept_in_sec_cache) = 0;
// Indicate whether a handle can be erased in this secondary cache.
[[nodiscard]] virtual bool SupportForceErase() const = 0;
@@ -1236,8 +1236,8 @@ class TestSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& /*key*/, const Cache::CacheItemHelper* /*helper*/,
Cache::CreateContext* /*create_context*/, bool /*wait*/,
-bool /*advise_erase*/, bool& is_in_sec_cache) override {
-is_in_sec_cache = true;
+bool /*advise_erase*/, bool& kept_in_sec_cache) override {
+kept_in_sec_cache = true;
return nullptr;
}
@@ -92,18 +92,18 @@ FaultInjectionSecondaryCache::Lookup(const Slice& key,
const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context,
bool wait, bool advise_erase,
-bool& is_in_sec_cache) {
+bool& kept_in_sec_cache) {
ErrorContext* ctx = GetErrorContext();
if (base_is_compressed_sec_cache_) {
if (ctx->rand.OneIn(prob_)) {
return nullptr;
} else {
return base_->Lookup(key, helper, create_context, wait, advise_erase,
-is_in_sec_cache);
+kept_in_sec_cache);
}
} else {
std::unique_ptr<SecondaryCacheResultHandle> hdl = base_->Lookup(
-key, helper, create_context, wait, advise_erase, is_in_sec_cache);
+key, helper, create_context, wait, advise_erase, kept_in_sec_cache);
if (wait && ctx->rand.OneIn(prob_)) {
hdl.reset();
}
@@ -37,7 +37,7 @@ class FaultInjectionSecondaryCache : public SecondaryCache {
std::unique_ptr<SecondaryCacheResultHandle> Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,
Cache::CreateContext* create_context, bool wait, bool advise_erase,
-bool& is_in_sec_cache) override;
+bool& kept_in_sec_cache) override;
bool SupportForceErase() const override { return base_->SupportForceErase(); }