//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/clock_cache.h"

#ifndef SUPPORT_CLOCK_CACHE

namespace ROCKSDB_NAMESPACE {

std::shared_ptr<Cache> NewClockCache(
    size_t /*capacity*/, int /*num_shard_bits*/, bool /*strict_capacity_limit*/,
    CacheMetadataChargePolicy /*metadata_charge_policy*/) {
  // Clock cache not supported.
  return nullptr;
}

}  // namespace ROCKSDB_NAMESPACE

#else

#include <assert.h>
#include <atomic>
#include <deque>

// "tbb/concurrent_hash_map.h" requires RTTI if exception is enabled.
// Disable it so users can chooose to disable RTTI.
#ifndef ROCKSDB_USE_RTTI
#define TBB_USE_EXCEPTIONS 0
#endif
#include "cache/sharded_cache.h"
#include "port/lang.h"
#include "port/malloc.h"
#include "port/port.h"
#include "tbb/concurrent_hash_map.h"
#include "util/autovector.h"
#include "util/mutexlock.h"

namespace ROCKSDB_NAMESPACE {

namespace {

// An implementation of the Cache interface based on the CLOCK algorithm, with
// better concurrent performance than LRUCache. The idea of the CLOCK algorithm
// is to maintain all cache entries in a circular list, with an iterator
// (the "head") pointing to the last examined entry. Eviction starts from the
// current head. Each entry is given a second chance before eviction if it
// has been accessed since the last examination. In contrast to LRU, no
// modification of the internal data structure (except for flipping the usage
// bit) needs to be done upon lookup. This gives us the opportunity to
// implement a cache with better concurrency.
//
// Each cache entry is represented by a cache handle, and all the handles
// are arranged in a circular list, as described above. Upon erase of an entry,
// we never remove the handle. Instead, the handle is put into a recycle bin
// to be re-used. This avoids memory deallocation, which is hard to deal
// with in a concurrent environment.
//
// The cache also maintains a concurrent hash map for lookup. Any concurrent
// hash map implementation should do the job. We currently use
// tbb::concurrent_hash_map because it supports concurrent erase.
//
// Each cache handle has the following flags and counters, which are squeezed
// into an atomic integer to make sure the handle is always in a consistent
// state:
//
//   * In-cache bit: whether the entry is referenced by the cache itself. If
//     an entry is in cache, its key is also present in the hash map.
//   * Usage bit: whether the entry has been accessed by a user since it was
//     last examined for eviction. Can be reset by eviction.
//   * Reference count: the number of references held by users.
//
// An entry can be referenced only while it is in cache. An entry can be
// evicted only when it is in cache, has not been used since the last
// examination, and its reference count is zero.
//
// The following figure shows a possible layout of the cache. Boxes represent
// cache handles, and the numbers in each box are the in-cache bit, usage bit,
// and reference count, respectively.
//
//    hash map:
//      +-------+--------+
//      |  key  | handle |
//      +-------+--------+
//      | "foo" |    5   |-------------------------------------+
//      +-------+--------+                                     |
//      | "bar" |    2   |--+                                  |
//      +-------+--------+  |                                  |
//                          |                                  |
//                     head |                                  |
//                       |  |                                  |
//    circular list:     |  |                                  |
//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//         |(0,0,0)|---|(1,1,0)|---|(0,0,0)|---|(0,1,3)|---|(1,0,0)|---|  ...
//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//             |                       |
//             +-------+   +-----------+
//                     |   |
//                   +---+---+
//    recycle bin:   | 1 | 3 |
//                   +---+---+
//
// Suppose we try to insert "baz" into the cache at this point and the cache is
// full. The cache will first look for entries to evict, starting from where
// the head points (the second entry). It resets the usage bit of the second
// entry, skips the third and fourth entries since they are not in cache, and
// finally evicts the fifth entry ("foo"). It then looks in the recycle bin for
// an available handle, grabs handle 3, and inserts the key into that handle.
// The following figure shows the resulting layout.
//
//    hash map:
//      +-------+--------+
//      |  key  | handle |
//      +-------+--------+
//      | "baz" |    3   |-------------+
//      +-------+--------+             |
//      | "bar" |    2   |--+          |
//      +-------+--------+  |          |
//                          |          |
//                          |          |                                 head
//                          |          |                                   |
//    circular list:        |          |                                   |
//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//         |(0,0,0)|---|(1,0,0)|---|(1,0,0)|---|(0,1,3)|---|(0,0,0)|---|  ...
//         +-------+   +-------+   +-------+   +-------+   +-------+   +-------
//             |                                               |
//             +-------+   +-----------------------------------+
//                     |   |
//                   +---+---+
//    recycle bin:   | 1 | 5 |
//                   +---+---+
//
// A global mutex guards the circular list, the head, and the recycle bin.
// We additionally require holding the mutex when modifying the hash map.
// As such, modifying the cache (e.g., Insert() and Erase()) requires holding
// the mutex. Lookup() only accesses the hash map and the flags associated
// with each handle, and doesn't require explicit locking. Release() has to
// acquire the mutex only when it releases the last reference to an entry that
// has already been explicitly erased from the cache. A future improvement
// could be to remove the mutex completely.
//
// Benchmark:
// We run readrandom db_bench on a test DB of size 13GB, with the size of each
// level as follows:
//
//    Level    Files   Size(MB)
//    -------------------------
//      L0        1       0.01
//      L1       18      17.32
//      L2      230     182.94
//      L3     1186    1833.63
//      L4     4602    8140.30
//
// We test with both 32 and 16 read threads, with a 2GB cache size (the whole
// DB doesn't fit in cache) and a 64GB cache size (the whole DB fits in cache),
// and with and without index and filter blocks in the block cache. The
// benchmark runs with RocksDB 4.10. We got the following results:
//
// Threads Cache     Cache               ClockCache               LRUCache
//         Size  Index/Filter Throughput(MB/s)   Hit Throughput(MB/s)    Hit
//     32   2GB       yes               466.7  85.9%           433.7   86.5%
//     32   2GB       no                529.9  72.7%           532.7   73.9%
//     32  64GB       yes               649.9  99.9%           507.9   99.9%
//     32  64GB       no                740.4  99.9%           662.8   99.9%
//     16   2GB       yes               278.4  85.9%           283.4   86.5%
//     16   2GB       no                318.6  72.7%           335.8   73.9%
//     16  64GB       yes               391.9  99.9%           353.3   99.9%
//     16  64GB       no                433.8  99.8%           419.4   99.8%
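//
// Example usage (an illustrative sketch only, not compiled as part of this
// file; it assumes SUPPORT_CLOCK_CACHE is defined and uses the public
// rocksdb::Cache interface; MyValue, DeleteValue, and the sizes are
// hypothetical placeholders):
//
//   std::shared_ptr<Cache> cache =
//       NewClockCache(64 << 20 /* capacity */, 6 /* num_shard_bits */,
//                     false /* strict_capacity_limit */,
//                     kDefaultCacheMetadataChargePolicy);
//   // Insert a value with a deleter that frees it when evicted or erased.
//   Status s = cache->Insert("foo", new MyValue(), 1024 /* charge */,
//                            &DeleteValue);
//   // Look it up; a non-null handle pins the entry until Release().
//   Cache::Handle* h = cache->Lookup("foo");
//   if (h != nullptr) {
//     MyValue* value = static_cast<MyValue*>(cache->Value(h));
//     // ... use value ...
//     cache->Release(h);
//   }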

// Cache entry metadata.
struct CacheHandle {
  Slice key;
  void* value;
  size_t charge;
  Cache::DeleterFn deleter;
  uint32_t hash;

  // Addition to "charge" to get "total charge" under metadata policy.
  uint32_t meta_charge;

  // Flags and counters associated with the cache handle:
  //   lowest bit: in-cache bit
  //   second lowest bit: usage bit
  //   the remaining bits: reference count
  // The handle is unused when flags equals 0. The thread that decreases the
  // count to 0 is responsible for putting the handle back into recycle_ and
  // cleaning up memory.
  std::atomic<uint32_t> flags;

  CacheHandle() = default;

  CacheHandle(const CacheHandle& a) { *this = a; }

  CacheHandle(const Slice& k, void* v,
              void (*del)(const Slice& key, void* value))
      : key(k), value(v), deleter(del) {}

  CacheHandle& operator=(const CacheHandle& a) {
    // Only copy members needed for deletion.
    key = a.key;
    value = a.value;
    deleter = a.deleter;
    return *this;
  }

  inline static uint32_t CalcMetadataCharge(
      Slice key, CacheMetadataChargePolicy metadata_charge_policy) {
    size_t meta_charge = 0;
    if (metadata_charge_policy == kFullChargeCacheMetadata) {
      meta_charge += sizeof(CacheHandle);
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
      meta_charge +=
          malloc_usable_size(static_cast<void*>(const_cast<char*>(key.data())));
#else
      meta_charge += key.size();
#endif
    }
    assert(meta_charge <= UINT32_MAX);
    return static_cast<uint32_t>(meta_charge);
  }

  inline size_t GetTotalCharge() { return charge + meta_charge; }
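
  // Worked example (illustrative numbers, not taken from real measurements):
  // for a 16-byte key and charge == 100, kDontChargeCacheMetadata gives
  // meta_charge == 0 and GetTotalCharge() == 100, while
  // kFullChargeCacheMetadata gives GetTotalCharge() == 100 +
  // sizeof(CacheHandle) + the allocated size of the key copy
  // (malloc_usable_size of the key buffer when available, otherwise
  // key.size(), i.e. 16).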
};

// Key of hash map. We store hash value with the key for convenience.
struct ClockCacheKey {
  Slice key;
  uint32_t hash_value;

  ClockCacheKey() = default;

  ClockCacheKey(const Slice& k, uint32_t h) {
    key = k;
    hash_value = h;
  }

  static bool equal(const ClockCacheKey& a, const ClockCacheKey& b) {
    return a.hash_value == b.hash_value && a.key == b.key;
  }

  static size_t hash(const ClockCacheKey& a) {
    return static_cast<size_t>(a.hash_value);
  }
};

struct CleanupContext {
  // List of values to be deleted, along with the key and deleter.
  autovector<CacheHandle> to_delete_value;

  // List of keys to be deleted.
  autovector<const char*> to_delete_key;
};

// A cache shard which maintains its own CLOCK cache.
class ClockCacheShard final : public CacheShard {
 public:
  // Hash map type.
  using HashTable =
      tbb::concurrent_hash_map<ClockCacheKey, CacheHandle*, ClockCacheKey>;

  ClockCacheShard();
  ~ClockCacheShard() override;

  // Interfaces
  void SetCapacity(size_t capacity) override;
  void SetStrictCapacityLimit(bool strict_capacity_limit) override;
  Status Insert(const Slice& key, uint32_t hash, void* value, size_t charge,
                void (*deleter)(const Slice& key, void* value),
                Cache::Handle** handle, Cache::Priority priority) override;
  Status Insert(const Slice& key, uint32_t hash, void* value,
                const Cache::CacheItemHelper* helper, size_t charge,
                Cache::Handle** handle, Cache::Priority priority) override {
    return Insert(key, hash, value, charge, helper->del_cb, handle, priority);
  }
  Cache::Handle* Lookup(const Slice& key, uint32_t hash) override;
  Cache::Handle* Lookup(const Slice& key, uint32_t hash,
                        const Cache::CacheItemHelper* /*helper*/,
                        const Cache::CreateCallback& /*create_cb*/,
                        Cache::Priority /*priority*/, bool /*wait*/,
                        Statistics* /*stats*/) override {
    return Lookup(key, hash);
  }
  bool Release(Cache::Handle* handle, bool /*useful*/,
               bool erase_if_last_ref) override {
    return Release(handle, erase_if_last_ref);
  }
  bool IsReady(Cache::Handle* /*handle*/) override { return true; }
  void Wait(Cache::Handle* /*handle*/) override {}

  // If the entry is in cache, increase its reference count and return true.
  // Return false otherwise.
  //
  // Not necessary to hold mutex_ before being called.
  bool Ref(Cache::Handle* handle) override;
  bool Release(Cache::Handle* handle, bool erase_if_last_ref = false) override;
  void Erase(const Slice& key, uint32_t hash) override;
  bool EraseAndConfirm(const Slice& key, uint32_t hash,
                       CleanupContext* context);
  size_t GetUsage() const override;
  size_t GetPinnedUsage() const override;
  void EraseUnRefEntries() override;
  void ApplyToSomeEntries(
      const std::function<void(const Slice& key, void* value, size_t charge,
                               DeleterFn deleter)>& callback,
      uint32_t average_entries_per_lock, uint32_t* state) override;

 private:
  static const uint32_t kInCacheBit = 1;
  static const uint32_t kUsageBit = 2;
  static const uint32_t kRefsOffset = 2;
  static const uint32_t kOneRef = 1 << kRefsOffset;

  // Helper functions to extract cache handle flags and counters.
  static bool InCache(uint32_t flags) { return flags & kInCacheBit; }
  static bool HasUsage(uint32_t flags) { return flags & kUsageBit; }
  static uint32_t CountRefs(uint32_t flags) { return flags >> kRefsOffset; }
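
  // For example (illustrative only): flags == kInCacheBit | kUsageBit |
  // (2 << kRefsOffset) == 0x0B describes a handle that is in cache, has been
  // used since the last examination, and holds 2 user references:
  //   InCache(0x0B) == true, HasUsage(0x0B) == true, CountRefs(0x0B) == 2.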

  // Decrease reference count of the entry. If this decreases the count to 0,
  // recycle the entry. If set_usage is true, also set the usage bit.
  //
  // Returns true if a value is erased.
  //
  // Not necessary to hold mutex_ before being called.
  bool Unref(CacheHandle* handle, bool set_usage, CleanupContext* context);

  // Unset in-cache bit of the entry. Recycle the handle if necessary.
  //
  // Returns true if a value is erased.
  //
  // Has to hold mutex_ before being called.
  bool UnsetInCache(CacheHandle* handle, CleanupContext* context);

  // Put the handle back into the recycle_ list, and put the value associated
  // with it into the to-be-deleted list. It doesn't clean up the key, as the
  // key might be reused by another handle.
  //
  // Has to hold mutex_ before being called.
  void RecycleHandle(CacheHandle* handle, CleanupContext* context);

  // Delete keys and values in to-be-deleted list. Call the method without
  // holding mutex, as destructors can be expensive.
  void Cleanup(const CleanupContext& context);

  // Examine the handle for eviction. If the handle is in cache, its usage bit
  // is not set, and its reference count is 0, evict it from the cache.
  // Otherwise unset the usage bit.
  //
  // Has to hold mutex_ before being called.
  bool TryEvict(CacheHandle* value, CleanupContext* context);

  // Scan through the circular list and evict entries until we have enough
  // capacity for a new cache entry of the specified size. Returns true on
  // success, false otherwise.
  //
  // Has to hold mutex_ before being called.
  bool EvictFromCache(size_t charge, CleanupContext* context);

  CacheHandle* Insert(const Slice& key, uint32_t hash, void* value,
                      size_t charge,
                      void (*deleter)(const Slice& key, void* value),
                      bool hold_reference, CleanupContext* context,
                      bool* overwritten);

  // Guards list_, head_, and recycle_. In addition, updating table_ also has
  // to hold the mutex, to avoid the cache being in an inconsistent state.
  mutable port::Mutex mutex_;

  // The circular list of cache handles. Initially the list is empty. Once a
  // handle is needed by an insertion and no more handles are available in the
  // recycle bin, one more handle is appended to the end.
  //
  // We use std::deque for the circular list because we want to make sure
  // pointers to handles are valid throughout the life-cycle of the cache
  // (in contrast to std::vector), and be able to grow the list (in contrast
  // to statically allocated arrays).
  std::deque<CacheHandle> list_;

  // Index of the next handle in the circular list to be examined for
  // eviction.
  size_t head_;

  // Recycle bin of cache handles.
  autovector<CacheHandle*> recycle_;

  // Maximum cache size.
  std::atomic<size_t> capacity_;

  // Current total size of the cache.
  std::atomic<size_t> usage_;

  // Total un-released cache size.
  std::atomic<size_t> pinned_usage_;

  // Whether to allow inserting into the cache when the cache is full.
  std::atomic<bool> strict_capacity_limit_;

  // Hash table (tbb::concurrent_hash_map) for lookup.
  HashTable table_;
};

ClockCacheShard::ClockCacheShard()
    : head_(0), usage_(0), pinned_usage_(0), strict_capacity_limit_(false) {}

ClockCacheShard::~ClockCacheShard() {
  for (auto& handle : list_) {
    uint32_t flags = handle.flags.load(std::memory_order_relaxed);
    if (InCache(flags) || CountRefs(flags) > 0) {
      if (handle.deleter != nullptr) {
        (*handle.deleter)(handle.key, handle.value);
      }
      delete[] handle.key.data();
    }
  }
}

size_t ClockCacheShard::GetUsage() const {
  return usage_.load(std::memory_order_relaxed);
}

size_t ClockCacheShard::GetPinnedUsage() const {
  return pinned_usage_.load(std::memory_order_relaxed);
}

void ClockCacheShard::ApplyToSomeEntries(
    const std::function<void(const Slice& key, void* value, size_t charge,
                             DeleterFn deleter)>& callback,
    uint32_t average_entries_per_lock, uint32_t* state) {
  assert(average_entries_per_lock > 0);
  MutexLock lock(&mutex_);

  // Figure out the range to iterate, update `state`
  size_t list_size = list_.size();
  size_t start_idx = *state;
  size_t end_idx = start_idx + average_entries_per_lock;
  if (start_idx > list_size) {
    // Shouldn't reach here, but recoverable
    assert(false);
    // Mark finished with all
    *state = UINT32_MAX;
    return;
  }
  if (end_idx >= list_size || end_idx >= UINT32_MAX) {
    // This also includes the hypothetical case of >4 billion
    // cache handles.
    end_idx = list_size;
    // Mark finished with all
    *state = UINT32_MAX;
  } else {
    *state = static_cast<uint32_t>(end_idx);
  }

  // Do the iteration
  auto cur = list_.begin() + start_idx;
  auto end = list_.begin() + end_idx;
  for (; cur != end; ++cur) {
    const CacheHandle& handle = *cur;
    // Use relaxed semantics instead of acquire semantics since we are
    // holding the mutex.
    uint32_t flags = handle.flags.load(std::memory_order_relaxed);
    if (InCache(flags)) {
      callback(handle.key, handle.value, handle.charge, handle.deleter);
    }
  }
}
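
// A sketch of the intended calling pattern (illustrative only; in RocksDB the
// sharded cache layer drives this loop, e.g. from ApplyToAllEntries):
//
//   uint32_t state = 0;
//   do {
//     shard->ApplyToSomeEntries(callback, 256 /* average_entries_per_lock */,
//                               &state);
//   } while (state != UINT32_MAX);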

void ClockCacheShard::RecycleHandle(CacheHandle* handle,
                                    CleanupContext* context) {
  mutex_.AssertHeld();
  assert(!InCache(handle->flags) && CountRefs(handle->flags) == 0);
  context->to_delete_key.push_back(handle->key.data());
  context->to_delete_value.emplace_back(*handle);
  size_t total_charge = handle->GetTotalCharge();
  // clearing `handle` fields would go here but not strictly required
  recycle_.push_back(handle);
  usage_.fetch_sub(total_charge, std::memory_order_relaxed);
}

void ClockCacheShard::Cleanup(const CleanupContext& context) {
  for (const CacheHandle& handle : context.to_delete_value) {
    if (handle.deleter) {
      (*handle.deleter)(handle.key, handle.value);
    }
  }
  for (const char* key : context.to_delete_key) {
    delete[] key;
  }
}

bool ClockCacheShard::Ref(Cache::Handle* h) {
  auto handle = reinterpret_cast<CacheHandle*>(h);
  // CAS loop to increase reference count.
  uint32_t flags = handle->flags.load(std::memory_order_relaxed);
  while (InCache(flags)) {
    // Use acquire semantics on success, as further operations on the cache
    // entry have to be ordered after the reference count is increased.
    if (handle->flags.compare_exchange_weak(flags, flags + kOneRef,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
      if (CountRefs(flags) == 0) {
        // No reference count before the operation.
        size_t total_charge = handle->GetTotalCharge();
        pinned_usage_.fetch_add(total_charge, std::memory_order_relaxed);
      }
      return true;
    }
  }
  return false;
}

bool ClockCacheShard::Unref(CacheHandle* handle, bool set_usage,
                            CleanupContext* context) {
  if (set_usage) {
    handle->flags.fetch_or(kUsageBit, std::memory_order_relaxed);
  }
  // If the handle reaches state refs=0 and InCache=true after this
  // atomic operation then we cannot access `handle` afterward, because
  // it could be evicted before we access the `handle`.
  size_t total_charge = handle->GetTotalCharge();

  // Use acquire-release semantics as previous operations on the cache entry
  // have to be ordered before the reference count is decreased, and potential
  // cleanup of the entry has to be ordered after.
  uint32_t flags = handle->flags.fetch_sub(kOneRef, std::memory_order_acq_rel);
  assert(CountRefs(flags) > 0);
  if (CountRefs(flags) == 1) {
    // this is the last reference.
    pinned_usage_.fetch_sub(total_charge, std::memory_order_relaxed);
    // Cleanup if it is the last reference.
    if (!InCache(flags)) {
      MutexLock l(&mutex_);
      RecycleHandle(handle, context);
    }
  }
  return context->to_delete_value.size();
}

bool ClockCacheShard::UnsetInCache(CacheHandle* handle,
                                   CleanupContext* context) {
  mutex_.AssertHeld();
  // Use acquire-release semantics as previous operations on the cache entry
  // have to be ordered before the in-cache bit is cleared, and potential
  // cleanup of the entry has to be ordered after.
  uint32_t flags =
      handle->flags.fetch_and(~kInCacheBit, std::memory_order_acq_rel);
  // Cleanup if it is the last reference.
  if (InCache(flags) && CountRefs(flags) == 0) {
    RecycleHandle(handle, context);
  }
  return context->to_delete_value.size();
}

bool ClockCacheShard::TryEvict(CacheHandle* handle, CleanupContext* context) {
  mutex_.AssertHeld();
  uint32_t flags = kInCacheBit;
  if (handle->flags.compare_exchange_strong(flags, 0, std::memory_order_acquire,
                                            std::memory_order_relaxed)) {
    bool erased __attribute__((__unused__)) =
        table_.erase(ClockCacheKey(handle->key, handle->hash));
    assert(erased);
    RecycleHandle(handle, context);
    return true;
  }
  handle->flags.fetch_and(~kUsageBit, std::memory_order_relaxed);
  return false;
}

bool ClockCacheShard::EvictFromCache(size_t charge, CleanupContext* context) {
  size_t usage = usage_.load(std::memory_order_relaxed);
  size_t capacity = capacity_.load(std::memory_order_relaxed);
  if (usage == 0) {
    return charge <= capacity;
  }
  size_t new_head = head_;
  bool second_iteration = false;
  while (usage + charge > capacity) {
    assert(new_head < list_.size());
    if (TryEvict(&list_[new_head], context)) {
      usage = usage_.load(std::memory_order_relaxed);
    }
    new_head = (new_head + 1 >= list_.size()) ? 0 : new_head + 1;
    if (new_head == head_) {
      if (second_iteration) {
        return false;
      } else {
        second_iteration = true;
      }
    }
  }
  head_ = new_head;
  return true;
}

void ClockCacheShard::SetCapacity(size_t capacity) {
  CleanupContext context;
  {
    MutexLock l(&mutex_);
    capacity_.store(capacity, std::memory_order_relaxed);
    EvictFromCache(0, &context);
  }
  Cleanup(context);
}

void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  strict_capacity_limit_.store(strict_capacity_limit,
                               std::memory_order_relaxed);
}

CacheHandle* ClockCacheShard::Insert(
    const Slice& key, uint32_t hash, void* value, size_t charge,
    void (*deleter)(const Slice& key, void* value), bool hold_reference,
    CleanupContext* context, bool* overwritten) {
  assert(overwritten != nullptr && *overwritten == false);
  uint32_t meta_charge =
      CacheHandle::CalcMetadataCharge(key, metadata_charge_policy_);
  size_t total_charge = charge + meta_charge;
  MutexLock l(&mutex_);
  bool success = EvictFromCache(total_charge, context);
  bool strict = strict_capacity_limit_.load(std::memory_order_relaxed);
  if (!success && (strict || !hold_reference)) {
    context->to_delete_key.push_back(key.data());
    if (!hold_reference) {
      context->to_delete_value.emplace_back(key, value, deleter);
    }
    return nullptr;
  }
  // Grab an available handle from the recycle bin. If the recycle bin is
  // empty, create a new handle and append it to the end of the circular list.
  CacheHandle* handle = nullptr;
  if (!recycle_.empty()) {
    handle = recycle_.back();
    recycle_.pop_back();
  } else {
    list_.emplace_back();
    handle = &list_.back();
  }
  // Fill handle.
  handle->key = key;
  handle->hash = hash;
  handle->value = value;
  handle->charge = charge;
  handle->meta_charge = meta_charge;
  handle->deleter = deleter;
  uint32_t flags = hold_reference ? kInCacheBit + kOneRef : kInCacheBit;

  // TODO investigate+fix suspected race condition:
  // [thread 1] Lookup starts, up to Ref()
  // [thread 2] Erase/evict the entry just looked up
  // [thread 1] Ref() the handle, even though it's in the recycle bin
  // [thread 2] Insert with recycling that handle
  // Here we obliterate the other thread's Ref
  // Possible fix: never blindly overwrite the flags, but only make
  // relative updates (fetch_add, etc).
  handle->flags.store(flags, std::memory_order_relaxed);
  HashTable::accessor accessor;
  if (table_.find(accessor, ClockCacheKey(key, hash))) {
    *overwritten = true;
    CacheHandle* existing_handle = accessor->second;
    table_.erase(accessor);
    UnsetInCache(existing_handle, context);
  }
  table_.insert(HashTable::value_type(ClockCacheKey(key, hash), handle));
  if (hold_reference) {
    pinned_usage_.fetch_add(total_charge, std::memory_order_relaxed);
  }
  usage_.fetch_add(total_charge, std::memory_order_relaxed);
  return handle;
}

Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                               size_t charge,
                               void (*deleter)(const Slice& key, void* value),
                               Cache::Handle** out_handle,
                               Cache::Priority /*priority*/) {
  CleanupContext context;
  HashTable::accessor accessor;
  char* key_data = new char[key.size()];
  memcpy(key_data, key.data(), key.size());
  Slice key_copy(key_data, key.size());
  bool overwritten = false;
  CacheHandle* handle = Insert(key_copy, hash, value, charge, deleter,
                               out_handle != nullptr, &context, &overwritten);
  Status s;
  if (out_handle != nullptr) {
    if (handle == nullptr) {
      s = Status::Incomplete("Insert failed due to CLOCK cache being full.");
    } else {
      *out_handle = reinterpret_cast<Cache::Handle*>(handle);
    }
  }
  if (overwritten) {
    assert(s.ok());
    s = Status::OkOverwritten();
  }
  Cleanup(context);
  return s;
}

Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
  HashTable::const_accessor accessor;
  if (!table_.find(accessor, ClockCacheKey(key, hash))) {
    return nullptr;
  }
  CacheHandle* handle = accessor->second;
  accessor.release();
  // Ref() could fail if another thread sneaks in and evicts/erases the cache
  // entry before we are able to hold a reference.
  if (!Ref(reinterpret_cast<Cache::Handle*>(handle))) {
    return nullptr;
  }
  // Double-check the key, since the handle may now be representing another key
  // if other threads sneaked in, evicted/erased the entry, and re-used the
  // handle for another cache entry.
  if (hash != handle->hash || key != handle->key) {
    CleanupContext context;
    Unref(handle, false, &context);
    // It is possible that Unref() deleted the entry, so we need to clean up.
    Cleanup(context);
    return nullptr;
  }
  return reinterpret_cast<Cache::Handle*>(handle);
}

bool ClockCacheShard::Release(Cache::Handle* h, bool erase_if_last_ref) {
  CleanupContext context;
  CacheHandle* handle = reinterpret_cast<CacheHandle*>(h);
  bool erased = Unref(handle, true, &context);
  if (erase_if_last_ref && !erased) {
    erased = EraseAndConfirm(handle->key, handle->hash, &context);
  }
  Cleanup(context);
  return erased;
}

void ClockCacheShard::Erase(const Slice& key, uint32_t hash) {
  CleanupContext context;
  EraseAndConfirm(key, hash, &context);
  Cleanup(context);
}

bool ClockCacheShard::EraseAndConfirm(const Slice& key, uint32_t hash,
                                      CleanupContext* context) {
  MutexLock l(&mutex_);
  HashTable::accessor accessor;
  bool erased = false;
  if (table_.find(accessor, ClockCacheKey(key, hash))) {
    CacheHandle* handle = accessor->second;
    table_.erase(accessor);
    erased = UnsetInCache(handle, context);
  }
  return erased;
}

void ClockCacheShard::EraseUnRefEntries() {
  CleanupContext context;
  {
    MutexLock l(&mutex_);
    table_.clear();
    for (auto& handle : list_) {
      UnsetInCache(&handle, &context);
    }
  }
  Cleanup(context);
}

class ClockCache final : public ShardedCache {
 public:
  ClockCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
             CacheMetadataChargePolicy metadata_charge_policy)
      : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
    int num_shards = 1 << num_shard_bits;
    shards_ = new ClockCacheShard[num_shards];
    for (int i = 0; i < num_shards; i++) {
      shards_[i].set_metadata_charge_policy(metadata_charge_policy);
    }
    SetCapacity(capacity);
    SetStrictCapacityLimit(strict_capacity_limit);
  }

  ~ClockCache() override { delete[] shards_; }

  const char* Name() const override { return "ClockCache"; }

  CacheShard* GetShard(uint32_t shard) override {
    return reinterpret_cast<CacheShard*>(&shards_[shard]);
  }

  const CacheShard* GetShard(uint32_t shard) const override {
    return reinterpret_cast<CacheShard*>(&shards_[shard]);
  }

  void* Value(Handle* handle) override {
    return reinterpret_cast<const CacheHandle*>(handle)->value;
  }

  size_t GetCharge(Handle* handle) const override {
    return reinterpret_cast<const CacheHandle*>(handle)->charge;
  }

  uint32_t GetHash(Handle* handle) const override {
    return reinterpret_cast<const CacheHandle*>(handle)->hash;
  }

  DeleterFn GetDeleter(Handle* handle) const override {
    return reinterpret_cast<const CacheHandle*>(handle)->deleter;
  }

  void DisownData() override {
    // Leak data only if that won't generate an ASAN/valgrind warning
    if (!kMustFreeHeapAllocations) {
      shards_ = nullptr;
    }
  }

  void WaitAll(std::vector<Handle*>& /*handles*/) override {}

 private:
  ClockCacheShard* shards_;
};

}  // end anonymous namespace

std::shared_ptr<Cache> NewClockCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy) {
  if (num_shard_bits < 0) {
    num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<ClockCache>(
      capacity, num_shard_bits, strict_capacity_limit, metadata_charge_policy);
}
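
// Note on shard sizing (an illustrative example, not new behavior): passing
// num_shard_bits < 0 to NewClockCache() lets GetDefaultCacheShardBits() pick
// the shard count from the capacity, while num_shard_bits == 6 creates
// 1 << 6 == 64 ClockCacheShard instances.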

}  // namespace ROCKSDB_NAMESPACE

#endif  // SUPPORT_CLOCK_CACHE