//  clock_cache.cc
//  Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
S
Siying Dong 已提交
2 3 4
//  This source code is licensed under both the GPLv2 (found in the
//  COPYING file in the root directory) and Apache 2.0 License
//  (found in the LICENSE.Apache file in the root directory).
Y
Yi Wu 已提交
5 6 7 8 9
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

10
#include "cache/clock_cache.h"
Y
Yi Wu 已提交
11

G
Guido Tagliavini Ponce 已提交
12 13 14 15
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <functional>
Y
Yi Wu 已提交
16

G
Guido Tagliavini Ponce 已提交
17 18 19 20 21 22
#include "monitoring/perf_context_imp.h"
#include "monitoring/statistics.h"
#include "port/lang.h"
#include "util/hash.h"
#include "util/math.h"
#include "util/random.h"
Y
Yi Wu 已提交
23

G
Guido Tagliavini Ponce 已提交
24
namespace ROCKSDB_NAMESPACE {
Y
Yi Wu 已提交
25

G
Guido Tagliavini Ponce 已提交
26
namespace clock_cache {
Y
Yi Wu 已提交
27

28
// Builds a fixed-size open-addressed table with 1 << hash_bits slots.
// occupancy_limit_ caps how many elements may be stored (kStrictLoadFactor
// of the table size). No concurrent access is expected during construction.
ClockHandleTable::ClockHandleTable(size_t capacity, int hash_bits)
    : length_bits_(hash_bits),
      length_bits_mask_((uint32_t{1} << length_bits_) - 1),
      occupancy_limit_(static_cast<uint32_t>((uint32_t{1} << length_bits_) *
                                             kStrictLoadFactor)),
      capacity_(capacity),
      array_(new ClockHandle[size_t{1} << length_bits_]),
      clock_pointer_(0),
      occupancy_(0),
      usage_(0) {
  // Slot indices are 32-bit, so more bits cannot be addressed.
  assert(hash_bits <= 32);
}

// Frees the data of every element still stored in the table.
// Assumes there are no references (of any type) to any slot in the table.
ClockHandleTable::~ClockHandleTable() {
  const uint32_t table_size = GetTableSize();
  for (uint32_t idx = 0; idx < table_size; idx++) {
    ClockHandle& slot = array_[idx];
    if (slot.IsElement()) {
      slot.FreeData();
    }
  }
}

// Returns the handle matching (key, hash) with an external ref taken, or
// nullptr if the key is not present. On a hit, the handle is taken out of
// the clock so it is not evictable while referenced.
ClockHandle* ClockHandleTable::Lookup(const Slice& key, uint32_t hash) {
  uint32_t probe = 0;
  ClockHandle* e = FindSlot(
      key,
      [&](ClockHandle* h) {
        // Take a shared (internal) ref before reading the slot's contents.
        if (h->TryInternalRef()) {
          if (h->IsElement() && h->Matches(key, hash)) {
            return true;
          }
          h->ReleaseInternalRef();
        }
        return false;
      },
      // A slot with no displacements terminates the probe sequence.
      [&](ClockHandle* h) { return h->displacements == 0; },
      [&](ClockHandle* /*h*/) {}, probe);

  if (e != nullptr) {
    // TODO(Guido) Comment from #10347: Here it looks like we have three atomic
    // updates where it would be possible to combine into one CAS (more metadata
    // under one atomic field) or maybe two atomic updates (one arithmetic, one
    // bitwise). Something to think about optimizing.
    e->SetHit();
    // The handle is now referenced, so we take it out of clock.
    ClockOff(e);
    e->InternalToExternalRef();
  }

  return e;
}

// Inserts a copy of *h into an available slot (empty or tombstone).
// Returns the slot, or nullptr if no slot was available. Pre-existing
// copies of the key are marked for deletion and appended to deleted.
// If take_reference is true, the returned handle carries an external ref;
// otherwise it is placed in the clock (evictable).
ClockHandle* ClockHandleTable::Insert(ClockHandle* h,
                                      autovector<ClockHandle>* deleted,
                                      bool take_reference) {
  uint32_t probe = 0;
  ClockHandle* e = FindAvailableSlot(h->key(), h->hash, probe, deleted);
  if (e == nullptr) {
    // No available slot to place the handle.
    return nullptr;
  }

  // The slot is empty or is a tombstone. And we have an exclusive ref.
  Assign(e, h);
  // TODO(Guido) The following RemoveAll can probably be run outside of
  // the exclusive ref. I had a bad case in mind: multiple inserts could
  // annihilate each. Although I think this is impossible, I'm not sure
  // my mental proof covers every case.
  if (e->displacements != 0) {
    // It used to be a tombstone, so there may already be copies of the
    // key in the table.
    RemoveAll(h->key(), h->hash, probe, deleted);
  }

  if (take_reference) {
    // The user wants to take a reference.
    e->ExclusiveToExternalRef();
  } else {
    // The user doesn't want to immediately take a reference, so we make
    // it evictable.
    ClockOn(e);
    e->ReleaseExclusiveRef();
  }
  return e;
}

// Copies src's metadata (value, deleter, hash, charge, key) into slot dst,
// marks dst as an element, and updates the table's usage and occupancy.
// Callers hold an exclusive ref on dst (see Insert).
void ClockHandleTable::Assign(ClockHandle* dst, ClockHandle* src) {
  // DON'T touch displacements and refs.
  dst->value = src->value;
  dst->deleter = src->deleter;
  dst->hash = src->hash;
  dst->total_charge = src->total_charge;
  dst->key_data = src->key_data;
  // Reset all flags, then re-establish the ones this slot needs.
  dst->flags.store(0);
  dst->SetIsElement(true);
  dst->SetCachePriority(src->GetCachePriority());
  usage_ += dst->total_charge;
  occupancy_++;
}

// If an exclusive ref can be acquired immediately and the handle is marked
// for deletion, removes it from the table and returns true WITHOUT releasing
// the exclusive ref (see the note in FindAvailableSlot). Otherwise false.
bool ClockHandleTable::TryRemove(ClockHandle* h,
                                 autovector<ClockHandle>* deleted) {
  if (h->TryExclusiveRef()) {
    if (h->WillBeDeleted()) {
      Remove(h, deleted);
      return true;
    }
    h->ReleaseExclusiveRef();
  }
  return false;
}

// Same as TryRemove, but spins to acquire the exclusive ref rather than
// failing immediately. On success, the exclusive ref is NOT released.
bool ClockHandleTable::SpinTryRemove(ClockHandle* h,
                                     autovector<ClockHandle>* deleted) {
  if (h->SpinTryExclusiveRef()) {
    if (h->WillBeDeleted()) {
      Remove(h, deleted);
      return true;
    }
    h->ReleaseExclusiveRef();
  }
  return false;
}

// Takes the handle out of the clock, making it non-evictable.
void ClockHandleTable::ClockOff(ClockHandle* h) {
  h->SetClockPriority(ClockHandle::ClockPriority::NONE);
}

// Puts the handle (back) into the clock, making it evictable. Handles that
// were hit or inserted with HIGH cache priority start at HIGH clock
// priority; everything else starts at MEDIUM.
void ClockHandleTable::ClockOn(ClockHandle* h) {
  assert(!h->IsInClock());
  bool is_high_priority =
      h->HasHit() || h->GetCachePriority() == Cache::Priority::HIGH;
  h->SetClockPriority(static_cast<ClockHandle::ClockPriority>(
      is_high_priority ? ClockHandle::ClockPriority::HIGH
                       : ClockHandle::ClockPriority::MEDIUM));
}

// Removes h from the table: copies it into deleted, takes it out of clock,
// decrements the displacement counts along its probe sequence, and clears
// its element/will-be-deleted flags. Callers in this file hold an exclusive
// ref on h when calling this.
void ClockHandleTable::Remove(ClockHandle* h,
                              autovector<ClockHandle>* deleted) {
  deleted->push_back(*h);
  ClockOff(h);
  uint32_t probe = 0;
  FindSlot(
      h->key(), [&](ClockHandle* e) { return e == h; },
      [&](ClockHandle* /*e*/) { return false; },
      [&](ClockHandle* e) { e->displacements--; }, probe);
  h->SetWillBeDeleted(false);
  h->SetIsElement(false);
}

// Marks every copy of (key, hash) on the remainder of the probe sequence
// for deletion, removing immediately those whose exclusive ref can be
// acquired. Probing continues from the current value of probe.
void ClockHandleTable::RemoveAll(const Slice& key, uint32_t hash,
                                 uint32_t& probe,
                                 autovector<ClockHandle>* deleted) {
  FindSlot(
      key,
      [&](ClockHandle* h) {
        if (h->TryInternalRef()) {
          if (h->IsElement() && h->Matches(key, hash)) {
            h->SetWillBeDeleted(true);
            h->ReleaseInternalRef();
            // On success TryRemove leaves us holding an exclusive ref,
            // which we don't need here, so release it.
            if (TryRemove(h, deleted)) {
              h->ReleaseExclusiveRef();
            }
            // Keep probing: there may be more copies of the key.
            return false;
          }
          h->ReleaseInternalRef();
        }
        return false;
      },
      [&](ClockHandle* h) { return h->displacements == 0; },
      [&](ClockHandle* /*h*/) {}, probe);
}

// Frees the data of the given handles and subtracts their charge and count
// from the table's usage and occupancy. The handles must already have been
// removed from the table (see Remove).
void ClockHandleTable::Free(autovector<ClockHandle>* deleted) {
  // Idiomatic emptiness check (was: deleted->size() == 0).
  if (deleted->empty()) {
    // Avoid unnecessarily reading usage_ and occupancy_.
    return;
  }

  size_t deleted_charge = 0;
  for (auto& h : *deleted) {
    deleted_charge += h.total_charge;
    h.FreeData();
  }
  assert(usage_ >= deleted_charge);
  usage_ -= deleted_charge;
  occupancy_ -= static_cast<uint32_t>(deleted->size());
}

// Probes for a slot where key can be stored (empty or reclaimable),
// incrementing the displacement count of every slot visited. Returns the
// slot with an exclusive ref held, or nullptr when the whole sequence was
// probed without success, in which case the displacement increments are
// rolled back.
ClockHandle* ClockHandleTable::FindAvailableSlot(
    const Slice& key, uint32_t hash, uint32_t& probe,
    autovector<ClockHandle>* deleted) {
  ClockHandle* e = FindSlot(
      key,
      [&](ClockHandle* h) {
        // To read the handle, first acquire a shared ref.
        if (h->TryInternalRef()) {
          if (h->IsElement()) {
            // The slot is not available.
            // TODO(Guido) Is it worth testing h->WillBeDeleted()?
            if (h->WillBeDeleted() || h->Matches(key, hash)) {
              // The slot can be freed up, or the key we're inserting is already
              // in the table, so we try to delete it. When the attempt is
              // successful, the slot becomes available, so we stop probing.
              // Notice that in that case TryRemove returns an exclusive ref.
              h->SetWillBeDeleted(true);
              h->ReleaseInternalRef();
              if (TryRemove(h, deleted)) {
                return true;
              }
              return false;
            }
            h->ReleaseInternalRef();
            return false;
          }

          // Available slot.
          h->ReleaseInternalRef();
          // Try to acquire an exclusive ref. If we fail, continue probing.
          if (h->SpinTryExclusiveRef()) {
            // Check that the slot is still available.
            if (!h->IsElement()) {
              return true;
            }
            h->ReleaseExclusiveRef();
          }
        }
        return false;
      },
      [&](ClockHandle* /*h*/) { return false; },
      [&](ClockHandle* h) { h->displacements++; }, probe);
  if (e == nullptr) {
    // Undo the displacement increments made along the failed probe.
    Rollback(key, probe);
  }
  return e;
}

// Walks the probe sequence of key starting at the probe-th position.
// Returns the first slot for which match returns true (with probe advanced
// past it), or nullptr when abort returns true or the sequence wraps all
// the way around. update is invoked on each visited slot that neither
// matches nor aborts.
ClockHandle* ClockHandleTable::FindSlot(
    const Slice& key, std::function<bool(ClockHandle*)> match,
    std::function<bool(ClockHandle*)> abort,
    std::function<void(ClockHandle*)> update, uint32_t& probe) {
  // We use double-hashing probing. Every probe in the sequence is a
  // pseudorandom integer, computed as a linear function of two random hashes,
  // which we call base and increment. Specifically, the i-th probe is base + i
  // * increment modulo the table size.
  uint32_t base = ModTableSize(Hash(key.data(), key.size(), kProbingSeed1));
  // We use an odd increment, which is relatively prime with the power-of-two
  // table size. This implies that we cycle back to the first probe only
  // after probing every slot exactly once.
  uint32_t increment =
      ModTableSize((Hash(key.data(), key.size(), kProbingSeed2) << 1) | 1);
  uint32_t current = ModTableSize(base + probe * increment);
  while (true) {
    ClockHandle* h = &array_[current];
    if (current == base && probe > 0) {
      // We looped back.
      return nullptr;
    }
    if (match(h)) {
      probe++;
      return h;
    }
    if (abort(h)) {
      return nullptr;
    }
    probe++;
    update(h);
    current = ModTableSize(current + increment);
  }
}

void ClockHandleTable::Rollback(const Slice& key, uint32_t probe) {
  uint32_t current = ModTableSize(Hash(key.data(), key.size(), kProbingSeed1));
  uint32_t increment =
      ModTableSize((Hash(key.data(), key.size(), kProbingSeed2) << 1) | 1);
  for (uint32_t i = 0; i < probe; i++) {
    array_[current].displacements--;
G
Guido Tagliavini Ponce 已提交
306
    current = ModTableSize(current + increment);
Y
Yi Wu 已提交
307 308 309
  }
}

310 311 312 313 314 315 316
// Runs the clock eviction policy until usage + charge fits within capacity
// or the iteration budget runs out. The data of evicted elements is freed
// before returning.
void ClockHandleTable::ClockRun(size_t charge) {
  // TODO(Guido) When an element is in the probe sequence of a
  // hot element, it will be hard to get an exclusive ref.
  // Do we need a mechanism to prevent an element from sitting
  // for a long time in cache waiting to be evicted?
  autovector<ClockHandle> deleted;
  uint32_t max_iterations =
      ClockHandle::ClockPriority::HIGH *
      (1 +
       static_cast<uint32_t>(
           GetTableSize() *
           kLoadFactor));  // It may take up to HIGH passes to evict an element.
  size_t usage_local = usage_;
  size_t capacity_local = capacity_;
  while (usage_local + charge > capacity_local && max_iterations--) {
    uint32_t steps = 1 + static_cast<uint32_t>(1 / kLoadFactor);
    // Claim a batch of slots by advancing the shared clock pointer, so
    // concurrent clock runs work on distinct batches (assumes clock_pointer_
    // has atomic fetch-add semantics — see its declaration).
    uint32_t clock_pointer_local = (clock_pointer_ += steps) - steps;
    for (uint32_t i = 0; i < steps; i++) {
      ClockHandle* h = &array_[ModTableSize(clock_pointer_local + i)];
      if (h->TryExclusiveRef()) {
        if (h->WillBeDeleted()) {
          Remove(h, &deleted);
          usage_local -= h->total_charge;
        } else {
          if (!h->IsInClock() && h->IsElement()) {
            // We adjust the clock priority to make the element evictable again.
            // Why? Elements that are not in clock are either currently
            // externally referenced or used to be. Because we are holding an
            // exclusive ref, we know we are in the latter case. This can only
            // happen when the last external reference to an element was
            // released, and the element was not immediately removed.
            ClockOn(h);
          }
          ClockHandle::ClockPriority priority = h->GetClockPriority();
          if (priority == ClockHandle::ClockPriority::LOW) {
            Remove(h, &deleted);
            usage_local -= h->total_charge;
          } else if (priority > ClockHandle::ClockPriority::LOW) {
            h->DecreaseClockPriority();
          }
        }
        h->ReleaseExclusiveRef();
      }
    }
  }

  Free(&deleted);
}

// Builds one shard; the hash table size is derived from the capacity and
// the estimated per-entry charge (see CalcHashBits).
ClockCacheShard::ClockCacheShard(
    size_t capacity, size_t estimated_value_size, bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy)
    : strict_capacity_limit_(strict_capacity_limit),
      detached_usage_(0),
      table_(capacity, CalcHashBits(capacity, estimated_value_size,
                                    metadata_charge_policy)) {
  set_metadata_charge_policy(metadata_charge_policy);
}

// Removes and frees every element that is not externally referenced.
void ClockCacheShard::EraseUnRefEntries() {
  autovector<ClockHandle> deleted;

  table_.ApplyToEntriesRange(
      [this, &deleted](ClockHandle* h) {
        // Externally unreferenced element.
        table_.Remove(h, &deleted);
      },
      0, table_.GetTableSize(), true);

  table_.Free(&deleted);
}

// Applies callback to a chunk of entries, resuming across calls via *state.
// *state is set to UINT32_MAX once the whole table has been covered.
void ClockCacheShard::ApplyToSomeEntries(
    const std::function<void(const Slice& key, void* value, size_t charge,
                             DeleterFn deleter)>& callback,
    uint32_t average_entries_per_lock, uint32_t* state) {
  // The state is essentially going to be the starting hash, which works
  // nicely even if we resize between calls because we use upper-most
  // hash bits for table indexes.
  uint32_t length_bits = table_.GetLengthBits();
  uint32_t length = table_.GetTableSize();

  assert(average_entries_per_lock > 0);
  // Assuming we are called with same average_entries_per_lock repeatedly,
  // this simplifies some logic (index_end will not overflow).
  assert(average_entries_per_lock < length || *state == 0);

  uint32_t index_begin = *state >> (32 - length_bits);
  uint32_t index_end = index_begin + average_entries_per_lock;
  if (index_end >= length) {
    // Going to end.
    index_end = length;
    *state = UINT32_MAX;
  } else {
    *state = index_end << (32 - length_bits);
  }

  table_.ConstApplyToEntriesRange(
      [callback,
       metadata_charge_policy = metadata_charge_policy_](const ClockHandle* h) {
        callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
                 h->deleter);
      },
      index_begin, index_end, false);
}

// Allocates a standalone ("detached") copy of *h that lives outside the
// hash table. The returned handle carries an external ref, and its charge
// is accounted in detached_usage_ rather than in the table's usage.
ClockHandle* ClockCacheShard::DetachedInsert(ClockHandle* h) {
  ClockHandle* e = new ClockHandle();
  *e = *h;
  e->SetDetached();
  // NOTE(review): return value ignored — presumably this always succeeds on
  // a freshly constructed handle; confirm against ClockHandle's ref rules.
  e->TryExternalRef();
  detached_usage_ += h->total_charge;
  return e;
}

// Estimates the total charge of one handle holding a value of the given
// estimated size, under the given metadata charge policy.
size_t ClockCacheShard::CalcEstimatedHandleCharge(
    size_t estimated_value_size,
    CacheMetadataChargePolicy metadata_charge_policy) {
  ClockHandle sample_handle;
  sample_handle.CalcTotalCharge(estimated_value_size, metadata_charge_policy);
  return sample_handle.total_charge;
}

// Computes the number of hash bits (log2 of the table size) so that the
// table can hold the capacity's worth of entries at kLoadFactor occupancy.
int ClockCacheShard::CalcHashBits(
    size_t capacity, size_t estimated_value_size,
    CacheMetadataChargePolicy metadata_charge_policy) {
  size_t handle_charge =
      CalcEstimatedHandleCharge(estimated_value_size, metadata_charge_policy);
  assert(handle_charge > 0);
  uint32_t num_entries =
      static_cast<uint32_t>(capacity / (kLoadFactor * handle_charge)) + 1;
  assert(num_entries <= uint32_t{1} << 31);
  // ceil(log2(num_entries)): the smallest power of two >= num_entries.
  return FloorLog2((num_entries << 1) - 1);
}

// Updates the shard capacity. Only shrinking (or keeping) the capacity is
// supported; a clock run evicts elements to fit the new capacity.
void ClockCacheShard::SetCapacity(size_t capacity) {
  if (capacity > table_.GetCapacity()) {
    assert(false);  // Not supported.
  }
  table_.SetCapacity(capacity);
  table_.ClockRun(detached_usage_);
}

// Toggles whether over-capacity inserts fail (see Insert) instead of
// overcommitting the cache.
void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  strict_capacity_limit_ = strict_capacity_limit;
}

// Inserts (key, value) into the shard, first freeing space via the clock
// policy. May fail with Status::MemoryLimit under a strict capacity limit.
// When a handle is requested but the hash table is at its occupancy limit,
// the entry is allocated outside the table instead (DetachedInsert).
Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                               size_t charge, Cache::DeleterFn deleter,
                               Cache::Handle** handle,
                               Cache::Priority priority) {
  if (key.size() != kCacheKeySize) {
    return Status::NotSupported("ClockCache only supports key size " +
                                std::to_string(kCacheKeySize) + "B");
  }

  // Build the handle on the stack; it is copied into the table (or into a
  // detached allocation) below.
  ClockHandle tmp;
  tmp.value = value;
  tmp.deleter = deleter;
  tmp.hash = hash;
  tmp.CalcTotalCharge(charge, metadata_charge_policy_);
  tmp.SetCachePriority(priority);
  for (int i = 0; i < kCacheKeySize; i++) {
    tmp.key_data[i] = key.data()[i];
  }

  Status s = Status::OK();

  // Use a local copy to minimize cache synchronization.
  size_t detached_usage = detached_usage_;

  // Free space with the clock policy until enough space is freed or there are
  // no evictable elements.
  table_.ClockRun(tmp.total_charge + detached_usage);

  // Use local copies to minimize cache synchronization
  // (occupancy_ and usage_ are read and written by all insertions).
  uint32_t occupancy_local = table_.GetOccupancy();
  size_t total_usage = table_.GetUsage() + detached_usage;

  // TODO: Currently we support strict_capacity_limit == false as long as the
  // number of pinned elements is below table_.GetOccupancyLimit(). We can
  // always support it as follows: whenever we exceed this limit, we dynamically
  // allocate a handle and return it (when the user provides a handle pointer,
  // of course). Then, Release checks whether the handle was dynamically
  // allocated, or is stored in the table.
  if (total_usage + tmp.total_charge > table_.GetCapacity() &&
      (strict_capacity_limit_ || handle == nullptr)) {
    if (handle == nullptr) {
      // Don't insert the entry but still return ok, as if the entry inserted
      // into cache and get evicted immediately.
      tmp.FreeData();
    } else {
      if (occupancy_local + 1 > table_.GetOccupancyLimit()) {
        // TODO: Consider using a distinct status for this case, but usually
        // it will be handled the same way as reaching charge capacity limit
        s = Status::MemoryLimit(
            "Insert failed because all slots in the hash table are full.");
      } else {
        s = Status::MemoryLimit(
            "Insert failed because the total charge has exceeded the "
            "capacity.");
      }
    }
  } else {
    ClockHandle* h = nullptr;
    if (handle != nullptr && occupancy_local + 1 > table_.GetOccupancyLimit()) {
      // Even if the user wishes to overload the cache, we can't insert into
      // the hash table. Instead, we dynamically allocate a new handle.
      h = DetachedInsert(&tmp);
      // TODO: Return special status?
    } else {
      // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
      autovector<ClockHandle> deleted;
      h = table_.Insert(&tmp, &deleted, handle != nullptr);
      if (h == nullptr && handle != nullptr) {
        // The table is full. This can happen when many threads simultaneously
        // attempt an insert, and the table is operating close to full capacity.
        h = DetachedInsert(&tmp);
      }
      // Notice that if handle == nullptr, we don't insert the entry but still
      // return ok.
      if (deleted.size() > 0) {
        s = Status::OkOverwritten();
      }
      table_.Free(&deleted);
    }
    if (handle != nullptr) {
      *handle = reinterpret_cast<Cache::Handle*>(h);
    }
  }

  return s;
}

// Looks up key in this shard. On a hit, the returned handle carries an
// external ref (see ClockHandleTable::Lookup); the caller must Release it.
Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t hash) {
  return reinterpret_cast<Cache::Handle*>(table_.Lookup(key, hash));
}

G
Guido Tagliavini Ponce 已提交
550 551
// Takes an additional external ref on a handle the caller already holds a
// ref to. Returns whether the ref was acquired.
bool ClockCacheShard::Ref(Cache::Handle* h) {
  ClockHandle* e = reinterpret_cast<ClockHandle*>(h);
  // The caller is expected to already hold an external ref.
  assert(e->ExternalRefs() > 0);
  return e->TryExternalRef();
}

G
Guido Tagliavini Ponce 已提交
556
// Releases one external ref on the handle. Returns true iff this call ended
// up freeing the handle's data (detached deletion or successful removal).
bool ClockCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
  // In contrast with LRUCache's Release, this function won't delete the handle
  // when the cache is above capacity and the reference is the last one. Space
  // is only freed up by EvictFromClock (called by Insert when space is needed)
  // and Erase. We do this to avoid an extra atomic read of the variable usage_.
  if (handle == nullptr) {
    return false;
  }

  ClockHandle* h = reinterpret_cast<ClockHandle*>(handle);

  // Detached handles live outside the table and are deleted when the last
  // reference goes away.
  if (UNLIKELY(h->IsDetached())) {
    h->ReleaseExternalRef();
    if (h->TryExclusiveRef()) {
      // Only the last reference will succeed.
      // Don't bother releasing the exclusive ref.
      h->FreeData();
      detached_usage_ -= h->total_charge;
      delete h;
      return true;
    }
    return false;
  }

  // Snapshot the ref word once; both checks below read the same snapshot.
  uint32_t refs = h->refs;
  bool last_reference = ((refs & ClockHandle::EXTERNAL_REFS) == 1);
  bool will_be_deleted = refs & ClockHandle::WILL_BE_DELETED;

  if (last_reference && (will_be_deleted || erase_if_last_ref)) {
    autovector<ClockHandle> deleted;
    h->SetWillBeDeleted(true);
    h->ReleaseExternalRef();
    if (table_.SpinTryRemove(h, &deleted)) {
      // SpinTryRemove left us holding the exclusive ref.
      h->ReleaseExclusiveRef();
      table_.Free(&deleted);
      return true;
    }
  } else {
    h->ReleaseExternalRef();
  }

  return false;
}

600
void ClockCacheShard::Erase(const Slice& key, uint32_t hash) {
601 602 603 604
  autovector<ClockHandle> deleted;
  uint32_t probe = 0;
  table_.RemoveAll(key, hash, probe, &deleted);
  table_.Free(&deleted);
G
Guido Tagliavini Ponce 已提交
605
}
Y
Yi Wu 已提交
606

607
// Returns the total charge of the elements stored in the hash table.
size_t ClockCacheShard::GetUsage() const { return table_.GetUsage(); }

G
Guido Tagliavini Ponce 已提交
609
// Returns the total charge of externally referenced (pinned) elements plus
// the usage of detached handles.
size_t ClockCacheShard::GetPinnedUsage() const {
  // Computes the pinned usage by scanning the whole hash table. This
  // is slow, but avoids keeping an exact counter on the clock usage,
  // i.e., the number of not externally referenced elements.
  // Why avoid this counter? Because Lookup removes elements from the clock
  // list, so it would need to update the pinned usage every time,
  // which creates additional synchronization costs.
  size_t clock_usage = 0;

  table_.ConstApplyToEntriesRange(
      [&clock_usage](const ClockHandle* h) {
        if (h->ExternalRefs() > 1) {
          // We check > 1 because we are holding an external ref.
          clock_usage += h->total_charge;
        }
      },
      0, table_.GetTableSize(), true);

  return clock_usage + detached_usage_;
}

G
Guido Tagliavini Ponce 已提交
630 631 632
// Builds the sharded cache: allocates cacheline-aligned storage for
// 2^num_shard_bits shards and constructs each in place, splitting the
// capacity evenly among them (rounded up).
ClockCache::ClockCache(size_t capacity, size_t estimated_value_size,
                       int num_shard_bits, bool strict_capacity_limit,
                       CacheMetadataChargePolicy metadata_charge_policy)
    : ShardedCache(capacity, num_shard_bits, strict_capacity_limit),
      num_shards_(1 << num_shard_bits) {
  assert(estimated_value_size > 0 ||
         metadata_charge_policy != kDontChargeCacheMetadata);
  shards_ = reinterpret_cast<ClockCacheShard*>(
      port::cacheline_aligned_alloc(sizeof(ClockCacheShard) * num_shards_));
  size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
  for (int i = 0; i < num_shards_; i++) {
    // Placement new: storage was allocated raw above.
    new (&shards_[i])
        ClockCacheShard(per_shard, estimated_value_size, strict_capacity_limit,
                        metadata_charge_policy);
  }
}

G
Guido Tagliavini Ponce 已提交
647 648 649 650 651 652 653
// Destroys each shard in place and frees the aligned shard array.
// shards_ may be nullptr when DisownData() deliberately leaked the shards.
ClockCache::~ClockCache() {
  if (shards_ == nullptr) {
    return;
  }
  assert(num_shards_ > 0);
  for (int shard_idx = 0; shard_idx < num_shards_; shard_idx++) {
    // Explicit destructor call matches the placement new in the constructor.
    shards_[shard_idx].~ClockCacheShard();
  }
  port::cacheline_aligned_free(shards_);
}

G
Guido Tagliavini Ponce 已提交
657 658 659
// Returns the shard with the given index.
CacheShard* ClockCache::GetShard(uint32_t shard) {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

G
Guido Tagliavini Ponce 已提交
661 662 663
// Const overload: returns the shard with the given index.
const CacheShard* ClockCache::GetShard(uint32_t shard) const {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

G
Guido Tagliavini Ponce 已提交
665 666 667
// Returns the value stored in the given handle.
void* ClockCache::Value(Handle* handle) {
  return reinterpret_cast<const ClockHandle*>(handle)->value;
}

G
Guido Tagliavini Ponce 已提交
669 670 671 672
// Returns the charge of the given handle, using the metadata charge policy
// of the first shard (the constructor builds every shard with one policy).
size_t ClockCache::GetCharge(Handle* handle) const {
  const auto* h = reinterpret_cast<const ClockHandle*>(handle);
  CacheMetadataChargePolicy policy = num_shards_ > 0
                                         ? shards_[0].metadata_charge_policy_
                                         : kDontChargeCacheMetadata;
  return h->GetCharge(policy);
}

G
Guido Tagliavini Ponce 已提交
678 679 680 681
// Returns the deleter associated with the given handle.
Cache::DeleterFn ClockCache::GetDeleter(Handle* handle) const {
  auto h = reinterpret_cast<const ClockHandle*>(handle);
  return h->deleter;
}

G
Guido Tagliavini Ponce 已提交
683 684 685
// Returns the hash stored in the given handle.
uint32_t ClockCache::GetHash(Handle* handle) const {
  return reinterpret_cast<const ClockHandle*>(handle)->hash;
}

G
Guido Tagliavini Ponce 已提交
687 688 689 690 691 692 693 694 695
void ClockCache::DisownData() {
  // Leak data only if that won't generate an ASAN/valgrind warning.
  if (!kMustFreeHeapAllocations) {
    // Dropping the pointers turns the destructor into a no-op, deliberately
    // leaking the shard array.
    shards_ = nullptr;
    num_shards_ = 0;
  }
}

}  // namespace clock_cache
Y
Yi Wu 已提交
696

697
// NOTE: this public factory deliberately returns an LRUCache with comparable
// settings; the clock implementation above is only reachable through
// ExperimentalNewClockCache.
std::shared_ptr<Cache> NewClockCache(
    size_t capacity, int num_shard_bits, bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy) {
  return NewLRUCache(capacity, num_shard_bits, strict_capacity_limit,
                     /* high_pri_pool_ratio */ 0.5, nullptr,
                     kDefaultToAdaptiveMutex, metadata_charge_policy,
                     /* low_pri_pool_ratio */ 0.0);
}

// Creates the experimental lock-free clock cache. Returns nullptr when
// num_shard_bits is too large; a negative num_shard_bits selects a default
// derived from the capacity.
std::shared_ptr<Cache> ExperimentalNewClockCache(
    size_t capacity, size_t estimated_value_size, int num_shard_bits,
    bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy) {
  if (num_shard_bits >= 20) {
    // The cache cannot be sharded into too many fine pieces.
    return nullptr;
  }
  int shard_bits = num_shard_bits;
  if (shard_bits < 0) {
    shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<clock_cache::ClockCache>(
      capacity, estimated_value_size, shard_bits, strict_capacity_limit,
      metadata_charge_policy);
}

721
}  // namespace ROCKSDB_NAMESPACE