diff --git a/table/cuckoo_table_builder.cc b/table/cuckoo_table_builder.cc
index 0fe243665e34705f425d195ef96fd3433540aa23..df0fae044aae57205a2b652f3a23b08bff3c78c5 100644
--- a/table/cuckoo_table_builder.cc
+++ b/table/cuckoo_table_builder.cc
@@ -274,6 +274,16 @@ uint64_t CuckooTableBuilder::FileSize() const {
   }
 }
 
+// This method is invoked when there is no place to insert the target key.
+// It searches for a set of elements that can be moved to accommodate the
+// target key. The search is a BFS graph traversal whose first level
+// (hash_vals) consists of all the buckets the target key could go to.
+// Then, from each node (curr_node), we find all the buckets that curr_node
+// could go to; they form the children of curr_node in the tree.
+// We continue the traversal until we find an empty bucket, in which case we
+// move all elements along the path from the first level to this empty bucket
+// to make space for the target key, inserted at the first level (*bucket_id).
+// If the tree depth exceeds max depth, we return false, indicating failure.
 bool CuckooTableBuilder::MakeSpaceForKey(const Slice& key,
     uint64_t *bucket_id, autovector<uint64_t> hash_vals) {
   struct CuckooNode {
@@ -331,23 +341,21 @@ bool CuckooTableBuilder::MakeSpaceForKey(const Slice& key,
   }
 
   if (null_found) {
+    // There is an empty node in tree.back(). Now, traverse the path from this
+    // empty node to the top of the tree and, at every node on the path,
+    // replace the child with its parent. Stop when the first level is reached
+    // in the tree (when 0 <= bucket_to_replace_pos < num_hash_table_) and
+    // return this location in the first level for the target key.
     uint32_t bucket_to_replace_pos = tree.size()-1;
-    while (bucket_to_replace_pos >= 0) {
+    while (bucket_to_replace_pos >= num_hash_table_) {
       CuckooNode& curr_node = tree[bucket_to_replace_pos];
-      if (bucket_to_replace_pos >= num_hash_table_) {
-        buckets_[curr_node.bucket_id] =
-          buckets_[tree[curr_node.parent_pos].bucket_id];
-        bucket_to_replace_pos = curr_node.parent_pos;
-      } else {
-        *bucket_id = curr_node.bucket_id;
-        return true;
-      }
+      buckets_[curr_node.bucket_id] =
+        buckets_[tree[curr_node.parent_pos].bucket_id];
+      bucket_to_replace_pos = curr_node.parent_pos;
     }
-    assert(false);
-    return true;
-  } else {
-    return false;
+    *bucket_id = tree[bucket_to_replace_pos].bucket_id;
   }
+  return null_found;
 }
 
 }  // namespace rocksdb
diff --git a/table/cuckoo_table_reader.cc b/table/cuckoo_table_reader.cc
index 26751bc31fba52becdee59970e0fd558f17cf979..f967716f685764c8434dfe9a7f3baa8ded021b5a 100644
--- a/table/cuckoo_table_reader.cc
+++ b/table/cuckoo_table_reader.cc
@@ -24,7 +24,6 @@ CuckooTableReader::CuckooTableReader(
     uint64_t file_size,
    uint64_t (*GetSliceHashPtr)(const Slice&, uint32_t, uint64_t))
     : file_(std::move(file)),
-      file_size_(file_size),
       GetSliceHash(GetSliceHashPtr) {
   if (!options.allow_mmap_reads) {
     status_ = Status::InvalidArgument("File is not mmaped");
diff --git a/table/cuckoo_table_reader.h b/table/cuckoo_table_reader.h
index 3e7e6eaa98084afbb04834607cf277e3753ad1ec..3e2de256a5833a9005d93c6f748115f6133662b7 100644
--- a/table/cuckoo_table_reader.h
+++ b/table/cuckoo_table_reader.h
@@ -53,7 +53,6 @@ class CuckooTableReader: public TableReader {
  private:
   std::unique_ptr<RandomAccessFile> file_;
   Slice file_data_;
-  const uint64_t file_size_;
   bool is_last_level_;
   std::shared_ptr<const TableProperties> table_props_;
   Status status_;
diff --git a/table/cuckoo_table_reader_test.cc b/table/cuckoo_table_reader_test.cc
index f2fe9567bec8561747dad3da4e1bc54b98de7620..a7f1d0fd8c4e6ae7507ea55a53029f0efb925376 100644
--- a/table/cuckoo_table_reader_test.cc
+++ b/table/cuckoo_table_reader_test.cc
@@ -11,6 +11,8 @@ int main() {
 }
 #else
 
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
 #include 
 #include 
 #include 
@@ -304,8 +306,8 @@ void BM_CuckooRead(uint64_t num, uint32_t key_length,
       reader.GetTableProperties()->user_collected_properties;
   const uint32_t num_hash_fun = *reinterpret_cast<const uint32_t*>(
       user_props.at(CuckooTablePropertyNames::kNumHashTable).data());
-  fprintf(stderr, "With %lu items and hash table ratio %f, number of hash"
-      " functions used: %u.\n", num, hash_ratio, num_hash_fun);
+  fprintf(stderr, "With %" PRIu64 " items and hash table ratio %f, number of"
+      " hash functions used: %u.\n", num, hash_ratio, num_hash_fun);
   ReadOptions r_options;
   for (auto& key : keys) {
     int cnt = 0;
diff --git a/util/statistics.cc b/util/statistics.cc
index 9bb70e80dbf309a373d8c41514d0db2c31e4a49c..24957c9b6fc65b399ed7708b5d54b3ecd7a9d736 100644
--- a/util/statistics.cc
+++ b/util/statistics.cc
@@ -4,6 +4,9 @@
 // of patent rights can be found in the PATENTS file in the same directory.
 //
 #include "util/statistics.h"
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
 #include "rocksdb/statistics.h"
 #include "port/likely.h"
 #include 
@@ -96,7 +99,7 @@ std::string StatisticsImpl::ToString() const {
   for (const auto& t : TickersNameMap) {
     if (t.first < TICKER_ENUM_MAX || enable_internal_stats_) {
       char buffer[kBufferSize];
-      snprintf(buffer, kBufferSize, "%s COUNT : %ld\n",
+      snprintf(buffer, kBufferSize, "%s COUNT : %" PRIu64 "\n",
               t.second.c_str(), getTickerCount(t.first));
      res.append(buffer);
    }
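
The comment block added above MakeSpaceForKey describes a BFS displacement search. The following is a minimal, standalone sketch of that idea, not RocksDB's implementation: the Node struct, this MakeSpaceForKey signature, kEmpty, kMaxDepth, and the toy hash in main are all illustrative stand-ins.

// Sketch of BFS cuckoo displacement: all names here are made up for
// illustration and do not match RocksDB's actual types or API.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_set>
#include <vector>

namespace {

constexpr int64_t kEmpty = -1;       // marks an unoccupied bucket
constexpr uint32_t kMaxDepth = 100;  // give up if the BFS tree gets this deep

struct Node {
  uint64_t bucket_id;
  uint32_t depth;
  int parent_pos;  // index of the parent in the BFS tree; -1 at first level
};

// Precondition: every bucket in first_level_buckets is occupied (that is why
// the caller needs to make space). On success, returns true and sets
// *bucket_id to a first-level bucket freed by shifting a chain of existing
// keys one step along their alternate hash locations.
bool MakeSpaceForKey(const std::vector<uint64_t>& first_level_buckets,
                     uint32_t num_hash_functions,
                     const std::function<uint64_t(int64_t, uint32_t)>& hash,
                     std::vector<int64_t>* buckets, uint64_t* bucket_id) {
  std::vector<Node> tree;
  std::unordered_set<uint64_t> visited;
  for (uint64_t b : first_level_buckets) {  // BFS level 0
    tree.push_back({b, 0, -1});
    visited.insert(b);
  }
  std::size_t curr_pos = 0;
  int empty_pos = -1;
  while (empty_pos < 0 && curr_pos < tree.size()) {
    const Node curr = tree[curr_pos];  // copy: tree may reallocate below
    if (curr.depth >= kMaxDepth) {
      return false;  // search tree got too deep; caller must grow or rehash
    }
    // Children: every bucket the key stored in curr.bucket_id could go to.
    for (uint32_t h = 0; h < num_hash_functions && empty_pos < 0; ++h) {
      uint64_t child = hash((*buckets)[curr.bucket_id], h);
      if (!visited.insert(child).second) {
        continue;  // already queued
      }
      tree.push_back({child, curr.depth + 1, static_cast<int>(curr_pos)});
      if ((*buckets)[child] == kEmpty) {
        empty_pos = static_cast<int>(tree.size()) - 1;
      }
    }
    ++curr_pos;
  }
  if (empty_pos < 0) {
    return false;
  }
  // Walk from the empty bucket back up to the first level, shifting each
  // parent's key into its child. The freed first-level bucket is the result.
  int pos = empty_pos;
  while (tree[pos].parent_pos >= 0) {
    const Node& n = tree[pos];
    (*buckets)[n.bucket_id] = (*buckets)[tree[n.parent_pos].bucket_id];
    pos = n.parent_pos;
  }
  *bucket_id = tree[pos].bucket_id;
  return true;
}

}  // namespace

int main() {
  // Toy table: 8 buckets, 2 hash functions. Keys 10 and 13 occupy both
  // buckets (2 and 5) that the new key 42 hashes to, forcing a displacement.
  std::vector<int64_t> buckets(8, kEmpty);
  auto hash = [](int64_t key, uint32_t h) -> uint64_t {
    return (static_cast<uint64_t>(key) + 3 * h) % 8;
  };
  buckets[2] = 10;  // hashes to buckets 2 and 5
  buckets[5] = 13;  // hashes to buckets 5 and 0
  uint64_t freed = 0;
  if (MakeSpaceForKey({2, 5}, 2, hash, &buckets, &freed)) {
    buckets[freed] = 42;  // here key 13 moves to bucket 0 and 42 goes into 5
    std::cout << "inserted key 42 into bucket " << freed << "\n";
  }
  return 0;
}

The step mirrored from the patch is the backtracking loop: keys are shifted from parent to child along the BFS path, so the only slot left for the caller to fill is the freed first-level bucket returned in *bucket_id. That is what the rewritten while loop in MakeSpaceForKey now does directly, with return null_found replacing the unreachable assert(false) branch.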
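
The format-string changes in cuckoo_table_reader_test.cc and util/statistics.cc replace %lu/%ld with the PRIu64 macro from <inttypes.h>, because uint64_t is not unsigned long on every platform (it is unsigned long long on, e.g., 32-bit Linux and on Windows), so %lu is not portable. A minimal sketch of the pattern, with made-up values:

// PRIu64 expands to the correct printf length modifier ("lu" or "llu") for
// the platform's uint64_t. Defining __STDC_FORMAT_MACROS before including
// <inttypes.h> exposes the PRI* macros on older C++ toolchains.
#define __STDC_FORMAT_MACROS
#include <inttypes.h>

#include <cstdio>

int main() {
  uint64_t num_items = 1000000;  // made-up values, for illustration only
  double hash_ratio = 0.9;
  std::printf("With %" PRIu64 " items and hash table ratio %f\n",
              num_items, hash_ratio);
  return 0;
}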