From db234133a90bb80c9e2f32fb4e2dcdcd27f3fc5b Mon Sep 17 00:00:00 2001
From: Igor Canadi
Date: Fri, 14 Mar 2014 11:26:13 -0700
Subject: [PATCH] [CF] WriteBatch to take in ColumnFamilyHandle

Summary:
The client doesn't need to know anything about the column family ID. By
making WriteBatch take a ColumnFamilyHandle as a parameter, we can
eliminate the GetID() method from ColumnFamilyHandle.

Test Plan: column_family_test

Reviewers: haobo

CC: leveldb

Differential Revision: https://reviews.facebook.net/D16887
---
 db/column_family.h            |  2 +-
 db/column_family_test.cc      | 13 ++++++++-----
 db/db_impl.cc                 |  6 +++---
 db/write_batch.cc             | 32 ++++++++++++++++++++++++++++----
 include/rocksdb/db.h          |  2 --
 include/rocksdb/write_batch.h | 19 +++++++++++--------
 6 files changed, 51 insertions(+), 23 deletions(-)

diff --git a/db/column_family.h b/db/column_family.h
index 50047be23..01d76734f 100644
--- a/db/column_family.h
+++ b/db/column_family.h
@@ -47,7 +47,7 @@ class ColumnFamilyHandleImpl : public ColumnFamilyHandle {
   virtual ~ColumnFamilyHandleImpl();
   virtual ColumnFamilyData* cfd() const { return cfd_; }
 
-  virtual uint32_t GetID() const override;
+  virtual uint32_t GetID() const;
 
  private:
   ColumnFamilyData* cfd_;
diff --git a/db/column_family_test.cc b/db/column_family_test.cc
index 1ccb21a38..5c755c23a 100644
--- a/db/column_family_test.cc
+++ b/db/column_family_test.cc
@@ -289,7 +289,8 @@ TEST(ColumnFamilyTest, DontReuseColumnFamilyID) {
     Open();
     CreateColumnFamilies({"one", "two", "three"});
     for (size_t i = 0; i < handles_.size(); ++i) {
-      ASSERT_EQ(i, handles_[i]->GetID());
+      auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[i]);
+      ASSERT_EQ(i, cfh->GetID());
     }
     if (iter == 1) {
       Reopen();
@@ -303,7 +304,8 @@ TEST(ColumnFamilyTest, DontReuseColumnFamilyID) {
     }
     CreateColumnFamilies({"three2"});
     // ID 3 that was used for dropped column family "three" should not be reused
-    ASSERT_EQ(4, handles_[3]->GetID());
+    auto cfh3 = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[3]);
+    ASSERT_EQ(4, cfh3->GetID());
     Close();
     Destroy();
   }
@@ -362,12 +364,13 @@ TEST(ColumnFamilyTest, DropTest) {
 }
 
 TEST(ColumnFamilyTest, WriteBatchFailure) {
   Open();
+  CreateColumnFamiliesAndReopen({"one", "two"});
   WriteBatch batch;
-  batch.Put(1, Slice("non-existing"), Slice("column-family"));
+  batch.Put(handles_[1], Slice("non-existing"), Slice("column-family"));
+  ASSERT_OK(db_->Write(WriteOptions(), &batch));
+  DropColumnFamilies({1});
   Status s = db_->Write(WriteOptions(), &batch);
   ASSERT_TRUE(s.IsInvalidArgument());
-  CreateColumnFamilies({"one"});
-  ASSERT_OK(db_->Write(WriteOptions(), &batch));
   Close();
 }
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 9e36b500d..362a30ec7 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -3992,21 +3992,21 @@ Status DB::Put(const WriteOptions& opt, ColumnFamilyHandle* column_family,
   // 8 bytes are taken by header, 4 bytes for count, 1 byte for type,
   // and we allocate 11 extra bytes for key length, as well as value length.
   WriteBatch batch(key.size() + value.size() + 24);
-  batch.Put(column_family->GetID(), key, value);
+  batch.Put(column_family, key, value);
   return Write(opt, &batch);
 }
 
 Status DB::Delete(const WriteOptions& opt, ColumnFamilyHandle* column_family,
                   const Slice& key) {
   WriteBatch batch;
-  batch.Delete(column_family->GetID(), key);
+  batch.Delete(column_family, key);
   return Write(opt, &batch);
 }
 
 Status DB::Merge(const WriteOptions& opt, ColumnFamilyHandle* column_family,
                  const Slice& key, const Slice& value) {
   WriteBatch batch;
-  batch.Merge(column_family->GetID(), key, value);
+  batch.Merge(column_family, key, value);
   return Write(opt, &batch);
 }
 
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 39dc29d39..8fffdbfbd 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -173,8 +173,14 @@ void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
   EncodeFixed64(&b->rep_[0], seq);
 }
 
-void WriteBatch::Put(uint32_t column_family_id, const Slice& key,
+void WriteBatch::Put(ColumnFamilyHandle* column_family, const Slice& key,
                      const Slice& value) {
+  uint32_t column_family_id = 0;
+  if (column_family != nullptr) {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    column_family_id = cfh->GetID();
+  }
+
   WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
   if (column_family_id == 0) {
     rep_.push_back(static_cast<char>(kTypeValue));
@@ -186,8 +192,14 @@ void WriteBatch::Put(uint32_t column_family_id, const Slice& key,
   PutLengthPrefixedSlice(&rep_, value);
 }
 
-void WriteBatch::Put(uint32_t column_family_id, const SliceParts& key,
+void WriteBatch::Put(ColumnFamilyHandle* column_family, const SliceParts& key,
                      const SliceParts& value) {
+  uint32_t column_family_id = 0;
+  if (column_family != nullptr) {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    column_family_id = cfh->GetID();
+  }
+
   WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
   if (column_family_id == 0) {
     rep_.push_back(static_cast<char>(kTypeValue));
@@ -199,7 +211,13 @@ void WriteBatch::Put(uint32_t column_family_id, const SliceParts& key,
   PutLengthPrefixedSliceParts(&rep_, value);
 }
 
-void WriteBatch::Delete(uint32_t column_family_id, const Slice& key) {
+void WriteBatch::Delete(ColumnFamilyHandle* column_family, const Slice& key) {
+  uint32_t column_family_id = 0;
+  if (column_family != nullptr) {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    column_family_id = cfh->GetID();
+  }
+
   WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
   if (column_family_id == 0) {
     rep_.push_back(static_cast<char>(kTypeDeletion));
@@ -210,8 +228,14 @@ void WriteBatch::Delete(uint32_t column_family_id, const Slice& key) {
   PutLengthPrefixedSlice(&rep_, key);
 }
 
-void WriteBatch::Merge(uint32_t column_family_id, const Slice& key,
+void WriteBatch::Merge(ColumnFamilyHandle* column_family, const Slice& key,
                        const Slice& value) {
+  uint32_t column_family_id = 0;
+  if (column_family != nullptr) {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    column_family_id = cfh->GetID();
+  }
+
   WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
   if (column_family_id == 0) {
     rep_.push_back(static_cast<char>(kTypeMerge));
diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h
index 431f3bf96..f9a0551fb 100644
--- a/include/rocksdb/db.h
+++ b/include/rocksdb/db.h
@@ -27,8 +27,6 @@ using std::unique_ptr;
 class ColumnFamilyHandle {
  public:
   virtual ~ColumnFamilyHandle() {}
-
-  virtual uint32_t GetID() const = 0;
 };
 
 extern const std::string default_column_family_name;
diff --git a/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h
index 2f8112375..60817056f 100644
--- a/include/rocksdb/write_batch.h
+++ b/include/rocksdb/write_batch.h
@@ -31,6 +31,7 @@ namespace rocksdb {
 
 class Slice;
+class ColumnFamilyHandle;
 struct SliceParts;
 
 class WriteBatch {
 public:
@@ -39,31 +40,33 @@ class WriteBatch {
   ~WriteBatch();
 
   // Store the mapping "key->value" in the database.
-  void Put(uint32_t column_family_id, const Slice& key, const Slice& value);
+  void Put(ColumnFamilyHandle* column_family, const Slice& key,
+           const Slice& value);
   void Put(const Slice& key, const Slice& value) {
-    Put(0, key, value);
+    Put(nullptr, key, value);
   }
 
   // Variant of Put() that gathers output like writev(2). The key and value
   // that will be written to the database are concatentations of arrays of
   // slices.
-  void Put(uint32_t column_family_id, const SliceParts& key,
+  void Put(ColumnFamilyHandle* column_family, const SliceParts& key,
            const SliceParts& value);
   void Put(const SliceParts& key, const SliceParts& value) {
-    Put(0, key, value);
+    Put(nullptr, key, value);
  }
 
   // Merge "value" with the existing value of "key" in the database.
   // "key->merge(existing, value)"
-  void Merge(uint32_t column_family_id, const Slice& key, const Slice& value);
+  void Merge(ColumnFamilyHandle* column_family, const Slice& key,
+             const Slice& value);
   void Merge(const Slice& key, const Slice& value) {
-    Merge(0, key, value);
+    Merge(nullptr, key, value);
   }
 
   // If the database contains a mapping for "key", erase it. Else do nothing.
-  void Delete(uint32_t column_family_id, const Slice& key);
+  void Delete(ColumnFamilyHandle* column_family, const Slice& key);
   void Delete(const Slice& key) {
-    Delete(0, key);
+    Delete(nullptr, key);
   }
 
   // Append a blob of arbitrary size to the records in this batch. The blob will
-- 
GitLab
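
For reference, a minimal sketch of how client code changes under the new API.
This is illustrative only and not part of the patch: the function and variable
names below are hypothetical, and error handling is elided.

#include "rocksdb/db.h"
#include "rocksdb/slice.h"
#include "rocksdb/write_batch.h"

// Sketch: batch writes against a column family after this patch.
// "db" and "cf" are assumed to come from DB::Open() /
// DB::CreateColumnFamily(); both names are illustrative.
rocksdb::Status WriteExample(rocksdb::DB* db,
                             rocksdb::ColumnFamilyHandle* cf) {
  rocksdb::WriteBatch batch;
  // New signature: pass the handle itself rather than cf->GetID().
  batch.Put(cf, rocksdb::Slice("key"), rocksdb::Slice("value"));
  batch.Delete(cf, rocksdb::Slice("stale-key"));
  // The no-handle overloads forward nullptr, which WriteBatch maps to
  // column family ID 0, i.e. the default column family.
  batch.Put(rocksdb::Slice("default-cf-key"), rocksdb::Slice("v"));
  return db->Write(rocksdb::WriteOptions(), &batch);
}

Note that the handle's column family must still exist when the batch is
written: as the updated WriteBatchFailure test shows, writing a batch that
references a dropped column family returns an InvalidArgument status.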