From 95bf3021899edef53aa6aba6d3c7172eded3560e Mon Sep 17 00:00:00 2001
From: Peter Dillinger
Date: Thu, 1 Dec 2022 13:18:40 -0800
Subject: [PATCH] Fix use of make_unique in Arena::AllocateNewBlock (#11012)

Summary:
The change to `make_unique` in https://github.com/facebook/rocksdb/issues/10810
inadvertently started initializing data in Arena blocks, which could lead to
increased memory use due to (at least on our implementation) force-mapping
pages. This change goes back to `new char[]` while keeping all the other good
parts of https://github.com/facebook/rocksdb/issues/10810.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11012

Test Plan: unit test added (fails on Linux before fix)

Reviewed By: anand1976

Differential Revision: D41658893

Pulled By: pdillinger

fbshipit-source-id: 267b7dccfadaeeb1be767d43c602a6abb0e71cd0
---
 memory/arena.cc      |  7 ++++---
 memory/arena_test.cc | 30 ++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/memory/arena.cc b/memory/arena.cc
index d42868edf..0a920203d 100644
--- a/memory/arena.cc
+++ b/memory/arena.cc
@@ -143,9 +143,10 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
 }
 
 char* Arena::AllocateNewBlock(size_t block_bytes) {
-  auto uniq = std::make_unique<char[]>(block_bytes);
-  char* block = uniq.get();
-  blocks_.push_back(std::move(uniq));
+  // NOTE: std::make_unique zero-initializes the block so is not appropriate
+  // here
+  char* block = new char[block_bytes];
+  blocks_.push_back(std::unique_ptr<char[]>(block));
 
   size_t allocated_size;
 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
diff --git a/memory/arena_test.cc b/memory/arena_test.cc
index 30887c23a..21bf7ed62 100644
--- a/memory/arena_test.cc
+++ b/memory/arena_test.cc
@@ -256,6 +256,36 @@ TEST(MmapTest, AllocateLazyZeroed) {
   }
 }
 
+TEST_F(ArenaTest, UnmappedAllocation) {
+  // Verify that it's possible to get unmapped pages in large allocations,
+  // for memory efficiency and to ensure we don't accidentally waste time &
+  // space initializing the memory.
+  constexpr size_t kBlockSize = 2U << 20;
+  Arena arena(kBlockSize);
+
+  // The allocator might give us back recycled memory for a while, but
+  // shouldn't last forever.
+  for (int i = 0;; ++i) {
+    char* p = arena.Allocate(kBlockSize);
+
+    // Start counting page faults
+    PopMinorPageFaultCount();
+
+    // Overwrite the whole allocation
+    for (size_t j = 0; j < kBlockSize; ++j) {
+      p[j] = static_cast<char>(j & 255);
+    }
+
+    size_t faults = PopMinorPageFaultCount();
+    if (faults >= kBlockSize * 3 / 4 / port::kPageSize) {
+      // Most of the access generated page faults => GOOD
+      break;
+    }
+    // Should have succeeded after enough tries
+    ASSERT_LT(i, 1000);
+  }
+}
+
 }  // namespace ROCKSDB_NAMESPACE
 
 int main(int argc, char** argv) {
--
GitLab
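
The fix rests on the difference between value-initialization and default-initialization of heap arrays: std::make_unique<char[]>(n) value-initializes (writes a zero to every byte), while plain `new char[n]` default-initializes and leaves the memory untouched, so untouched pages of a large allocation can stay unmapped. Below is a minimal standalone C++ sketch of that difference; it is not part of the patch or of RocksDB, and the constant and variable names are illustrative assumptions.

// Sketch: why make_unique<char[]> touches every page of a block while
// new char[] does not. Assumes a typical Linux allocator where large,
// uninitialized allocations can remain lazily mapped until first access.
#include <cstddef>
#include <memory>

int main() {
  constexpr std::size_t kBlockBytes = std::size_t{2} << 20;  // 2 MiB block

  // std::make_unique<char[]>(n) value-initializes the array, i.e. zeroes
  // every byte. That write faults in (maps) every page of the block even
  // if the caller never uses the memory afterwards.
  std::unique_ptr<char[]> zeroed = std::make_unique<char[]>(kBlockBytes);

  // new char[n] default-initializes the array, leaving the bytes
  // indeterminate. Pages of the block can remain unmapped until the
  // caller actually writes to them, which is the behavior this patch
  // restores in Arena::AllocateNewBlock.
  std::unique_ptr<char[]> lazy(new char[kBlockBytes]);

  // Use `zeroed` (safe to read: it is all zeros) so the allocations are
  // not optimized away; reading `lazy` would be undefined behavior.
  return zeroed[0] + (lazy ? 0 : 1);
}

If the codebase could assume C++20, std::make_unique_for_overwrite<char[]>(n) would give the same non-zeroing, default-initialized behavior while still returning a unique_ptr directly; the explicit `new char[]` plus unique_ptr wrapping used in the patch achieves the same effect without that requirement.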