diff --git a/db/flush_job.cc b/db/flush_job.cc
index 8398bd8f856b284914f5d5bf3e6e99006260ee31..17c9b0df6384b396592392efe8ecb8d7dcacb198 100644
--- a/db/flush_job.cc
+++ b/db/flush_job.cc
@@ -311,6 +311,7 @@ Status FlushJob::WriteLevel0Table() {
   ro.total_order_seek = true;
   Arena arena;
   uint64_t total_num_entries = 0, total_num_deletes = 0;
+  uint64_t total_data_size = 0;
   size_t total_memory_usage = 0;
   for (MemTable* m : mems_) {
     ROCKS_LOG_INFO(
@@ -325,16 +326,18 @@
     }
     total_num_entries += m->num_entries();
     total_num_deletes += m->num_deletes();
+    total_data_size += m->get_data_size();
     total_memory_usage += m->ApproximateMemoryUsage();
   }
 
-  event_logger_->Log()
-      << "job" << job_context_->job_id << "event"
-      << "flush_started"
-      << "num_memtables" << mems_.size() << "num_entries" << total_num_entries
-      << "num_deletes" << total_num_deletes << "memory_usage"
-      << total_memory_usage << "flush_reason"
-      << GetFlushReasonString(cfd_->GetFlushReason());
+  event_logger_->Log() << "job" << job_context_->job_id << "event"
+                       << "flush_started"
+                       << "num_memtables" << mems_.size() << "num_entries"
+                       << total_num_entries << "num_deletes"
+                       << total_num_deletes << "total_data_size"
+                       << total_data_size << "memory_usage"
+                       << total_memory_usage << "flush_reason"
+                       << GetFlushReasonString(cfd_->GetFlushReason());
 
   {
     ScopedArenaIterator iter(
diff --git a/db/memtable.h b/db/memtable.h
index 46a746ad60165fa6a8bfbdd329d05473d49e6d74..5724f2c31bc39856326e80ec138eb3abb90a104e 100644
--- a/db/memtable.h
+++ b/db/memtable.h
@@ -265,6 +265,10 @@ class MemTable {
     return num_deletes_.load(std::memory_order_relaxed);
   }
 
+  uint64_t get_data_size() const {
+    return data_size_.load(std::memory_order_relaxed);
+  }
+
   // Dynamically change the memtable's capacity. If set below the current usage,
   // the next key added will trigger a flush. Can only increase size when
   // memtable prefix bloom is disabled, since we can't easily allocate more