Commit 49881921 authored by Levi Tamasi, committed by Facebook GitHub Bot

Rename a recently added PerfContext counter (#11294)

Summary:
The patch renames the counter added in https://github.com/facebook/rocksdb/issues/11284 for better consistency with the existing naming scheme.
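For illustration, here is a minimal sketch of how a client could observe the renamed counter during a point lookup. Only the counter name `internal_merge_point_lookup_count` comes from this patch; the helper name, key, and merge-operator setup are assumptions:

```cpp
#include <cassert>
#include <iostream>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"

// Hypothetical helper: performs a point lookup and prints how many Merge
// operands were applied while serving it. Assumes `db` was opened with a
// merge operator and that Merge entries exist for `key`.
void PrintMergeOperandCount(rocksdb::DB* db, const std::string& key) {
  // PerfContext counters are only updated when counting is enabled.
  rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableCount);
  rocksdb::get_perf_context()->Reset();

  std::string value;
  rocksdb::Status s = db->Get(rocksdb::ReadOptions(), key, &value);
  assert(s.ok());

  // Counter renamed by this patch (was internal_merge_count_point_lookups).
  std::cout << "Merge operands applied: "
            << rocksdb::get_perf_context()->internal_merge_point_lookup_count
            << '\n';
}
```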

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11294

Test Plan: `make check`

Reviewed By: jowlyzhang

Differential Revision: D44035964

Pulled By: ltamasi

fbshipit-source-id: 8b1a2a03ee728148365367e0ecc1fcf462f62191
Parent 648e972f
@@ -10,7 +10,7 @@
### New Features
* Add statistics rocksdb.secondary.cache.filter.hits, rocksdb.secondary.cache.index.hits, and rocksdb.secondary.cache.data.hits
-* Added a new PerfContext counter `internal_merge_count_point_lookups` which tracks the number of Merge operands applied while serving point lookup queries.
+* Added a new PerfContext counter `internal_merge_point_lookup_count` which tracks the number of Merge operands applied while serving point lookup queries.
## 8.0.0 (02/19/2023)
### Behavior changes
......
@@ -1230,7 +1230,7 @@ static bool SaveValue(void* arg, const char* entry) {
*(s->merge_in_progress) = true;
merge_context->PushOperand(
v, s->inplace_update_support == false /* operand_pinned */);
-PERF_COUNTER_ADD(internal_merge_count_point_lookups, 1);
+PERF_COUNTER_ADD(internal_merge_point_lookup_count, 1);
if (s->do_merge && merge_operator->ShouldMerge(
merge_context->GetOperandsDirectionBackward())) {
......
@@ -1018,8 +1018,7 @@ TEST_F(PerfContextTest, MergeOperandCount) {
PinnableSlice result;
ASSERT_OK(db->Get(ReadOptions(), db->DefaultColumnFamily(), keys[i],
&result));
-ASSERT_EQ(get_perf_context()->internal_merge_count_point_lookups,
-          i + 1);
+ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count, i + 1);
get_perf_context()->Reset();
}
@@ -1029,8 +1028,7 @@ TEST_F(PerfContextTest, MergeOperandCount) {
PinnableWideColumns result;
ASSERT_OK(db->GetEntity(ReadOptions(), db->DefaultColumnFamily(),
keys[i], &result));
-ASSERT_EQ(get_perf_context()->internal_merge_count_point_lookups,
-          i + 1);
+ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count, i + 1);
get_perf_context()->Reset();
}
@@ -1056,7 +1054,7 @@ TEST_F(PerfContextTest, MergeOperandCount) {
ASSERT_OK(statuses[i]);
}
-ASSERT_EQ(get_perf_context()->internal_merge_count_point_lookups,
+ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count,
total_merges);
get_perf_context()->Reset();
@@ -1074,7 +1072,7 @@ TEST_F(PerfContextTest, MergeOperandCount) {
ASSERT_OK(statuses[i]);
}
-ASSERT_EQ(get_perf_context()->internal_merge_count_point_lookups,
+ASSERT_EQ(get_perf_context()->internal_merge_point_lookup_count,
total_merges);
get_perf_context()->Reset();
......
@@ -142,7 +142,7 @@ struct PerfContext {
// How many merge operands were fed into the merge operator by point lookups.
// Note: base values are not included in the count.
//
-uint64_t internal_merge_count_point_lookups;
+uint64_t internal_merge_point_lookup_count;
// Number of times we reseeked inside a merging iterator, specifically to skip
// after or before a range of keys covered by a range deletion in a newer LSM
// component.
......
@@ -69,7 +69,7 @@ PerfContext::PerfContext(const PerfContext& other) {
internal_delete_skipped_count = other.internal_delete_skipped_count;
internal_recent_skipped_count = other.internal_recent_skipped_count;
internal_merge_count = other.internal_merge_count;
-internal_merge_count_point_lookups = other.internal_merge_count_point_lookups;
+internal_merge_point_lookup_count = other.internal_merge_point_lookup_count;
internal_range_del_reseek_count = other.internal_range_del_reseek_count;
write_wal_time = other.write_wal_time;
get_snapshot_time = other.get_snapshot_time;
@@ -189,7 +189,7 @@ PerfContext::PerfContext(PerfContext&& other) noexcept {
internal_delete_skipped_count = other.internal_delete_skipped_count;
internal_recent_skipped_count = other.internal_recent_skipped_count;
internal_merge_count = other.internal_merge_count;
-internal_merge_count_point_lookups = other.internal_merge_count_point_lookups;
+internal_merge_point_lookup_count = other.internal_merge_point_lookup_count;
internal_range_del_reseek_count = other.internal_range_del_reseek_count;
write_wal_time = other.write_wal_time;
get_snapshot_time = other.get_snapshot_time;
@@ -311,7 +311,7 @@ PerfContext& PerfContext::operator=(const PerfContext& other) {
internal_delete_skipped_count = other.internal_delete_skipped_count;
internal_recent_skipped_count = other.internal_recent_skipped_count;
internal_merge_count = other.internal_merge_count;
-internal_merge_count_point_lookups = other.internal_merge_count_point_lookups;
+internal_merge_point_lookup_count = other.internal_merge_point_lookup_count;
internal_range_del_reseek_count = other.internal_range_del_reseek_count;
write_wal_time = other.write_wal_time;
get_snapshot_time = other.get_snapshot_time;
@@ -425,7 +425,7 @@ void PerfContext::Reset() {
internal_delete_skipped_count = 0;
internal_recent_skipped_count = 0;
internal_merge_count = 0;
-internal_merge_count_point_lookups = 0;
+internal_merge_point_lookup_count = 0;
internal_range_del_reseek_count = 0;
write_wal_time = 0;
@@ -560,7 +560,7 @@ std::string PerfContext::ToString(bool exclude_zero_counters) const {
PERF_CONTEXT_OUTPUT(internal_delete_skipped_count);
PERF_CONTEXT_OUTPUT(internal_recent_skipped_count);
PERF_CONTEXT_OUTPUT(internal_merge_count);
-PERF_CONTEXT_OUTPUT(internal_merge_count_point_lookups);
+PERF_CONTEXT_OUTPUT(internal_merge_point_lookup_count);
PERF_CONTEXT_OUTPUT(internal_range_del_reseek_count);
PERF_CONTEXT_OUTPUT(write_wal_time);
PERF_CONTEXT_OUTPUT(get_snapshot_time);
......
@@ -442,7 +442,7 @@ bool GetContext::SaveValue(const ParsedInternalKey& parsed_key,
state_ = kMerge;
// value_pinner is not set from plain_table_reader.cc for example.
push_operand(value, value_pinner);
-PERF_COUNTER_ADD(internal_merge_count_point_lookups, 1);
+PERF_COUNTER_ADD(internal_merge_point_lookup_count, 1);
if (do_merge_ && merge_operator_ != nullptr &&
merge_operator_->ShouldMerge(
......
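Both updated call sites record through RocksDB's `PERF_COUNTER_ADD` macro. As a rough sketch of its behavior (an assumption; the real definition lives in RocksDB's internal monitoring headers and also compiles away entirely in builds without perf context support):

```cpp
#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"

// Simplified sketch of PERF_COUNTER_ADD: bump a PerfContext field only when
// the current perf level has counting enabled. Not the verbatim macro.
#define PERF_COUNTER_ADD(metric, value)                              \
  if (rocksdb::GetPerfLevel() >= rocksdb::PerfLevel::kEnableCount) { \
    rocksdb::get_perf_context()->metric += (value);                  \
  }
```

This gating is why the `PerfContextTest` assertions above can expect exact operand counts after each `Get()`/`GetEntity()` call once counting is enabled.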