Commit 6ee4752f authored by David Chinner, committed by Lachlan McIlroy

[XFS] Use atomic counters for ktrace buffer indexes

ktrace_enter() is consuming vast amounts of CPU time due to the use of a
single global lock for protecting buffer index increments. Change it to
use per-buffer atomic counters - this reduces ktrace_enter() overhead
during a trace intensive test on a 4p machine from 58% of all CPU time to
12% and halves test runtime.

SGI-PV: 977546
SGI-Modid: xfs-linux-melb:xfs-kern:30537a
Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Parent 44d814ce
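For context, the standalone sketch below (not part of the patch) models the lock-free slot claim that the diff introduces: a free-running per-buffer counter is bumped atomically and then mapped onto the ring with a modulo, instead of wrapping the index under a global spinlock. It uses C11 atomics in place of the kernel's atomic_t/atomic_add_return(); NENTRIES and claim_slot() are illustrative names invented for this sketch.

/*
 * Illustrative userspace model of the lock-free index claim.
 * atomic_fetch_add() + 1 stands in for atomic_add_return(1, ...).
 */
#include <stdatomic.h>
#include <stdio.h>

#define NENTRIES 64

static atomic_int kt_index;		/* free-running per-buffer counter */

static int claim_slot(void)
{
	/* Post-increment value, as atomic_add_return(1, ...) would return. */
	int index = atomic_fetch_add(&kt_index, 1) + 1;

	/* Map the free-running counter onto the ring of NENTRIES slots. */
	return (index - 1) % NENTRIES;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("claimed slot %d\n", claim_slot());
	return 0;
}

Note that the counter is never reset to zero after initialisation; readers take it modulo kt_nentries when they consume it, which is what the ktrace_nentries() and ktrace_first() hunks below do.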
@@ -92,7 +92,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
 
 	ktp->kt_entries = ktep;
 	ktp->kt_nentries = nentries;
-	ktp->kt_index = 0;
+	atomic_set(&ktp->kt_index, 0);
 	ktp->kt_rollover = 0;
 	return ktp;
 }
@@ -151,8 +151,6 @@ ktrace_enter(
 	void		*val14,
 	void		*val15)
 {
-	static DEFINE_SPINLOCK(wrap_lock);
-	unsigned long	flags;
 	int		index;
 	ktrace_entry_t	*ktep;
@@ -161,12 +159,8 @@ ktrace_enter(
 	/*
 	 * Grab an entry by pushing the index up to the next one.
 	 */
-	spin_lock_irqsave(&wrap_lock, flags);
-	index = ktp->kt_index;
-	if (++ktp->kt_index == ktp->kt_nentries)
-		ktp->kt_index = 0;
-	spin_unlock_irqrestore(&wrap_lock, flags);
+	index = atomic_add_return(1, &ktp->kt_index);
+	index = (index - 1) % ktp->kt_nentries;
 
 	if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
 		ktp->kt_rollover = 1;
@@ -199,11 +193,12 @@ int
 ktrace_nentries(
 	ktrace_t	*ktp)
 {
-	if (ktp == NULL) {
+	int	index;
+
+	if (ktp == NULL)
 		return 0;
-	}
 
-	return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
+	index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
+	return (ktp->kt_rollover ? ktp->kt_nentries : index);
 }
 
 /*
@@ -228,7 +223,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
 	int		nentries;
 
 	if (ktp->kt_rollover)
-		index = ktp->kt_index;
+		index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
 	else
 		index = 0;
@@ -30,7 +30,7 @@ typedef struct ktrace_entry {
  */
 typedef struct ktrace {
 	int		kt_nentries;	/* number of entries in trace buf */
-	int		kt_index;	/* current index in entries */
+	atomic_t	kt_index;	/* current index in entries */
 	int		kt_rollover;
 	ktrace_entry_t	*kt_entries;	/* buffer of entries */
 } ktrace_t;