Commit 80c5e73d authored by Venki Pallipadi, committed by Ingo Molnar

x86: fix Xorg startup/shutdown slowdown with PAT

Rene Herman reported significant Xorg startup/shutdown slowdown due
to PAT. It turns out that the memtype list has thousands of entries.

Add cached_entry to the list add routine in order to speed up the
lookup for sequential reserve_memtype calls.
Reported-by: Rene Herman <rene.herman@keyaccess.nl>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent c6744955
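The fix is a cached-cursor scan over a sorted list: remember where the last insertion landed and, when the next key is not smaller, resume the search from there instead of from the list head. Below is a minimal user-space sketch of that idea, assuming a simplified singly linked list; it is not the kernel code, and the names (struct range, range_insert, range_free, cached/cached_start) are illustrative only:

```c
/*
 * Sketch of the cached-cursor pattern: a list kept sorted by ->start,
 * where inserts resume from the last insertion point when keys arrive
 * in ascending order. All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long long start, end;
	struct range *next;
};

static struct range *head;                 /* sorted by ->start  */
static struct range *cached;               /* last inserted node */
static unsigned long long cached_start;

static void range_insert(unsigned long long start, unsigned long long end)
{
	struct range *new = malloc(sizeof(*new));
	struct range **link = &head;

	new->start = start;
	new->end = end;

	/*
	 * Every node before the cached one starts at or below
	 * cached_start, so an ascending insert can skip them all.
	 */
	if (cached && start >= cached_start)
		link = &cached->next;

	while (*link && (*link)->start < start)
		link = &(*link)->next;

	new->next = *link;
	*link = new;
	cached = new;
	cached_start = start;
}

static void range_free(unsigned long long start)
{
	struct range **link = &head;

	while (*link && (*link)->start != start)
		link = &(*link)->next;
	if (!*link)
		return;

	/* Drop the cursor if it points at the node being removed. */
	if (cached == *link)
		cached = NULL;

	struct range *victim = *link;
	*link = victim->next;
	free(victim);
}

int main(void)
{
	/* Sequential inserts hit the cursor and avoid a full rescan. */
	for (unsigned long long i = 0; i < 5; i++)
		range_insert(i * 0x1000, (i + 1) * 0x1000 - 1);
	range_free(0x2000);

	for (struct range *r = head; r; r = r->next)
		printf("0x%llx-0x%llx\n", r->start, r->end);
	return 0;
}
```

With reservations arriving mostly in ascending address order, as during Xorg startup, each insert then touches only a few nodes instead of walking thousands. The free path must invalidate the cursor before deleting the node it may point at, which is what the last hunk of the diff below does.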
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 	return -EBUSY;
 }
 
+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
 			}
 			break;
 		}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
+	cached_start = start;
+
 	if (where)
 		list_add(&new->nd, where);
 	else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
...