Commit 639bc703 authored by Sean Christopherson, committed by Zheng Zengkai

x86/kvm: Alloc dummy async #PF token outside of raw spinlock

stable inclusion
from stable-v5.10.120
commit 4a9f3a9c28a6966c699b4264b6a3c5aaed21ea3e
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5L6BR

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=4a9f3a9c28a6966c699b4264b6a3c5aaed21ea3e

--------------------------------

commit 0547758a upstream.

Drop the raw spinlock in kvm_async_pf_task_wake() before allocating the
dummy async #PF token; the allocator is preemptible on PREEMPT_RT
kernels and must not be called from truly atomic contexts.

Opportunistically document why it's ok to loop on allocation failure,
i.e. why the function won't get stuck in an infinite loop.
Reported-by: Yajun Deng <yajun.deng@linux.dev>
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Parent 4b129dc6
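The fix below follows a standard pattern: never call a preemptible allocator while holding a raw spinlock; instead drop the lock, allocate, retake the lock, and recheck the state that may have changed in between. Here is a minimal userspace sketch of that pattern, not the kernel code: a pthread mutex stands in for the raw spinlock, and all names (table_lock, wake_or_add_dummy, find_node) are illustrative.

/*
 * Minimal userspace sketch of the locking pattern in this fix; all
 * names are illustrative and a pthread mutex stands in for the
 * kernel's raw spinlock.
 */
#include <pthread.h>
#include <stdlib.h>

struct node {
        unsigned int token;
        struct node *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table_head;

/* Caller must hold table_lock. */
static struct node *find_node(unsigned int token)
{
        struct node *n;

        for (n = table_head; n; n = n->next)
                if (n->token == token)
                        return n;
        return NULL;
}

static void wake_or_add_dummy(unsigned int token)
{
        struct node *dummy = NULL;

again:
        pthread_mutex_lock(&table_lock);
        if (!find_node(token)) {
                if (!dummy) {
                        /*
                         * Allocation may sleep/preempt, so do it with the
                         * lock dropped, then loop back and recheck: another
                         * thread may have inserted the token meanwhile.
                         */
                        pthread_mutex_unlock(&table_lock);
                        dummy = calloc(1, sizeof(*dummy));
                        goto again;
                }
                dummy->token = token;
                dummy->next = table_head;
                table_head = dummy;
                dummy = NULL;   /* ownership moved to the list */
        }
        pthread_mutex_unlock(&table_lock);

        /* Lost the race: the preallocated node was never used. */
        free(dummy);
}

In the kernel function the else path additionally wakes the sleeping task via apf_task_wake_one(n); the sketch only shows the allocate-outside-the-lock dance and the cleanup of an unused preallocation.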
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
 {
         u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
         struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
-        struct kvm_task_sleep_node *n;
+        struct kvm_task_sleep_node *n, *dummy = NULL;
 
         if (token == ~0) {
                 apf_task_wake_all();
@@ -200,28 +200,41 @@ void kvm_async_pf_task_wake(u32 token)
         n = _find_apf_task(b, token);
         if (!n) {
                 /*
-                 * async PF was not yet handled.
-                 * Add dummy entry for the token.
+                 * Async #PF not yet handled, add a dummy entry for the token.
+                 * Allocating the token must be done outside of the raw lock
+                 * as the allocator is preemptible on PREEMPT_RT kernels.
                  */
-                n = kzalloc(sizeof(*n), GFP_ATOMIC);
-                if (!n) {
+                if (!dummy) {
+                        raw_spin_unlock(&b->lock);
+                        dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
+
                         /*
-                         * Allocation failed! Busy wait while other cpu
-                         * handles async PF.
+                         * Continue looping on allocation failure, eventually
+                         * the async #PF will be handled and allocating a new
+                         * node will be unnecessary.
+                         */
+                        if (!dummy)
+                                cpu_relax();
+
+                        /*
+                         * Recheck for async #PF completion before enqueueing
+                         * the dummy token to avoid duplicate list entries.
                          */
-                        raw_spin_unlock(&b->lock);
-                        cpu_relax();
                         goto again;
                 }
-                n->token = token;
-                n->cpu = smp_processor_id();
-                init_swait_queue_head(&n->wq);
-                hlist_add_head(&n->link, &b->list);
+                dummy->token = token;
+                dummy->cpu = smp_processor_id();
+                init_swait_queue_head(&dummy->wq);
+                hlist_add_head(&dummy->link, &b->list);
+                dummy = NULL;
         } else {
                 apf_task_wake_one(n);
         }
         raw_spin_unlock(&b->lock);
-        return;
+
+        /* A dummy token might be allocated and ultimately not used. */
+        if (dummy)
+                kfree(dummy);
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);