Commit c5c8c7c5 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Make walk_shadow_page_lockless_{begin,end} interoperate with the TDP MMU

Acquire the RCU read lock in walk_shadow_page_lockless_begin and release
it in walk_shadow_page_lockless_end when the TDP MMU is enabled.  This
should not introduce any functional changes but is used in the following
commit to make fast_page_fault interoperate with the TDP MMU.
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210713220957.3493520-4-dmatlack@google.com>
[Use if...else instead of if(){return;}]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 61bcd360
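
For context, a minimal caller-side sketch of the pattern this patch enables
(modeled on get_mmio_spte() in the diff below; example_lockless_walk() is a
hypothetical name, not part of the patch):

static bool example_lockless_walk(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root, leaf;

	/*
	 * Protect the walk: the RCU read lock when the TDP MMU is in use,
	 * otherwise the IRQ-disable + vcpu->mode protocol of the shadow MMU.
	 */
	walk_shadow_page_lockless_begin(vcpu);

	if (is_tdp_mmu(vcpu->arch.mmu))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	walk_shadow_page_lockless_end(vcpu);

	return leaf >= 0;	/* leaf == -1 means no SPTE was read */
}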
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -686,28 +686,36 @@ static bool mmu_spte_age(u64 *sptep)
 
 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * Prevent page table teardown by making any free-er wait during
-	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
-	 */
-	local_irq_disable();
+	if (is_tdp_mmu(vcpu->arch.mmu)) {
+		kvm_tdp_mmu_walk_lockless_begin();
+	} else {
+		/*
+		 * Prevent page table teardown by making any free-er wait during
+		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
+		 */
+		local_irq_disable();
 
-	/*
-	 * Make sure a following spte read is not reordered ahead of the write
-	 * to vcpu->mode.
-	 */
-	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+		/*
+		 * Make sure a following spte read is not reordered ahead of the
+		 * write to vcpu->mode.
+		 */
+		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+	}
 }
 
 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 {
-	/*
-	 * Make sure the write to vcpu->mode is not reordered in front of
-	 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
-	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
-	 */
-	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
-	local_irq_enable();
+	if (is_tdp_mmu(vcpu->arch.mmu)) {
+		kvm_tdp_mmu_walk_lockless_end();
+	} else {
+		/*
+		 * Make sure the write to vcpu->mode is not reordered in front
+		 * of reads to sptes.  If it does, kvm_mmu_commit_zap_page()
+		 * can see us OUTSIDE_GUEST_MODE and proceed to free the shadow
+		 * page table.
+		 */
+		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+		local_irq_enable();
+	}
 }
 
 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
@@ -3617,6 +3625,8 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 /*
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
+ *
+ * Must be called between walk_shadow_page_lockless_{begin,end}.
  */
 static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
 {
@@ -3624,8 +3634,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
 	int leaf = -1;
 	u64 spte;
 
-	walk_shadow_page_lockless_begin(vcpu);
-
 	for (shadow_walk_init(&iterator, vcpu, addr),
 	     *root_level = iterator.level;
 	     shadow_walk_okay(&iterator);
@@ -3639,8 +3647,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
 		break;
 	}
 
-	walk_shadow_page_lockless_end(vcpu);
-
 	return leaf;
 }
@@ -3652,11 +3658,15 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 	int root, leaf, level;
 	bool reserved = false;
 
+	walk_shadow_page_lockless_begin(vcpu);
+
 	if (is_tdp_mmu(vcpu->arch.mmu))
 		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
 	else
 		leaf = get_walk(vcpu, addr, sptes, &root);
 
+	walk_shadow_page_lockless_end(vcpu);
+
 	if (unlikely(leaf < 0)) {
 		*sptep = 0ull;
 		return reserved;
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1516,6 +1516,8 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 /*
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
+ *
+ * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
  */
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level)
@@ -1527,14 +1529,10 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 	*root_level = vcpu->arch.mmu->shadow_root_level;
 
-	rcu_read_lock();
-
 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
 		leaf = iter.level;
 		sptes[leaf] = iter.old_spte;
 	}
 
-	rcu_read_unlock();
-
 	return leaf;
 }
 
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -77,6 +77,16 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn,
 				   int min_level);
 
+static inline void kvm_tdp_mmu_walk_lockless_begin(void)
+{
+	rcu_read_lock();
+}
+
+static inline void kvm_tdp_mmu_walk_lockless_end(void)
+{
+	rcu_read_unlock();
+}
+
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);