Commit 7a3796d2 authored by Marc Zyngier, committed by Christoffer Dall

KVM: arm/arm64: Preserve Exec permission across R/W permission faults

So far, we lose the Exec property whenever we take permission
faults, as we always reconstruct the PTE/PMD from scratch. This
can be counterproductive, as we can end up with the following
fault sequence:

	X -> RO -> ROX -> RW -> RWX

Instead, we can look up the existing PTE/PMD and clear the XN bit in the
new entry if it was already cleared in the old one, leading to a much
nicer fault sequence:

	X -> ROX -> RWX

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Parent a9c0e12e
@@ -107,6 +107,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
 	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
 }
 
+static inline bool kvm_s2pte_exec(pte_t *pte)
+{
+	return !(pte_val(*pte) & L_PTE_XN);
+}
+
 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
 {
 	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
@@ -117,6 +122,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
 }
 
+static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+{
+	return !(pmd_val(*pmd) & PMD_SECT_XN);
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
...
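
The helpers added in the arm header above (and their arm64 counterparts below) all reduce to the same test: a stage-2 entry is executable when its execute-never (XN) bit is clear. A minimal standalone sketch of that predicate, using a made-up bit position rather than the kernel's L_PTE_XN / PMD_SECT_XN / PTE_S2_XN / PMD_S2_XN definitions:

/* Illustration only: EXAMPLE_S2_XN is a hypothetical bit, not a kernel define. */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_S2_XN	(1ULL << 54)	/* pretend XN lives at bit 54 */

static bool example_s2_entry_exec(uint64_t desc)
{
	/* Executable if and only if the XN bit is clear. */
	return !(desc & EXAMPLE_S2_XN);
}

The real helpers take a pte_t * or pmd_t * and read the descriptor through pte_val()/pmd_val(), as the hunks show.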
@@ -203,6 +203,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
 	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
 }
 
+static inline bool kvm_s2pte_exec(pte_t *pte)
+{
+	return !(pte_val(*pte) & PTE_S2_XN);
+}
+
 static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
 {
 	kvm_set_s2pte_readonly((pte_t *)pmd);
@@ -213,6 +218,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 	return kvm_s2pte_readonly((pte_t *)pmd);
 }
 
+static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+{
+	return !(pmd_val(*pmd) & PMD_S2_XN);
+}
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
...
@@ -926,6 +926,25 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
 	return 0;
 }
 
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+{
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	pmdp = stage2_get_pmd(kvm, NULL, addr);
+	if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
+		return false;
+
+	if (pmd_thp_or_huge(*pmdp))
+		return kvm_s2pmd_exec(pmdp);
+
+	ptep = pte_offset_kernel(pmdp, addr);
+	if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
+		return false;
+
+	return kvm_s2pte_exec(ptep);
+}
+
 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
 			  phys_addr_t addr, const pte_t *new_pte,
 			  unsigned long flags)
@@ -1407,6 +1426,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		if (exec_fault) {
 			new_pmd = kvm_s2pmd_mkexec(new_pmd);
 			invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
+		} else if (fault_status == FSC_PERM) {
+			/* Preserve execute if XN was already cleared */
+			if (stage2_is_exec(kvm, fault_ipa))
+				new_pmd = kvm_s2pmd_mkexec(new_pmd);
 		}
 
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
@@ -1425,6 +1448,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		if (exec_fault) {
 			new_pte = kvm_s2pte_mkexec(new_pte);
 			invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
+		} else if (fault_status == FSC_PERM) {
+			/* Preserve execute if XN was already cleared */
+			if (stage2_is_exec(kvm, fault_ipa))
+				new_pte = kvm_s2pte_mkexec(new_pte);
 		}
 
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
...
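
Taken together, the change means that on a pure permission fault (FSC_PERM) user_mem_abort() asks stage2_is_exec() whether the entry currently installed for the faulting IPA is executable and, if so, re-applies exec to the rebuilt PMD/PTE. No icache maintenance is done in that branch: the backing page is unchanged, and its icache invalidation already happened when exec was first granted. Below is a self-contained sketch of just that decision, with toy types and a made-up XN bit standing in for the kernel's page-table API; only the shape of the exec_fault / FSC_PERM decision mirrors the hunks above.

/*
 * Illustration only: toy types and a hypothetical execute-never bit,
 * not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_S2_XN (1u << 1)		/* hypothetical execute-never bit */

struct toy_s2_entry {
	uint32_t attrs;
	bool valid;
};

/* Stand-in for stage2_is_exec(): is the installed entry executable? */
static bool toy_entry_is_exec(const struct toy_s2_entry *old)
{
	return old->valid && !(old->attrs & TOY_S2_XN);
}

/* Decide the attributes of the entry rebuilt by the fault handler. */
static uint32_t toy_build_attrs(const struct toy_s2_entry *old,
				bool exec_fault, bool perm_fault)
{
	uint32_t attrs = TOY_S2_XN;	/* rebuilt entries start execute-never */

	if (exec_fault)
		attrs &= ~TOY_S2_XN;	/* grant exec (the real code also
					 * invalidates the icache here) */
	else if (perm_fault && toy_entry_is_exec(old))
		attrs &= ~TOY_S2_XN;	/* preserve exec already granted */

	return attrs;
}

int main(void)
{
	/* An already-executable mapping takes a write permission fault... */
	struct toy_s2_entry cur = { .attrs = 0, .valid = true };

	/* ...and the rebuilt entry stays executable (prints "XN: 0"). */
	printf("XN: %u\n", !!(toy_build_attrs(&cur, false, true) & TOY_S2_XN));
	return 0;
}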