diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4399be1aaeff128e5d0965378c60fb02c96a0a96..df2e7f14ffb7f9c5dd81a78a5c99e9f91edc5bd7 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -479,6 +479,11 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 	return 0;
 }
 
+/*
+ * In the case that a guest uses storage keys,
+ * faults should no longer be backed by zero pages.
+ */
+#define mm_forbids_zeropage mm_use_skey
 static inline int mm_use_skey(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 019afdf50b1a35393250993c689a8c5ce5e21c2e..0f1e9ff6bc123a26731bbb52ae0f59d55366b5ad 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1256,6 +1256,15 @@ static int __s390_enable_skey(pte_t *pte, unsigned long addr,
 	pgste_t pgste;
 
 	pgste = pgste_get_lock(pte);
+	/*
+	 * Remove all zero page mappings;
+	 * after establishing the policy that forbids zero page mappings,
+	 * subsequent faults for such pages will get fresh anonymous pages.
+	 */
+	if (is_zero_pfn(pte_pfn(*pte))) {
+		ptep_flush_direct(walk->mm, addr, pte);
+		pte_val(*pte) = _PAGE_INVALID;
+	}
 	/* Clear storage key */
 	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
 			      PGSTE_GR_BIT | PGSTE_GC_BIT);
@@ -1274,9 +1283,11 @@ void s390_enable_skey(void)
 	down_write(&mm->mmap_sem);
 	if (mm_use_skey(mm))
 		goto out_up;
+
+	mm->context.use_skey = 1;
+
 	walk.mm = mm;
 	walk_page_range(0, TASK_SIZE, &walk);
-	mm->context.use_skey = 1;
 
 out_up:
 	up_write(&mm->mmap_sem);
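
For context, a minimal sketch (not part of this patch) of the generic side of the hook. The fallback define mirrors the companion common-code change that introduces mm_forbids_zeropage(); can_use_zero_page() is a hypothetical helper name condensing the check the anonymous read-fault path performs before mapping the shared zero page.

/*
 * Sketch, assuming the companion generic-mm patch: architectures that
 * do not define mm_forbids_zeropage keep using the zero page as before.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(mm)	(0)
#endif

/*
 * Hypothetical helper (illustration only) condensing the read-fault
 * check: the shared zero page may only back a read fault when the
 * architecture does not forbid it.  With the s390 override above this
 * expands to mm_use_skey(mm), so once storage keys are enabled every
 * fault is served with a fresh anonymous page instead.
 */
static inline bool can_use_zero_page(struct mm_struct *mm,
				     unsigned int fault_flags)
{
	return !(fault_flags & FAULT_FLAG_WRITE) &&
	       !mm_forbids_zeropage(mm);
}

This is also why s390_enable_skey() now sets mm->context.use_skey before walking the page tables rather than after: the no-zero-page policy must already be in force when existing zero page mappings are torn down, so that later faults on those addresses allocate fresh anonymous pages instead of re-establishing zero page mappings.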