Commit 33a709b2 authored by Dave Hansen, committed by Ingo Molnar

mm/gup, x86/mm/pkeys: Check VMAs and PTEs for protection keys

Today, for normal faults and page table walks, we check the VMA
and/or PTE to ensure that it is compatible with the action.  For
instance, if we get a write fault on a non-writeable VMA, we
SIGSEGV.

We try to do the same thing for protection keys.  Basically, we
try to make sure that if a user does this:

	mprotect(ptr, size, PROT_NONE);
	*ptr = foo;

they see the same effects with protection keys when they do this:

	mprotect(ptr, size, PROT_READ|PROT_WRITE);
	set_pkey(ptr, size, 4);
	wrpkru(0xffffff3f); // access disable pkey 4
	*ptr = foo;
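
(A brief illustrative aside, not part of the patch: PKRU holds two bits
per protection key, an access-disable bit at bit 2*pkey and a
write-disable bit at bit 2*pkey + 1, the same layout the
PKRU_AD_BIT/PKRU_WD_BIT definitions further down encode.  The masks
that target pkey 4 on its own would be:

	u32 ad_pkey4 = 0x1 << (4 * 2);	/* 0x100: access-disable pkey 4 */
	u32 wd_pkey4 = 0x2 << (4 * 2);	/* 0x200: write-disable pkey 4 */

The 0xffffff3f value in the example above sets both bits for pkey 4,
along with the bits for every key other than pkey 3.)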

The state to do that checking is in the VMA, but we also
sometimes have to do it on the page tables only, like when doing
a get_user_pages_fast() where we have no VMA.

We add two functions and expose them to generic code:

	arch_pte_access_permitted(pte, write)
	arch_vma_access_permitted(vma, write)

These are, of course, backed up in x86 arch code with checks
against the PTE or VMA's protection key.
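
For orientation, a sketch (not additional patch content) of how generic
code ends up calling these hooks, mirroring the mm/gup.c and
mm/memory.c hunks further down:

	/* fault path: refuse the access outright */
	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE))
		return VM_FAULT_SIGSEGV;

	/* gup_fast path: no VMA available, so check the PTE instead */
	if (!arch_pte_access_permitted(pte, write))
		goto pte_unmap;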

But, there are also cases where we do not want to respect
protection keys.  When we ptrace(), for instance, we do not want
to apply the tracer's PKRU permissions to the PTEs from the
process being traced.
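
(A hypothetical illustration, not from the patch: a debugger writing
into a tracee's address space should be judged against the tracee's
mappings rather than the debugger's own PKRU, which is what the
vma_is_foreign() check added below arranges by skipping the PKRU test
for foreign VMAs.)

	/* in the tracer: must not be blocked by the tracer's own PKRU */
	ptrace(PTRACE_POKEDATA, tracee_pid, addr, value);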

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexey Kardashevskiy <aik@ozlabs.ru>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boaz Harrosh <boaz@plexistor.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Dominik Dingel <dingel@linux.vnet.ibm.com>
Cc: Dominik Vogt <vogt@linux.vnet.ibm.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Low <jason.low2@hp.com>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Laurent Dufour <ldufour@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mikulas Patocka <mpatocka@redhat.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Shachar Raindel <raindel@mellanox.com>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linux-s390@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/20160212210219.14D5D715@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 1874f689
@@ -148,5 +148,16 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 {
 }
 
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
+
+static inline bool arch_pte_access_permitted(pte_t pte, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
@@ -130,4 +130,15 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 {
 }
 
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
+
+static inline bool arch_pte_access_permitted(pte_t pte, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
 #endif /* __S390_MMU_CONTEXT_H */
@@ -97,4 +97,15 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 {
 }
 
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
+
+static inline bool arch_pte_access_permitted(pte_t pte, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
 #endif
@@ -286,4 +286,53 @@ static inline int vma_pkey(struct vm_area_struct *vma)
 	return pkey;
 }
 
+static inline bool __pkru_allows_pkey(u16 pkey, bool write)
+{
+	u32 pkru = read_pkru();
+
+	if (!__pkru_allows_read(pkru, pkey))
+		return false;
+	if (write && !__pkru_allows_write(pkru, pkey))
+		return false;
+
+	return true;
+}
+
+/*
+ * We only want to enforce protection keys on the current process
+ * because we effectively have no access to PKRU for other
+ * processes or any way to tell *which* PKRU in a threaded
+ * process we could use.
+ *
+ * So do not enforce things if the VMA is not from the current
+ * mm, or if we are in a kernel thread.
+ */
+static inline bool vma_is_foreign(struct vm_area_struct *vma)
+{
+	if (!current->mm)
+		return true;
+	/*
+	 * Should PKRU be enforced on the access to this VMA?  If
+	 * the VMA is from another process, then PKRU has no
+	 * relevance and should not be enforced.
+	 */
+	if (current->mm != vma->vm_mm)
+		return true;
+
+	return false;
+}
+
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write)
+{
+	/* allow access if the VMA is not one from this process */
+	if (vma_is_foreign(vma))
+		return true;
+
+	return __pkru_allows_pkey(vma_pkey(vma), write);
+}
+
+static inline bool arch_pte_access_permitted(pte_t pte, bool write)
+{
+	return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write);
+}
 #endif /* _ASM_X86_MMU_CONTEXT_H */
@@ -919,6 +919,35 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 }
 #endif
 
+#define PKRU_AD_BIT 0x1
+#define PKRU_WD_BIT 0x2
+
+static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
+{
+	int pkru_pkey_bits = pkey * 2;
+	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
+}
+
+static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
+{
+	int pkru_pkey_bits = pkey * 2;
+	/*
+	 * Access-disable disables writes too so we need to check
+	 * both bits here.
+	 */
+	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
+}
+
+static inline u16 pte_flags_pkey(unsigned long pte_flags)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	/* ifdef to avoid doing 59-bit shift on 32-bit values */
+	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
+#else
+	return 0;
+#endif
+}
+
 #include <asm-generic/pgtable.h>
 #endif /* __ASSEMBLY__ */
@@ -897,6 +897,16 @@ bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
 	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
 }
 
+static inline bool bad_area_access_from_pkeys(unsigned long error_code,
+		struct vm_area_struct *vma)
+{
+	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+		return false;
+	if (error_code & PF_PK)
+		return true;
+	return false;
+}
+
 static noinline void
 bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 		      unsigned long address, struct vm_area_struct *vma)
@@ -906,7 +916,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
 	 * But, doing it this way allows compiler optimizations
 	 * if pkeys are compiled out.
 	 */
-	if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK))
+	if (bad_area_access_from_pkeys(error_code, vma))
 		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
 	else
 		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
@@ -1081,6 +1091,15 @@ int show_unhandled_signals = 1;
 static inline int
 access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
+	/*
+	 * Access or read was blocked by protection keys.  We do
+	 * this check before any others because we do not want
+	 * to, for instance, confuse a protection-key-denied
+	 * write with one for which we should do a COW.
+	 */
+	if (error_code & PF_PK)
+		return 1;
+
 	if (error_code & PF_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
@@ -11,6 +11,7 @@
 #include <linux/swap.h>
 #include <linux/memremap.h>
 
+#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 
 static inline pte_t gup_get_pte(pte_t *ptep)
@@ -89,6 +90,10 @@ static inline int pte_allows_gup(unsigned long pteval, int write)
 	if ((pteval & need_pte_bits) != need_pte_bits)
 		return 0;
 
+	/* Check memory protection keys permissions. */
+	if (!__pkru_allows_pkey(pte_flags_pkey(pteval), write))
+		return 0;
+
 	return 1;
 }
@@ -26,4 +26,15 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
 {
 }
 
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
+
+static inline bool arch_pte_access_permitted(pte_t pte, bool write)
+{
+	/* by default, allow everything */
+	return true;
+}
 #endif /* _ASM_GENERIC_MM_HOOKS_H */
@@ -15,6 +15,7 @@
 #include <linux/rwsem.h>
 #include <linux/hugetlb.h>
 
+#include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -444,6 +445,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 		if (!(vm_flags & VM_MAYREAD))
 			return -EFAULT;
 	}
+	if (!arch_vma_access_permitted(vma, (gup_flags & FOLL_WRITE)))
+		return -EFAULT;
 	return 0;
 }
@@ -612,13 +615,19 @@ EXPORT_SYMBOL(__get_user_pages);
 bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
 {
-	vm_flags_t vm_flags;
-
-	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
+	bool write = !!(fault_flags & FAULT_FLAG_WRITE);
+	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
 
 	if (!(vm_flags & vma->vm_flags))
 		return false;
 
+	/*
+	 * The architecture might have a hardware protection
+	 * mechanism other than read/write that can deny access
+	 */
+	if (!arch_vma_access_permitted(vma, write))
+		return false;
+
 	return true;
 }
@@ -1172,6 +1181,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 		    pte_protnone(pte) || (write && !pte_write(pte)))
 			goto pte_unmap;
 
+		if (!arch_pte_access_permitted(pte, write))
+			goto pte_unmap;
+
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
 		head = compound_head(page);
@@ -65,6 +65,7 @@
 #include <linux/userfaultfd_k.h>
 
 #include <asm/io.h>
+#include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
@@ -3378,6 +3379,9 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	pmd_t *pmd;
 	pte_t *pte;
 
+	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE))
+		return VM_FAULT_SIGSEGV;
+
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, flags);