diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index fec1947b8dbcdbc444f65254df5f3d4eeec717be..02760f6e6ca443547eb2bfbf248fe41fefc49be7 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -78,4 +78,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __ALPHA_MMAN_H__ */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index ccdcfcbb24aa60e2d2f161b61067ad4061caca84..655e2fb5395b5ba4dbfba2df396c9c0c8752eb2e 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -105,4 +105,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* _ASM_MMAN_H */
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index f3db7d8eb0c265a875fbcbb1d72f1cfaa058efbd..5979745815a5ebcc2825f21a8b98125a0916d5fa 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -75,4 +75,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __PARISC_MMAN_H__ */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 1ea0baef1175c407e9097e125a84b226be28266c..72198c64e646d3cbb0d9831d950e4e815003cdd3 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -23,6 +23,14 @@ typedef struct {
 	const struct vdso_image	*vdso_image;	/* vdso image in use */
 
 	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	/*
+	 * One bit per protection key says whether userspace can
+	 * use it or not.  protected by mmap_sem.
+	 */
+	u16 pkey_allocation_map;
+	s16 execute_only_pkey;
+#endif
 } mm_context_t;
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index af0251fc85ed16b862ef0fdeaaa0b114406c847a..8e0a9fe86de466eee5a520aa8ef9776a0e0e637d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -108,7 +108,16 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
+		/* pkey 0 is the default and always allocated */
+		mm->context.pkey_allocation_map = 0x1;
+		/* -1 means unallocated or invalid */
+		mm->context.execute_only_pkey = -1;
+	}
+	#endif
 	init_new_context_ldt(tsk, mm);
+	return 0;
 }
 
 static inline void destroy_context(struct mm_struct *mm)
@@ -263,5 +272,4 @@ static inline bool arch_pte_access_permitted(pte_t pte, bool write)
 {
 	return __pkru_allows_pkey(pte_flags_pkey(pte_flags(pte)), write);
 }
-
 #endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 666ffc862ef737b41816fd03e9549b7f5f7721c8..b406889de0db73626007a1cab13b39daa8ce749e 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -1,12 +1,7 @@
 #ifndef _ASM_X86_PKEYS_H
 #define _ASM_X86_PKEYS_H
 
-#define PKEY_DEDICATED_EXECUTE_ONLY	15
-/*
- * Consider the PKEY_DEDICATED_EXECUTE_ONLY key unavailable.
- */
-#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? \
-		PKEY_DEDICATED_EXECUTE_ONLY : 1)
+#define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
 
 extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 		unsigned long init_val);
@@ -40,4 +35,70 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 
 #define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | VM_PKEY_BIT3)
 
+#define mm_pkey_allocation_map(mm)	(mm->context.pkey_allocation_map)
+#define mm_set_pkey_allocated(mm, pkey) do {		\
+	mm_pkey_allocation_map(mm) |= (1U << pkey);	\
+} while (0)
+#define mm_set_pkey_free(mm, pkey) do {			\
+	mm_pkey_allocation_map(mm) &= ~(1U << pkey);	\
+} while (0)
+
+static inline
+bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+	return mm_pkey_allocation_map(mm) & (1U << pkey);
+}
+
+/*
+ * Returns a positive, 4-bit key on success, or -1 on failure.
+ */
+static inline
+int mm_pkey_alloc(struct mm_struct *mm)
+{
+	/*
+	 * Note: this is the one and only place we make sure
+	 * that the pkey is valid as far as the hardware is
+	 * concerned.  The rest of the kernel trusts that
+	 * only good, valid pkeys come out of here.
+	 */
+	u16 all_pkeys_mask = ((1U << arch_max_pkey()) - 1);
+	int ret;
+
+	/*
+	 * Are we out of pkeys?  We must handle this specially
+	 * because ffz() behavior is undefined if there are no
+	 * zeros.
+	 */
+	if (mm_pkey_allocation_map(mm) == all_pkeys_mask)
+		return -1;
+
+	ret = ffz(mm_pkey_allocation_map(mm));
+
+	mm_set_pkey_allocated(mm, ret);
+
+	return ret;
+}
+
+static inline
+int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+	/*
+	 * pkey 0 is special, always allocated and can never
+	 * be freed.
+	 */
+	if (!pkey)
+		return -EINVAL;
+	if (!mm_pkey_is_allocated(mm, pkey))
+		return -EINVAL;
+
+	mm_set_pkey_free(mm, pkey);
+
+	return 0;
+}
+
+extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+		unsigned long init_val);
+extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+		unsigned long init_val);
+
 #endif /*_ASM_X86_PKEYS_H */
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 01567aa87503f021cfb3b4104520842b1ec87179..124aa5c593f8da7aba6643bc609a332744d10ba6 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -5,6 +5,7 @@
  */
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -866,9 +867,10 @@ const void *get_xsave_field_ptr(int xsave_state)
 	return get_xsave_addr(&fpu->state.xsave, xsave_state);
 }
 
+#ifdef CONFIG_ARCH_HAS_PKEYS
+
 #define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
 #define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
-
 /*
  * This will go out and modify PKRU register to set the access
  * rights for @pkey to @init_val.
@@ -914,6 +916,7 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 
 	return 0;
 }
+#endif /* ! CONFIG_ARCH_HAS_PKEYS */
 
 /*
  * This is similar to user_regset_copyout(), but will not add offset to
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index e8c474451928a709b46a80b55caf371f405f87b2..e6113bbb56e1de089f8def86e0e378d966077f1b 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -21,8 +21,19 @@
 
 int __execute_only_pkey(struct mm_struct *mm)
 {
+	bool need_to_set_mm_pkey = false;
+	int execute_only_pkey = mm->context.execute_only_pkey;
 	int ret;
 
+	/* Do we need to assign a pkey for mm's execute-only maps? */
+	if (execute_only_pkey == -1) {
+		/* Go allocate one to use, which might fail */
+		execute_only_pkey = mm_pkey_alloc(mm);
+		if (execute_only_pkey < 0)
+			return -1;
+		need_to_set_mm_pkey = true;
+	}
+
 	/*
 	 * We do not want to go through the relatively costly
 	 * dance to set PKRU if we do not need to.  Check it
@@ -32,22 +43,33 @@ int __execute_only_pkey(struct mm_struct *mm)
 	 * can make fpregs inactive.
 	 */
 	preempt_disable();
-	if (fpregs_active() &&
-	    !__pkru_allows_read(read_pkru(), PKEY_DEDICATED_EXECUTE_ONLY)) {
+	if (!need_to_set_mm_pkey &&
+	    fpregs_active() &&
+	    !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
 		preempt_enable();
-		return PKEY_DEDICATED_EXECUTE_ONLY;
+		return execute_only_pkey;
 	}
 	preempt_enable();
-	ret = arch_set_user_pkey_access(current, PKEY_DEDICATED_EXECUTE_ONLY,
+
+	/*
+	 * Set up PKRU so that it denies access for everything
+	 * other than execution.
+	 */
+	ret = arch_set_user_pkey_access(current, execute_only_pkey,
 			PKEY_DISABLE_ACCESS);
 	/*
 	 * If the PKRU-set operation failed somehow, just return
 	 * 0 and effectively disable execute-only support.
 	 */
-	if (ret)
-		return 0;
+	if (ret) {
+		mm_set_pkey_free(mm, execute_only_pkey);
+		return -1;
+	}
 
-	return PKEY_DEDICATED_EXECUTE_ONLY;
+	/* We got one, store it and use it from here on out */
+	if (need_to_set_mm_pkey)
+		mm->context.execute_only_pkey = execute_only_pkey;
+	return execute_only_pkey;
 }
 
 static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
@@ -55,7 +77,7 @@ static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
 	/* Do this check first since the vm_flags should be hot */
 	if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
 		return false;
-	if (vma_pkey(vma) != PKEY_DEDICATED_EXECUTE_ONLY)
+	if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
 		return false;
 
 	return true;
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 9e079d49e7f2e678044d0cb527daec7cf70a293e..24365b30aae95adc240c5e38bd07edb7da03ba13 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -117,4 +117,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* _XTENSA_MMAN_H */
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 6899b0bc7ce09e4959f4c4b1be50b0254dbbd72c..8ff21125dc8a5594b52dbcc3bfa95e55a2f27e84 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,11 +4,6 @@
 #include 
 #include 
 
-#define PKEY_DISABLE_ACCESS	0x1
-#define PKEY_DISABLE_WRITE	0x2
-#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
-				 PKEY_DISABLE_WRITE)
-
 #ifdef CONFIG_ARCH_HAS_PKEYS
 #include 
 #else /* ! CONFIG_ARCH_HAS_PKEYS */
@@ -17,6 +12,29 @@
 #define arch_override_mprotect_pkey(vma, prot, pkey) (0)
 #define PKEY_DEDICATED_EXECUTE_ONLY 0
 #define ARCH_VM_PKEY_FLAGS 0
+
+static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
+{
+	return (pkey == 0);
+}
+
+static inline int mm_pkey_alloc(struct mm_struct *mm)
+{
+	return -1;
+}
+
+static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
+{
+	WARN_ONCE(1, "free of protection key when disabled");
+	return -EINVAL;
+}
+
+static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+			unsigned long init_val)
+{
+	return 0;
+}
+
 #endif /* ! CONFIG_ARCH_HAS_PKEYS */
 
 #endif /* _LINUX_PKEYS_H */
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 58274382a6164de8e9d03ec85da0cf07996869d6..8c27db0c5c08ce84fc7299bd9bff2cee571b9397 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -72,4 +72,9 @@
 #define MAP_HUGE_SHIFT	26
 #define MAP_HUGE_MASK	0x3f
 
+#define PKEY_DISABLE_ACCESS	0x1
+#define PKEY_DISABLE_WRITE	0x2
+#define PKEY_ACCESS_MASK	(PKEY_DISABLE_ACCESS |\
+				 PKEY_DISABLE_WRITE)
+
 #endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/mm/mprotect.c b/mm/mprotect.c
index abd9c8257b2e3b082ed034ffc5886189dc9c7ebf..7b35ee3894ee4a8970a502826e860a644222e4a9 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -23,11 +23,13 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include "internal.h"
@@ -364,12 +366,6 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
 	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
 				(prot & PROT_READ);
 
-	/*
-	 * A temporary safety check since we are not validating
-	 * the pkey before we introduce the allocation code.
-	 */
-	if (pkey != -1)
-		return -EINVAL;
 	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
 	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
@@ -391,6 +387,14 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 
 	if (down_write_killable(&current->mm->mmap_sem))
 		return -EINTR;
 
+	/*
+	 * If userspace did not allocate the pkey, do not let
+	 * them use it here.
+	 */
+	error = -EINVAL;
+	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
+		goto out;
+
 	vma = find_vma(current->mm, start);
 	error = -ENOMEM;
 	if (!vma)
@@ -485,3 +489,48 @@ SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
 {
 	return do_mprotect_pkey(start, len, prot, pkey);
 }
+
+SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
+{
+	int pkey;
+	int ret;
+
+	/* No flags supported yet. */
+	if (flags)
+		return -EINVAL;
+	/* check for unsupported init values */
+	if (init_val & ~PKEY_ACCESS_MASK)
+		return -EINVAL;
+
+	down_write(&current->mm->mmap_sem);
+	pkey = mm_pkey_alloc(current->mm);
+
+	ret = -ENOSPC;
+	if (pkey == -1)
+		goto out;
+
+	ret = arch_set_user_pkey_access(current, pkey, init_val);
+	if (ret) {
+		mm_pkey_free(current->mm, pkey);
+		goto out;
+	}
+	ret = pkey;
+out:
+	up_write(&current->mm->mmap_sem);
+	return ret;
+}
+
+SYSCALL_DEFINE1(pkey_free, int, pkey)
+{
+	int ret;
+
+	down_write(&current->mm->mmap_sem);
+	ret = mm_pkey_free(current->mm, pkey);
+	up_write(&current->mm->mmap_sem);
+
+	/*
+	 * We could provide warnings or errors if any VMA still
+	 * has the pkey set here.
+	 */
+	return ret;
+}
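
For reference, below is a minimal userspace sketch of how the two new syscalls combine with pkey_mprotect() from the previous patch. It is not part of the patch itself: it assumes the installed uapi headers export __NR_pkey_alloc, __NR_pkey_mprotect and __NR_pkey_free, and it goes through syscall(2) since libc wrappers may not exist yet; error handling is abbreviated.

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#ifndef PKEY_DISABLE_WRITE
#define PKEY_DISABLE_WRITE	0x2	/* matches the uapi value added above */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	long pkey;

	if (buf == MAP_FAILED)
		return 1;

	/* Allocate a key whose initial PKRU rights deny writes. */
	pkey = syscall(__NR_pkey_alloc, 0, PKEY_DISABLE_WRITE);
	if (pkey < 0) {
		perror("pkey_alloc");	/* ENOSPC once all keys are taken */
		return 1;
	}

	/* Tag the mapping; an unallocated key would get EINVAL here. */
	if (syscall(__NR_pkey_mprotect, buf, page,
		    PROT_READ | PROT_WRITE, pkey))
		perror("pkey_mprotect");

	/* Reads still work; a write to buf would now fault via PKRU. */
	printf("first byte: %d\n", buf[0]);

	/* Key 0 can never be freed; this one can. */
	syscall(__NR_pkey_free, pkey);
	munmap(buf, page);
	return 0;
}

As the pkey_alloc path above shows, the kernel only hands out keys recorded in mm->context.pkey_allocation_map, which is why do_mprotect_pkey() can reject unallocated keys with -EINVAL.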