Commit 4b65a5db authored by Catalin Marinas

arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1

This patch adds the uaccess macros/functions to disable access to user
space by setting TTBR0_EL1 to a reserved zeroed page. Since the value
written to TTBR0_EL1 must be a physical address, for simplicity this
patch introduces a reserved_ttbr0 page at a constant offset from
swapper_pg_dir. The uaccess_disable code uses the ttbr1_el1 value
adjusted by the reserved_ttbr0 offset.
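Concretely, the disable path derives the physical address of reserved_ttbr0 from the current TTBR1_EL1 value, so no virt_to_phys() translation is needed at run time. A minimal sketch, mirroring the __uaccess_ttbr0_disable asm macro added below:

	mrs	\tmp1, ttbr1_el1		// physical address of swapper_pg_dir
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 is linked right after it
	msr	ttbr0_el1, \tmp1		// user accesses now translate-fault
	isb					// synchronise the TTBR0_EL1 change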

Enabling access to user space is done by restoring TTBR0_EL1 with the value
from the struct thread_info ttbr0 variable. Interrupts must be disabled
during the uaccess_ttbr0_enable code to ensure the atomicity of the
thread_info.ttbr0 read and TTBR0_EL1 write. This patch also moves the
get_thread_info asm macro from entry.S to assembler.h for reuse in the
uaccess_ttbr0_* macros.
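The interleaving that the interrupt-disabling prevents, as a hypothetical illustration (register names arbitrary):

	ldr	x1, [tsk, #TSK_TI_TTBR0]	// read thread_info.ttbr0
	// <interrupt: context switch + ASID roll-over rewrites thread_info.ttbr0>
	msr	ttbr0_el1, x1			// programs a stale TTBR0/ASID
	isb

Wrapping the load/MSR pair in save_and_disable_irq/restore_irq (or local_irq_save/restore in the C version) rules this out.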

Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Parent f33bcf03
arch/arm64/include/asm/assembler.h
@@ -41,6 +41,15 @@
 	msr	daifclr, #2
 	.endm
 
+	.macro	save_and_disable_irq, flags
+	mrs	\flags, daif
+	msr	daifset, #2
+	.endm
+
+	.macro	restore_irq, flags
+	msr	daif, \flags
+	.endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -406,6 +415,13 @@ alternative_endif
 	movk	\reg, :abs_g0_nc:\val
 	.endm
 
+/*
+ * Return the current thread_info.
+ */
+	.macro	get_thread_info, rd
+	mrs	\rd, sp_el0
+	.endm
+
 /*
  * Errata workaround post TTBR0_EL1 update.
  */
arch/arm64/include/asm/cpufeature.h
@@ -241,6 +241,12 @@ static inline bool system_supports_fpsimd(void)
 	return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
 }
 
+static inline bool system_uses_ttbr0_pan(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+		!cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
 #include <asm/sparsemem.h>
 
 /*
@@ -54,6 +55,12 @@
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE	(0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
arch/arm64/include/asm/thread_info.h
@@ -47,6 +47,9 @@ typedef unsigned long mm_segment_t;
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	u64			ttbr0;		/* saved TTBR0_EL1 */
+#endif
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 };
arch/arm64/include/asm/uaccess.h
@@ -19,6 +19,7 @@
 #define __ASM_UACCESS_H
 
 #include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/sysreg.h>
 
 #ifndef __ASSEMBLY__
@@ -125,16 +126,71 @@ static inline void set_fs(mm_segment_t fs)
 /*
  * User access enabling/disabling.
  */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+	unsigned long ttbr;
+
+	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
+	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+	write_sysreg(ttbr, ttbr0_el1);
+	isb();
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
+	 * variable and the MSR. A context switch could trigger an ASID
+	 * roll-over and an update of 'ttbr0'.
+	 */
+	local_irq_save(flags);
+	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+	isb();
+	local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_disable();
+	return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	if (!system_uses_ttbr0_pan())
+		return false;
+	__uaccess_ttbr0_enable();
+	return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+	return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+	return false;
+}
+#endif
+
 #define __uaccess_disable(alt)						\
 do {									\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,			\
-			CONFIG_ARM64_PAN));				\
+	if (!uaccess_ttbr0_disable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
+				CONFIG_ARM64_PAN));			\
 } while (0)
 
 #define __uaccess_enable(alt)						\
 do {									\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,			\
-			CONFIG_ARM64_PAN));				\
+	if (!uaccess_ttbr0_enable())					\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
+				CONFIG_ARM64_PAN));			\
 } while (0)
 
 static inline void uaccess_disable(void)
@@ -373,16 +429,56 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 #include <asm/assembler.h>
 
 /*
- * User access enabling/disabling macros. These are no-ops when UAO is
- * present.
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	.macro	__uaccess_ttbr0_disable, tmp1
+	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
+	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
+	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
+	isb
+	.endm
+
+	.macro	__uaccess_ttbr0_enable, tmp1
+	get_thread_info \tmp1
+	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
+	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
+	isb
+	.endm
+
+	.macro	uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+	__uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+	save_and_disable_irq \tmp2		// avoid preemption
+	__uaccess_ttbr0_enable \tmp1
+	restore_irq \tmp2
+alternative_else_nop_endif
+	.endm
+#else
+	.macro	uaccess_ttbr0_disable, tmp1
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1, tmp2
+	.endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
  */
 	.macro	uaccess_disable_not_uao, tmp1
+	uaccess_ttbr0_disable \tmp1
 alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(1)
 alternative_else_nop_endif
 	.endm
 
 	.macro	uaccess_enable_not_uao, tmp1, tmp2
+	uaccess_ttbr0_enable \tmp1, \tmp2
 alternative_if ARM64_ALT_PAN_NOT_UAO
 	SET_PSTATE_PAN(0)
 alternative_else_nop_endif
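For context, a typical pairing of these primitives around a user access would look like the sketch below (illustrative only, not part of this patch):

	uaccess_enable();	/* TTBR0_EL1 <- thread_info.ttbr0, or clear PSTATE.PAN */
	/* ... access user memory here ... */
	uaccess_disable();	/* TTBR0_EL1 <- reserved_ttbr0, or set PSTATE.PAN */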
arch/arm64/kernel/asm-offsets.c
@@ -39,6 +39,9 @@ int main(void)
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
   DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
+#endif
   DEFINE(TSK_STACK,		offsetof(struct task_struct, stack));
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,	offsetof(struct task_struct, thread.cpu_context));
arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcap_keys);
arch/arm64/kernel/entry.S
@@ -183,10 +183,6 @@ alternative_else_nop_endif
 	eret					// return to kernel
 	.endm
 
-	.macro	get_thread_info, rd
-	mrs	\rd, sp_el0
-	.endm
-
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
arch/arm64/kernel/head.S
@@ -326,14 +326,14 @@ __create_page_tables:
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
@@ -412,7 +412,7 @@ __create_page_tables:
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	dmb	sy
 	bl	__inval_cache_range
arch/arm64/kernel/vmlinux.lds.S
@@ -216,6 +216,11 @@ SECTIONS
 	swapper_pg_dir = .;
 	. += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	reserved_ttbr0 = .;
+	. += RESERVED_TTBR0_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
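The linker-script change gives the following image layout around the swapper page tables (sketch):

	swapper_pg_dir:	SWAPPER_DIR_SIZE bytes	(kernel page tables; physical address held in TTBR1_EL1)
	reserved_ttbr0:	RESERVED_TTBR0_SIZE	(one zeroed page; TTBR0_EL1 while uaccess is disabled)
	_end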