Commit 81429eb8 authored by Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fix from Will Deacon:
 "Ensure PAN is re-enabled following user fault in uaccess routines.

  After I thought we were done for 5.4, we had a report this week of a
  nasty issue that has been shown to leak data between different user
  address spaces thanks to corruption of entries in the TLB. In
  hindsight, we should have spotted this in review when the PAN code was
  merged back in v4.3, but hindsight is 20/20 and I'm trying not to beat
  myself up too much about it despite being fairly miserable.

  Anyway, the fix is "obvious" but the actual failure is more
  subtle, and is described in the commit message. I've included a fairly
  mechanical follow-up patch here as well, which moves this checking out
  into the C wrappers which is what we do for {get,put}_user() already
  and allows us to remove these bloody assembly macros entirely. The
  patches have passed kernelci [1] [2] [3] and CKI [4] tests over night,
  as well as some targeted testing [5] for this particular issue.

  The first patch is tagged for stable and should be applied to 4.14,
  4.19 and 5.3. I have separate backports for 4.4 and 4.9, which I'll
  send out once this has landed in your tree (although the original
  patch applies cleanly, it won't build for those two trees).

  Thanks to Pavel Tatashin for reporting this and Mark Rutland for
  helping to diagnose the issue and review/test the solution"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: uaccess: Remove uaccess_*_not_uao asm macros
  arm64: uaccess: Ensure PAN is re-enabled after unhandled uaccess fault
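
To make the failure mode concrete, here is a minimal C sketch of the wrapper
pattern the second patch adopts (an illustration with a hypothetical function
name, not the kernel's exact source; the real wrappers are in the diff below):

	/*
	 * Sketch: the enable/disable pair lives in C, bracketing the asm copy
	 * routine. If the copy faults part-way through, the exception fixup
	 * makes __arch_copy_from_user() return early with the number of bytes
	 * it could not copy, and the disable call below still runs, so PAN is
	 * always re-enabled. Previously the asm routines toggled PAN
	 * themselves, and an unhandled fault could bypass the re-enable.
	 */
	static inline unsigned long
	sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long ret;

		uaccess_enable_not_uao();	/* allow kernel access to user memory */
		ret = __arch_copy_from_user(to, from, n);
		uaccess_disable_not_uao();	/* always reached, even after a fault */

		return ret;
	}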
arch/arm64/include/asm/asm-uaccess.h
@@ -58,23 +58,6 @@ alternative_else_nop_endif
 	.endm
 #endif
 
-/*
- * These macros are no-ops when UAO is present.
- */
-	.macro	uaccess_disable_not_uao, tmp1, tmp2
-	uaccess_ttbr0_disable \tmp1, \tmp2
-alternative_if ARM64_ALT_PAN_NOT_UAO
-	SET_PSTATE_PAN(1)
-alternative_else_nop_endif
-	.endm
-
-	.macro	uaccess_enable_not_uao, tmp1, tmp2, tmp3
-	uaccess_ttbr0_enable \tmp1, \tmp2, \tmp3
-alternative_if ARM64_ALT_PAN_NOT_UAO
-	SET_PSTATE_PAN(0)
-alternative_else_nop_endif
-	.endm
-
 /*
  * Remove the address tag from a virtual address, if present.
  */
arch/arm64/include/asm/uaccess.h
@@ -378,20 +378,34 @@ do {									\
 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
 #define raw_copy_from_user(to, from, n)					\
 ({									\
-	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
+	unsigned long __acfu_ret;					\
+	uaccess_enable_not_uao();					\
+	__acfu_ret = __arch_copy_from_user((to),			\
+				      __uaccess_mask_ptr(from), (n));	\
+	uaccess_disable_not_uao();					\
+	__acfu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 #define raw_copy_to_user(to, from, n)					\
 ({									\
-	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
+	unsigned long __actu_ret;					\
+	uaccess_enable_not_uao();					\
+	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
+					 (from), (n));			\
+	uaccess_disable_not_uao();					\
+	__actu_ret;							\
 })
 
 extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 #define raw_copy_in_user(to, from, n)					\
 ({									\
-	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
-			    __uaccess_mask_ptr(from), (n));		\
+	unsigned long __aciu_ret;					\
+	uaccess_enable_not_uao();					\
+	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
+					 __uaccess_mask_ptr(from), (n)); \
+	uaccess_disable_not_uao();					\
+	__aciu_ret;							\
 })
 
 #define INLINE_COPY_TO_USER
@@ -400,8 +414,11 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const vo
 extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
 static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
 {
-	if (access_ok(to, n))
+	if (access_ok(to, n)) {
+		uaccess_enable_not_uao();
 		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
+		uaccess_disable_not_uao();
+	}
 	return n;
 }
 #define clear_user	__clear_user
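
For reference, an illustrative caller (not part of this commit): the
raw_copy_*() wrappers above preserve the usual uaccess calling convention of
returning the number of bytes that could not be copied, with zero meaning
complete success.

	/* Illustrative only: a non-zero return means some bytes were not copied. */
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;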
arch/arm64/lib/clear_user.S
@@ -20,7 +20,6 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__arch_clear_user)
-	uaccess_enable_not_uao x2, x3, x4
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -40,7 +39,6 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
 	b.mi	5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-	uaccess_disable_not_uao x2, x3
 	ret
 ENDPROC(__arch_clear_user)
 EXPORT_SYMBOL(__arch_clear_user)
arch/arm64/lib/copy_from_user.S
@@ -54,10 +54,8 @@
 	end	.req	x5
 
 ENTRY(__arch_copy_from_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0				// Nothing to copy
 	ret
 ENDPROC(__arch_copy_from_user)
arch/arm64/lib/copy_in_user.S
@@ -56,10 +56,8 @@
 	end	.req	x5
 
 ENTRY(__arch_copy_in_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_in_user)
arch/arm64/lib/copy_to_user.S
@@ -53,10 +53,8 @@
 	end	.req	x5
 
 ENTRY(__arch_copy_to_user)
-	uaccess_enable_not_uao x3, x4, x5
 	add	end, x0, x2
 #include "copy_template.S"
-	uaccess_disable_not_uao x3, x4
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_to_user)
arch/arm64/lib/uaccess_flushcache.c
@@ -28,7 +28,11 @@ void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
 unsigned long __copy_user_flushcache(void *to, const void __user *from,
 				     unsigned long n)
 {
-	unsigned long rc = __arch_copy_from_user(to, from, n);
+	unsigned long rc;
+
+	uaccess_enable_not_uao();
+	rc = __arch_copy_from_user(to, from, n);
+	uaccess_disable_not_uao();
 
 	/* See above */
 	__clean_dcache_area_pop(to, n - rc);