1. 03 4月, 2021 9 次提交
  2. 29 3月, 2021 1 次提交
  3. 26 3月, 2021 3 次提交
  4. 11 2月, 2021 5 次提交
  5. 08 2月, 2021 1 次提交
  6. 05 11月, 2020 1 次提交
    • M
      powerpc: Use asm_goto_volatile for put_user() · 1344a232
      Michael Ellerman 提交于
      Andreas reported that commit ee0a49a6 ("powerpc/uaccess: Switch
      __put_user_size_allowed() to __put_user_asm_goto()") broke
      CLONE_CHILD_SETTID.
      
      Further inspection showed that the put_user() in schedule_tail() was
      missing entirely, the store not emitted by the compiler.
      
        <.schedule_tail>:
          mflr    r0
          std     r0,16(r1)
          stdu    r1,-112(r1)
          bl      <.finish_task_switch>
          ld      r9,2496(r3)
          cmpdi   cr7,r9,0
          bne     cr7,<.schedule_tail+0x60>
          ld      r3,392(r13)
          ld      r9,1392(r3)
          cmpdi   cr7,r9,0
          beq     cr7,<.schedule_tail+0x3c>
          li      r4,0
          li      r5,0
          bl      <.__task_pid_nr_ns>
          nop
          bl      <.calculate_sigpending>
          nop
          addi    r1,r1,112
          ld      r0,16(r1)
          mtlr    r0
          blr
          nop
          nop
          nop
          bl      <.__balance_callback>
          b       <.schedule_tail+0x1c>
      
      Notice there are no stores other than to the stack. There should be a
      stw in there for the store to current->set_child_tid.
      
      This is only seen with GCC 4.9 era compilers (tested with 4.9.3 and
      4.9.4), and only when CONFIG_PPC_KUAP is disabled.
      
      When CONFIG_PPC_KUAP=y, the inline asm that's part of the isync()
      and mtspr() inlined via allow_user_access() seems to be enough to
      avoid the bug.
      
      We already have a macro to work around this (or a similar bug), called
      asm_volatile_goto which includes an empty asm block to tickle the
      compiler into generating the right code. So use that.
      
      With this applied the code generation looks more like it will work:
      
        <.schedule_tail>:
          mflr    r0
          std     r31,-8(r1)
          std     r0,16(r1)
          stdu    r1,-144(r1)
          std     r3,112(r1)
          bl      <._mcount>
          nop
          ld      r3,112(r1)
          bl      <.finish_task_switch>
          ld      r9,2624(r3)
          cmpdi   cr7,r9,0
          bne     cr7,<.schedule_tail+0xa0>
          ld      r3,2408(r13)
          ld      r31,1856(r3)
          cmpdi   cr7,r31,0
          beq     cr7,<.schedule_tail+0x80>
          li      r4,0
          li      r5,0
          bl      <.__task_pid_nr_ns>
          nop
          li      r9,-1
          clrldi  r9,r9,12
          cmpld   cr7,r31,r9
          bgt     cr7,<.schedule_tail+0x80>
          lis     r9,16
          rldicr  r9,r9,32,31
          subf    r9,r31,r9
          cmpldi  cr7,r9,3
          ble     cr7,<.schedule_tail+0x80>
          li      r9,0
          stw     r3,0(r31)				<-- stw
          nop
          bl      <.calculate_sigpending>
          nop
          addi    r1,r1,144
          ld      r0,16(r1)
          ld      r31,-8(r1)
          mtlr    r0
          blr
          nop
          bl      <.__balance_callback>
          b       <.schedule_tail+0x30>
      
      Fixes: ee0a49a6 ("powerpc/uaccess: Switch __put_user_size_allowed() to __put_user_asm_goto()")
      Reported-by: Andreas Schwab <schwab@linux-m68k.org>
      Tested-by: Andreas Schwab <schwab@linux-m68k.org>
      Suggested-by: Christophe Leroy <christophe.leroy@csgroup.eu>
      Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
      Link: https://lore.kernel.org/r/20201104111742.672142-1-mpe@ellerman.id.au
      1344a232
  7. 22 10月, 2020 1 次提交
    • C
      powerpc/uaccess: Don't use "m<>" constraint with GCC 4.9 · 592bbe9c
      Christophe Leroy 提交于
      GCC 4.9 sometimes fails to build with "m<>" constraint in
      inline assembly.
      
        CC      lib/iov_iter.o
      In file included from ./arch/powerpc/include/asm/cmpxchg.h:6:0,
                       from ./arch/powerpc/include/asm/atomic.h:11,
                       from ./include/linux/atomic.h:7,
                       from ./include/linux/crypto.h:15,
                       from ./include/crypto/hash.h:11,
                       from lib/iov_iter.c:2:
      lib/iov_iter.c: In function 'iovec_from_user.part.30':
      ./arch/powerpc/include/asm/uaccess.h:287:2: error: 'asm' operand has impossible constraints
        __asm__ __volatile__(    \
        ^
      ./include/linux/compiler.h:78:42: note: in definition of macro 'unlikely'
       # define unlikely(x) __builtin_expect(!!(x), 0)
                                                ^
      ./arch/powerpc/include/asm/uaccess.h:583:34: note: in expansion of macro 'unsafe_op_wrap'
       #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
                                        ^
      ./arch/powerpc/include/asm/uaccess.h:329:10: note: in expansion of macro '__get_user_asm'
        case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
                ^
      ./arch/powerpc/include/asm/uaccess.h:363:3: note: in expansion of macro '__get_user_size_allowed'
         __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
         ^
      ./arch/powerpc/include/asm/uaccess.h:100:2: note: in expansion of macro '__get_user_nocheck'
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
        ^
      ./arch/powerpc/include/asm/uaccess.h:583:49: note: in expansion of macro '__get_user_allowed'
       #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
                                                       ^
      lib/iov_iter.c:1663:3: note: in expansion of macro 'unsafe_get_user'
         unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
         ^
      make[1]: *** [scripts/Makefile.build:283: lib/iov_iter.o] Error 1
      
      Define a UPD_CONSTR macro that is "<>" by default and
      only "" with GCC prior to GCC 5.
      
      Fixes: fcf1f268 ("powerpc/uaccess: Add pre-update addressing to __put_user_asm_goto()")
      Fixes: 2f279eeb ("powerpc/uaccess: Add pre-update addressing to __get_user_asm() and __put_user_asm()")
      Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
      Acked-by: Segher Boessenkool <segher@kernel.crashing.org>
      Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
      Link: https://lore.kernel.org/r/212d3bc4a52ca71523759517bb9c61f7e477c46a.1603179582.git.christophe.leroy@csgroup.eu
      592bbe9c
  8. 06 10月, 2020 1 次提交
    • D
      x86, powerpc: Rename memcpy_mcsafe() to copy_mc_to_{user, kernel}() · ec6347bb
      Dan Williams 提交于
      In reaction to a proposal to introduce a memcpy_mcsafe_fast()
      implementation Linus points out that memcpy_mcsafe() is poorly named
      relative to communicating the scope of the interface. Specifically what
      addresses are valid to pass as source, destination, and what faults /
      exceptions are handled.
      
      Of particular concern is that even though x86 might be able to handle
      the semantics of copy_mc_to_user() with its common copy_user_generic()
      implementation other archs likely need / want an explicit path for this
      case:
      
        On Fri, May 1, 2020 at 11:28 AM Linus Torvalds <torvalds@linux-foundation.org> wrote:
        >
        > On Thu, Apr 30, 2020 at 6:21 PM Dan Williams <dan.j.williams@intel.com> wrote:
        > >
        > > However now I see that copy_user_generic() works for the wrong reason.
        > > It works because the exception on the source address due to poison
        > > looks no different than a write fault on the user address to the
        > > caller, it's still just a short copy. So it makes copy_to_user() work
        > > for the wrong reason relative to the name.
        >
        > Right.
        >
        > And it won't work that way on other architectures. On x86, we have a
        > generic function that can take faults on either side, and we use it
        > for both cases (and for the "in_user" case too), but that's an
        > artifact of the architecture oddity.
        >
        > In fact, it's probably wrong even on x86 - because it can hide bugs -
        > but writing those things is painful enough that everybody prefers
        > having just one function.
      
      Replace a single top-level memcpy_mcsafe() with either
      copy_mc_to_user(), or copy_mc_to_kernel().
      
      Introduce an x86 copy_mc_fragile() name as the rename for the
      low-level x86 implementation formerly named memcpy_mcsafe(). It is used
      as the slow / careful backend that is supplanted by a fast
      copy_mc_generic() in a follow-on patch.
      
      One side-effect of this reorganization is that separating copy_mc_64.S
      to its own file means that perf no longer needs to track dependencies
      for its memcpy_64.S benchmarks.
      
       [ bp: Massage a bit. ]
      Signed-off-by: Dan Williams <dan.j.williams@intel.com>
      Signed-off-by: Borislav Petkov <bp@suse.de>
      Reviewed-by: Tony Luck <tony.luck@intel.com>
      Acked-by: Michael Ellerman <mpe@ellerman.id.au>
      Cc: <stable@vger.kernel.org>
      Link: http://lore.kernel.org/r/CAHk-=wjSqtXAqfUJxFtWNwmguFASTgB0dz1dT3V-78Quiezqbg@mail.gmail.com
      Link: https://lkml.kernel.org/r/160195561680.2163339.11574962055305783722.stgit@dwillia2-desk3.amr.corp.intel.com
      ec6347bb
  9. 15 9月, 2020 3 次提交
  10. 09 9月, 2020 2 次提交
  11. 08 9月, 2020 1 次提交
  12. 02 9月, 2020 1 次提交
    • C
      powerpc/uaccess: Use flexible addressing with __put_user()/__get_user() · c20beffe
      Christophe Leroy 提交于
      At the time being, __put_user()/__get_user() and friends only use
      D-form addressing, with 0 offset. Ex:
      
      	lwz	reg1, 0(reg2)
      
      Give the compiler the opportunity to use other addressing modes
      whenever possible, to get more optimised code.
      
      Hereunder is a small example:
      
      struct test {
      	u32 item1;
      	u16 item2;
      	u8 item3;
      	u64 item4;
      };
      
      int set_test_user(struct test __user *from, struct test __user *to)
      {
      	int err;
      	u32 item1;
      	u16 item2;
      	u8 item3;
      	u64 item4;
      
      	err = __get_user(item1, &from->item1);
      	err |= __get_user(item2, &from->item2);
      	err |= __get_user(item3, &from->item3);
      	err |= __get_user(item4, &from->item4);
      
      	err |= __put_user(item1, &to->item1);
      	err |= __put_user(item2, &to->item2);
      	err |= __put_user(item3, &to->item3);
      	err |= __put_user(item4, &to->item4);
      
      	return err;
      }
      
      Before the patch:
      
      00000df0 <set_test_user>:
       df0:	94 21 ff f0 	stwu    r1,-16(r1)
       df4:	39 40 00 00 	li      r10,0
       df8:	93 c1 00 08 	stw     r30,8(r1)
       dfc:	93 e1 00 0c 	stw     r31,12(r1)
       e00:	7d 49 53 78 	mr      r9,r10
       e04:	80 a3 00 00 	lwz     r5,0(r3)
       e08:	38 e3 00 04 	addi    r7,r3,4
       e0c:	7d 46 53 78 	mr      r6,r10
       e10:	a0 e7 00 00 	lhz     r7,0(r7)
       e14:	7d 29 33 78 	or      r9,r9,r6
       e18:	39 03 00 06 	addi    r8,r3,6
       e1c:	7d 46 53 78 	mr      r6,r10
       e20:	89 08 00 00 	lbz     r8,0(r8)
       e24:	7d 29 33 78 	or      r9,r9,r6
       e28:	38 63 00 08 	addi    r3,r3,8
       e2c:	7d 46 53 78 	mr      r6,r10
       e30:	83 c3 00 00 	lwz     r30,0(r3)
       e34:	83 e3 00 04 	lwz     r31,4(r3)
       e38:	7d 29 33 78 	or      r9,r9,r6
       e3c:	7d 43 53 78 	mr      r3,r10
       e40:	90 a4 00 00 	stw     r5,0(r4)
       e44:	7d 29 1b 78 	or      r9,r9,r3
       e48:	38 c4 00 04 	addi    r6,r4,4
       e4c:	7d 43 53 78 	mr      r3,r10
       e50:	b0 e6 00 00 	sth     r7,0(r6)
       e54:	7d 29 1b 78 	or      r9,r9,r3
       e58:	38 e4 00 06 	addi    r7,r4,6
       e5c:	7d 43 53 78 	mr      r3,r10
       e60:	99 07 00 00 	stb     r8,0(r7)
       e64:	7d 23 1b 78 	or      r3,r9,r3
       e68:	38 84 00 08 	addi    r4,r4,8
       e6c:	93 c4 00 00 	stw     r30,0(r4)
       e70:	93 e4 00 04 	stw     r31,4(r4)
       e74:	7c 63 53 78 	or      r3,r3,r10
       e78:	83 c1 00 08 	lwz     r30,8(r1)
       e7c:	83 e1 00 0c 	lwz     r31,12(r1)
       e80:	38 21 00 10 	addi    r1,r1,16
       e84:	4e 80 00 20 	blr
      
      After the patch:
      
      00000dbc <set_test_user>:
       dbc:	39 40 00 00 	li      r10,0
       dc0:	7d 49 53 78 	mr      r9,r10
       dc4:	80 03 00 00 	lwz     r0,0(r3)
       dc8:	7d 48 53 78 	mr      r8,r10
       dcc:	a1 63 00 04 	lhz     r11,4(r3)
       dd0:	7d 29 43 78 	or      r9,r9,r8
       dd4:	7d 48 53 78 	mr      r8,r10
       dd8:	88 a3 00 06 	lbz     r5,6(r3)
       ddc:	7d 29 43 78 	or      r9,r9,r8
       de0:	7d 48 53 78 	mr      r8,r10
       de4:	80 c3 00 08 	lwz     r6,8(r3)
       de8:	80 e3 00 0c 	lwz     r7,12(r3)
       dec:	7d 29 43 78 	or      r9,r9,r8
       df0:	7d 43 53 78 	mr      r3,r10
       df4:	90 04 00 00 	stw     r0,0(r4)
       df8:	7d 29 1b 78 	or      r9,r9,r3
       dfc:	7d 43 53 78 	mr      r3,r10
       e00:	b1 64 00 04 	sth     r11,4(r4)
       e04:	7d 29 1b 78 	or      r9,r9,r3
       e08:	7d 43 53 78 	mr      r3,r10
       e0c:	98 a4 00 06 	stb     r5,6(r4)
       e10:	7d 23 1b 78 	or      r3,r9,r3
       e14:	90 c4 00 08 	stw     r6,8(r4)
       e18:	90 e4 00 0c 	stw     r7,12(r4)
       e1c:	7c 63 53 78 	or      r3,r3,r10
       e20:	4e 80 00 20 	blr
      Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
      Reviewed-by: Segher Boessenkool <segher@kernel.crashing.org>
      Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
      Link: https://lore.kernel.org/r/c27bc4e598daf3bbb225de7a1f5c52121cf1e279.1597235091.git.christophe.leroy@csgroup.eu
      c20beffe
  13. 13 8月, 2020 1 次提交
  14. 18 5月, 2020 2 次提交
  15. 08 5月, 2020 1 次提交
  16. 01 5月, 2020 1 次提交
  17. 30 4月, 2020 3 次提交
    • C
      powerpc/uaccess: Implement unsafe_copy_to_user() as a simple loop · 17bc4336
      Christophe Leroy 提交于
      At the time being, unsafe_copy_to_user() is based on
      raw_copy_to_user() which calls __copy_tofrom_user().
      
      __copy_tofrom_user() is a big optimised function to copy big amount
      of data. It aligns destinations to cache line in order to use
      dcbz instruction.
      
      Today unsafe_copy_to_user() is called only from filldir().
      It is used to mainly copy small amount of data like filenames,
      so __copy_tofrom_user() is not fit.
      
      Also, unsafe_copy_to_user() is used within user_access_begin/end
      sections. In those section, it is preferable to not call functions.
      
      Rewrite unsafe_copy_to_user() as a macro that uses __put_user_goto().
      We first perform a loop of long, then we finish with necessary
      complements.
      
      unsafe_copy_to_user() might be used in the near future to copy
      fixed-size data, like pt_regs structs during signal processing.
      Having it as a macro allows GCC to optimise it: when it knows the
      size in advance, it can unroll loops, drop complements when the
      size is a multiple of longs, etc ...
      Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
      Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
      Link: https://lore.kernel.org/r/fe952112c29bf6a0a2778c9e6bbb4f4afd2c4258.1587143308.git.christophe.leroy@c-s.fr
      17bc4336
    • C
      powerpc/uaccess: Implement unsafe_put_user() using 'asm goto' · 334710b1
      Christophe Leroy 提交于
      unsafe_put_user() is designed to take benefit of 'asm goto'.
      
      Instead of using the standard __put_user() approach and branch
      based on the returned error, use 'asm goto' and make the
      exception code branch directly to the error label. There is
      no code anymore in the fixup section.
      
      This change significantly simplifies functions using
      unsafe_put_user()
      
      Small example of the benefit with the following code:
      
      struct test {
      	u32 item1;
      	u16 item2;
      	u8 item3;
      	u64 item4;
      };
      
      int set_test_to_user(struct test __user *test, u32 item1, u16 item2, u8 item3, u64 item4)
      {
      	unsafe_put_user(item1, &test->item1, failed);
      	unsafe_put_user(item2, &test->item2, failed);
      	unsafe_put_user(item3, &test->item3, failed);
      	unsafe_put_user(item4, &test->item4, failed);
      	return 0;
      failed:
      	return -EFAULT;
      }
      
      Before the patch:
      
      00000be8 <set_test_to_user>:
       be8:	39 20 00 00 	li      r9,0
       bec:	90 83 00 00 	stw     r4,0(r3)
       bf0:	2f 89 00 00 	cmpwi   cr7,r9,0
       bf4:	40 9e 00 38 	bne     cr7,c2c <set_test_to_user+0x44>
       bf8:	b0 a3 00 04 	sth     r5,4(r3)
       bfc:	2f 89 00 00 	cmpwi   cr7,r9,0
       c00:	40 9e 00 2c 	bne     cr7,c2c <set_test_to_user+0x44>
       c04:	98 c3 00 06 	stb     r6,6(r3)
       c08:	2f 89 00 00 	cmpwi   cr7,r9,0
       c0c:	40 9e 00 20 	bne     cr7,c2c <set_test_to_user+0x44>
       c10:	90 e3 00 08 	stw     r7,8(r3)
       c14:	91 03 00 0c 	stw     r8,12(r3)
       c18:	21 29 00 00 	subfic  r9,r9,0
       c1c:	7d 29 49 10 	subfe   r9,r9,r9
       c20:	38 60 ff f2 	li      r3,-14
       c24:	7d 23 18 38 	and     r3,r9,r3
       c28:	4e 80 00 20 	blr
       c2c:	38 60 ff f2 	li      r3,-14
       c30:	4e 80 00 20 	blr
      
      00000000 <.fixup>:
      	...
        b8:	39 20 ff f2 	li      r9,-14
        bc:	48 00 00 00 	b       bc <.fixup+0xbc>
      			bc: R_PPC_REL24	.text+0xbf0
        c0:	39 20 ff f2 	li      r9,-14
        c4:	48 00 00 00 	b       c4 <.fixup+0xc4>
      			c4: R_PPC_REL24	.text+0xbfc
        c8:	39 20 ff f2 	li      r9,-14
        cc:	48 00 00 00 	b       cc <.fixup+0xcc>
        d0:	39 20 ff f2 	li      r9,-14
        d4:	48 00 00 00 	b       d4 <.fixup+0xd4>
      			d4: R_PPC_REL24	.text+0xc18
      
      00000000 <__ex_table>:
      	...
      			a0: R_PPC_REL32	.text+0xbec
      			a4: R_PPC_REL32	.fixup+0xb8
      			a8: R_PPC_REL32	.text+0xbf8
      			ac: R_PPC_REL32	.fixup+0xc0
      			b0: R_PPC_REL32	.text+0xc04
      			b4: R_PPC_REL32	.fixup+0xc8
      			b8: R_PPC_REL32	.text+0xc10
      			bc: R_PPC_REL32	.fixup+0xd0
      			c0: R_PPC_REL32	.text+0xc14
      			c4: R_PPC_REL32	.fixup+0xd0
      
      After the patch:
      
      00000be8 <set_test_to_user>:
       be8:	90 83 00 00 	stw     r4,0(r3)
       bec:	b0 a3 00 04 	sth     r5,4(r3)
       bf0:	98 c3 00 06 	stb     r6,6(r3)
       bf4:	90 e3 00 08 	stw     r7,8(r3)
       bf8:	91 03 00 0c 	stw     r8,12(r3)
       bfc:	38 60 00 00 	li      r3,0
       c00:	4e 80 00 20 	blr
       c04:	38 60 ff f2 	li      r3,-14
       c08:	4e 80 00 20 	blr
      
      00000000 <__ex_table>:
      	...
      			a0: R_PPC_REL32	.text+0xbe8
      			a4: R_PPC_REL32	.text+0xc04
      			a8: R_PPC_REL32	.text+0xbec
      			ac: R_PPC_REL32	.text+0xc04
      			b0: R_PPC_REL32	.text+0xbf0
      			b4: R_PPC_REL32	.text+0xc04
      			b8: R_PPC_REL32	.text+0xbf4
      			bc: R_PPC_REL32	.text+0xc04
      			c0: R_PPC_REL32	.text+0xbf8
      			c4: R_PPC_REL32	.text+0xc04
      Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
      Reviewed-by: Segher Boessenkool <segher@kernel.crashing.org>
      Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
      Link: https://lore.kernel.org/r/23e680624680a9a5405f4b88740d2596d4b17c26.1587143308.git.christophe.leroy@c-s.fr
      334710b1
    • N
      powerpc/uaccess: Evaluate macro arguments once, before user access is allowed · d02f6b7d
      Nicholas Piggin 提交于
      get/put_user() can be called with nontrivial arguments. fs/proc/page.c
      has a good example:
      
          if (put_user(stable_page_flags(ppage), out)) {
      
      stable_page_flags() is quite a lot of code, including spin locks in
      the page allocator.
      
      Ensure these arguments are evaluated before user access is allowed.
      
      This improves security by reducing code with access to userspace, but
      it also fixes a PREEMPT bug with KUAP on powerpc/64s:
      stable_page_flags() is currently called with AMR set to allow writes,
      it ends up calling spin_unlock(), which can call preempt_schedule. But
      the task switch code can not be called with AMR set (it relies on
      interrupts saving the register), so this blows up.
      
      It's fine if the code inside allow_user_access() is preemptible,
      because a timer or IPI will save the AMR, but it's not okay to
      explicitly cause a reschedule.
      
      Fixes: de78a9c4 ("powerpc: Add a framework for Kernel Userspace Access Protection")
      Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
      Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
      Link: https://lore.kernel.org/r/20200407041245.600651-1-npiggin@gmail.com
      d02f6b7d
  18. 28 1月, 2020 3 次提交