Commit 5b09c3ed authored by Linus Torvalds

x86: remove pointless uaccess_32.h complexity

I'm looking at trying to possibly merge the 32-bit and 64-bit versions
of the x86 uaccess.h implementation, but first this needs to be cleaned
up.

For example, the 32-bit version of "__copy_to_user_inatomic()" is mostly
the special cases for the constant size, and it's actually never
relevant.  Every user except for one isn't actually using a constant
size anyway, and the one user that does is better off just using
__put_user() instead.
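
To see why __put_user() fits better there, here is a minimal user-space
model (an illustration only; put_user_model and the memcpy body are
stand-ins for the kernel's exception-fixup asm): the access width comes
from the pointer's type at compile time, so a fixed-size scalar store
needs no runtime size argument and no constant-size switch at all.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for __put_user(): the store width is
 * sizeof(*ptr), fixed at compile time; the real kernel macro performs
 * the store with exception fixups and returns 0 or -EFAULT. */
#define put_user_model(x, ptr)					\
({								\
	__typeof__(*(ptr)) __val = (x);				\
	memcpy((ptr), &__val, sizeof(*(ptr)));			\
	0;							\
})

int main(void)
{
	unsigned long long slot;	/* hypothetical destination */

	if (put_user_model(0x1000ULL, &slot))
		return 1;
	printf("%llx\n", slot);
	return 0;
}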

So get rid of the unnecessary complexity.

[ The same cleanup should likely happen to __copy_from_user_inatomic()
  as well, but that one has a lot more users that I need to take a look
  at first ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent f6c658df
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -33,46 +33,10 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
  * the specified block with access_ok() before calling this function.
  * The caller should also make sure he pins the user space address
  * so that we don't result in page fault and sleep.
- *
- * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
- * we return the initial request size (1, 2 or 4), as copy_*_user should do.
- * If a store crosses a page boundary and gets a fault, the x86 will not write
- * anything, so this is accurate.
  */
 static __always_inline unsigned long __must_check
 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 {
-	if (__builtin_constant_p(n)) {
-		unsigned long ret;
-
-		switch (n) {
-		case 1:
-			__uaccess_begin();
-			__put_user_size(*(u8 *)from, (u8 __user *)to,
-					1, ret, 1);
-			__uaccess_end();
-			return ret;
-		case 2:
-			__uaccess_begin();
-			__put_user_size(*(u16 *)from, (u16 __user *)to,
-					2, ret, 2);
-			__uaccess_end();
-			return ret;
-		case 4:
-			__uaccess_begin();
-			__put_user_size(*(u32 *)from, (u32 __user *)to,
-					4, ret, 4);
-			__uaccess_end();
-			return ret;
-		case 8:
-			__uaccess_begin();
-			__put_user_size(*(u64 *)from, (u64 __user *)to,
-					8, ret, 8);
-			__uaccess_end();
-			return ret;
-		}
-	}
 	return __copy_to_user_ll(to, from, n);
 }
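
The deleted branch only ever fired when the compiler could prove n
constant.  A small stand-alone sketch (user-space, illustrative only) of
the same idiom shows why: with a runtime size, __builtin_constant_p(n)
is 0, the whole specialized branch is discarded, and only the generic
call remains.

#include <string.h>

/* Same idiom as the removed block: for a compile-time-constant size
 * the branch folds to a single fixed-width store; any caller passing a
 * runtime size compiles straight to the generic fallback. */
static inline void copy_small(void *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && n == 4) {
		*(unsigned int *)to = *(const unsigned int *)from;
		return;
	}
	memcpy(to, from, n);	/* what every non-constant caller gets */
}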
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -535,9 +535,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 			return ret;
 
 		if (r->presumed_offset != offset &&
-		    __copy_to_user_inatomic(&user_relocs->presumed_offset,
-					    &r->presumed_offset,
-					    sizeof(r->presumed_offset))) {
+		    __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
 			return -EFAULT;
 		}
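
At the i915 call site the replacement is like-for-like: presumed_offset
is a fixed-width field, so __put_user() derives the same access size
from the pointer type, and both helpers return nonzero on a faulting
store, which the existing -EFAULT path already handles.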