Commit 7d37c6d5, authored by Bodo Stroesser, committed by Linus Torvalds

[PATCH] uml: s390 preparation, checksumming done in arch code

Checksum handling largely depends on the subarch.

Thus, I renamed i386 arch_csum_partial in arch/um/sys-i386/checksum.S back to
csum_partial, removed csum_partial from arch/um/kernel/checksum.c and shifted
EXPORT_SYMBOL(csum_partial) to arch/um/sys-i386/ksyms.c.
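
In other words, the i386 subarch now owns both the implementation and the export. Roughly, as a sketch assembled from the hunks below rather than the literal file contents:

	/* arch/um/sys-i386/ksyms.c, sketch: the routine implemented in
	 * checksum.S is now named csum_partial and exported directly,
	 * so the generic wrapper around arch_csum_partial() goes away. */
	#include "linux/module.h"

	unsigned int csum_partial(const unsigned char *buff, int len,
				  unsigned int sum);

	EXPORT_SYMBOL(csum_partial);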

Then, csum_partial_copy_to and csum_partial_copy_from were shifted from
arch/um/kernel/checksum.c to arch/um/include/sysdep-i386/checksum.h and
inserted in the calling functions csum_partial_copy_from_user() and
csum_and_copy_to_user().
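
The effect is that each helper first copies the data and then checksums the kernel-side buffer, reporting a fault through *err_ptr. A condensed sketch of the pattern the hunks below add to the header:

	/* sysdep-i386/checksum.h, sketch: copy from user space first,
	 * then run the plain csum_partial() over the kernel buffer. */
	static __inline__
	unsigned int csum_partial_copy_from_user(const unsigned char *src,
						 unsigned char *dst, int len,
						 int sum, int *err_ptr)
	{
		if (copy_from_user(dst, src, len)) {
			*err_ptr = -EFAULT;
			return -1;
		}
		return csum_partial(dst, len, sum);
	}

csum_and_copy_to_user() mirrors this, checking access_ok()/copy_to_user() before checksumming the source buffer.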

Now, arch/um/kernel/checksum.c is empty and removed.
Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Parent: c52ac046
--- a/arch/um/include/sysdep-i386/checksum.h
+++ b/arch/um/include/sysdep-i386/checksum.h
@@ -23,19 +23,6 @@
 unsigned int csum_partial(const unsigned char * buff, int len,
 			  unsigned int sum);
 
-/*
- * the same as csum_partial, but copies from src while it
- * checksums, and handles user-space pointer exceptions correctly, when needed.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-unsigned int csum_partial_copy_to(const unsigned char *src, unsigned char *dst,
-				  int len, int sum, int *err_ptr);
-unsigned int csum_partial_copy_from(const unsigned char *src, unsigned char *dst,
-				    int len, int sum, int *err_ptr);
-
 /*
  * Note: when you get a NULL pointer exception here this means someone
  * passed in an incorrect kernel address to one of these functions.
@@ -52,11 +39,24 @@ unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *
 	return(csum_partial(dst, len, sum));
 }
 
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
 static __inline__
 unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
 					 int len, int sum, int *err_ptr)
 {
-	return csum_partial_copy_from(src, dst, len, sum, err_ptr);
+	if(copy_from_user(dst, src, len)){
+		*err_ptr = -EFAULT;
+		return(-1);
+	}
+
+	return csum_partial(dst, len, sum);
 }
 
 /*
@@ -67,7 +67,6 @@ unsigned int csum_partial_copy_from_user(const unsigned char *src, unsigned char
  */
 
 #define csum_partial_copy_fromuser csum_partial_copy_from_user
-unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, int sum);
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
@@ -196,8 +195,14 @@ static __inline__ unsigned int csum_and_copy_to_user(const unsigned char *src,
 						     unsigned char *dst,
 						     int len, int sum, int *err_ptr)
 {
-	if (access_ok(VERIFY_WRITE, dst, len))
-		return(csum_partial_copy_to(src, dst, len, sum, err_ptr));
+	if (access_ok(VERIFY_WRITE, dst, len)){
+		if(copy_to_user(dst, src, len)){
+			*err_ptr = -EFAULT;
+			return(-1);
+		}
+
+		return csum_partial(src, len, sum);
+	}
 
 	if (len)
 		*err_ptr = -EFAULT;
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -6,7 +6,7 @@
 extra-y := vmlinux.lds
 clean-files :=
 
-obj-y = checksum.o config.o exec_kern.o exitcode.o \
+obj-y = config.o exec_kern.o exitcode.o \
 	helper.o init_task.o irq.o irq_user.o ksyms.o main.o mem.o mem_user.o \
 	physmem.o process.o process_kern.o ptrace.o reboot.o resource.o \
 	sigio_user.o sigio_kern.o signal_kern.o signal_user.o smp.o \
#include "asm/uaccess.h"
#include "linux/errno.h"
#include "linux/module.h"
unsigned int arch_csum_partial(const unsigned char *buff, int len, int sum);
unsigned int csum_partial(unsigned char *buff, int len, int sum)
{
return arch_csum_partial(buff, len, sum);
}
EXPORT_SYMBOL(csum_partial);
unsigned int csum_partial_copy_to(const unsigned char *src,
unsigned char __user *dst, int len, int sum,
int *err_ptr)
{
if(copy_to_user(dst, src, len)){
*err_ptr = -EFAULT;
return(-1);
}
return(arch_csum_partial(src, len, sum));
}
unsigned int csum_partial_copy_from(const unsigned char __user *src,
unsigned char *dst, int len, int sum,
int *err_ptr)
{
if(copy_from_user(dst, src, len)){
*err_ptr = -EFAULT;
return(-1);
}
return arch_csum_partial(dst, len, sum);
}
--- a/arch/um/sys-i386/checksum.S
+++ b/arch/um/sys-i386/checksum.S
@@ -38,7 +38,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 
 .text
 .align 4
-.globl arch_csum_partial
+.globl csum_partial
 
 #ifndef CONFIG_X86_USE_PPRO_CHECKSUM
 
@@ -49,7 +49,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * Fortunately, it is easy to convert 2-byte alignment to 4-byte
  * alignment for the unrolled loop.
  */
-arch_csum_partial:
+csum_partial:
 	pushl %esi
 	pushl %ebx
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
@@ -119,7 +119,7 @@ arch_csum_partial:
 
 /* Version for PentiumII/PPro */
 
-arch_csum_partial:
+csum_partial:
 	pushl %esi
 	pushl %ebx
 	movl 20(%esp),%eax	# Function arg: unsigned int sum
--- a/arch/um/sys-i386/ksyms.c
+++ b/arch/um/sys-i386/ksyms.c
@@ -13,5 +13,4 @@ EXPORT_SYMBOL(__down_failed_trylock);
 EXPORT_SYMBOL(__up_wakeup);
 
 /* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_from);
-EXPORT_SYMBOL(csum_partial_copy_to);
+EXPORT_SYMBOL(csum_partial);