Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
cloud-kernel
提交
fcfdd0f1
cloud-kernel
项目概览
openanolis
/
cloud-kernel
大约 1 年 前同步成功
通知
153
Star
36
Fork
7
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
10
列表
看板
标记
里程碑
合并请求
2
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
cloud-kernel
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
10
Issue
10
列表
看板
标记
里程碑
合并请求
2
合并请求
2
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
fcfdd0f1
编写于
11月 11, 2007
作者:
P
Paul Mundt
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
sh: Split out checksum.h in to _32 and _64 variants.
Signed-off-by:
N
Paul Mundt
<
lethal@linux-sh.org
>
上级
63e2c803
变更
4
隐藏空白更改
内联
并排
Showing
4 changed files
with
220 additions
and
215 deletions
+220
-215
arch/sh/kernel/sh_ksyms.c
arch/sh/kernel/sh_ksyms.c
+2
-0
include/asm-sh/checksum.h
include/asm-sh/checksum.h
+3
-213
include/asm-sh/checksum_32.h
include/asm-sh/checksum_32.h
+215
-0
include/asm-sh/checksum_64.h
include/asm-sh/checksum_64.h
+0
-2
未找到文件。
arch/sh/kernel/sh_ksyms.c
浏览文件 @
fcfdd0f1
...
...
@@ -141,7 +141,9 @@ EXPORT_SYMBOL(clear_user_page);
#endif
EXPORT_SYMBOL
(
csum_partial
);
#ifdef CONFIG_SUPERH32
EXPORT_SYMBOL
(
csum_partial_copy_generic
);
#endif
#ifdef CONFIG_IPV6
EXPORT_SYMBOL
(
csum_ipv6_magic
);
#endif
...
...
include/asm-sh/checksum.h
浏览文件 @
fcfdd0f1
#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
*/
#include <linux/in6.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage
__wsum
csum_partial
(
const
void
*
buff
,
int
len
,
__wsum
sum
);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
asmlinkage
__wsum
csum_partial_copy_generic
(
const
void
*
src
,
void
*
dst
,
int
len
,
__wsum
sum
,
int
*
src_err_ptr
,
int
*
dst_err_ptr
);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static
inline
__wsum
csum_partial_copy_nocheck
(
const
void
*
src
,
void
*
dst
,
int
len
,
__wsum
sum
)
{
return
csum_partial_copy_generic
(
src
,
dst
,
len
,
sum
,
NULL
,
NULL
);
}
static
inline
__wsum
csum_partial_copy_from_user
(
const
void
__user
*
src
,
void
*
dst
,
int
len
,
__wsum
sum
,
int
*
err_ptr
)
{
return
csum_partial_copy_generic
((
__force
const
void
*
)
src
,
dst
,
len
,
sum
,
err_ptr
,
NULL
);
}
/*
* Fold a partial checksum
*/
static
inline
__sum16
csum_fold
(
__wsum
sum
)
{
unsigned
int
__dummy
;
__asm__
(
"swap.w %0, %1
\n\t
"
"extu.w %0, %0
\n\t
"
"extu.w %1, %1
\n\t
"
"add %1, %0
\n\t
"
"swap.w %0, %1
\n\t
"
"add %1, %0
\n\t
"
"not %0, %0
\n\t
"
:
"=r"
(
sum
),
"=&r"
(
__dummy
)
:
"0"
(
sum
)
:
"t"
);
return
(
__force
__sum16
)
sum
;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
* for linux by * Arnt Gulbrandsen.
*/
static
inline
__sum16
ip_fast_csum
(
const
void
*
iph
,
unsigned
int
ihl
)
{
unsigned
int
sum
,
__dummy0
,
__dummy1
;
__asm__
__volatile__
(
"mov.l @%1+, %0
\n\t
"
"mov.l @%1+, %3
\n\t
"
"add #-2, %2
\n\t
"
"clrt
\n\t
"
"1:
\t
"
"addc %3, %0
\n\t
"
"movt %4
\n\t
"
"mov.l @%1+, %3
\n\t
"
"dt %2
\n\t
"
"bf/s 1b
\n\t
"
" cmp/eq #1, %4
\n\t
"
"addc %3, %0
\n\t
"
"addc %2, %0"
/* Here %2 is 0, add carry-bit */
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
:
"=r"
(
sum
),
"=r"
(
iph
),
"=r"
(
ihl
),
"=&r"
(
__dummy0
),
"=&z"
(
__dummy1
)
:
"1"
(
iph
),
"2"
(
ihl
)
:
"t"
);
return
csum_fold
(
sum
);
}
static
inline
__wsum
csum_tcpudp_nofold
(
__be32
saddr
,
__be32
daddr
,
unsigned
short
len
,
unsigned
short
proto
,
__wsum
sum
)
{
#ifdef __LITTLE_ENDIAN__
unsigned
long
len_proto
=
(
proto
+
len
)
<<
8
;
#ifdef CONFIG_SUPERH32
# include "checksum_32.h"
#else
unsigned
long
len_proto
=
proto
+
len
;
# include "checksum_64.h"
#endif
__asm__
(
"clrt
\n\t
"
"addc %0, %1
\n\t
"
"addc %2, %1
\n\t
"
"addc %3, %1
\n\t
"
"movt %0
\n\t
"
"add %1, %0"
:
"=r"
(
sum
),
"=r"
(
len_proto
)
:
"r"
(
daddr
),
"r"
(
saddr
),
"1"
(
len_proto
),
"0"
(
sum
)
:
"t"
);
return
sum
;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static
inline
__sum16
csum_tcpudp_magic
(
__be32
saddr
,
__be32
daddr
,
unsigned
short
len
,
unsigned
short
proto
,
__wsum
sum
)
{
return
csum_fold
(
csum_tcpudp_nofold
(
saddr
,
daddr
,
len
,
proto
,
sum
));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static
inline
__sum16
ip_compute_csum
(
const
void
*
buff
,
int
len
)
{
return
csum_fold
(
csum_partial
(
buff
,
len
,
0
));
}
#define _HAVE_ARCH_IPV6_CSUM
static
inline
__sum16
csum_ipv6_magic
(
const
struct
in6_addr
*
saddr
,
const
struct
in6_addr
*
daddr
,
__u32
len
,
unsigned
short
proto
,
__wsum
sum
)
{
unsigned
int
__dummy
;
__asm__
(
"clrt
\n\t
"
"mov.l @(0,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(4,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(8,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(12,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(0,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(4,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(8,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(12,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"addc %4, %0
\n\t
"
"addc %5, %0
\n\t
"
"movt %1
\n\t
"
"add %1, %0
\n
"
:
"=r"
(
sum
),
"=&r"
(
__dummy
)
:
"r"
(
saddr
),
"r"
(
daddr
),
"r"
(
htonl
(
len
)),
"r"
(
htonl
(
proto
)),
"0"
(
sum
)
:
"t"
);
return
csum_fold
(
sum
);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static
inline
__wsum
csum_and_copy_to_user
(
const
void
*
src
,
void
__user
*
dst
,
int
len
,
__wsum
sum
,
int
*
err_ptr
)
{
if
(
access_ok
(
VERIFY_WRITE
,
dst
,
len
))
return
csum_partial_copy_generic
((
__force
const
void
*
)
src
,
dst
,
len
,
sum
,
NULL
,
err_ptr
);
if
(
len
)
*
err_ptr
=
-
EFAULT
;
return
(
__force
__wsum
)
-
1
;
/* invalid checksum */
}
#endif
/* __ASM_SH_CHECKSUM_H */
include/asm-sh/checksum_32.h
0 → 100644
浏览文件 @
fcfdd0f1
#ifndef __ASM_SH_CHECKSUM_H
#define __ASM_SH_CHECKSUM_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999 by Kaz Kojima & Niibe Yutaka
*/
#include <linux/in6.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage
__wsum
csum_partial
(
const
void
*
buff
,
int
len
,
__wsum
sum
);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
asmlinkage
__wsum
csum_partial_copy_generic
(
const
void
*
src
,
void
*
dst
,
int
len
,
__wsum
sum
,
int
*
src_err_ptr
,
int
*
dst_err_ptr
);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static
inline
__wsum
csum_partial_copy_nocheck
(
const
void
*
src
,
void
*
dst
,
int
len
,
__wsum
sum
)
{
return
csum_partial_copy_generic
(
src
,
dst
,
len
,
sum
,
NULL
,
NULL
);
}
static
inline
__wsum
csum_partial_copy_from_user
(
const
void
__user
*
src
,
void
*
dst
,
int
len
,
__wsum
sum
,
int
*
err_ptr
)
{
return
csum_partial_copy_generic
((
__force
const
void
*
)
src
,
dst
,
len
,
sum
,
err_ptr
,
NULL
);
}
/*
* Fold a partial checksum
*/
static
inline
__sum16
csum_fold
(
__wsum
sum
)
{
unsigned
int
__dummy
;
__asm__
(
"swap.w %0, %1
\n\t
"
"extu.w %0, %0
\n\t
"
"extu.w %1, %1
\n\t
"
"add %1, %0
\n\t
"
"swap.w %0, %1
\n\t
"
"add %1, %0
\n\t
"
"not %0, %0
\n\t
"
:
"=r"
(
sum
),
"=&r"
(
__dummy
)
:
"0"
(
sum
)
:
"t"
);
return
(
__force
__sum16
)
sum
;
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* i386 version by Jorge Cwik <jorge@laser.satlink.net>, adapted
* for linux by * Arnt Gulbrandsen.
*/
static
inline
__sum16
ip_fast_csum
(
const
void
*
iph
,
unsigned
int
ihl
)
{
unsigned
int
sum
,
__dummy0
,
__dummy1
;
__asm__
__volatile__
(
"mov.l @%1+, %0
\n\t
"
"mov.l @%1+, %3
\n\t
"
"add #-2, %2
\n\t
"
"clrt
\n\t
"
"1:
\t
"
"addc %3, %0
\n\t
"
"movt %4
\n\t
"
"mov.l @%1+, %3
\n\t
"
"dt %2
\n\t
"
"bf/s 1b
\n\t
"
" cmp/eq #1, %4
\n\t
"
"addc %3, %0
\n\t
"
"addc %2, %0"
/* Here %2 is 0, add carry-bit */
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
:
"=r"
(
sum
),
"=r"
(
iph
),
"=r"
(
ihl
),
"=&r"
(
__dummy0
),
"=&z"
(
__dummy1
)
:
"1"
(
iph
),
"2"
(
ihl
)
:
"t"
);
return
csum_fold
(
sum
);
}
static
inline
__wsum
csum_tcpudp_nofold
(
__be32
saddr
,
__be32
daddr
,
unsigned
short
len
,
unsigned
short
proto
,
__wsum
sum
)
{
#ifdef __LITTLE_ENDIAN__
unsigned
long
len_proto
=
(
proto
+
len
)
<<
8
;
#else
unsigned
long
len_proto
=
proto
+
len
;
#endif
__asm__
(
"clrt
\n\t
"
"addc %0, %1
\n\t
"
"addc %2, %1
\n\t
"
"addc %3, %1
\n\t
"
"movt %0
\n\t
"
"add %1, %0"
:
"=r"
(
sum
),
"=r"
(
len_proto
)
:
"r"
(
daddr
),
"r"
(
saddr
),
"1"
(
len_proto
),
"0"
(
sum
)
:
"t"
);
return
sum
;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static
inline
__sum16
csum_tcpudp_magic
(
__be32
saddr
,
__be32
daddr
,
unsigned
short
len
,
unsigned
short
proto
,
__wsum
sum
)
{
return
csum_fold
(
csum_tcpudp_nofold
(
saddr
,
daddr
,
len
,
proto
,
sum
));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static
inline
__sum16
ip_compute_csum
(
const
void
*
buff
,
int
len
)
{
return
csum_fold
(
csum_partial
(
buff
,
len
,
0
));
}
#define _HAVE_ARCH_IPV6_CSUM
static
inline
__sum16
csum_ipv6_magic
(
const
struct
in6_addr
*
saddr
,
const
struct
in6_addr
*
daddr
,
__u32
len
,
unsigned
short
proto
,
__wsum
sum
)
{
unsigned
int
__dummy
;
__asm__
(
"clrt
\n\t
"
"mov.l @(0,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(4,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(8,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(12,%2), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(0,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(4,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(8,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"mov.l @(12,%3), %1
\n\t
"
"addc %1, %0
\n\t
"
"addc %4, %0
\n\t
"
"addc %5, %0
\n\t
"
"movt %1
\n\t
"
"add %1, %0
\n
"
:
"=r"
(
sum
),
"=&r"
(
__dummy
)
:
"r"
(
saddr
),
"r"
(
daddr
),
"r"
(
htonl
(
len
)),
"r"
(
htonl
(
proto
)),
"0"
(
sum
)
:
"t"
);
return
csum_fold
(
sum
);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static
inline
__wsum
csum_and_copy_to_user
(
const
void
*
src
,
void
__user
*
dst
,
int
len
,
__wsum
sum
,
int
*
err_ptr
)
{
if
(
access_ok
(
VERIFY_WRITE
,
dst
,
len
))
return
csum_partial_copy_generic
((
__force
const
void
*
)
src
,
dst
,
len
,
sum
,
NULL
,
err_ptr
);
if
(
len
)
*
err_ptr
=
-
EFAULT
;
return
(
__force
__wsum
)
-
1
;
/* invalid checksum */
}
#endif
/* __ASM_SH_CHECKSUM_H */
include/asm-sh
64/checksum
.h
→
include/asm-sh
/checksum_64
.h
浏览文件 @
fcfdd0f1
...
...
@@ -12,8 +12,6 @@
*
*/
#include <asm/registers.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录