openanolis / cloud-kernel, commit c4a7c77f
Commit c4a7c77f
Authored on Sep 30, 2005 by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

Parents: a36f4961, 017fb98e
Showing 25 changed files with 375 additions and 723 deletions (+375 / -723)
arch/sparc/kernel/setup.c              +0    -2
arch/sparc64/kernel/head.S             +5    -2
arch/sparc64/kernel/ptrace.c           +11   -3
arch/sparc64/kernel/setup.c            +0    -21
arch/sparc64/kernel/sys32.S            +98   -72
arch/sparc64/kernel/traps.c            +20   -40
arch/sparc64/kernel/una_asm.S          +29   -36
arch/sparc64/kernel/unaligned.c        +24   -21
arch/sparc64/lib/strncpy_from_user.S   +6    -10
arch/sparc64/lib/user_fixup.c          +29   -34
arch/sparc64/mm/Makefile               +1    -1
arch/sparc64/mm/extable.c              +0    -80
arch/sparc64/mm/fault.c                +5    -64
arch/sparc64/mm/init.c                 +130  -129
arch/sparc64/prom/Makefile             +1    -1
arch/sparc64/prom/init.c               +0    -3
arch/sparc64/prom/memory.c             +0    -152
drivers/video/aty/radeon_base.c        +1    -1
drivers/video/aty/radeonfb.h           +2    -0
include/asm-sparc/pgtable.h            +2    -0
include/asm-sparc64/openprom.h         +2    -2
include/asm-sparc64/oplib.h            +0    -14
include/asm-sparc64/page.h             +0    -17
include/asm-sparc64/pgtable.h          +3    -0
include/asm-sparc64/uaccess.h          +6    -18
arch/sparc/kernel/setup.c
@@ -249,8 +249,6 @@ struct tt_entry *sparc_ttable;
 struct pt_regs fake_swapper_regs;
 
-extern void paging_init(void);
-
 void __init setup_arch(char **cmdline_p)
 {
 	int i;
arch/sparc64/kernel/head.S
@@ -540,8 +540,11 @@ bootup_user_stack_end:
 prom_tba:	.xword	0
 tlb_type:	.word	0	/* Must NOT end up in BSS */
 	.section	".fixup",#alloc,#execinstr
-	.globl	__ret_efault
+	.globl	__ret_efault, __retl_efault
 __ret_efault:
 	ret
 	 restore %g0, -EFAULT, %o0
+__retl_efault:
+	retl
+	 mov	-EFAULT, %o0
arch/sparc64/kernel/ptrace.c
@@ -31,6 +31,7 @@
 #include <asm/visasm.h>
 #include <asm/spitfire.h>
 #include <asm/page.h>
+#include <asm/cpudata.h>
 
 /* Returning from ptrace is a bit tricky because the syscall return
  * low level code assumes any value returned which is negative and
@@ -132,12 +133,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
 		unsigned long start = __pa(kaddr);
 		unsigned long end = start + len;
+		unsigned long dcache_line_size;
+
+		dcache_line_size = local_cpu_data().dcache_line_size;
 
 		if (tlb_type == spitfire) {
-			for (; start < end; start += 32)
+			for (; start < end; start += dcache_line_size)
 				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
 		} else {
-			for (; start < end; start += 32)
+			start &= ~(dcache_line_size - 1);
+			for (; start < end; start += dcache_line_size)
 				__asm__ __volatile__(
 					"stxa %%g0, [%0] %1\n\t"
 					"membar #Sync"
@@ -150,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	if (write && tlb_type == spitfire) {
 		unsigned long start = (unsigned long) kaddr;
 		unsigned long end = start + len;
+		unsigned long icache_line_size;
+
+		icache_line_size = local_cpu_data().icache_line_size;
 
-		for (; start < end; start += 32)
+		for (; start < end; start += icache_line_size)
 			flushi(start);
 	}
 }
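The flush loops above step by the cache line size reported for the running CPU instead of a hard-coded 32 bytes. Below is a minimal user-space sketch of that stride pattern; the per-CPU data lookup and the real flush primitive (local_cpu_data(), spitfire_put_dcache_tag(), flushi()) are kernel internals and are replaced with stand-ins here.

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the kernel's per-CPU data and its flush primitive. */
struct cpu_data { unsigned int dcache_line_size; };
static struct cpu_data this_cpu = { 32 };
static void flush_one_line(uintptr_t pa) { (void)pa; /* stxa + membar in the kernel */ }

/* Flush [start, start + len) one cache line at a time, using the
 * line size the CPU reports rather than assuming 32 bytes.
 */
static void flush_range(uintptr_t start, size_t len)
{
	unsigned int line = this_cpu.dcache_line_size;
	uintptr_t end = start + len;

	start &= ~((uintptr_t)line - 1);	/* align down to a line boundary */
	for (; start < end; start += line)
		flush_one_line(start);
}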
arch/sparc64/kernel/setup.c
@@ -464,8 +464,6 @@ static void __init boot_flags_init(char *commands)
 	}
 }
 
-extern int prom_probe_memory(void);
-extern unsigned long start, end;
 extern void panic_setup(char *, int *);
 
 extern unsigned short root_flags;
@@ -492,12 +490,8 @@ void register_prom_callbacks(void)
 		     "' linux-.soft2 to .soft2");
 }
 
-extern void paging_init(void);
-
 void __init setup_arch(char **cmdline_p)
 {
-	int i;
-
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
 	strcpy(saved_command_line, *cmdline_p);
@@ -516,21 +510,6 @@ void __init setup_arch(char **cmdline_p)
 	boot_flags_init(*cmdline_p);
 
 	idprom_init();
-	(void) prom_probe_memory();
-
-	phys_base = 0xffffffffffffffffUL;
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		unsigned long top;
-
-		if (sp_banks[i].base_addr < phys_base)
-			phys_base = sp_banks[i].base_addr;
-		top = sp_banks[i].base_addr + sp_banks[i].num_bytes;
-	}
-	pfn_base = phys_base >> PAGE_SHIFT;
-
-	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
-	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
 	if (!root_flags)
 		root_mountflags &= ~MS_RDONLY;
arch/sparc64/kernel/sys32.S
@@ -157,173 +157,199 @@ sys32_socketcall:	/* %o0=call, %o1=args */
	or	%g2, %lo(__socketcall_table_begin), %g2
	jmpl	%g2 + %o0, %g0
	 nop

	/* Each entry is exactly 32 bytes. */
	.align	32
__socketcall_table_begin:
do_sys_socket: /* sys_socket(int, int, int) */
1:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_socket), %g1
2:	ldswa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_socket), %g0
3:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
4:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_bind), %g1
5:	ldswa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_bind), %g0
6:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
7:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_connect), %g1
8:	ldswa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_connect), %g0
9:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
do_sys_listen: /* sys_listen(int, int) */
10:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_listen), %g1
	jmpl	%g1 + %lo(sys_listen), %g0
11:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop; nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
12:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_accept), %g1
13:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_accept), %g0
14:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
15:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_getsockname), %g1
16:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_getsockname), %g0
17:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
18:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_getpeername), %g1
19:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(sys_getpeername), %g0
20:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
21:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_socketpair), %g1
22:	ldswa	[%o1 + 0x8] %asi, %o2
23:	lduwa	[%o1 + 0xc] %asi, %o3
	jmpl	%g1 + %lo(sys_socketpair), %g0
24:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop; nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
25:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_send), %g1
26:	lduwa	[%o1 + 0x8] %asi, %o2
27:	lduwa	[%o1 + 0xc] %asi, %o3
	jmpl	%g1 + %lo(sys_send), %g0
28:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
29:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_recv), %g1
30:	lduwa	[%o1 + 0x8] %asi, %o2
31:	lduwa	[%o1 + 0xc] %asi, %o3
	jmpl	%g1 + %lo(sys_recv), %g0
32:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
33:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_sendto), %g1
34:	lduwa	[%o1 + 0x8] %asi, %o2
35:	lduwa	[%o1 + 0xc] %asi, %o3
36:	lduwa	[%o1 + 0x10] %asi, %o4
37:	ldswa	[%o1 + 0x14] %asi, %o5
	jmpl	%g1 + %lo(sys_sendto), %g0
38:	 lduwa	[%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
39:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_recvfrom), %g1
40:	lduwa	[%o1 + 0x8] %asi, %o2
41:	lduwa	[%o1 + 0xc] %asi, %o3
42:	lduwa	[%o1 + 0x10] %asi, %o4
43:	lduwa	[%o1 + 0x14] %asi, %o5
	jmpl	%g1 + %lo(sys_recvfrom), %g0
44:	 lduwa	[%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
45:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(sys_shutdown), %g1
	jmpl	%g1 + %lo(sys_shutdown), %g0
46:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop; nop
do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
47:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_setsockopt), %g1
48:	ldswa	[%o1 + 0x8] %asi, %o2
49:	lduwa	[%o1 + 0xc] %asi, %o3
50:	ldswa	[%o1 + 0x10] %asi, %o4
	jmpl	%g1 + %lo(compat_sys_setsockopt), %g0
51:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
52:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_getsockopt), %g1
53:	ldswa	[%o1 + 0x8] %asi, %o2
54:	lduwa	[%o1 + 0xc] %asi, %o3
55:	lduwa	[%o1 + 0x10] %asi, %o4
	jmpl	%g1 + %lo(compat_sys_getsockopt), %g0
56:	 ldswa	[%o1 + 0x4] %asi, %o1
	nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
57:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_sendmsg), %g1
58:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(compat_sys_sendmsg), %g0
59:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
60:	ldswa	[%o1 + 0x0] %asi, %o0
	sethi	%hi(compat_sys_recvmsg), %g1
61:	lduwa	[%o1 + 0x8] %asi, %o2
	jmpl	%g1 + %lo(compat_sys_recvmsg), %g0
62:	 lduwa	[%o1 + 0x4] %asi, %o1
	nop; nop; nop
__socketcall_table_end:

do_einval:
	retl
	 mov	-EINVAL, %o0
do_efault:
	retl
	 mov	-EFAULT, %o0

	.section	__ex_table
	.align	4
	.word	1b, __retl_efault, 2b, __retl_efault
	.word	3b, __retl_efault, 4b, __retl_efault
	.word	5b, __retl_efault, 6b, __retl_efault
	.word	7b, __retl_efault, 8b, __retl_efault
	.word	9b, __retl_efault, 10b, __retl_efault
	.word	11b, __retl_efault, 12b, __retl_efault
	.word	13b, __retl_efault, 14b, __retl_efault
	.word	15b, __retl_efault, 16b, __retl_efault
	.word	17b, __retl_efault, 18b, __retl_efault
	.word	19b, __retl_efault, 20b, __retl_efault
	.word	21b, __retl_efault, 22b, __retl_efault
	.word	23b, __retl_efault, 24b, __retl_efault
	.word	25b, __retl_efault, 26b, __retl_efault
	.word	27b, __retl_efault, 28b, __retl_efault
	.word	29b, __retl_efault, 30b, __retl_efault
	.word	31b, __retl_efault, 32b, __retl_efault
	.word	33b, __retl_efault, 34b, __retl_efault
	.word	35b, __retl_efault, 36b, __retl_efault
	.word	37b, __retl_efault, 38b, __retl_efault
	.word	39b, __retl_efault, 40b, __retl_efault
	.word	41b, __retl_efault, 42b, __retl_efault
	.word	43b, __retl_efault, 44b, __retl_efault
	.word	45b, __retl_efault, 46b, __retl_efault
	.word	47b, __retl_efault, 48b, __retl_efault
	.word	49b, __retl_efault, 50b, __retl_efault
	.word	51b, __retl_efault, 52b, __retl_efault
	.word	53b, __retl_efault, 54b, __retl_efault
	.word	55b, __retl_efault, 56b, __retl_efault
	.word	57b, __retl_efault, 58b, __retl_efault
	.word	59b, __retl_efault, 60b, __retl_efault
	.word	61b, __retl_efault, 62b, __retl_efault
	.previous
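Each 32-byte table entry above loads the 32-bit argument words of the compat socketcall from user space (ldswa/lduwa through %asi) and tail-calls the 64-bit syscall; the numbered labels feed the __ex_table so a faulting load returns -EFAULT via __retl_efault. A rough, hedged C analogue of one entry follows; sys_socket_stub and read_user_s32 are local stand-ins for the real syscall and the user-space load, not kernel APIs.

#include <errno.h>
#include <stdint.h>

/* Stand-in for sys_socket() and for a user load that can fault. */
static long sys_socket_stub(int family, int type, int protocol)
{ (void)family; (void)type; (void)protocol; return 0; }
static int read_user_s32(const uint32_t *uptr, int32_t *val)
{ *val = (int32_t)*uptr; return 0; /* -EFAULT in the kernel on a fault */ }

/* C sketch of do_sys_socket: fetch three 32-bit args from the user
 * argument block at args[0], args[2], args[1] order per the asm and
 * forward them; a faulting load aborts with -EFAULT.
 */
static long compat_socketcall_socket(const uint32_t *compat_args)
{
	int32_t family, type, protocol;

	if (read_user_s32(&compat_args[0], &family) ||
	    read_user_s32(&compat_args[1], &type) ||
	    read_user_s32(&compat_args[2], &protocol))
		return -EFAULT;

	return sys_socket_stub(family, type, protocol);
}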
arch/sparc64/kernel/traps.c
@@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 	if (regs->tstate & TSTATE_PRIV) {
 		/* Test if this comes from uaccess places. */
-		unsigned long fixup;
-		unsigned long g2 = regs->u_regs[UREG_G2];
+		const struct exception_table_entry *entry;
 
-		if ((fixup = search_extables_range(regs->tpc, &g2))) {
-			/* Ouch, somebody is trying ugly VM hole tricks on us... */
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			/* Ouch, somebody is trying VM hole tricks on us... */
 #ifdef DEBUG_EXCEPTIONS
 			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
-			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
-			       "g2<%016lx>\n", regs->tpc, fixup, g2);
+			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
+			       regs->tpc, entry->fixup);
 #endif
-			regs->tpc = fixup;
+			regs->tpc = entry->fixup;
 			regs->tnpc = regs->tpc + 4;
-			regs->u_regs[UREG_G2] = g2;
 			return;
 		}
 		/* Shit... */
@@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void)
 	ecache_flush_size = (2 * largest_size);
 	ecache_flush_linesize = smallest_linesize;
 
-	/* Discover a physically contiguous chunk of physical
-	 * memory in 'sp_banks' of size ecache_flush_size calculated
-	 * above.  Store the physical base of this area at
-	 * ecache_flush_physbase.
-	 */
-	for (node = 0; ; node++) {
-		if (sp_banks[node].num_bytes == 0)
-			break;
-		if (sp_banks[node].num_bytes >= ecache_flush_size) {
-			ecache_flush_physbase = sp_banks[node].base_addr;
-			break;
-		}
-	}
+	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
 
 	/* Note: Zero would be a valid value of ecache_flush_physbase so
 	 * don't use that as the success test. :-)
 	 */
-	if (sp_banks[node].num_bytes == 0) {
+	if (ecache_flush_physbase == ~0UL) {
 		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
 			    "contiguous physical memory.\n", ecache_flush_size);
 		prom_halt();
 	}
@@ -1346,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr)
 /* Return non-zero if PADDR is a valid physical memory address. */
 static int cheetah_check_main_memory(unsigned long paddr)
 {
-	int i;
+	unsigned long vaddr = PAGE_OFFSET + paddr;
 
-	for (i = 0; ; i++) {
-		if (sp_banks[i].num_bytes == 0)
-			break;
-		if (paddr >= sp_banks[i].base_addr &&
-		    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
-			return 1;
-	}
-	return 0;
+	if (vaddr > (unsigned long) high_memory)
+		return 0;
+
+	return kern_addr_valid(vaddr);
 }
 
 void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
@@ -1610,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
 			/* OK, usermode access. */
 			recoverable = 1;
 		} else {
-			unsigned long g2 = regs->u_regs[UREG_G2];
-			unsigned long fixup = search_extables_range(regs->tpc, &g2);
+			const struct exception_table_entry *entry;
 
-			if (fixup != 0UL) {
+			entry = search_exception_tables(regs->tpc);
+			if (entry) {
 				/* OK, kernel access to userspace. */
 				recoverable = 1;
@@ -1632,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
 				 * recoverable condition.
 				 */
 				if (recoverable) {
-					regs->tpc = fixup;
+					regs->tpc = entry->fixup;
 					regs->tnpc = regs->tpc + 4;
-					regs->u_regs[UREG_G2] = g2;
 				}
 			}
 		}
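Every converted caller in this merge now follows the same generic fixup pattern: look the trapping PC up in the exception table and, if an entry exists, resume at its fixup address. A self-contained C sketch of that control flow follows; the tiny linear lookup is only a stand-in for the kernel's search_exception_tables(), and pt_regs is reduced to the two fields used here.

#include <stddef.h>

struct exception_table_entry { unsigned long insn, fixup; };
struct pt_regs { unsigned long tpc, tnpc; };

/* Stand-in for search_exception_tables(): the kernel searches the
 * sorted __ex_table; a linear scan is enough for the sketch.
 */
static const struct exception_table_entry ex_table[] = {
	{ 0x1000, 0x9000 },	/* faulting insn -> fixup stub (e.g. __retl_efault) */
};

static const struct exception_table_entry *lookup(unsigned long pc)
{
	for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == pc)
			return &ex_table[i];
	return NULL;
}

/* Returns 1 if the fault was fixed up and execution can resume. */
static int try_exception_fixup(struct pt_regs *regs)
{
	const struct exception_table_entry *entry = lookup(regs->tpc);

	if (!entry)
		return 0;
	regs->tpc = entry->fixup;	/* continue at the fixup stub */
	regs->tnpc = regs->tpc + 4;
	return 1;
}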
arch/sparc64/kernel/una_asm.S
@@ -6,13 +6,6 @@
 	.text
 
-kernel_unaligned_trap_fault:
-	call	kernel_mna_trap_fault
-	 nop
-	retl
-	 nop
-	.size	kern_unaligned_trap_fault, .-kern_unaligned_trap_fault
-
 	.globl	__do_int_store
 __do_int_store:
 	rd	%asi, %o4
@@ -51,24 +44,24 @@ __do_int_store:
 0:
 	wr	%o4, 0x0, %asi
 	retl
-	 nop
+	 mov	0, %o0
 	.size	__do_int_store, .-__do_int_store
 
 	.section	__ex_table
-	.word	4b, kernel_unaligned_trap_fault, 5b, kernel_unaligned_trap_fault
-	.word	6b, kernel_unaligned_trap_fault, 7b, kernel_unaligned_trap_fault
-	.word	8b, kernel_unaligned_trap_fault, 9b, kernel_unaligned_trap_fault
-	.word	10b, kernel_unaligned_trap_fault, 11b, kernel_unaligned_trap_fault
-	.word	12b, kernel_unaligned_trap_fault, 13b, kernel_unaligned_trap_fault
-	.word	14b, kernel_unaligned_trap_fault, 15b, kernel_unaligned_trap_fault
-	.word	16b, kernel_unaligned_trap_fault, 17b, kernel_unaligned_trap_fault
+	.word	4b, __retl_efault, 5b, __retl_efault
+	.word	6b, __retl_efault, 7b, __retl_efault
+	.word	8b, __retl_efault, 9b, __retl_efault
+	.word	10b, __retl_efault, 11b, __retl_efault
+	.word	12b, __retl_efault, 13b, __retl_efault
+	.word	14b, __retl_efault, 15b, __retl_efault
+	.word	16b, __retl_efault, 17b, __retl_efault
 	.previous
 
 	.globl	do_int_load
@@ -133,21 +126,21 @@ do_int_load:
 0:
 	wr	%o5, 0x0, %asi
 	retl
-	 nop
+	 mov	0, %o0
 	.size	__do_int_load, .-__do_int_load
 
 	.section	__ex_table
-	.word	4b, kernel_unaligned_trap_fault, 5b, kernel_unaligned_trap_fault
-	.word	6b, kernel_unaligned_trap_fault, 7b, kernel_unaligned_trap_fault
-	.word	8b, kernel_unaligned_trap_fault, 9b, kernel_unaligned_trap_fault
-	.word	10b, kernel_unaligned_trap_fault, 11b, kernel_unaligned_trap_fault
-	.word	12b, kernel_unaligned_trap_fault, 13b, kernel_unaligned_trap_fault
-	.word	14b, kernel_unaligned_trap_fault, 15b, kernel_unaligned_trap_fault
-	.word	16b, kernel_unaligned_trap_fault
+	.word	4b, __retl_efault, 5b, __retl_efault
+	.word	6b, __retl_efault, 7b, __retl_efault
+	.word	8b, __retl_efault, 9b, __retl_efault
+	.word	10b, __retl_efault, 11b, __retl_efault
+	.word	12b, __retl_efault, 13b, __retl_efault
+	.word	14b, __retl_efault, 15b, __retl_efault
+	.word	16b, __retl_efault
 	.previous
arch/sparc64/kernel/unaligned.c
@@ -180,14 +180,14 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
 	die_if_kernel(str, regs);
 }
 
-extern void do_int_load(unsigned long *dest_reg, int size,
-			unsigned long *saddr, int is_signed, int asi);
+extern int do_int_load(unsigned long *dest_reg, int size,
+		       unsigned long *saddr, int is_signed, int asi);
 
-extern void __do_int_store(unsigned long *dst_addr, int size,
-			   unsigned long src_val, int asi);
+extern int __do_int_store(unsigned long *dst_addr, int size,
+			  unsigned long src_val, int asi);
 
-static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
-				struct pt_regs *regs, int asi, int orig_asi)
+static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
+			       struct pt_regs *regs, int asi, int orig_asi)
 {
 	unsigned long zero = 0;
 	unsigned long *src_val_p = &zero;
@@ -219,7 +219,7 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
 			break;
 		};
 	}
-	__do_int_store(dst_addr, size, src_val, asi);
+	return __do_int_store(dst_addr, size, src_val, asi);
 }
 
 static inline void advance(struct pt_regs *regs)
@@ -242,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn)
 	return !floating_point_load_or_store_p(insn);
 }
 
-void kernel_mna_trap_fault(void)
+static void kernel_mna_trap_fault(void)
 {
 	struct pt_regs *regs = current_thread_info()->kern_una_regs;
 	unsigned int insn = current_thread_info()->kern_una_insn;
-	unsigned long g2 = regs->u_regs[UREG_G2];
-	unsigned long fixup = search_extables_range(regs->tpc, &g2);
+	const struct exception_table_entry *entry;
 
-	if (!fixup) {
+	entry = search_exception_tables(regs->tpc);
+	if (!entry) {
 		unsigned long address;
 
 		address = compute_effective_address(regs, insn,
@@ -270,9 +270,8 @@ void kernel_mna_trap_fault(void)
 		die_if_kernel("Oops", regs);
 		/* Not reached */
 	}
-	regs->tpc = fixup;
+	regs->tpc = entry->fixup;
 	regs->tnpc = regs->tpc + 4;
-	regs->u_regs[UREG_G2] = g2;
 
 	regs->tstate &= ~TSTATE_ASI;
 	regs->tstate |= (ASI_AIUS << 24UL);
@@ -295,7 +294,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
 		kernel_mna_trap_fault();
 	} else {
 		unsigned long addr, *reg_addr;
-		int orig_asi, asi;
+		int orig_asi, asi, err;
 
 		addr = compute_effective_address(regs, insn,
 						 ((insn >> 25) & 0x1f));
@@ -320,9 +319,10 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
 		switch (dir) {
 		case load:
 			reg_addr = fetch_reg_addr(((insn >> 25) & 0x1f), regs);
-			do_int_load(reg_addr, size, (unsigned long *) addr,
-				    decode_signedness(insn), asi);
-			if (unlikely(asi != orig_asi)) {
+			err = do_int_load(reg_addr, size,
+					  (unsigned long *) addr,
+					  decode_signedness(insn), asi);
+			if (likely(!err) && unlikely(asi != orig_asi)) {
 				unsigned long val_in = *reg_addr;
 				switch (size) {
 				case 2:
@@ -344,16 +344,19 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
 			break;
 
 		case store:
-			do_int_store(((insn >> 25) & 0x1f), size,
-				     (unsigned long *) addr, regs,
-				     asi, orig_asi);
+			err = do_int_store(((insn >> 25) & 0x1f), size,
+					   (unsigned long *) addr, regs,
+					   asi, orig_asi);
 			break;
 
 		default:
 			panic("Impossible kernel unaligned trap.");
 			/* Not reached... */
 		}
-		advance(regs);
+		if (unlikely(err))
+			kernel_mna_trap_fault();
+		else
+			advance(regs);
 	}
 }
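With this change the assembly emulation helpers report success or -EFAULT instead of branching to a private fault routine, so the trap handler decides whether to advance past the unaligned instruction or run the fault path. A small hedged C sketch of that error-propagation shape; the helpers below are stubs, not the kernel routines.

/* Stubs: 0 on success, nonzero if the emulated access faulted (in the
 * kernel the __ex_table entry makes the asm helper return -EFAULT).
 */
static int emulate_load(void)  { return 0; }
static int emulate_store(void) { return -14; /* -EFAULT */ }

static void advance_pc(void)     { /* skip the emulated instruction */ }
static void mna_trap_fault(void) { /* exception-table fixup, or die */ }

static void handle_unaligned(int is_store)
{
	int err = is_store ? emulate_store() : emulate_load();

	if (err)		/* the access itself faulted */
		mna_trap_fault();
	else			/* emulation succeeded, move past the insn */
		advance_pc();
}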
arch/sparc64/lib/strncpy_from_user.S
@@ -125,15 +125,11 @@ __strncpy_from_user:
 	 add	%o2, %o3, %o0
 	.size	__strncpy_from_user, .-__strncpy_from_user
 
-	.section	.fixup,#alloc,#execinstr
-	.align	4
-4:	retl
-	 mov	-EFAULT, %o0
-
 	.section	__ex_table,#alloc
 	.align	4
-	.word	60b, 4b
-	.word	61b, 4b
-	.word	62b, 4b
-	.word	63b, 4b
-	.word	64b, 4b
+	.word	60b, __retl_efault
+	.word	61b, __retl_efault
+	.word	62b, __retl_efault
+	.word	63b, __retl_efault
+	.word	64b, __retl_efault
 	.previous
arch/sparc64/lib/user_fixup.c
@@ -11,61 +11,56 @@
 /* Calculating the exact fault address when using
  * block loads and stores can be very complicated.
  *
  * Instead of trying to be clever and handling all
  * of the cases, just fix things up simply here.
  */
 
-unsigned long copy_from_user_fixup(void *to, const void __user *from,
-				   unsigned long size)
+static unsigned long compute_size(unsigned long start, unsigned long size,
+				  unsigned long *offset)
 {
-	char *dst = to;
-	const char __user *src = from;
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long end = start + size;
 
-	while (size) {
-		if (__get_user(*dst, src))
-			break;
-		dst++;
-		src++;
-		size--;
+	if (fault_addr < start || fault_addr >= end) {
+		*offset = 0;
+	} else {
+		*offset = start - fault_addr;
+		size = end - fault_addr;
 	}
+	return size;
+}
 
-	if (size)
-		memset(dst, 0, size);
+unsigned long copy_from_user_fixup(void *to, const void __user *from,
+				   unsigned long size)
+{
+	unsigned long offset;
+
+	size = compute_size((unsigned long) from, size, &offset);
+	if (likely(size))
+		memset(to + offset, 0, size);
 
 	return size;
 }
 
 unsigned long copy_to_user_fixup(void __user *to, const void *from,
 				 unsigned long size)
 {
-	char __user *dst = to;
-	const char *src = from;
-
-	while (size) {
-		if (__put_user(*src, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	unsigned long offset;
 
-	return size;
+	return compute_size((unsigned long) to, size, &offset);
 }
 
 unsigned long copy_in_user_fixup(void __user *to, void __user *from,
 				 unsigned long size)
 {
-	char __user *dst = to;
-	char __user *src = from;
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long start = (unsigned long) to;
+	unsigned long end = start + size;
 
-	while (size) {
-		char tmp;
-
-		if (__get_user(tmp, src))
-			break;
-		if (__put_user(tmp, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
 
+	start = (unsigned long) from;
+	end = start + size;
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
 
 	return size;
 }
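The rewritten fixups no longer retry the copy byte by byte; they derive the uncopied remainder from the fault address recorded at trap time. A simplified user-space sketch of just that remaining-length arithmetic follows; the fault address is passed in explicitly here instead of being read from current_thread_info(), and the offset bookkeeping of the kernel version is left out.

#include <stdio.h>

/* How many bytes of [start, start + size) are still uncopied once an
 * access at fault_addr has faulted.  If the fault lies outside the
 * range, the whole copy is reported as uncopied; otherwise everything
 * from fault_addr to the end of the range is.
 */
static unsigned long bytes_left_after_fault(unsigned long start,
					    unsigned long size,
					    unsigned long fault_addr)
{
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end)
		return size;
	return end - fault_addr;
}

int main(void)
{
	/* A 256-byte copy from 0x1000 that faulted at 0x1040: 192 left. */
	printf("%lu\n", bytes_left_after_fault(0x1000, 256, 0x1040));
	return 0;
}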
arch/sparc64/mm/Makefile
@@ -5,6 +5,6 @@
 EXTRA_AFLAGS := -ansi
 EXTRA_CFLAGS := -Werror
 
-obj-y    := ultra.o tlb.o fault.o init.o generic.o extable.o
+obj-y    := ultra.o tlb.o fault.o init.o generic.o
 
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
arch/sparc64/mm/extable.c (deleted file, mode 100644)

/*
 * linux/arch/sparc64/mm/extable.c
 */

#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>

extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[];

void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
}

/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *start,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	const struct exception_table_entry *walk;

	/* Single insn entries are encoded as:
	 *	word 1:	insn address
	 *	word 2:	fixup code address
	 *
	 * Range entries are encoded as:
	 *	word 1: first insn address
	 *	word 2: 0
	 *	word 3: last insn address + 4 bytes
	 *	word 4: fixup code address
	 *
	 * See asm/uaccess.h for more details.
	 */

	/* 1. Try to find an exact match. */
	for (walk = start; walk <= last; walk++) {
		if (walk->fixup == 0) {
			/* A range entry, skip both parts. */
			walk++;
			continue;
		}

		if (walk->insn == value)
			return walk;
	}

	/* 2. Try to find a range match. */
	for (walk = start; walk <= (last - 1); walk++) {
		if (walk->fixup)
			continue;

		if (walk[0].insn <= value && walk[1].insn > value)
			return walk;

		walk++;
	}

	return NULL;
}

/* Special extable search, which handles ranges.  Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(addr);
	if (!entry)
		return 0;

	/* Inside range?  Fix g2 and return correct fixup */
	if (!entry->fixup) {
		*g2 = (addr - entry->insn) / 4;
		return (entry + 1)->fixup;
	}

	return entry->fixup;
}
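The removed searcher had to walk an unsorted table and special-case range entries. With those gone, sparc64 can rely on the generic code keeping __ex_table sorted so a lookup is an ordinary binary search. The sketch below is illustrative only, not the kernel's implementation.

#include <stdio.h>

struct extable_entry { unsigned int insn, fixup; };

/* Binary search over a table sorted by insn address; every entry now
 * names exactly one faulting instruction and its fixup target.
 */
static const struct extable_entry *
find_fixup(const struct extable_entry *tbl, int n, unsigned int pc)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (tbl[mid].insn == pc)
			return &tbl[mid];
		if (tbl[mid].insn < pc)
			lo = mid + 1;
		else
			hi = mid - 1;
	}
	return NULL;	/* no fixup: a kernel-mode fault here is fatal */
}

int main(void)
{
	static const struct extable_entry tbl[] = {
		{ 0x1000, 0x9000 }, { 0x1010, 0x9000 }, { 0x2020, 0x9004 },
	};
	const struct extable_entry *e = find_fixup(tbl, 3, 0x1010);

	printf("fixup at %#x\n", e ? e->fixup : 0u);
	return 0;
}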
arch/sparc64/mm/fault.c
@@ -32,8 +32,6 @@
 #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
 
-extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
-
 /*
  * To debug kernel to catch accesses to certain virtual/physical addresses.
  * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
 		     : "memory");
 }
 
-/* Nice, simple, prom library does all the sweating for us. ;) */
-unsigned long __init prom_probe_memory (void)
-{
-	register struct linux_mlist_p1275 *mlist;
-	register unsigned long bytes, base_paddr, tally;
-	register int i;
-
-	i = 0;
-	mlist = *prom_meminfo()->p1275_available;
-	bytes = tally = mlist->num_bytes;
-	base_paddr = mlist->start_adr;
-
-	sp_banks[0].base_addr = base_paddr;
-	sp_banks[0].num_bytes = bytes;
-
-	while (mlist->theres_more != (void *) 0) {
-		i++;
-		mlist = mlist->theres_more;
-		bytes = mlist->num_bytes;
-		tally += bytes;
-		if (i >= SPARC_PHYS_BANKS-1) {
-			printk ("The machine has more banks than "
-				"this kernel can support\n"
-				"Increase the SPARC_PHYS_BANKS "
-				"setting (currently %d)\n",
-				SPARC_PHYS_BANKS);
-			i = SPARC_PHYS_BANKS-1;
-			break;
-		}
-
-		sp_banks[i].base_addr = mlist->start_adr;
-		sp_banks[i].num_bytes = mlist->num_bytes;
-	}
-
-	i++;
-	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
-	sp_banks[i].num_bytes = 0;
-
-	/* Now mask all bank sizes on a page boundary, it is all we can
-	 * use anyways.
-	 */
-	for (i = 0; sp_banks[i].num_bytes != 0; i++)
-		sp_banks[i].num_bytes &= PAGE_MASK;
-
-	return tally;
-}
-
 static void __kprobes unhandled_fault(unsigned long address,
 				      struct task_struct *tsk,
 				      struct pt_regs *regs)
@@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
 static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
 			    unsigned int insn, unsigned long address)
 {
-	unsigned long g2;
 	unsigned char asi = ASI_P;
 
 	if ((!insn) && (regs->tstate & TSTATE_PRIV))
@@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
 		}
 	}
 
-	g2 = regs->u_regs[UREG_G2];
-
 	/* Is this in ex_table? */
 	if (regs->tstate & TSTATE_PRIV) {
-		unsigned long fixup;
+		const struct exception_table_entry *entry;
 
 		if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
 			if (insn & 0x2000)
@@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
 		/* Look in asi.h: All _S asis have LS bit set */
 		if ((asi & 0x1) &&
-		    (fixup = search_extables_range(regs->tpc, &g2))) {
-			regs->tpc = fixup;
+		    (entry = search_exception_tables(regs->tpc))) {
+			regs->tpc = entry->fixup;
 			regs->tnpc = regs->tpc + 4;
-			regs->u_regs[UREG_G2] = g2;
 			return;
 		}
 	} else {
@@ -461,7 +408,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	}
 	up_read(&mm->mmap_sem);
-	goto fault_done;
+	return;
 
 	/*
 	 * Something tried to access memory that isn't in our memory map..
@@ -473,8 +420,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
-
-	goto fault_done;
+	return;
 
 	/*
 	 * We ran out of memory, or some other thing happened to us that made
@@ -505,9 +451,4 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	/* Kernel mode? Handle exceptions or die */
 	if (regs->tstate & TSTATE_PRIV)
 		goto handle_kernel_fault;
-
-fault_done:
-	/* These values are no longer needed, clear them. */
-	set_thread_fault_code(0);
-	current_thread_info()->fault_address = 0;
 }
arch/sparc64/mm/init.c
@@ -21,6 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/kprobes.h>
 #include <linux/cache.h>
+#include <linux/sort.h>
 
 #include <asm/head.h>
 #include <asm/system.h>
@@ -41,7 +42,72 @@
 
 extern void device_scan(void);
 
-struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+#define MAX_BANKS	32
+
+static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
+static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+static int pavail_ents __initdata;
+static int pavail_rescan_ents __initdata;
+
+static int cmp_p64(const void *a, const void *b)
+{
+	const struct linux_prom64_registers *x = a, *y = b;
+
+	if (x->phys_addr > y->phys_addr)
+		return 1;
+	if (x->phys_addr < y->phys_addr)
+		return -1;
+	return 0;
+}
+
+static void __init read_obp_memory(const char *property,
+				   struct linux_prom64_registers *regs,
+				   int *num_ents)
+{
+	int node = prom_finddevice("/memory");
+	int prop_size = prom_getproplen(node, property);
+	int ents, ret, i;
+
+	ents = prop_size / sizeof(struct linux_prom64_registers);
+	if (ents > MAX_BANKS) {
+		prom_printf("The machine has more %s property entries than "
+			    "this kernel can support (%d).\n",
+			    property, MAX_BANKS);
+		prom_halt();
+	}
+
+	ret = prom_getproperty(node, property, (char *) regs, prop_size);
+	if (ret == -1) {
+		prom_printf("Couldn't get %s property from /memory.\n");
+		prom_halt();
+	}
+
+	*num_ents = ents;
+
+	/* Sanitize what we got from the firmware, by page aligning
+	 * everything.
+	 */
+	for (i = 0; i < ents; i++) {
+		unsigned long base, size;
+
+		base = regs[i].phys_addr;
+		size = regs[i].reg_size;
+
+		size &= PAGE_MASK;
+		if (base & ~PAGE_MASK) {
+			unsigned long new_base = PAGE_ALIGN(base);
+
+			size -= new_base - base;
+			if ((long) size < 0L)
+				size = 0UL;
+			base = new_base;
+		}
+		regs[i].phys_addr = base;
+		regs[i].reg_size = size;
+	}
+	sort(regs, ents, sizeof(struct linux_prom64_registers),
+	     cmp_p64, NULL);
+}
 
 unsigned long *sparc64_valid_addr_bitmap __read_mostly;
@@ -1206,14 +1272,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	int i;
 
 #ifdef CONFIG_DEBUG_BOOTMEM
-	prom_printf("bootmem_init: Scan sp_banks, ");
+	prom_printf("bootmem_init: Scan pavail, ");
 #endif
 
 	bytes_avail = 0UL;
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-		end_of_phys_memory = sp_banks[i].base_addr + sp_banks[i].num_bytes;
-		bytes_avail += sp_banks[i].num_bytes;
+	for (i = 0; i < pavail_ents; i++) {
+		end_of_phys_memory = pavail[i].phys_addr + pavail[i].reg_size;
+		bytes_avail += pavail[i].reg_size;
 		if (cmdline_memory_size) {
 			if (bytes_avail > cmdline_memory_size) {
 				unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1221,12 +1287,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 				bytes_avail -= slack;
 				end_of_phys_memory -= slack;
 
-				sp_banks[i].num_bytes -= slack;
-				if (sp_banks[i].num_bytes == 0) {
-					sp_banks[i].base_addr = 0xdeadbeef;
+				pavail[i].reg_size -= slack;
+				if ((long)pavail[i].reg_size <= 0L) {
+					pavail[i].phys_addr = 0xdeadbeefUL;
+					pavail[i].reg_size = 0UL;
+					pavail_ents = i;
 				} else {
-					sp_banks[i+1].num_bytes = 0;
-					sp_banks[i+1].base_addr = 0xdeadbeef;
+					pavail[i+1].reg_size = 0Ul;
+					pavail[i+1].phys_addr = 0xdeadbeefUL;
+					pavail_ents = i + 1;
 				}
 				break;
 			}
@@ -1280,12 +1349,12 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	/* Now register the available physical memory with the
 	 * allocator.
 	 */
-	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 #ifdef CONFIG_DEBUG_BOOTMEM
-		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
-			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+			    i, pavail[i].phys_addr, pavail[i].reg_size);
 #endif
-		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
 	}
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1334,7 +1403,7 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
 	unsigned long alloc_bytes = 0UL;
 
 	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
-		prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
+		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
 			    vstart, vend);
 		prom_halt();
 	}
@@ -1381,23 +1450,24 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
 	return alloc_bytes;
 }
 
-extern struct linux_mlist_p1275 *prom_ptot_ptr;
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
 extern unsigned int kvmap_linear_patch[1];
 
 static void __init kernel_physical_mapping_init(void)
 {
-	struct linux_mlist_p1275 *p = prom_ptot_ptr;
-	unsigned long mem_alloced = 0UL;
+	unsigned long i, mem_alloced = 0UL;
+
+	read_obp_memory("reg", &pall[0], &pall_ents);
 
-	while (p) {
+	for (i = 0; i < pall_ents; i++) {
 		unsigned long phys_start, phys_end;
 
-		phys_start = p->start_adr;
-		phys_end = phys_start + p->num_bytes;
+		phys_start = pall[i].phys_addr;
+		phys_end = phys_start + pall[i].reg_size;
+
 		mem_alloced += kernel_map_range(phys_start, phys_end,
 						PAGE_KERNEL);
-
-		p = p->theres_more;
 	}
 
 	printk("Allocated %ld bytes for kernel page tables.\n",
@@ -1425,6 +1495,18 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 }
 #endif
 
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < pavail_ents; i++) {
+		if (pavail[i].reg_size >= size)
+			return pavail[i].phys_addr;
+	}
+
+	return ~0UL;
+}
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
@@ -1435,7 +1517,19 @@ pgd_t swapper_pg_dir[2048];
 void __init paging_init(void)
 {
 	unsigned long end_pfn, pages_avail, shift;
-	unsigned long real_end;
+	unsigned long real_end, i;
+
+	/* Find available physical memory... */
+	read_obp_memory("available", &pavail[0], &pavail_ents);
+
+	phys_base = 0xffffffffffffffffUL;
+	for (i = 0; i < pavail_ents; i++)
+		phys_base = min(phys_base, pavail[i].phys_addr);
+
+	pfn_base = phys_base >> PAGE_SHIFT;
+
+	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
 	set_bit(0, mmu_context_bmap);
@@ -1507,128 +1601,35 @@ void __init paging_init(void)
 	device_scan();
 }
 
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence.  My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
-	int swapi = 0;
-	int i, mitr;
-	unsigned long tmpaddr, tmpsize;
-	unsigned long lowest;
-
-	for (i = 0; thislist[i].theres_more != 0; i++) {
-		lowest = thislist[i].start_adr;
-		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
-			if (thislist[mitr].start_adr < lowest) {
-				lowest = thislist[mitr].start_adr;
-				swapi = mitr;
-			}
-		if (lowest == thislist[i].start_adr)
-			continue;
-		tmpaddr = thislist[swapi].start_adr;
-		tmpsize = thislist[swapi].num_bytes;
-		for (mitr = swapi; mitr > i; mitr--) {
-			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
-			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
-		}
-		thislist[i].start_adr = tmpaddr;
-		thislist[i].num_bytes = tmpsize;
-	}
-}
-
-void __init rescan_sp_banks(void)
-{
-	struct linux_prom64_registers memlist[64];
-	struct linux_mlist_p1275 avail[64], *mlist;
-	unsigned long bytes, base_paddr;
-	int num_regs, node = prom_finddevice("/memory");
-	int i;
-
-	num_regs = prom_getproperty(node, "available",
-				    (char *) memlist, sizeof(memlist));
-	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
-	for (i = 0; i < num_regs; i++) {
-		avail[i].start_adr = memlist[i].phys_addr;
-		avail[i].num_bytes = memlist[i].reg_size;
-		avail[i].theres_more = &avail[i + 1];
-	}
-	avail[i - 1].theres_more = NULL;
-	sort_memlist(avail);
-
-	mlist = &avail[0];
-	i = 0;
-	bytes = mlist->num_bytes;
-	base_paddr = mlist->start_adr;
-
-	sp_banks[0].base_addr = base_paddr;
-	sp_banks[0].num_bytes = bytes;
-
-	while (mlist->theres_more != NULL){
-		i++;
-		mlist = mlist->theres_more;
-		bytes = mlist->num_bytes;
-		if (i >= SPARC_PHYS_BANKS-1) {
-			printk ("The machine has more banks than "
-				"this kernel can support\n"
-				"Increase the SPARC_PHYS_BANKS "
-				"setting (currently %d)\n",
-				SPARC_PHYS_BANKS);
-			i = SPARC_PHYS_BANKS-1;
-			break;
-		}
-
-		sp_banks[i].base_addr = mlist->start_adr;
-		sp_banks[i].num_bytes = mlist->num_bytes;
-	}
-
-	i++;
-	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
-	sp_banks[i].num_bytes = 0;
-
-	for (i = 0; sp_banks[i].num_bytes != 0; i++)
-		sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
 static void __init taint_real_pages(void)
 {
-	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
 	int i;
 
-	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
-		saved_sp_banks[i].base_addr = sp_banks[i].base_addr;
-		saved_sp_banks[i].num_bytes = sp_banks[i].num_bytes;
-	}
-
-	rescan_sp_banks();
+	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
 
-	/* Find changes discovered in the sp_bank rescan and
+	/* Find changes discovered in the physmem available rescan and
 	 * reserve the lost portions in the bootmem maps.
 	 */
-	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+	for (i = 0; i < pavail_ents; i++) {
 		unsigned long old_start, old_end;
 
-		old_start = saved_sp_banks[i].base_addr;
-		old_end = old_start + saved_sp_banks[i].num_bytes;
+		old_start = pavail[i].phys_addr;
+		old_end = old_start + pavail[i].reg_size;
 		while (old_start < old_end) {
 			int n;
 
-			for (n = 0; sp_banks[n].num_bytes; n++) {
+			for (n = 0; pavail_rescan_ents; n++) {
 				unsigned long new_start, new_end;
 
-				new_start = sp_banks[n].base_addr;
-				new_end = new_start + sp_banks[n].num_bytes;
+				new_start = pavail_rescan[n].phys_addr;
+				new_end = new_start + pavail_rescan[n].reg_size;
 
 				if (new_start <= old_start &&
 				    new_end >= (old_start + PAGE_SIZE)) {
 					set_bit(old_start >> 22,
 						sparc64_valid_addr_bitmap);
 					goto do_next_page;
 				}
 			}
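read_obp_memory() above trusts nothing from the firmware: every reported range is clipped to whole pages and the array is sorted by physical address before any other code consumes it. The following is a user-space sketch of that sanitising pass over a hypothetical range array; PAGE_SIZE is fixed at 8K here as on sparc64, and qsort() stands in for the kernel's sort().

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE  8192UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct mem_range { unsigned long phys_addr, reg_size; };

static int cmp_range(const void *a, const void *b)
{
	const struct mem_range *x = a, *y = b;

	return (x->phys_addr > y->phys_addr) - (x->phys_addr < y->phys_addr);
}

/* Page-align each entry (round the base up, shrink the size to whole
 * pages) and sort the array by base address.
 */
static void sanitize_ranges(struct mem_range *r, int n)
{
	for (int i = 0; i < n; i++) {
		unsigned long base = r[i].phys_addr;
		unsigned long size = r[i].reg_size & PAGE_MASK;

		if (base & ~PAGE_MASK) {
			unsigned long new_base = (base + PAGE_SIZE - 1) & PAGE_MASK;
			unsigned long skipped = new_base - base;

			size = (size > skipped) ? size - skipped : 0;
			base = new_base;
		}
		r[i].phys_addr = base;
		r[i].reg_size = size;
	}
	qsort(r, n, sizeof(*r), cmp_range);
}

int main(void)
{
	struct mem_range r[] = { { 0x40001000, 0x100000 }, { 0x10000000, 0x8000 } };

	sanitize_ranges(r, 2);
	for (int i = 0; i < 2; i++)
		printf("%#lx + %#lx\n", r[i].phys_addr, r[i].reg_size);
	return 0;
}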
arch/sparc64/prom/Makefile
@@ -6,5 +6,5 @@
 EXTRA_AFLAGS := -ansi
 EXTRA_CFLAGS := -Werror
 
-lib-y   := bootstr.o devops.o init.o memory.o misc.o \
+lib-y   := bootstr.o devops.o init.o misc.o \
 	   tree.o console.o printf.o p1275.o cif.o
arch/sparc64/prom/init.c
@@ -27,7 +27,6 @@ int prom_chosen_node;
 * failure.  It gets passed the pointer to the PROM vector.
 */
 
-extern void prom_meminit(void);
 extern void prom_cif_init(void *, void *);
 
 void __init prom_init(void *cif_handler, void *cif_stack)
@@ -90,8 +89,6 @@ void __init prom_init(void *cif_handler, void *cif_stack)
 
 	printk("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
 
-	prom_meminit();
-
 	/* Initialization successful. */
 	return;
arch/sparc64/prom/memory.c (deleted file, mode 100644)

/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $
 * memory.c: Prom routine for acquiring various bits of information
 *           about RAM on the machine, both virtual and physical.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>

#include <asm/openprom.h>
#include <asm/oplib.h>

/* This routine, for consistency, returns the ram parameters in the
 * V0 prom memory descriptor format.  I choose this format because I
 * think it was the easiest to work with.  I feel the religious
 * arguments now... ;) Also, I return the linked lists sorted to
 * prevent paging_init() upset stomach as I have not yet written
 * the pepto-bismol kernel module yet.
 */

struct linux_prom64_registers prom_reg_memlist[64];
struct linux_prom64_registers prom_reg_tmp[64];

struct linux_mlist_p1275 prom_phys_total[64];
struct linux_mlist_p1275 prom_prom_taken[64];
struct linux_mlist_p1275 prom_phys_avail[64];

struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;

struct linux_mem_p1275 prom_memlist;

/* Internal Prom library routine to sort a linux_mlist_p1275 memory
 * list.  Used below in initialization.
 */
static void __init prom_sortmemlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if (lowest == thislist[i].start_adr)
			continue;
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}

/* Initialize the memory lists based upon the prom version. */
void __init prom_meminit(void)
{
	int node = 0;
	unsigned int iter, num_regs;

	node = prom_finddevice("/memory");
	num_regs = prom_getproperty(node, "available",
				    (char *) prom_reg_memlist,
				    sizeof(prom_reg_memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (iter = 0; iter < num_regs; iter++) {
		prom_phys_avail[iter].start_adr = prom_reg_memlist[iter].phys_addr;
		prom_phys_avail[iter].num_bytes = prom_reg_memlist[iter].reg_size;
		prom_phys_avail[iter].theres_more = &prom_phys_avail[iter + 1];
	}
	prom_phys_avail[iter - 1].theres_more = NULL;

	num_regs = prom_getproperty(node, "reg",
				    (char *) prom_reg_memlist,
				    sizeof(prom_reg_memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (iter = 0; iter < num_regs; iter++) {
		prom_phys_total[iter].start_adr = prom_reg_memlist[iter].phys_addr;
		prom_phys_total[iter].num_bytes = prom_reg_memlist[iter].reg_size;
		prom_phys_total[iter].theres_more = &prom_phys_total[iter + 1];
	}
	prom_phys_total[iter - 1].theres_more = NULL;

	node = prom_finddevice("/virtual-memory");
	num_regs = prom_getproperty(node, "available",
				    (char *) prom_reg_memlist,
				    sizeof(prom_reg_memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));

	/* Convert available virtual areas to taken virtual
	 * areas.  First sort, then convert.
	 */
	for (iter = 0; iter < num_regs; iter++) {
		prom_prom_taken[iter].start_adr = prom_reg_memlist[iter].phys_addr;
		prom_prom_taken[iter].num_bytes = prom_reg_memlist[iter].reg_size;
		prom_prom_taken[iter].theres_more = &prom_prom_taken[iter + 1];
	}
	prom_prom_taken[iter - 1].theres_more = NULL;

	prom_sortmemlist(prom_prom_taken);

	/* Finally, convert. */
	for (iter = 0; iter < num_regs; iter++) {
		prom_prom_taken[iter].start_adr =
			prom_prom_taken[iter].start_adr +
			prom_prom_taken[iter].num_bytes;
		prom_prom_taken[iter].num_bytes =
			prom_prom_taken[iter + 1].start_adr -
			prom_prom_taken[iter].start_adr;
	}
	prom_prom_taken[iter - 1].num_bytes =
		-1UL - prom_prom_taken[iter - 1].start_adr;

	/* Sort the other two lists. */
	prom_sortmemlist(prom_phys_total);
	prom_sortmemlist(prom_phys_avail);

	/* Link all the lists into the top-level descriptor. */
	prom_memlist.p1275_totphys = &prom_ptot_ptr;
	prom_memlist.p1275_prommap = &prom_ptak_ptr;
	prom_memlist.p1275_available = &prom_pavl_ptr;
}

/* This returns a pointer to our libraries internal p1275 format
 * memory descriptor.
 */
struct linux_mem_p1275 *prom_meminfo(void)
{
	return &prom_memlist;
}
drivers/video/aty/radeon_base.c
@@ -475,7 +475,7 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo)
 	 */
 
 	/* Flush PCI buffers ? */
-	tmp = INREG(DEVICE_ID);
+	tmp = INREG16(DEVICE_ID);
 
 	local_irq_disable();
drivers/video/aty/radeonfb.h
@@ -395,6 +395,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms)
 
 #define INREG8(addr)		readb((rinfo->mmio_base)+addr)
 #define OUTREG8(addr,val)	writeb(val, (rinfo->mmio_base)+addr)
+#define INREG16(addr)		readw((rinfo->mmio_base)+addr)
+#define OUTREG16(addr,val)	writew(val, (rinfo->mmio_base)+addr)
 #define INREG(addr)		readl((rinfo->mmio_base)+addr)
 #define OUTREG(addr,val)	writel(val, (rinfo->mmio_base)+addr)
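The new INREG16()/OUTREG16() accessors let 16-bit registers such as DEVICE_ID be read with a readw() of the matching width instead of a 32-bit readl(). A minimal, compilable sketch of that width-matched MMIO pattern follows; mmio_base is a hypothetical mapped BAR, not the radeonfb structures.

#include <stdint.h>

/* Width-matched MMIO accessors over a hypothetical mapped BAR.
 * Using the 16-bit accessor for a 16-bit register avoids touching
 * (and possibly side-effecting) the neighbouring 16 bits.
 */
static volatile uint8_t *mmio_base;

static inline uint16_t mmio_read16(uint32_t off)
{
	return *(volatile uint16_t *)(mmio_base + off);	/* readw() in the kernel */
}

static inline uint32_t mmio_read32(uint32_t off)
{
	return *(volatile uint32_t *)(mmio_base + off);	/* readl() in the kernel */
}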
include/asm-sparc/pgtable.h
@@ -82,6 +82,8 @@ extern unsigned long page_kernel;
 /* Top-level page directory */
 extern pgd_t swapper_pg_dir[1024];
 
+extern void paging_init(void);
+
 /* Page table for 0-4MB for everybody, on the Sparc this
  * holds the same as on the i386.
  */
include/asm-sparc64/openprom.h
@@ -186,8 +186,8 @@ struct linux_prom_registers {
 };
 
 struct linux_prom64_registers {
-	long phys_addr;
-	long reg_size;
+	unsigned long phys_addr;
+	unsigned long reg_size;
 };
 
 struct linux_prom_irqs {
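Making phys_addr and reg_size unsigned matters as soon as the values are compared or subtracted: a signed long would treat an address with the top bit set as negative and order it below every small address. A two-line illustration (not kernel code):

#include <stdio.h>

int main(void)
{
	long          s = (long)0x8000000000000000UL;	/* top-bit-set "address" */
	unsigned long u = 0x8000000000000000UL;

	/* Signed: negative, so it sorts below a small address.
	 * Unsigned: larger than any small address, as intended.
	 */
	printf("signed  : %d\n", s < 0x1000L);		/* prints 1 */
	printf("unsigned: %d\n", u < 0x1000UL);		/* prints 0 */
	return 0;
}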
include/asm-sparc64/oplib.h
@@ -95,20 +95,6 @@ extern int prom_devclose(int device_handle);
 extern void prom_seek(int device_handle, unsigned int seek_hival,
 		      unsigned int seek_lowval);
 
-/* Machine memory configuration routine. */
-
-/* This function returns a V0 format memory descriptor table, it has three
- * entries.  One for the total amount of physical ram on the machine, one
- * for the amount of physical ram available, and one describing the virtual
- * areas which are allocated by the prom.  So, in a sense the physical
- * available is a calculation of the total physical minus the physical mapped
- * by the prom with virtual mappings.
- *
- * These lists are returned pre-sorted, this should make your life easier
- * since the prom itself is way too lazy to do such nice things.
- */
-extern struct linux_mem_p1275 *prom_meminfo(void);
-
 /* Miscellaneous routines, don't really fit in any category per se. */
 
 /* Reboot the machine with the command line passed. */
include/asm-sparc64/page.h
@@ -140,23 +140,6 @@ extern unsigned long page_to_pfn(struct page *);
 #define virt_to_phys __pa
 #define phys_to_virt __va
 
-/* The following structure is used to hold the physical
- * memory configuration of the machine.  This is filled in
- * probe_memory() and is later used by mem_init() to set up
- * mem_map[].  We statically allocate SPARC_PHYS_BANKS of
- * these structs, this is arbitrary.  The entry after the
- * last valid one has num_bytes==0.
- */
-struct sparc_phys_banks {
-	unsigned long base_addr;
-	unsigned long num_bytes;
-};
-
-#define SPARC_PHYS_BANKS 32
-
-extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
-
 #endif /* !(__ASSEMBLY__) */
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
include/asm-sparc64/pgtable.h
@@ -341,6 +341,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
 extern pgd_t swapper_pg_dir[2048];
 extern pmd_t swapper_low_pmd_dir[2048];
 
+extern void paging_init(void);
+extern unsigned long find_ecache_flush_span(unsigned long size);
+
 /* These do nothing with the way I have things setup. */
 #define mmu_lockarea(vaddr, len)		(vaddr)
 #define mmu_unlockarea(vaddr, len)		do { } while(0)
include/asm-sparc64/uaccess.h
@@ -70,26 +70,14 @@ static inline int access_ok(int type, const void __user * addr, unsigned long size)
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
 */
 
 struct exception_table_entry {
-	unsigned insn, fixup;
+	unsigned int insn, fixup;
 };
 
-/* Special exable search, which handles ranges.  Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 extern void __ret_efault(void);
+extern void __retl_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
@@ -263,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_from_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 	return ret;
 }
@@ -279,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret = ___copy_to_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
 }
@@ -295,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_in_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_in_user_fixup(to, from, size);
 	return ret;
 }
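copy_from_user() keeps the fast path free of normally-taken branches: the fixup helper only runs when the assembly copy reports a short copy, and unlikely() lets the compiler lay that call out off the hot path. A hedged sketch of the wrapper shape, with local stubs standing in for ___copy_from_user() and copy_from_user_fixup():

#include <string.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

/* Stubs: the real ___copy_from_user is assembly and returns the
 * number of bytes it could not copy (0 on full success).
 */
static unsigned long raw_copy(void *to, const void *from, unsigned long size)
{
	memcpy(to, from, size);
	return 0;
}

static unsigned long copy_fixup(void *to, const void *from, unsigned long size)
{
	(void)to; (void)from;
	return size;	/* report everything after the fault as uncopied */
}

static unsigned long copy_from_user_sketch(void *to, const void *from,
					    unsigned long size)
{
	unsigned long ret = raw_copy(to, from, size);

	if (unlikely(ret))		/* faulted partway through */
		ret = copy_fixup(to, from, size);
	return ret;
}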