openeuler / Kernel
Commit 446d2733

Merge branch 'x86/cpu' into x86/core

Authored Sep 05, 2008 by Ingo Molnar
Parents: accf0fa6, 0a488a53
Showing 21 changed files with 669 additions and 454 deletions (+669 −454)
Documentation/kernel-parameters.txt      +6    −0
arch/x86/kernel/cpu/Makefile             +2    −2
arch/x86/kernel/cpu/amd.c                +7    −5
arch/x86/kernel/cpu/amd_64.c             +2    −2
arch/x86/kernel/cpu/centaur.c            +13   −1
arch/x86/kernel/cpu/centaur_64.c         +2    −1
arch/x86/kernel/cpu/common.c             +313  −276
arch/x86/kernel/cpu/common_64.c          +231  −132
arch/x86/kernel/cpu/cpu.h                +6    −12
arch/x86/kernel/cpu/cyrix.c              +32   −6
arch/x86/kernel/cpu/intel.c              +2    −1
arch/x86/kernel/cpu/intel_64.c           +2    −1
arch/x86/kernel/cpu/transmeta.c          +2    −1
arch/x86/kernel/cpu/umc.c                +2    −1
arch/x86/kernel/paravirt.c               +1    −0
arch/x86/kernel/traps_64.c               +2    −3
arch/x86/kernel/vmlinux_32.lds.S         +4    −4
arch/x86/kernel/vmlinux_64.lds.S         +4    −5
include/asm-x86/msr.h                    +23   −0
include/asm-x86/paravirt.h               +12   −0
include/asm-x86/processor.h              +1    −1
Documentation/kernel-parameters.txt

@@ -1888,6 +1888,12 @@ and is between 256 and 4096 characters. It is defined in the file
 	shapers=	[NET]
 			Maximal number of shapers.
 
+	show_msr=	[x86] show boot-time MSR settings
+			Format: { <integer> }
+			Show boot-time (BIOS-initialized) MSR settings.
+			The parameter means the number of CPUs to show,
+			for example 1 means boot CPU only.
+
 	sim710=		[SCSI,HW]
 			See header of drivers/scsi/sim710.c.
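For example, booting with

	show_msr=1

dumps the boot CPU's BIOS-initialized MSRs: the parameter is parsed by the setup_show_msr() handler and the dump is done by print_cpu_msr(), both added to arch/x86/kernel/cpu/common_64.c later in this diff.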
arch/x86/kernel/cpu/Makefile

@@ -8,14 +8,14 @@ obj-y += proc.o capflags.o powerflags.o
 obj-$(CONFIG_X86_32)		+= common.o bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)		+= common_64.o bugs_64.o
-obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
-obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
 obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
 obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
+obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
+obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
 obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o
 obj-$(CONFIG_X86_MCE)			+= mcheck/
arch/x86/kernel/cpu/amd.c

@@ -31,6 +31,11 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 		if (c->x86_power & (1 << 8))
 			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+
+	/* Set MTRR capability flag if appropriate */
+	if (c->x86_model == 13 || c->x86_model == 9 ||
+	    (c->x86_model == 8 && c->x86_mask >= 8))
+		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)

@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			mbytes);
 	}
 
-	/* Set MTRR capability flag if appropriate */
-	if (c->x86_model == 13 || c->x86_model == 9 ||
-	    (c->x86_model == 8 && c->x86_mask >= 8))
-		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 
 		break;
 	}

@@ -297,6 +298,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
 	.c_size_cache	= amd_size_cache,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
arch/x86/kernel/cpu/amd_64.c

@@ -218,7 +218,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
 	.c_ident	= { "AuthenticAMD" },
 	.c_early_init	= early_init_amd,
 	.c_init		= init_amd,
+	.c_x86_vendor	= X86_VENDOR_AMD,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+cpu_dev_register(amd_cpu_dev);
arch/x86/kernel/cpu/centaur.c

@@ -314,6 +314,16 @@ enum {
 	EAMD3D		= 1 << 20,
 };
 
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+	switch (c->x86) {
+	case 5:
+		/* Emulate MTRRs using Centaur's MCR. */
+		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
+		break;
+	}
+}
+
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {

@@ -462,8 +472,10 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
+	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
 	.c_size_cache	= centaur_size_cache,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);
arch/x86/kernel/cpu/centaur_64.c

@@ -29,7 +29,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
 	.c_init		= init_centaur,
+	.c_x86_vendor	= X86_VENDOR_CENTAUR,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+cpu_dev_register(centaur_cpu_dev);
arch/x86/kernel/cpu/common.c

(Diff collapsed: +313 −276.)
arch/x86/kernel/cpu/common_64.c

@@ -37,6 +37,8 @@
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 /* We need valid kernel segments for data and code in long mode too
  * IRET will check the segment types  kkeil 2000/10/28
  * Also sysret mandates a special GDT layout

@@ -66,7 +68,7 @@ void switch_to_new_gdt(void)
 	load_gdt(&gdt_descr);
 }
 
-struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {

@@ -76,12 +78,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
+	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
 
 	if (c->extended_cpuid_level < 0x80000004)
 		return 0;

@@ -91,35 +94,49 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
+
+	/* Intel chips right-justify this string for some dumb reason;
+	   undo that brain damage */
+	p = q = &c->x86_model_id[0];
+	while (*p == ' ')
+		p++;
+	if (p != q) {
+		while (*p)
+			*q++ = *p++;
+		while (q <= &c->x86_model_id[48])
+			*q++ = '\0';	/* Zero-pad the rest */
+	}
+
 	return 1;
 }
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ebx, ecx, edx;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
 	n = c->extended_cpuid_level;
 
 	if (n >= 0x80000005) {
 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
-		       "D cache %dK (%d bytes/line)\n",
+		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
 		       edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);
 		c->x86_cache_size = (ecx >> 24) + (edx >> 24);
 		/* On K8 L1 TLB is inclusive, so don't count it */
 		c->x86_tlbsize = 0;
 	}
 
-	if (n >= 0x80000006) {
-		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-		ecx = cpuid_ecx(0x80000006);
-		c->x86_cache_size = ecx >> 16;
-		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
-		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-		       c->x86_cache_size, ecx & 0xFF);
-	}
+	if (n < 0x80000006)	/* Some chips just has a large L1. */
+		return;
+
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
+	l2size = ecx >> 16;
+	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+	c->x86_cache_size = l2size;
+	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+	       l2size, ecx & 0xFF);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)

@@ -128,14 +145,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
 
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		goto out;
 
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {

@@ -143,8 +159,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	} else if (smp_num_siblings > 1) {
 
 		if (smp_num_siblings > NR_CPUS) {
-			printk(KERN_WARNING "CPU: Unsupported number of "
-			       "siblings %d", smp_num_siblings);
+			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
+			       smp_num_siblings);
 			smp_num_siblings = 1;
 			return;
 		}

@@ -161,6 +177,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		c->cpu_core_id = phys_pkg_id(index_msb) &
 					       ((1 << core_bits) - 1);
 	}
+
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",

@@ -168,7 +185,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
 	}
-
 #endif
 }

@@ -179,41 +195,148 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	static int printed;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		if (cpu_devs[i]) {
-			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
-			    (cpu_devs[i]->c_ident[1] &&
-			    !strcmp(v, cpu_devs[i]->c_ident[1]))) {
-				c->x86_vendor = i;
-				this_cpu = cpu_devs[i];
-				return;
-			}
+		if (!cpu_devs[i])
+			break;
+
+		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+		    (cpu_devs[i]->c_ident[1] &&
+		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+			this_cpu = cpu_devs[i];
+			c->x86_vendor = this_cpu->c_x86_vendor;
+			return;
 		}
 	}
+
 	if (!printed) {
 		printed++;
 		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
+
 	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	this_cpu = &default_cpu;
+}
+
+void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+{
+	/* Get vendor name */
+	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
+	      (unsigned int *)&c->x86_vendor_id[0],
+	      (unsigned int *)&c->x86_vendor_id[8],
+	      (unsigned int *)&c->x86_vendor_id[4]);
+
+	c->x86 = 4;
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 junk, tfms, cap0, misc;
+
+		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+		c->x86 = (tfms >> 8) & 0xf;
+		c->x86_model = (tfms >> 4) & 0xf;
+		c->x86_mask = tfms & 0xf;
+		if (c->x86 == 0xf)
+			c->x86 += (tfms >> 20) & 0xff;
+		if (c->x86 >= 0x6)
+			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+		if (cap0 & (1 << 19)) {
+			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
+			c->x86_cache_alignment = c->x86_clflush_size;
+		}
+	}
 }
 
-static void __init early_cpu_support_print(void)
+static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 {
-	int i,j;
-	struct cpu_dev *cpu_devx;
+	u32 tfms, xlvl;
+	u32 ebx;
+
+	/* Intel-defined flags: level 0x00000001 */
+	if (c->cpuid_level >= 0x00000001) {
+		u32 capability, excap;
+
+		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+		c->x86_capability[0] = capability;
+		c->x86_capability[4] = excap;
+	}
 
-	printk("KERNEL supported cpus:\n");
-	for (i = 0; i < X86_VENDOR_NUM; i++) {
-		cpu_devx = cpu_devs[i];
-		if (!cpu_devx)
-			continue;
-		for (j = 0; j < 2; j++) {
-			if (!cpu_devx->c_ident[j])
-				continue;
-			printk("  %s %s\n", cpu_devx->c_vendor,
-				cpu_devx->c_ident[j]);
-		}
-	}
+	/* AMD-defined flags: level 0x80000001 */
+	xlvl = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = xlvl;
+	if ((xlvl & 0xffff0000) == 0x80000000) {
+		if (xlvl >= 0x80000001) {
+			c->x86_capability[1] = cpuid_edx(0x80000001);
+			c->x86_capability[6] = cpuid_ecx(0x80000001);
+		}
+	}
+
+	/* Transmeta-defined flags: level 0x80860001 */
+	xlvl = cpuid_eax(0x80860000);
+	if ((xlvl & 0xffff0000) == 0x80860000) {
+		/* Don't set x86_cpuid_level here for now to not confuse. */
+		if (xlvl >= 0x80860001)
+			c->x86_capability[2] = cpuid_edx(0x80860001);
+	}
+
+	if (c->extended_cpuid_level >= 0x80000007)
+		c->x86_power = cpuid_edx(0x80000007);
+
+	if (c->extended_cpuid_level >= 0x80000008) {
+		u32 eax = cpuid_eax(0x80000008);
+
+		c->x86_virt_bits = (eax >> 8) & 0xff;
+		c->x86_phys_bits = eax & 0xff;
+	}
+}
+
+/* Do some early cpuid on the boot CPU to get some parameter that are
+   needed before check_bugs. Everything advanced is in identify_cpu
+   below. */
+static void __init early_identify_cpu(struct cpuinfo_x86 *c)
+{
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+	c->extended_cpuid_level = 0;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c);
+
+	get_cpu_cap(c);
+
+	if (this_cpu->c_early_init)
+		this_cpu->c_early_init(c);
+
+	validate_pat_support(c);
+}
+
+void __init early_cpu_init(void)
+{
+	struct cpu_dev **cdev;
+	int count = 0;
+
+	printk("KERNEL supported cpus:\n");
+	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
+		struct cpu_dev *cpudev = *cdev;
+		unsigned int j;
+
+		if (count >= X86_VENDOR_NUM)
+			break;
+		cpu_devs[count] = cpudev;
+		count++;
+
+		for (j = 0; j < 2; j++) {
+			if (!cpudev->c_ident[j])
+				continue;
+			printk("  %s %s\n", cpudev->c_vendor,
+				cpudev->c_ident[j]);
+		}
+	}
+
+	early_identify_cpu(&boot_cpu_data);
 }
 
 /*

@@ -249,111 +372,26 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
-
-void __init early_cpu_init(void)
+static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 {
-	struct cpu_vendor_dev *cvdev;
-
-	for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
-		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
-
-	early_cpu_support_print();
-	early_identify_cpu(&boot_cpu_data);
-}
-
-/* Do some early cpuid on the boot CPU to get some parameter that are
-   needed before check_bugs. Everything advanced is in identify_cpu
-   below. */
-static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
-{
-	u32 tfms, xlvl;
-
-	c->loops_per_jiffy = loops_per_jiffy;
-	c->x86_cache_size = -1;
-	c->x86_vendor = X86_VENDOR_UNKNOWN;
-	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
-	c->x86_vendor_id[0] = '\0'; /* Unset */
-	c->x86_model_id[0] = '\0';  /* Unset */
-	c->x86_clflush_size = 64;
-	c->x86_cache_alignment = c->x86_clflush_size;
-	c->x86_max_cores = 1;
-	c->x86_coreid_bits = 0;
-	c->extended_cpuid_level = 0;
-	memset(&c->x86_capability, 0, sizeof c->x86_capability);
-
-	/* Get vendor name */
-	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
-	      (unsigned int *)&c->x86_vendor_id[0],
-	      (unsigned int *)&c->x86_vendor_id[8],
-	      (unsigned int *)&c->x86_vendor_id[4]);
-
-	get_cpu_vendor(c);
-
-	/* Initialize the standard set of capabilities */
-	/* Note that the vendor-specific code below might override */
-
-	/* Intel-defined flags: level 0x00000001 */
-	if (c->cpuid_level >= 0x00000001) {
-		__u32 misc;
-		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
-		      &c->x86_capability[0]);
-		c->x86 = (tfms >> 8) & 0xf;
-		c->x86_model = (tfms >> 4) & 0xf;
-		c->x86_mask = tfms & 0xf;
-		if (c->x86 == 0xf)
-			c->x86 += (tfms >> 20) & 0xff;
-		if (c->x86 >= 0x6)
-			c->x86_model += ((tfms >> 16) & 0xF) << 4;
-		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
-			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
-	} else {
-		/* Have CPUID level 0 only - unheard of */
-		c->x86 = 4;
-	}
+	cpu_detect(c);
+
+	get_cpu_cap(c);
 
 	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
 #ifdef CONFIG_SMP
 	c->phys_proc_id = c->initial_apicid;
 #endif
-	/* AMD-defined flags: level 0x80000001 */
-	xlvl = cpuid_eax(0x80000000);
-	c->extended_cpuid_level = xlvl;
-	if ((xlvl & 0xffff0000) == 0x80000000) {
-		if (xlvl >= 0x80000001) {
-			c->x86_capability[1] = cpuid_edx(0x80000001);
-			c->x86_capability[6] = cpuid_ecx(0x80000001);
-		}
-		if (xlvl >= 0x80000004)
-			get_model_name(c); /* Default name */
-	}
 
-	/* Transmeta-defined flags: level 0x80860001 */
-	xlvl = cpuid_eax(0x80860000);
-	if ((xlvl & 0xffff0000) == 0x80860000) {
-		/* Don't set x86_cpuid_level here for now to not confuse. */
-		if (xlvl >= 0x80860001)
-			c->x86_capability[2] = cpuid_edx(0x80860001);
-	}
-
-	if (c->extended_cpuid_level >= 0x80000007)
-		c->x86_power = cpuid_edx(0x80000007);
-
-	if (c->extended_cpuid_level >= 0x80000008) {
-		u32 eax = cpuid_eax(0x80000008);
-
-		c->x86_virt_bits = (eax >> 8) & 0xff;
-		c->x86_phys_bits = eax & 0xff;
-	}
+	if (c->extended_cpuid_level >= 0x80000004)
+		get_model_name(c); /* Default name */
 
 	init_scattered_cpuid_features(c);
 	detect_nopl(c);
-
-	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
-	    cpu_devs[c->x86_vendor]->c_early_init)
-		cpu_devs[c->x86_vendor]->c_early_init(c);
-
-	validate_pat_support(c);
 }
 
 /*

@@ -363,9 +401,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
-	early_identify_cpu(c);
+	c->loops_per_jiffy = loops_per_jiffy;
+	c->x86_cache_size = -1;
+	c->x86_vendor = X86_VENDOR_UNKNOWN;
+	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
+	c->x86_vendor_id[0] = '\0'; /* Unset */
+	c->x86_model_id[0] = '\0';  /* Unset */
+	c->x86_max_cores = 1;
+	c->x86_coreid_bits = 0;
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
+	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
-	init_scattered_cpuid_features(c);
+	generic_identify(c);
 
 	c->apicid = phys_pkg_id(0);

@@ -411,7 +459,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 }
 
-void __cpuinit identify_boot_cpu(void)
+void __init identify_boot_cpu(void)
 {
 	identify_cpu(&boot_cpu_data);
 }

@@ -423,6 +471,49 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 	mtrr_ap_init();
 }
 
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 static __init int setup_noclflush(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);

@@ -439,6 +530,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
 		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
arch/x86/kernel/cpu/cpu.h

@@ -21,21 +21,15 @@ struct cpu_dev {
 	void		(*c_init)(struct cpuinfo_x86 *c);
 	void		(*c_identify)(struct cpuinfo_x86 *c);
 	unsigned int	(*c_size_cache)(struct cpuinfo_x86 *c, unsigned int size);
+	int		c_x86_vendor;
 };
 
-extern struct cpu_dev *cpu_devs[X86_VENDOR_NUM];
+#define cpu_dev_register(cpu_devX) \
+	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	__attribute__((__section__(".x86_cpu_dev.init"))) = \
+	&cpu_devX;
 
-struct cpu_vendor_dev {
-	int vendor;
-	struct cpu_dev *cpu_dev;
-};
-
-#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
-	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
-	__attribute__((__section__(".x86cpuvendor.init"))) = \
-	{ cpu_vendor_id, cpu_dev }
-
-extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
+extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
 
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
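To make the new registration scheme concrete, here is a minimal sketch: the "Demo" vendor is hypothetical, while cpu_dev_register(), the __x86_cpu_dev_start/__x86_cpu_dev_end section bounds, and the walk performed by early_cpu_init() all come from this diff.

	/* Hypothetical vendor driver, for illustration only. */
	static struct cpu_dev demo_cpu_dev __cpuinitdata = {
		.c_vendor	= "Demo",
		.c_ident	= { "GenuineDemo" },
		.c_x86_vendor	= X86_VENDOR_UNKNOWN,	/* a real driver uses its own X86_VENDOR_* ID */
	};

	/* Expands to a pointer to demo_cpu_dev placed in .x86_cpu_dev.init: */
	cpu_dev_register(demo_cpu_dev);

	/* A hypothetical helper mirroring what early_cpu_init() in
	 * common_64.c does: walk the linker-provided section bounds. */
	static void __init list_registered_cpu_devs(void)
	{
		struct cpu_dev **cdev;

		for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++)
			printk("  %s %s\n", (*cdev)->c_vendor, (*cdev)->c_ident[0]);
	}

The point of the change: vendor files no longer claim a hard-coded slot keyed by X86_VENDOR_* at link time; early_cpu_init() fills the now-static cpu_devs[] array in section order, and get_cpu_vendor() reads the vendor ID back from the new c_x86_vendor field.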
arch/x86/kernel/cpu/cyrix.c

@@ -15,13 +15,11 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
-	unsigned long flags;
 
 	/* we test for DEVID by checking whether CCR3 is writable */
-	local_irq_save(flags);
 	ccr3 = getCx86(CX86_CCR3);
 	setCx86(CX86_CCR3, ccr3 ^ 0x80);
 	getCx86(0xc0);   /* dummy to change bus */

@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 		*dir0 = getCx86(CX86_DIR0);
 		*dir1 = getCx86(CX86_DIR1);
 	}
-	local_irq_restore(flags);
+}
+
+static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__do_cyrix_devid(dir0, dir1);
+	local_irq_restore(flags);
 }
 
 /*
  * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
  * order to identify the Cyrix CPU model after we're out of setup.c

@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }
 
+static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+{
+	unsigned char dir0, dir0_msn, dir1 = 0;
+
+	__do_cyrix_devid(&dir0, &dir1);
+	dir0_msn = dir0 >> 4; /* identifies CPU "family" */
+
+	switch (dir0_msn) {
+	case 3: /* 6x86/6x86L */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	case 5: /* 6x86MX/M II */
+		/* Emulate MTRRs using Cyrix's ARRs. */
+		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
+		break;
+	}
+}
+
 static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 {

@@ -416,16 +439,19 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
+	.c_early_init	= early_init_cyrix,
 	.c_init		= init_cyrix,
 	.c_identify	= cyrix_identify,
+	.c_x86_vendor	= X86_VENDOR_CYRIX,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
+cpu_dev_register(cyrix_cpu_dev);
 
 static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
+	.c_x86_vendor	= X86_VENDOR_NSC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
+cpu_dev_register(nsc_cpu_dev);
arch/x86/kernel/cpu/intel.c

@@ -303,9 +303,10 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
 	.c_size_cache	= intel_size_cache,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+cpu_dev_register(intel_cpu_dev);
 
 /* arch_initcall(intel_cpu_init); */
arch/x86/kernel/cpu/intel_64.c

@@ -90,6 +90,7 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineIntel" },
 	.c_early_init	= early_init_intel,
 	.c_init		= init_intel,
+	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+cpu_dev_register(intel_cpu_dev);
arch/x86/kernel/cpu/transmeta.c

@@ -102,6 +102,7 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
 	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
 	.c_init		= init_transmeta,
 	.c_identify	= transmeta_identify,
+	.c_x86_vendor	= X86_VENDOR_TRANSMETA,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
+cpu_dev_register(transmeta_cpu_dev);
arch/x86/kernel/cpu/umc.c

@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
 		}
 	},
 	},
+	.c_x86_vendor	= X86_VENDOR_UMC,
 };
 
-cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
+cpu_dev_register(umc_cpu_dev);
arch/x86/kernel/paravirt.c

@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
+	.read_msr_amd = native_read_msr_amd_safe,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
arch/x86/kernel/traps_64.c

@@ -339,9 +339,8 @@ static void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp, char *log_lvl)
 {
-	printk("\nCall Trace:\n");
+	printk("Call Trace:\n");
 	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-	printk("\n");
 }
 
 void show_trace(struct task_struct *task, struct pt_regs *regs,

@@ -386,6 +385,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		printk(" %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
+	printk("\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }

@@ -443,7 +443,6 @@ void show_registers(struct pt_regs *regs)
 		printk("Stack: ");
 		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
 				regs->bp, "");
-		printk("\n");
 
 		printk(KERN_EMERG "Code: ");
arch/x86/kernel/vmlinux_32.lds.S

@@ -140,10 +140,10 @@ SECTIONS
 		*(.con_initcall.init)
 		__con_initcall_end = .;
 	}
-	.x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-		__x86cpuvendor_start = .;
-		*(.x86cpuvendor.init)
-		__x86cpuvendor_end = .;
+	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+		__x86_cpu_dev_start = .;
+		*(.x86_cpu_dev.init)
+		__x86_cpu_dev_end = .;
 	}
 	SECURITY_INIT
 	. = ALIGN(4);
arch/x86/kernel/vmlinux_64.lds.S

@@ -168,13 +168,12 @@ SECTIONS
 		*(.con_initcall.init)
 	}
 	__con_initcall_end = .;
-	. = ALIGN(16);
-	__x86cpuvendor_start = .;
-	.x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
-		*(.x86cpuvendor.init)
+	__x86_cpu_dev_start = .;
+	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
+		*(.x86_cpu_dev.init)
 	}
-	__x86cpuvendor_end = .;
+	__x86_cpu_dev_end = .;
 	SECURITY_INIT
 
 	. = ALIGN(8);
 	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
include/asm-x86/msr.h

@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 	return EAX_EDX_VAL(val, low, high);
 }
 
+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+						      int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  mov %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+	return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {

@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_amd_safe(msr, &err);
+	return err;
+}
 
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
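A minimal usage sketch of the new accessor follows; print_cpu_msr() in common_64.c above applies the same pattern over whole MSR ranges. The function name and MSR number here are illustrative, and the reading of the 0x9c5a203a constant loaded into %edi as an AMD-required passcode for certain model-specific registers is an assumption; the diff itself does not say what it is.

	/* Hypothetical helper: probe a single MSR without risking a #GP oops.
	 * 0xc0010000 (in the AMD range walked by print_cpu_msr) is only an
	 * illustrative index. */
	static void __cpuinit dump_one_msr(void)
	{
		u64 val;

		/* rdmsrl_amd_safe() returns non-zero, instead of faulting,
		 * when the MSR cannot be read. */
		if (!rdmsrl_amd_safe(0xc0010000, &val))
			printk(KERN_INFO " MSR%08x: %016llx\n", 0xc0010000, val);
	}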
include/asm-x86/paravirt.h

@@ -137,6 +137,7 @@ struct pv_cpu_ops {
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

@@ -720,6 +721,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+}
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);

@@ -765,6 +770,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
+
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr_amd(msr, &err);
+	return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
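For the paravirt build, the call chain mirrors the native one. A condensed sketch (assuming CONFIG_PARAVIRT; the function name and MSR number are hypothetical, the rest comes from this diff):

	static void __cpuinit dump_one_msr_pv(void)
	{
		int err;
		u64 val;

		/* rdmsrl_amd_safe() -> paravirt_read_msr_amd() ->
		 * pv_cpu_ops.read_msr_amd. paravirt.c (above) installs
		 * native_read_msr_amd_safe as the default backend, so
		 * bare-metal behaviour is unchanged; a hypervisor may
		 * override the hook. */
		val = paravirt_read_msr_amd(0xc0010000, &err);
		if (!err)
			printk(KERN_INFO " MSR%08x: %016llx\n", 0xc0010000, val);
	}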
include/asm-x86/processor.h

@@ -77,9 +77,9 @@ struct cpuinfo_x86 {
 	__u8			x86_phys_bits;
 	/* CPUID returned core id bits: */
 	__u8			x86_coreid_bits;
+#endif
 	/* Max extended CPUID function supported: */
 	__u32			extended_cpuid_level;
-#endif
 	/* Maximum supported CPUID level, -1=no CPUID: */
 	int			cpuid_level;
 	__u32			x86_capability[NCAPINTS];