openeuler / raspberrypi-kernel · Commit 93b894b6
Commit 93b894b6
Authored Feb 09, 2016 by Ingo Molnar

Merge branch 'x86/cpu' into perf/core, to pick up dependency

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parents: d3aaf09f 1b74dde7
Showing 31 changed files with 150 additions and 163 deletions (+150 −163)
arch/x86/include/asm/elf.h                    +1  -1
arch/x86/kernel/cpu/amd.c                     +10 -13
arch/x86/kernel/cpu/bugs_64.c                 +1  -1
arch/x86/kernel/cpu/centaur.c                 +5  -5
arch/x86/kernel/cpu/common.c                  +20 -22
arch/x86/kernel/cpu/cyrix.c                   +5  -5
arch/x86/kernel/cpu/hypervisor.c              +1  -1
arch/x86/kernel/cpu/intel.c                   +5  -5
arch/x86/kernel/cpu/intel_cacheinfo.c         +1  -1
arch/x86/kernel/cpu/mcheck/mce-inject.c       +7  -8
arch/x86/kernel/cpu/mcheck/p5.c               +7  -11
arch/x86/kernel/cpu/mcheck/therm_throt.c      +7  -8
arch/x86/kernel/cpu/mcheck/threshold.c        +2  -2
arch/x86/kernel/cpu/mcheck/winchip.c          +2  -3
arch/x86/kernel/cpu/microcode/amd.c           +1  -1
arch/x86/kernel/cpu/mshyperv.c                +4  -4
arch/x86/kernel/cpu/mtrr/centaur.c            +1  -1
arch/x86/kernel/cpu/mtrr/cleanup.c            +22 -22
arch/x86/kernel/cpu/mtrr/generic.c            +11 -12
arch/x86/kernel/cpu/mtrr/main.c               +10 -10
arch/x86/kernel/cpu/perf_event.c              +5  -4
arch/x86/kernel/cpu/perf_event_amd_ibs.c      +5  -5
arch/x86/kernel/cpu/perf_event_amd_uncore.c   +2  -2
arch/x86/kernel/cpu/perf_event_intel_ds.c     +3  -3
arch/x86/kernel/cpu/rdrand.c                  +1  -1
arch/x86/kernel/cpu/topology.c                +2  -2
arch/x86/kernel/cpu/transmeta.c               +4  -4
arch/x86/kernel/cpu/vmware.c                  +2  -3
arch/x86/kernel/mpparse.c                     +1  -1
arch/x86/lguest/boot.c                        +1  -1
arch/x86/xen/enlighten.c                      +1  -1
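Note on the pattern below: apart from the x86_capability[] index rename (elf.h, mpparse.c, lguest, xen), every hunk in this merge is the same mechanical conversion brought in via parent 1b74dde7 — printk(KERN_<LEVEL> ...) and the deprecated pr_warning() alias become the matching pr_<level>() helper. As a reference, a simplified sketch of those helpers, following the include/linux/printk.h conventions of that era (the real definitions also support a per-file pr_fmt() prefix and printk_once()-style *_once variants):

/* Simplified sketch, not the verbatim kernel header. */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif

#define pr_emerg(fmt, ...)  printk(KERN_EMERG   pr_fmt(fmt), ##__VA_ARGS__)
#define pr_crit(fmt, ...)   printk(KERN_CRIT    pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)    printk(KERN_ERR     pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...)   printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
#define pr_notice(fmt, ...) printk(KERN_NOTICE  pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...)   printk(KERN_INFO    pr_fmt(fmt), ##__VA_ARGS__)
/* pr_cont() continues the previous line, so it carries no level. */
#define pr_cont(fmt, ...)   printk(KERN_CONT    fmt, ##__VA_ARGS__)
/* pr_warning() was a deprecated alias for pr_warn() and is dropped
 * wherever it appears in the hunks below. */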
arch/x86/include/asm/elf.h
@@ -256,7 +256,7 @@ extern int force_personality32;
    instruction set this CPU supports.  This could be done in user space,
    but it's not easy, and we've already done it here.  */
 
-#define ELF_HWCAP        (boot_cpu_data.x86_capability[0])
+#define ELF_HWCAP        (boot_cpu_data.x86_capability[CPUID_1_EDX])
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
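The CPUID_1_EDX change above (repeated in mpparse.c, lguest and Xen below) comes from the cpufeature rework merged through the same x86/cpu branch: the words of x86_capability[] get named indices recording which CPUID leaf/register each word mirrors. An abridged sketch of the idea, assuming the enum layout of the arch/x86 cpufeature headers of that period:

/* Abridged sketch: named indices into x86_capability[], so "word 0"
 * is spelled by its origin (CPUID leaf 1, EDX); later entries elided. */
enum cpuid_leafs {
    CPUID_1_EDX = 0,        /* word 0: CPUID.01H:EDX        */
    CPUID_8000_0001_EDX,    /* word 1: CPUID.80000001H:EDX  */
    CPUID_8086_0001_EDX,    /* word 2: Transmeta leaf       */
    /* ... remaining capability words ... */
};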
arch/x86/kernel/cpu/amd.c
@@ -117,7 +117,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
         void (*f_vide)(void);
         u64 d, d2;
 
-        printk(KERN_INFO "AMD K6 stepping B detected - ");
+        pr_info("AMD K6 stepping B detected - ");
 
         /*
          * It looks like AMD fixed the 2.6.2 bug and improved indirect
@@ -133,10 +133,9 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
         d = d2-d;
 
         if (d > 20*K6_BUG_LOOP)
-            printk(KERN_CONT
-                   "system stability may be impaired when more than 32 MB are used.\n");
+            pr_cont("system stability may be impaired when more than 32 MB are used.\n");
         else
-            printk(KERN_CONT "probably OK (after B9730xxxx).\n");
+            pr_cont("probably OK (after B9730xxxx).\n");
     }
 
     /* K6 with old style WHCR */
@@ -154,7 +153,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
             wbinvd();
             wrmsr(MSR_K6_WHCR, l, h);
             local_irq_restore(flags);
-            printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+            pr_info("Enabling old style K6 write allocation for %d Mb\n",
                 mbytes);
         }
         return;
@@ -175,7 +174,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
             wbinvd();
             wrmsr(MSR_K6_WHCR, l, h);
             local_irq_restore(flags);
-            printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+            pr_info("Enabling new style K6 write allocation for %d Mb\n",
                 mbytes);
         }
@@ -202,7 +201,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
      */
     if (c->x86_model >= 6 && c->x86_model <= 10) {
         if (!cpu_has(c, X86_FEATURE_XMM)) {
-            printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+            pr_info("Enabling disabled K7/SSE Support.\n");
             msr_clear_bit(MSR_K7_HWCR, 15);
             set_cpu_cap(c, X86_FEATURE_XMM);
         }
@@ -216,9 +215,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
     if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
         rdmsr(MSR_K7_CLK_CTL, l, h);
         if ((l & 0xfff00000) != 0x20000000) {
-            printk(KERN_INFO
-                "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
-                ((l & 0x000fffff)|0x20000000));
+            pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+                l, ((l & 0x000fffff)|0x20000000));
             wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
         }
     }
@@ -485,7 +483,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
     if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
         unsigned long pfn = tseg >> PAGE_SHIFT;
 
-        printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+        pr_debug("tseg: %010llx\n", tseg);
         if (pfn_range_is_mapped(pfn, pfn + 1))
             set_memory_4k((unsigned long)__va(tseg), 1);
     }
@@ -500,8 +498,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
         rdmsrl(MSR_K7_HWCR, val);
         if (!(val & BIT(24)))
-            printk(KERN_WARNING FW_BUG "TSC doesn't count "
-                "with P0 frequency!\n");
+            pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
     }
 }
arch/x86/kernel/cpu/bugs_64.c
@@ -15,7 +15,7 @@ void __init check_bugs(void)
 {
     identify_boot_cpu();
 #if !defined(CONFIG_SMP)
-    printk(KERN_INFO "CPU: ");
+    pr_info("CPU: ");
     print_cpu_info(&boot_cpu_data);
 #endif
     alternative_instructions();
arch/x86/kernel/cpu/centaur.c
@@ -29,7 +29,7 @@ static void init_c3(struct cpuinfo_x86 *c)
         rdmsr(MSR_VIA_FCR, lo, hi);
         lo |= ACE_FCR;        /* enable ACE unit */
         wrmsr(MSR_VIA_FCR, lo, hi);
-        printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+        pr_info("CPU: Enabled ACE h/w crypto\n");
     }
 
     /* enable RNG unit, if present and disabled */
@@ -37,7 +37,7 @@ static void init_c3(struct cpuinfo_x86 *c)
         rdmsr(MSR_VIA_RNG, lo, hi);
         lo |= RNG_ENABLE;    /* enable RNG unit */
         wrmsr(MSR_VIA_RNG, lo, hi);
-        printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+        pr_info("CPU: Enabled h/w RNG\n");
     }
 
     /* store Centaur Extended Feature Flags as
@@ -130,7 +130,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
             name = "C6";
             fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
             fcr_clr = DPDC;
-            printk(KERN_NOTICE "Disabling bugged TSC.\n");
+            pr_notice("Disabling bugged TSC.\n");
             clear_cpu_cap(c, X86_FEATURE_TSC);
             break;
         case 8:
@@ -163,11 +163,11 @@ static void init_centaur(struct cpuinfo_x86 *c)
         newlo = (lo|fcr_set) & (~fcr_clr);
 
         if (newlo != lo) {
-            printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+            pr_info("Centaur FCR was 0x%X now 0x%X\n",
                 lo, newlo);
             wrmsr(MSR_IDT_FCR1, newlo, hi);
         } else {
-            printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
+            pr_info("Centaur FCR is 0x%X\n", lo);
         }
         /* Emulate MTRRs using Centaur's MCR. */
         set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
arch/x86/kernel/cpu/common.c
@@ -228,7 +228,7 @@ static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
     lo |= 0x200000;
     wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 
-    printk(KERN_NOTICE "CPU serial number disabled.\n");
+    pr_notice("CPU serial number disabled.\n");
     clear_cpu_cap(c, X86_FEATURE_PN);
 
     /* Disabling the serial number may affect the cpuid level */
@@ -329,9 +329,8 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
         if (!warn)
             continue;
 
-        printk(KERN_WARNING
-               "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
-                x86_cap_flag(df->feature), df->level);
+        pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
+            x86_cap_flag(df->feature), df->level);
     }
 }
@@ -510,7 +509,7 @@ void detect_ht(struct cpuinfo_x86 *c)
     smp_num_siblings = (ebx & 0xff0000) >> 16;
 
     if (smp_num_siblings == 1) {
-        printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
+        pr_info_once("CPU0: Hyper-Threading is disabled\n");
         goto out;
     }
@@ -531,10 +530,10 @@ void detect_ht(struct cpuinfo_x86 *c)
 out:
     if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
-        printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-               c->phys_proc_id);
-        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-               c->cpu_core_id);
+        pr_info("CPU: Physical Processor ID: %d\n",
+            c->phys_proc_id);
+        pr_info("CPU: Processor Core ID: %d\n",
+            c->cpu_core_id);
         printed = 1;
     }
 #endif
@@ -559,9 +558,8 @@ static void get_cpu_vendor(struct cpuinfo_x86 *c)
         }
     }
 
-    printk_once(KERN_ERR
-            "CPU: vendor_id '%s' unknown, using generic init.\n" \
-            "CPU: Your system may be unstable.\n", v);
+    pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
+            "CPU: Your system may be unstable.\n", v);
 
     c->x86_vendor = X86_VENDOR_UNKNOWN;
     this_cpu = &default_cpu;
@@ -760,7 +758,7 @@ void __init early_cpu_init(void)
     int count = 0;
 
 #ifdef CONFIG_PROCESSOR_SELECT
-    printk(KERN_INFO "KERNEL supported cpus:\n");
+    pr_info("KERNEL supported cpus:\n");
 #endif
 
     for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
@@ -778,7 +776,7 @@ void __init early_cpu_init(void)
             for (j = 0; j < 2; j++) {
                 if (!cpudev->c_ident[j])
                     continue;
-                printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
+                pr_info("  %s %s\n", cpudev->c_vendor,
                     cpudev->c_ident[j]);
             }
         }
@@ -1061,7 +1059,7 @@ static void __print_cpu_msr(void)
         for (index = index_min; index < index_max; index++) {
             if (rdmsrl_safe(index, &val))
                 continue;
-            printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+            pr_info(" MSR%08x: %016llx\n", index, val);
         }
     }
 }
@@ -1100,19 +1098,19 @@ void print_cpu_info(struct cpuinfo_x86 *c)
     }
 
     if (vendor && !strstr(c->x86_model_id, vendor))
-        printk(KERN_CONT "%s ", vendor);
+        pr_cont("%s ", vendor);
 
     if (c->x86_model_id[0])
-        printk(KERN_CONT "%s", c->x86_model_id);
+        pr_cont("%s", c->x86_model_id);
     else
-        printk(KERN_CONT "%d86", c->x86);
+        pr_cont("%d86", c->x86);
 
-    printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
+    pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
     if (c->x86_mask || c->cpuid_level >= 0)
-        printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
+        pr_cont(", stepping: 0x%x)\n", c->x86_mask);
     else
-        printk(KERN_CONT ")\n");
+        pr_cont(")\n");
 
     print_cpu_msr(c);
 }
@@ -1438,7 +1436,7 @@ void cpu_init(void)
     show_ucode_info_early();
 
-    printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+    pr_info("Initializing CPU#%d\n", cpu);
 
     if (cpu_feature_enabled(X86_FEATURE_VME) ||
         cpu_has_tsc ||
arch/x86/kernel/cpu/cyrix.c
@@ -103,7 +103,7 @@ static void check_cx686_slop(struct cpuinfo_x86 *c)
     local_irq_restore(flags);
 
     if (ccr5 & 2) { /* possible wrong calibration done */
-        printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
+        pr_info("Recalibrating delay loop with SLOP bit reset\n");
         calibrate_delay();
         c->loops_per_jiffy = loops_per_jiffy;
     }
@@ -115,7 +115,7 @@ static void set_cx86_reorder(void)
 {
     u8 ccr3;
 
-    printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
+    pr_info("Enable Memory access reorder on Cyrix/NSC processor.\n");
     ccr3 = getCx86(CX86_CCR3);
     setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
@@ -128,7 +128,7 @@ static void set_cx86_reorder(void)
 static void set_cx86_memwb(void)
 {
-    printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+    pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
     /* CCR2 bit 2: unlock NW bit */
     setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
@@ -268,7 +268,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
          * VSA1 we work around however.
          */
-        printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
+        pr_info("Working around Cyrix MediaGX virtual DMA bugs.\n");
         isa_dma_bridge_buggy = 2;
 
         /* We do this before the PCI layer is running. However we
@@ -426,7 +426,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
         if (dir0 == 5 || dir0 == 3) {
             unsigned char ccr3;
             unsigned long flags;
-            printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
+            pr_info("Enabling CPUID on Cyrix processor.\n");
             local_irq_save(flags);
             ccr3 = getCx86(CX86_CCR3);
             /* enable MAPEN */
arch/x86/kernel/cpu/hypervisor.c
@@ -56,7 +56,7 @@ detect_hypervisor_vendor(void)
     }
 
     if (max_pri)
-        printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
+        pr_info("Hypervisor detected: %s\n", x86_hyper->name);
 }
 
 void init_hypervisor(struct cpuinfo_x86 *c)
arch/x86/kernel/cpu/intel.c
@@ -61,7 +61,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
      */
     if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
         c->microcode < 0x20e) {
-        printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
+        pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
         clear_cpu_cap(c, X86_FEATURE_PSE);
     }
@@ -140,7 +140,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
     if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
         rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
         if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
-            printk(KERN_INFO "Disabled fast string operations\n");
+            pr_info("Disabled fast string operations\n");
             setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
             setup_clear_cpu_cap(X86_FEATURE_ERMS);
         }
@@ -176,7 +176,7 @@ int ppro_with_ram_bug(void)
         boot_cpu_data.x86 == 6 &&
         boot_cpu_data.x86_model == 1 &&
         boot_cpu_data.x86_mask < 8) {
-        printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+        pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
         return 1;
     }
     return 0;
@@ -225,7 +225,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         set_cpu_bug(c, X86_BUG_F00F);
         if (!f00f_workaround_enabled) {
-            printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+            pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
             f00f_workaround_enabled = 1;
         }
     }
@@ -244,7 +244,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
      * Forcefully enable PAE if kernel parameter "forcepae" is present.
      */
     if (forcepae) {
-        printk(KERN_WARNING "PAE forced!\n");
+        pr_warn("PAE forced!\n");
         set_cpu_cap(c, X86_FEATURE_PAE);
         add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
     }
arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -444,7 +444,7 @@ static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
     err = amd_set_l3_disable_slot(nb, cpu, slot, val);
     if (err) {
         if (err == -EEXIST)
-            pr_warning("L3 slot %d in use/index already disabled!\n",
+            pr_warn("L3 slot %d in use/index already disabled!\n",
                    slot);
         return err;
     }
arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -115,7 +115,7 @@ static int raise_local(void)
     int cpu = m->extcpu;
 
     if (m->inject_flags & MCJ_EXCEPTION) {
-        printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
+        pr_info("Triggering MCE exception on CPU %d\n", cpu);
         switch (context) {
         case MCJ_CTX_IRQ:
             /*
@@ -128,15 +128,15 @@ static int raise_local(void)
             raise_exception(m, NULL);
             break;
         default:
-            printk(KERN_INFO "Invalid MCE context\n");
+            pr_info("Invalid MCE context\n");
             ret = -EINVAL;
         }
-        printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
+        pr_info("MCE exception done on CPU %d\n", cpu);
     } else if (m->status) {
-        printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
+        pr_info("Starting machine check poll CPU %d\n", cpu);
         raise_poll(m);
         mce_notify_irq();
-        printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
+        pr_info("Machine check poll done on CPU %d\n", cpu);
     } else
         m->finished = 0;
@@ -183,8 +183,7 @@ static void raise_mce(struct mce *m)
         start = jiffies;
         while (!cpumask_empty(mce_inject_cpumask)) {
             if (!time_before(jiffies, start + 2*HZ)) {
-                printk(KERN_ERR
-                "Timeout waiting for mce inject %lx\n",
+                pr_err("Timeout waiting for mce inject %lx\n",
                     *cpumask_bits(mce_inject_cpumask));
                 break;
             }
@@ -241,7 +240,7 @@ static int inject_init(void)
 {
     if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
         return -ENOMEM;
-    printk(KERN_INFO "Machine check injector initialized\n");
+    pr_info("Machine check injector initialized\n");
     register_mce_write_callback(mce_write);
     register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
                 "mce_notify");
arch/x86/kernel/cpu/mcheck/p5.c
@@ -26,14 +26,12 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
     rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
     rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
 
-    printk(KERN_EMERG
-        "CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
-        smp_processor_id(), loaddr, lotype);
+    pr_emerg("CPU#%d: Machine Check Exception: 0x%8X (type 0x%8X).\n",
+         smp_processor_id(), loaddr, lotype);
 
     if (lotype & (1<<5)) {
-        printk(KERN_EMERG
-            "CPU#%d: Possible thermal failure (CPU on fire ?).\n",
-            smp_processor_id());
+        pr_emerg("CPU#%d: Possible thermal failure (CPU on fire ?).\n",
+             smp_processor_id());
     }
 
     add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
@@ -61,12 +59,10 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
     /* Read registers before enabling: */
     rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
     rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
-    printk(KERN_INFO
-           "Intel old style machine check architecture supported.\n");
+    pr_info("Intel old style machine check architecture supported.\n");
 
     /* Enable MCE: */
     cr4_set_bits(X86_CR4_MCE);
-    printk(KERN_INFO
-           "Intel old style machine check reporting enabled on CPU#%d.\n",
-           smp_processor_id());
+    pr_info("Intel old style machine check reporting enabled on CPU#%d.\n",
+        smp_processor_id());
 }
arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -190,7 +190,7 @@ static int therm_throt_process(bool new_event, int event, int level)
     /* if we just entered the thermal event */
     if (new_event) {
         if (event == THERMAL_THROTTLING_EVENT)
-            printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+            pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                 this_cpu,
                 level == CORE_LEVEL ? "Core" : "Package",
                 state->count);
@@ -198,8 +198,7 @@ static int therm_throt_process(bool new_event, int event, int level)
     }
     if (old_event) {
         if (event == THERMAL_THROTTLING_EVENT)
-            printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
-                this_cpu,
+            pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
                 level == CORE_LEVEL ? "Core" : "Package");
         return 1;
     }
@@ -417,8 +416,8 @@ static void intel_thermal_interrupt(void)
 static void unexpected_thermal_interrupt(void)
 {
-    printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
-            smp_processor_id());
+    pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
+           smp_processor_id());
 }
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
@@ -499,7 +498,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
     if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
         if (system_state == SYSTEM_BOOTING)
-            printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+            pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
         return;
     }
@@ -557,8 +556,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
     l = apic_read(APIC_LVTTHMR);
     apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
 
-    printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
-               tm2 ? "TM2" : "TM1");
+    pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
+             tm2 ? "TM2" : "TM1");
 
     /* enable thermal throttle processing */
     atomic_set(&therm_throt_en, 1);
arch/x86/kernel/cpu/mcheck/threshold.c
@@ -12,8 +12,8 @@
 
 static void default_threshold_interrupt(void)
 {
-    printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
-            THRESHOLD_APIC_VECTOR);
+    pr_err("Unexpected threshold interrupt at vector %x\n",
+           THRESHOLD_APIC_VECTOR);
 }
 
 void (*mce_threshold_vector)(void) = default_threshold_interrupt;
arch/x86/kernel/cpu/mcheck/winchip.c
@@ -17,7 +17,7 @@ static void winchip_machine_check(struct pt_regs *regs, long error_code)
 {
     ist_enter(regs);
 
-    printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
+    pr_emerg("CPU0: Machine Check Exception.\n");
     add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
 
     ist_exit(regs);
@@ -39,6 +39,5 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
     cr4_set_bits(X86_CR4_MCE);
 
-    printk(KERN_INFO
-           "Winchip machine check reporting enabled on CPU#0.\n");
+    pr_info("Winchip machine check reporting enabled on CPU#0.\n");
 }
arch/x86/kernel/cpu/microcode/amd.c
@@ -953,7 +953,7 @@ struct microcode_ops * __init init_amd_microcode(void)
     struct cpuinfo_x86 *c = &boot_cpu_data;
 
     if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-        pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+        pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
         return NULL;
     }
arch/x86/kernel/cpu/mshyperv.c
@@ -161,8 +161,8 @@ static void __init ms_hyperv_init_platform(void)
     ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
     ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
-    printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
-           ms_hyperv.features, ms_hyperv.hints);
+    pr_info("HyperV: features 0x%x, hints 0x%x\n",
+        ms_hyperv.features, ms_hyperv.hints);
 
 #ifdef CONFIG_X86_LOCAL_APIC
     if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
@@ -174,8 +174,8 @@ static void __init ms_hyperv_init_platform(void)
         rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
         hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
         lapic_timer_frequency = hv_lapic_frequency;
-        printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
-               lapic_timer_frequency);
+        pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+            lapic_timer_frequency);
     }
 #endif
arch/x86/kernel/cpu/mtrr/centaur.c
@@ -103,7 +103,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
      */
     if (type != MTRR_TYPE_WRCOMB &&
         (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
-        pr_warning("mtrr: only write-combining%s supported\n",
+        pr_warn("mtrr: only write-combining%s supported\n",
                centaur_mcr_type ? " and uncacheable are" : " is");
         return -EINVAL;
     }
arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -57,9 +57,9 @@ static int __initdata nr_range;
 static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
 
 static int __initdata debug_print;
-#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
+#define Dprintk(x...) do { if (debug_print) pr_debug(x); } while (0)
 
-#define BIOS_BUG_MSG KERN_WARNING \
+#define BIOS_BUG_MSG \
     "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
 
 static int __init
@@ -81,9 +81,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
             base, base + size);
     }
     if (debug_print) {
-        printk(KERN_DEBUG "After WB checking\n");
+        pr_debug("After WB checking\n");
         for (i = 0; i < nr_range; i++)
-            printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+            pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
                 range[i].start, range[i].end);
     }
@@ -101,7 +101,7 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
             (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
             (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
             /* Var MTRR contains UC entry below 1M? Skip it: */
-            printk(BIOS_BUG_MSG, i);
+            pr_warn(BIOS_BUG_MSG, i);
             if (base + size <= (1<<(20-PAGE_SHIFT)))
                 continue;
             size -= (1<<(20-PAGE_SHIFT)) - base;
@@ -114,11 +114,11 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
                         extra_remove_base + extra_remove_size);
 
     if (debug_print) {
-        printk(KERN_DEBUG "After UC checking\n");
+        pr_debug("After UC checking\n");
         for (i = 0; i < RANGE_NUM; i++) {
             if (!range[i].end)
                 continue;
-            printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+            pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
                 range[i].start, range[i].end);
         }
     }
@@ -126,9 +126,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
     /* sort the ranges */
     nr_range = clean_sort_range(range, RANGE_NUM);
     if (debug_print) {
-        printk(KERN_DEBUG "After sorting\n");
+        pr_debug("After sorting\n");
         for (i = 0; i < nr_range; i++)
-            printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+            pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
                 range[i].start, range[i].end);
     }
@@ -544,7 +544,7 @@ static void __init print_out_mtrr_range_state(void)
         start_base = to_size_factor(start_base, &start_factor),
         type = range_state[i].type;
 
-        printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
+        pr_debug("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
             i, start_base, start_factor,
             size_base, size_factor,
             (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
@@ -713,7 +713,7 @@ int __init mtrr_cleanup(unsigned address_bits)
         return 0;
 
     /* Print original var MTRRs at first, for debugging: */
-    printk(KERN_DEBUG "original variable MTRRs\n");
+    pr_debug("original variable MTRRs\n");
     print_out_mtrr_range_state();
 
     memset(range, 0, sizeof(range));
@@ -733,7 +733,7 @@ int __init mtrr_cleanup(unsigned address_bits)
                     x_remove_base, x_remove_size);
 
     range_sums = sum_ranges(range, nr_range);
-    printk(KERN_INFO "total RAM covered: %ldM\n",
+    pr_info("total RAM covered: %ldM\n",
         range_sums >> (20 - PAGE_SHIFT));
 
     if (mtrr_chunk_size && mtrr_gran_size) {
@@ -745,12 +745,11 @@ int __init mtrr_cleanup(unsigned address_bits)
         if (!result[i].bad) {
             set_var_mtrr_all(address_bits);
-            printk(KERN_DEBUG "New variable MTRRs\n");
+            pr_debug("New variable MTRRs\n");
             print_out_mtrr_range_state();
             return 1;
         }
-        printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
-               "will find optimal one\n");
+        pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n");
     }
 
     i = 0;
@@ -768,7 +767,7 @@ int __init mtrr_cleanup(unsigned address_bits)
                       x_remove_base, x_remove_size, i);
         if (debug_print) {
             mtrr_print_out_one_result(i);
-            printk(KERN_INFO "\n");
+            pr_info("\n");
         }
 
         i++;
@@ -779,7 +778,7 @@ int __init mtrr_cleanup(unsigned address_bits)
     index_good = mtrr_search_optimal_index();
 
     if (index_good != -1) {
-        printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
+        pr_info("Found optimal setting for mtrr clean up\n");
         i = index_good;
         mtrr_print_out_one_result(i);
@@ -790,7 +789,7 @@ int __init mtrr_cleanup(unsigned address_bits)
         gran_size <<= 10;
         x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
         set_var_mtrr_all(address_bits);
-        printk(KERN_DEBUG "New variable MTRRs\n");
+        pr_debug("New variable MTRRs\n");
         print_out_mtrr_range_state();
         return 1;
     } else {
@@ -799,8 +798,8 @@ int __init mtrr_cleanup(unsigned address_bits)
             mtrr_print_out_one_result(i);
     }
 
-    printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
-    printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");
+    pr_info("mtrr_cleanup: can not find optimal value\n");
+    pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n");
 
     return 0;
 }
@@ -918,7 +917,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
     /* kvm/qemu doesn't have mtrr set right, don't trim them all: */
     if (!highest_pfn) {
-        printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
+        pr_info("CPU MTRRs all blank - virtualized system.\n");
         return 0;
     }
@@ -973,7 +972,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
              end_pfn);
 
     if (total_trim_size) {
-        pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
+        pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n",
+            total_trim_size >> 20);
 
         if (!changed_by_mtrr_cleanup)
             WARN_ON(1);
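One behavioral caveat worth flagging for the cleanup.c hunks above (a general property of these helpers, not something this commit changes separately): printk(KERN_DEBUG ...) always emits at debug level, while pr_debug() compiles away unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG enables the site at run time. So the Dprintk() messages, already gated on the debug_print boot flag, now additionally need DEBUG/dynamic debug. A simplified sketch of the non-dynamic-debug definition, following include/linux/printk.h:

/* Simplified: without CONFIG_DYNAMIC_DEBUG, pr_debug() only emits when
 * the translation unit defines DEBUG; no_printk() type-checks the
 * arguments but generates no output. */
#ifdef DEBUG
#define pr_debug(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif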
arch/x86/kernel/cpu/mtrr/generic.c
@@ -55,7 +55,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
     rdmsr(MSR_K8_SYSCFG, lo, hi);
     if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
-        printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
+        pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
                " not cleared by BIOS, clearing this bit\n",
                smp_processor_id());
         lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
@@ -501,14 +501,14 @@ void __init mtrr_state_warn(void)
     if (!mask)
         return;
 
     if (mask & MTRR_CHANGE_MASK_FIXED)
-        pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+        pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
     if (mask & MTRR_CHANGE_MASK_VARIABLE)
-        pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
+        pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
     if (mask & MTRR_CHANGE_MASK_DEFTYPE)
-        pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+        pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
 
-    printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
-    printk(KERN_INFO "mtrr: corrected configuration.\n");
+    pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
+    pr_info("mtrr: corrected configuration.\n");
 }
 
 /*
@@ -519,8 +519,7 @@ void __init mtrr_state_warn(void)
 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
 {
     if (wrmsr_safe(msr, a, b) < 0) {
-        printk(KERN_ERR
-            "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
+        pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
             smp_processor_id(), msr, a, b);
     }
 }
@@ -607,7 +606,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
         tmp |= ~((1ULL<<(hi - 1)) - 1);
 
         if (tmp != mask) {
-            printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+            pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
             add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
             mask = tmp;
         }
@@ -858,13 +857,13 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
         boot_cpu_data.x86_model == 1 &&
         boot_cpu_data.x86_mask <= 7) {
         if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
-            pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+            pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
             return -EINVAL;
         }
         if (!(base + size < 0x70000 || base > 0x7003F) &&
             (type == MTRR_TYPE_WRCOMB
              || type == MTRR_TYPE_WRBACK)) {
-            pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+            pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
             return -EINVAL;
         }
     }
@@ -878,7 +877,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
          lbase = lbase >> 1, last = last >> 1)
         ;
     if (lbase != last) {
-        pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
+        pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
         return -EINVAL;
     }
     return 0;
arch/x86/kernel/cpu/mtrr/main.c
@@ -300,24 +300,24 @@ int mtrr_add_page(unsigned long base, unsigned long size,
         return error;
 
     if (type >= MTRR_NUM_TYPES) {
-        pr_warning("mtrr: type: %u invalid\n", type);
+        pr_warn("mtrr: type: %u invalid\n", type);
         return -EINVAL;
     }
 
     /* If the type is WC, check that this processor supports it */
     if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
-        pr_warning("mtrr: your processor doesn't support write-combining\n");
+        pr_warn("mtrr: your processor doesn't support write-combining\n");
         return -ENOSYS;
     }
 
     if (!size) {
-        pr_warning("mtrr: zero sized request\n");
+        pr_warn("mtrr: zero sized request\n");
         return -EINVAL;
     }
 
     if ((base | (base + size - 1)) >>
         (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
-        pr_warning("mtrr: base or size exceeds the MTRR width\n");
+        pr_warn("mtrr: base or size exceeds the MTRR width\n");
         return -EINVAL;
     }
@@ -348,7 +348,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
                 } else if (types_compatible(type, ltype))
                     continue;
             }
-            pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
+            pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing"
                    " 0x%lx000,0x%lx000\n", base, size, lbase,
                    lsize);
             goto out;
@@ -357,7 +357,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
         if (ltype != type) {
             if (types_compatible(type, ltype))
                 continue;
-            pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+            pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                    base, size, mtrr_attrib_to_str(ltype),
                    mtrr_attrib_to_str(type));
             goto out;
@@ -395,7 +395,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 static int mtrr_check(unsigned long base, unsigned long size)
 {
     if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-        pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
+        pr_warn("mtrr: size and base must be multiples of 4 kiB\n");
         pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
         dump_stack();
         return -1;
@@ -493,16 +493,16 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
         }
     }
     if (reg >= max) {
-        pr_warning("mtrr: register: %d too big\n", reg);
+        pr_warn("mtrr: register: %d too big\n", reg);
         goto out;
     }
     mtrr_if->get(reg, &lbase, &lsize, &ltype);
     if (lsize < 1) {
-        pr_warning("mtrr: MTRR %d not used\n", reg);
+        pr_warn("mtrr: MTRR %d not used\n", reg);
         goto out;
     }
     if (mtrr_usage_table[reg] < 1) {
-        pr_warning("mtrr: reg: %d has count=0\n", reg);
+        pr_warn("mtrr: reg: %d has count=0\n", reg);
         goto out;
     }
     if (--mtrr_usage_table[reg] < 1)
arch/x86/kernel/cpu/perf_event.c
@@ -254,15 +254,16 @@ static bool check_hw_exists(void)
      * We still allow the PMU driver to operate:
      */
     if (bios_fail) {
-        printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
-        printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
+        pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
+        pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
+               reg_fail, val_fail);
     }
 
     return true;
 
 msr_fail:
-    printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
-    printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+    pr_cont("Broken PMU hardware detected, using software events only.\n");
+    pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
         boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
         reg, val_new);
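The msr_fail hunk above keeps an unusual idiom: the severity is chosen at run time by feeding KERN_INFO or KERN_ERR through %s at the head of the message. This works because the KERN_<LEVEL> markers are ordinary prefix strings (an SOH byte plus a level digit), and the printk core of this era parses the level marker out of the formatted text. A hedged illustration — hypothetical helper for this note, not kernel API:

/* Hypothetical helper illustrating the run-time level trick used in
 * check_hw_exists(): the chosen KERN_<LEVEL> prefix lands at the very
 * start of the formatted message, where printk looks for it. */
static void report_perfctr_failure(bool virtualized, unsigned int reg, u64 val)
{
    printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
           virtualized ? KERN_INFO : KERN_ERR, reg, val);
}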
arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -670,7 +670,7 @@ static __init int perf_event_ibs_init(void)
     perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
 
     register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
-    printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
+    pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
 
     return 0;
 }
@@ -774,14 +774,14 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
         pci_read_config_dword(cpu_cfg, IBSCTL, &value);
         if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
             pci_dev_put(cpu_cfg);
-            printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
-                   "IBSCTL = 0x%08x\n", value);
+            pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
+                 value);
             return -EINVAL;
         }
     } while (1);
 
     if (!nodes) {
-        printk(KERN_DEBUG "No CPU node configured for IBS\n");
+        pr_debug("No CPU node configured for IBS\n");
         return -ENODEV;
     }
@@ -810,7 +810,7 @@ static void force_ibs_eilvt_setup(void)
     preempt_enable();
 
     if (offset == APIC_EILVT_NR_MAX) {
-        printk(KERN_DEBUG "No EILVT entry available\n");
+        pr_debug("No EILVT entry available\n");
         return;
     }
arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -536,7 +536,7 @@ static int __init amd_uncore_init(void)
         if (ret)
             goto fail_nb;
 
-        printk(KERN_INFO "perf: AMD NB counters detected\n");
+        pr_info("perf: AMD NB counters detected\n");
         ret = 0;
     }
@@ -550,7 +550,7 @@ static int __init amd_uncore_init(void)
         if (ret)
             goto fail_l2;
 
-        printk(KERN_INFO "perf: AMD L2I counters detected\n");
+        pr_info("perf: AMD L2I counters detected\n");
         ret = 0;
     }
arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -1325,13 +1325,13 @@ void __init intel_ds_init(void)
         switch (format) {
         case 0:
-            printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+            pr_cont("PEBS fmt0%c, ", pebs_type);
             x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
             x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
             break;
 
         case 1:
-            printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+            pr_cont("PEBS fmt1%c, ", pebs_type);
             x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
             x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
             break;
@@ -1351,7 +1351,7 @@ void __init intel_ds_init(void)
             break;
 
         default:
-            printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+            pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
             x86_pmu.pebs = 0;
         }
     }
arch/x86/kernel/cpu/rdrand.c
@@ -51,7 +51,7 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
     for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
         if (!rdrand_long(&tmp)) {
             clear_cpu_cap(c, X86_FEATURE_RDRAND);
-            printk_once(KERN_WARNING "rdrand: disabled\n");
+            pr_warn_once("rdrand: disabled\n");
             return;
         }
     }
arch/x86/kernel/cpu/topology.c
@@ -87,10 +87,10 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
     c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
     if (!printed) {
-        printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+        pr_info("CPU: Physical Processor ID: %d\n",
                c->phys_proc_id);
         if (c->x86_max_cores > 1)
-            printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+            pr_info("CPU: Processor Core ID: %d\n",
                    c->cpu_core_id);
         printed = 1;
     }
arch/x86/kernel/cpu/transmeta.c
@@ -33,7 +33,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
     if (max >= 0x80860001) {
         cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
         if (cpu_rev != 0x02000000) {
-            printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
+            pr_info("CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
                 (cpu_rev >> 24) & 0xff,
                 (cpu_rev >> 16) & 0xff,
                 (cpu_rev >> 8) & 0xff,
@@ -44,10 +44,10 @@ static void init_transmeta(struct cpuinfo_x86 *c)
     if (max >= 0x80860002) {
         cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
         if (cpu_rev == 0x02000000) {
-            printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
+            pr_info("CPU: Processor revision %08X, %u MHz\n",
                 new_cpu_rev, cpu_freq);
         }
-        printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
+        pr_info("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
             (cms_rev1 >> 24) & 0xff,
             (cms_rev1 >> 16) & 0xff,
             (cms_rev1 >> 8) & 0xff,
@@ -76,7 +76,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
             (void *)&cpu_info[56],
             (void *)&cpu_info[60]);
         cpu_info[64] = '\0';
-        printk(KERN_INFO "CPU: %s\n", cpu_info);
+        pr_info("CPU: %s\n", cpu_info);
     }
 
     /* Unhide possibly hidden capability flags */
arch/x86/kernel/cpu/vmware.c
@@ -62,7 +62,7 @@ static unsigned long vmware_get_tsc_khz(void)
     tsc_hz = eax | (((uint64_t)ebx) << 32);
     do_div(tsc_hz, 1000);
     BUG_ON(tsc_hz >> 32);
-    printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+    pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
         (unsigned long) tsc_hz / 1000,
         (unsigned long) tsc_hz % 1000);
@@ -84,8 +84,7 @@ static void __init vmware_platform_setup(void)
     if (ebx != UINT_MAX)
         x86_platform.calibrate_tsc = vmware_get_tsc_khz;
     else
-        printk(KERN_WARNING
-               "Failed to get TSC freq from the hypervisor\n");
+        pr_warn("Failed to get TSC freq from the hypervisor\n");
 }
 
 /*
arch/x86/kernel/mpparse.c
@@ -408,7 +408,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
     processor.cpuflag = CPU_ENABLED;
     processor.cpufeature = (boot_cpu_data.x86 << 8) |
         (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-    processor.featureflag = boot_cpu_data.x86_capability[0];
+    processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
     processor.reserved[0] = 0;
     processor.reserved[1] = 0;
     for (i = 0; i < 2; i++) {
arch/x86/lguest/boot.c
@@ -1535,7 +1535,7 @@ __init void lguest_init(void)
      */
     cpu_detect(&new_cpu_data);
     /* head.S usually sets up the first capability word, so do it here. */
-    new_cpu_data.x86_capability[0] = cpuid_edx(1);
+    new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
 
     /* Math is always hard! */
     set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
arch/x86/xen/enlighten.c
@@ -1654,7 +1654,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
     cpu_detect(&new_cpu_data);
     set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
     new_cpu_data.wp_works_ok = 1;
-    new_cpu_data.x86_capability[0] = cpuid_edx(1);
+    new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
 #endif
 
     if (xen_start_info->mod_start) {