Commit 29a9994b authored by Paolo Ciarrocchi, committed by Ingo Molnar

x86: coding style fixes for arch/x86/kernel/cpu/centaur.c

Kills more than 150 errors/warnings
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent ca5d3f14
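The hunks below are mechanical kernel coding-style cleanups of the kind reported by scripts/checkpatch.pl (for example, scripts/checkpatch.pl -f arch/x86/kernel/cpu/centaur.c): spaces around assignment and comparison operators, a space after the if/while/for/switch keywords, no space between a function name and its opening parenthesis, braces joined to "else if", and trailing whitespace removed. The tiny standalone program below is a hypothetical before/after illustration of that pattern, not code from the file itself; only the identifiers top and clip are borrowed from the diff.

/* style_demo.c - hypothetical illustration of the cleanups applied below.
 * Build with: gcc -Wall -o style_demo style_demo.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int top = 5, clip = 3;

	/* Old style, flagged by checkpatch:
	 *
	 *	if(top>clip)
	 *		top=clip;
	 */

	/* Same logic in kernel style: space after "if", spaces around operators. */
	if (top > clip)
		top = clip;

	printf("top clipped to %u\n", top);
	return 0;
}

Nothing functional changes in either form; the generated code is identical, only the source layout differs.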
@@ -11,21 +11,21 @@
 static u32 __cpuinit power2(u32 x)
 {
-	u32 s=1;
-	while(s<=x)
-		s<<=1;
-	return s>>=1;
+	u32 s = 1;
+	while(s <= x)
+		s <<= 1;
+	return s >>= 1;
 }
 /*
  * Set up an actual MCR
  */
 static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 {
 	u32 lo, hi;
 	hi = base & ~0xFFF;
 	lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
 	lo &= ~0xFFF; /* Remove the ctrl value bits */
@@ -45,7 +45,7 @@ static u32 __cpuinit ramtop(void) /* 16388 */
 	int i;
 	u32 top = 0;
 	u32 clip = 0xFFFFFFFFUL;
 	for (i = 0; i < e820.nr_map; i++) {
 		unsigned long start, end;
@@ -55,10 +55,10 @@ static u32 __cpuinit ramtop(void) /* 16388 */
 		 * Don't MCR over reserved space. Ignore the ISA hole
 		 * we frob around that catastrophe already
 		 */
 		if (e820.map[i].type == E820_RESERVED)
 		{
-			if(e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
+			if (e820.map[i].addr >= 0x100000UL && e820.map[i].addr < clip)
 				clip = e820.map[i].addr;
 			continue;
 		}
@@ -71,19 +71,19 @@ static u32 __cpuinit ramtop(void) /* 16388 */
 	}
 	/* Everything below 'top' should be RAM except for the ISA hole.
 	   Because of the limited MCR's we want to map NV/ACPI into our
-	   MCR range for gunk in RAM
+	   MCR range for gunk in RAM
 	   Clip might cause us to MCR insufficient RAM but that is an
 	   acceptable failure mode and should only bite obscure boxes with
 	   a VESA hole at 15Mb
 	   The second case Clip sometimes kicks in is when the EBDA is marked
 	   as reserved. Again we fail safe with reasonable results
 	*/
-	if(top>clip)
-		top=clip;
+	if(top > clip)
+		top = clip;
 	return top;
 }
@@ -99,8 +99,8 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 	u32 top = root;
 	u32 floor = 0;
 	int ct = 0;
-	while(ct<nr)
+	while (ct < nr)
 	{
 		u32 fspace = 0;
@@ -108,7 +108,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 		 * Find the largest block we will fill going upwards
 		 */
-		u32 high = power2(mem-top);
+		u32 high = power2(mem-top);
 		/*
 		 * Find the largest block we will fill going downwards
@@ -119,39 +119,37 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 		/*
 		 * Don't fill below 1Mb going downwards as there
 		 * is an ISA hole in the way.
-		 */
-		if(base <= 1024*1024)
+		 */
+		if (base <= 1024*1024)
 			low = 0;
 		/*
 		 * See how much space we could cover by filling below
 		 * the ISA hole
 		 */
-		if(floor == 0)
+		if (floor == 0)
 			fspace = 512*1024;
-		else if(floor ==512*1024)
+		else if (floor == 512*1024)
 			fspace = 128*1024;
 		/* And forget ROM space */
 		/*
 		 * Now install the largest coverage we get
 		 */
-		if(fspace > high && fspace > low)
+		if (fspace > high && fspace > low)
 		{
 			centaur_mcr_insert(ct, floor, fspace, key);
 			floor += fspace;
 		}
-		else if(high > low)
-		{
+		else if (high > low) {
 			centaur_mcr_insert(ct, top, high, key);
 			top += high;
 		}
-		else if(low > 0)
-		{
+		else if (low > 0) {
 			base -= low;
 			centaur_mcr_insert(ct, base, low, key);
 		}
@@ -162,7 +160,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 	 * We loaded ct values. We now need to set the mask. The caller
 	 * must do this bit.
 	 */
 	return ct;
 }
@@ -173,7 +171,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
 	 * Allocate up to 6 mcrs to mark as much of ram as possible
 	 * as write combining and weak write ordered.
 	 *
-	 * To experiment with: Linux never uses stack operations for
+	 * To experiment with: Linux never uses stack operations for
 	 * mmio spaces so we could globally enable stack operation wc
 	 *
 	 * Load the registers with type 31 - full write combining, all
@@ -184,8 +182,8 @@ static void __cpuinit centaur_create_optimal_mcr(void)
 	/*
 	 * Wipe unused MCRs
 	 */
-	for(i=used;i<8;i++)
+	for (i = used; i < 8; i++)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
@@ -205,21 +203,21 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
 	 */
 	int used = centaur_mcr_compute(6, 25);
 	/*
 	 * Mark the registers we are using.
 	 */
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	for(i=0;i<used;i++)
-		lo|=1<<(9+i);
+	for (i = 0; i < used; i++)
+		lo |= 1<<(9+i);
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 	/*
 	 * Wipe unused MCRs
 	 */
-	for(i=used;i<8;i++)
+	for (i = used; i < 8; i++)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
@@ -231,9 +229,9 @@ static void __cpuinit winchip2_unprotect_mcr(void)
 {
 	u32 lo, hi;
 	u32 key;
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo&=~0x1C0; /* blank bits 8-6 */
+	lo &= ~0x1C0; /* blank bits 8-6 */
 	key = (lo>>17) & 7;
 	lo |= key<<6; /* replace with unlock key */
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
@@ -242,9 +240,9 @@ static void __cpuinit winchip2_unprotect_mcr(void)
 static void __cpuinit winchip2_protect_mcr(void)
 {
 	u32 lo, hi;
 	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo&=~0x1C0; /* blank bits 8-6 */
+	lo &= ~0x1C0; /* blank bits 8-6 */
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 }
 #endif /* CONFIG_X86_OOSTORE */
@@ -267,17 +265,17 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 	/* enable ACE unit, if present and disabled */
 	if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
-		rdmsr (MSR_VIA_FCR, lo, hi);
+		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= ACE_FCR; /* enable ACE unit */
-		wrmsr (MSR_VIA_FCR, lo, hi);
+		wrmsr(MSR_VIA_FCR, lo, hi);
 		printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
 	}
 	/* enable RNG unit, if present and disabled */
 	if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
-		rdmsr (MSR_VIA_RNG, lo, hi);
+		rdmsr(MSR_VIA_RNG, lo, hi);
 		lo |= RNG_ENABLE; /* enable RNG unit */
-		wrmsr (MSR_VIA_RNG, lo, hi);
+		wrmsr(MSR_VIA_RNG, lo, hi);
 		printk(KERN_INFO "CPU: Enabled h/w RNG\n");
 	}
@@ -288,15 +286,15 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 	}
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
-	if (c->x86_model >=6 && c->x86_model <= 9) {
-		rdmsr (MSR_VIA_FCR, lo, hi);
+	if (c->x86_model >= 6 && c->x86_model <= 9) {
+		rdmsr(MSR_VIA_FCR, lo, hi);
 		lo |= (1<<1 | 1<<7);
-		wrmsr (MSR_VIA_FCR, lo, hi);
+		wrmsr(MSR_VIA_FCR, lo, hi);
 		set_bit(X86_FEATURE_CX8, c->x86_capability);
 	}
 	/* Before Nehemiah, the C3's had 3dNOW! */
-	if (c->x86_model >=6 && c->x86_model <9)
+	if (c->x86_model >= 6 && c->x86_model < 9)
 		set_bit(X86_FEATURE_3DNOW, c->x86_capability);
 	get_model_name(c);
@@ -306,31 +304,31 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
 static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 {
 	enum {
-		ECX8=1<<1,
-		EIERRINT=1<<2,
-		DPM=1<<3,
-		DMCE=1<<4,
-		DSTPCLK=1<<5,
-		ELINEAR=1<<6,
-		DSMC=1<<7,
-		DTLOCK=1<<8,
-		EDCTLB=1<<8,
-		EMMX=1<<9,
-		DPDC=1<<11,
-		EBRPRED=1<<12,
-		DIC=1<<13,
-		DDC=1<<14,
-		DNA=1<<15,
-		ERETSTK=1<<16,
-		E2MMX=1<<19,
-		EAMD3D=1<<20,
+		ECX8 = 1<<1,
+		EIERRINT = 1<<2,
+		DPM = 1<<3,
+		DMCE = 1<<4,
+		DSTPCLK = 1<<5,
+		ELINEAR = 1<<6,
+		DSMC = 1<<7,
+		DTLOCK = 1<<8,
+		EDCTLB = 1<<8,
+		EMMX = 1<<9,
+		DPDC = 1<<11,
+		EBRPRED = 1<<12,
+		DIC = 1<<13,
+		DDC = 1<<14,
+		DNA = 1<<15,
+		ERETSTK = 1<<16,
+		E2MMX = 1<<19,
+		EAMD3D = 1<<20,
 	};
 	char *name;
-	u32 fcr_set=0;
-	u32 fcr_clr=0;
-	u32 lo,hi,newlo;
-	u32 aa,bb,cc,dd;
+	u32 fcr_set = 0;
+	u32 fcr_clr = 0;
+	u32 lo, hi, newlo;
+	u32 aa, bb, cc, dd;
 	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
 	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
@@ -338,12 +336,12 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 	switch (c->x86) {
-	case 5:
-		switch(c->x86_model) {
+	case 5:
+		switch (c->x86_model) {
 		case 4:
-			name="C6";
-			fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
-			fcr_clr=DPDC;
+			name = "C6";
+			fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
+			fcr_clr = DPDC;
 			printk(KERN_NOTICE "Disabling bugged TSC.\n");
 			clear_bit(X86_FEATURE_TSC, c->x86_capability);
 #ifdef CONFIG_X86_OOSTORE
@@ -351,29 +349,29 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 			/* Enable
 			   write combining on non-stack, non-string
 			   write combining on string, all types
-			   weak write ordering
-			   The C6 original lacks weak read order
+			   weak write ordering
+			   The C6 original lacks weak read order
 			   Note 0x120 is write only on Winchip 1 */
 			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
+#endif
 			break;
 		case 8:
-			switch(c->x86_mask) {
+			switch (c->x86_mask) {
 			default:
-				name="2";
+				name = "2";
 				break;
 			case 7 ... 9:
-				name="2A";
+				name = "2A";
 				break;
 			case 10 ... 15:
-				name="2B";
+				name = "2B";
 				break;
 			}
-			fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-			fcr_clr=DPDC;
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+			fcr_clr = DPDC;
 #ifdef CONFIG_X86_OOSTORE
 			winchip2_unprotect_mcr();
 			winchip2_create_optimal_mcr();
@@ -381,17 +379,17 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 			/* Enable
 			   write combining on non-stack, non-string
 			   write combining on string, all types
-			   weak write ordering
+			   weak write ordering
 			*/
-			lo|=31;
+			lo |= 31;
 			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 			winchip2_protect_mcr();
 #endif
 			break;
 		case 9:
-			name="3";
-			fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
-			fcr_clr=DPDC;
+			name = "3";
+			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D;
+			fcr_clr = DPDC;
 #ifdef CONFIG_X86_OOSTORE
 			winchip2_unprotect_mcr();
 			winchip2_create_optimal_mcr();
@@ -399,50 +397,50 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 			/* Enable
 			   write combining on non-stack, non-string
 			   write combining on string, all types
-			   weak write ordering
+			   weak write ordering
 			*/
-			lo|=31;
+			lo |= 31;
 			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 			winchip2_protect_mcr();
 #endif
 			break;
 		default:
-			name="??";
+			name = "??";
 		}
 		rdmsr(MSR_IDT_FCR1, lo, hi);
-		newlo=(lo|fcr_set) & (~fcr_clr);
+		newlo = (lo|fcr_set) & (~fcr_clr);
-		if (newlo!=lo) {
-			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo );
-			wrmsr(MSR_IDT_FCR1, newlo, hi );
+		if (newlo != lo) {
+			printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n", lo, newlo);
+			wrmsr(MSR_IDT_FCR1, newlo, hi);
 		} else {
-			printk(KERN_INFO "Centaur FCR is 0x%X\n",lo);
+			printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
 		}
 		/* Emulate MTRRs using Centaur's MCR. */
 		set_bit(X86_FEATURE_CENTAUR_MCR, c->x86_capability);
 		/* Report CX8 */
 		set_bit(X86_FEATURE_CX8, c->x86_capability);
 		/* Set 3DNow! on Winchip 2 and above. */
-		if (c->x86_model >=8)
+		if (c->x86_model >= 8)
 			set_bit(X86_FEATURE_3DNOW, c->x86_capability);
 		/* See if we can find out some more. */
-		if ( cpuid_eax(0x80000000) >= 0x80000005 ) {
+		if (cpuid_eax(0x80000000) >= 0x80000005) {
 			/* Yes, we can. */
-			cpuid(0x80000005,&aa,&bb,&cc,&dd);
+			cpuid(0x80000005, &aa, &bb, &cc, &dd);
 			/* Add L1 data and code cache sizes. */
 			c->x86_cache_size = (cc>>24)+(dd>>24);
 		}
-		sprintf( c->x86_model_id, "WinChip %s", name );
+		sprintf(c->x86_model_id, "WinChip %s", name);
 		break;
-	case 6:
+	case 6:
 		init_c3(c);
 		break;
 	}
 }
-static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigned int size)
+static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* VIA C3 CPUs (670-68F) need further shifting. */
 	if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
@@ -451,8 +449,8 @@ static unsigned int __cpuinit centaur_size_cache(struct cpuinfo_x86 * c, unsigne
 	/* VIA also screwed up Nehemiah stepping 1, and made
 	   it return '65KB' instead of '64KB'
 	   - Note, it seems this may only be in engineering samples. */
-	if ((c->x86==6) && (c->x86_model==9) && (c->x86_mask==1) && (size==65))
-		size -=1;
+	if ((c->x86 == 6) && (c->x86_model == 9) && (c->x86_mask == 1) && (size == 65))
+		size -= 1;
 	return size;
 }
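As an aside on the unchanged helpers visible in the diff: power2() rounds its argument down to the nearest power of two, and centaur_mcr_insert() turns that power-of-two size into an address mask with ~(size-1) before clearing the low control bits. The short userspace sketch below replays that arithmetic for illustration only; the 96MB sample figure is an assumption, not a value from the commit.

/* mcr_mask_demo.c - userspace sketch of the power2()/mask arithmetic above.
 * Build with: gcc -Wall -o mcr_mask_demo mcr_mask_demo.c
 */
#include <stdio.h>
#include <stdint.h>

/* Largest power of two not exceeding x, mirroring the kernel's power2(). */
static uint32_t power2(uint32_t x)
{
	uint32_t s = 1;

	while (s <= x)
		s <<= 1;
	return s >> 1;
}

int main(void)
{
	uint32_t ram  = 96 * 1024 * 1024;	/* assumed top of RAM: 96MB */
	uint32_t size = power2(ram);		/* -> 64MB, the largest coverable block */
	uint32_t lo   = ~(size - 1);		/* power-of-two size -> address mask */

	lo &= ~0xFFF;				/* drop the MCR control-value bits */
	printf("size 0x%08x, mask 0x%08x\n", (unsigned int)size, (unsigned int)lo);
	return 0;
}

With the assumed 96MB of RAM the sketch prints size 0x04000000 and mask 0xfc000000, the kind of value centaur_mcr_insert() combines with the attribute key and writes into an MCR.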