Commit 3d1712c9 authored by Akinobu Mita, committed by Linus Torvalds

[PATCH] x86_64: {set,clear,test}_bit() related cleanup and pci_mmcfg_init() fix

While working on this patch set, I found several possible cleanups on x86-64
and ia64.

akpm: I stole this from Andi's queue.

Not only does it clean up bitops.  It also unrelatedly changes the prototype
of pci_mmcfg_init() and removes its arch_initcall().  It seems that two
unrelated patches got joined together, but this is the combination which has
been tested.

This patch fixes the current x86_64 build error (the pci_mmcfg_init()
declaration in arch/i386/pci/pci.h disagrees with the definition in
arch/x86_64/pci/mmconfig.c).
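
For reference, the mismatch is between a shared declaration and the old
x86_64 definition; a sketch (the exact pci.h wording is an assumption, the
definitions are taken from the diff below):

	/* arch/i386/pci/pci.h, shared declaration (assumed form): */
	extern void pci_mmcfg_init(void);

	/* arch/x86_64/pci/mmconfig.c, old definition (see diff below): */
	static int __init pci_mmcfg_init(void);	/* static, returns int: disagrees */

	/* new definition, matching the declaration: */
	void __init pci_mmcfg_init(void);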

This also means that x86_64's pci_mmcfg_init() gets called in the same (new)
manner as x86's: from arch/i386/pci/init.c:pci_access_init(), rather than via
initcall.
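
Roughly, the new call path looks like this (a minimal sketch, assuming the
2.6.16-era shape of arch/i386/pci/init.c; error handling and the other probe
methods are elided):

	/* arch/i386/pci/init.c (sketch) */
	static __init int pci_access_init(void)
	{
	#ifdef CONFIG_PCI_MMCONFIG
		pci_mmcfg_init();	/* direct call; x86_64's own arch_initcall() is gone */
	#endif
		/* ... fall back to the conf1/conf2 probes if mmconfig did not take ... */
		return 0;
	}
	arch_initcall(pci_access_init);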

The bitops cleanups came along for free.

All this worked OK in -mm testing (since 2.6.16-rc4-mm1) because x86_64 was
tested with both patches applied.
Signed-off-by: Akinobu Mita <mita@miraclelinux.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Con Kolivas <kernel@kolivas.org>
Cc: Jean Delvare <khali@linux-fr.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 9b04c997
@@ -139,8 +139,7 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
 
 static int mce_available(struct cpuinfo_x86 *c)
 {
-	return test_bit(X86_FEATURE_MCE, &c->x86_capability) &&
-	       test_bit(X86_FEATURE_MCA, &c->x86_capability);
+	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
 
 static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
@@ -1344,8 +1344,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	{
 		int i;
 
 		for ( i = 0 ; i < 32*NCAPINTS ; i++ )
-			if ( test_bit(i, &c->x86_capability) &&
-			     x86_cap_flags[i] != NULL )
+			if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
 				seq_printf(m, " %s", x86_cap_flags[i]);
 	}
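
The two hunks above work because x86_capability is an array of words, so the
accessor can hand it to test_bit() directly; a sketch of cpu_has() as it is
assumed to read in include/asm-x86_64/cpufeature.h:

	/* assumed definition; (c)->x86_capability decays to a pointer to its
	 * first word, which is what the bitops expect: */
	#define cpu_has(c, bit)		test_bit(bit, (c)->x86_capability)

The old &c->x86_capability took the address of the whole array instead: the
same address, but a pointer to the array rather than to its first element.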
@@ -55,7 +55,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
 {
 	char __iomem *addr;
-	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), &fallback_slots))
+	if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), fallback_slots))
 		return NULL;
 	addr = get_virt(seg, bus);
 	if (!addr)
@@ -143,29 +143,29 @@ static __init void unreachable_devices(void)
 			continue;
 		addr = pci_dev_base(0, 0, PCI_DEVFN(i, 0));
 		if (addr == NULL|| readl(addr) != val1) {
-			set_bit(i, &fallback_slots);
+			set_bit(i, fallback_slots);
 		}
 	}
 }
 
-static int __init pci_mmcfg_init(void)
+void __init pci_mmcfg_init(void)
 {
 	int i;
 
 	if ((pci_probe & PCI_PROBE_MMCONF) == 0)
-		return 0;
+		return;
 
 	acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
 	if ((pci_mmcfg_config_num == 0) ||
 	    (pci_mmcfg_config == NULL) ||
 	    (pci_mmcfg_config[0].base_address == 0))
-		return 0;
+		return;
 
 	/* RED-PEN i386 doesn't do _nocache right now */
 	pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
 	if (pci_mmcfg_virt == NULL) {
 		printk("PCI: Can not allocate memory for mmconfig structures\n");
-		return 0;
+		return;
 	}
 	for (i = 0; i < pci_mmcfg_config_num; ++i) {
 		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
@@ -173,7 +173,7 @@ static int __init pci_mmcfg_init(void)
 		if (!pci_mmcfg_virt[i].virt) {
 			printk("PCI: Cannot map mmconfig aperture for segment %d\n",
 			       pci_mmcfg_config[i].pci_segment_group_number);
-			return 0;
+			return;
 		}
 		printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
 	}
@@ -182,8 +182,4 @@ static int __init pci_mmcfg_init(void)
 
 	raw_pci_ops = &pci_mmcfg;
 	pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
-
-	return 0;
 }
-
-arch_initcall(pci_mmcfg_init);
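
Dropping the & from the fallback_slots calls implies the variable itself
became a real bitmap; a sketch of the assumed declaration change in
mmconfig.c:

	/* old (assumed): a plain word, so call sites needed &fallback_slots */
	static u32 fallback_slots;

	/* new (assumed): an unsigned long array via DECLARE_BITMAP, which
	 * decays to the unsigned long * that set_bit()/test_bit() expect */
	static DECLARE_BITMAP(fallback_slots, 32);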
@@ -34,12 +34,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		clear_bit(cpu, &prev->cpu_vm_mask);
+		cpu_clear(cpu, prev->cpu_vm_mask);
 #ifdef CONFIG_SMP
 		write_pda(mmu_state, TLBSTATE_OK);
 		write_pda(active_mm, next);
 #endif
-		set_bit(cpu, &next->cpu_vm_mask);
+		cpu_set(cpu, next->cpu_vm_mask);
 		load_cr3(next->pgd);
 		if (unlikely(next->context.ldt != prev->context.ldt))
@@ -50,7 +50,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			write_pda(mmu_state, TLBSTATE_OK);
 			if (read_pda(active_mm) != next)
 				out_of_line_bug();
-			if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
+			if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
 				/* We were in lazy tlb mode and leave_mm disabled
 				 * tlb flush IPI delivery. We must reload CR3
 				 * to make sure to use no freed page tables.
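
cpu_vm_mask is a cpumask_t, and the cpu_*() accessors used above wrap the
same atomic bitops while taking the mask by name instead of by a casted
address; a sketch of the era's include/linux/cpumask.h (assumed):

	#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
	static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
	{
		clear_bit(cpu, dstp->bits);
	}
	/* cpu_set() and cpu_test_and_set() wrap set_bit() and
	 * test_and_set_bit() the same way. */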
@@ -293,19 +293,19 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_dirty(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
+	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
 }
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	if (!pte_young(*ptep))
 		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
+	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
 }
 
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	clear_bit(_PAGE_BIT_RW, ptep);
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
 }
 
 /*
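
The pgtable.h hunks are type hygiene: pte_t is a one-word struct, so passing
ptep straight to the bitops only worked because of its layout; addressing
the member is the type-correct spelling. A sketch:

	/* include/asm-x86_64/page.h (sketch): pte_t wraps a single word */
	typedef struct { unsigned long pte; } pte_t;

	/* the bitops want a word pointer, so name the member explicitly: */
	clear_bit(_PAGE_BIT_RW, &ptep->pte);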