diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bb2efdd566a9d590d64184b10b097e4b7ed17e95..db93dbc0e21a9896ee25c8dcc1c517f8f330d5cd 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -227,7 +227,7 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
+	int "Maximum number of CPUs (2-128)"
 	range 2 128
 	depends on SMP
 	default "32" if PPC64
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 608fee7c7e20621dcf829507e893cbaee4ea05da..e3fb78397dc6d2b84e1f6fd8d0b7e9fc89e92ffe 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+	.dline_size = 0x80,
+	.log_dline_size = 7,
+	.iline_size = 0x80,
+	.log_iline_size = 7
+};
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
 /*
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 706e8a63ced93218334a227b62f5303595c27790..a33583f3b0e7d0e2c7f07b5ccc3bec039bf3f14e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Handle hugepage regions */
 	if (unlikely(in_hugepage_area(mm->context, ea))) {
 		DBG_LOW(" -> huge page !\n");
-		return hash_huge_page(mm, access, ea, vsid, local);
+		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 
 	/* Get PTE and page size from page tables */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6bc9dbad7dea20cd744d10f682f74562533f614f..54131b877da36cb3a827000d802ca76ce82507ad 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
+struct slb_flush_info {
+	struct mm_struct *mm;
+	u16 newareas;
+};
+
 static void flush_low_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i;
 
-	asm volatile("isync" : : : "memory");
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
 
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		asm volatile("slbie %0"
 			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
 static void flush_high_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i, j;
 
-	asm volatile("isync" : : : "memory");
 
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
 
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
+
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
 				     :: "r" (((i << HTLB_AREA_SHIFT)
-					     + (j << SID_SHIFT)) | SLBIE_C));
+					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
+	struct slb_flush_info fi;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
 	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 	mm->context.low_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_low_segments, &fi, 0, 1);
 
 	return 0;
 }
 
 static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
+	struct slb_flush_info fi;
 	unsigned long i;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_high_segments, &fi, 0, 1);
 
 	return 0;
 }
@@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+						  pte_t pte, int trap)
+{
+	struct page *page;
+	int i;
+
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
+			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local)
+		   unsigned long ea, unsigned long vsid, int local,
+		   unsigned long trap)
 {
 	pte_t *ptep;
 	unsigned long old_pte, new_pte;
@@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	rflags = 0x2 | (!(new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
 	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
@@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
-		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
+		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
+					 local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f72cf87364cb29d9953131fc1f0ef46d5a407087..ba7a3055a9fc42f20b70f80b7bc298f40137a559 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
 
 	/* We didnt find a matching region, return start/end as 0 */
 	if (*start_pfn == -1UL)
-		start_pfn = 0;
+		*start_pfn = 0;
 }
 
 static inline void map_cpu_to_node(int cpu, int node)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index cfbb4e1f966b3331e7e89dda329ae925841616d1..51e7951414e5ca561b1d51257be70efe52a84139 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
 		return;
 	}
 #endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_PSERIES
-	if (platform_is_lpar()) {
-		plpar_hcall_norets(H_SET_ASR, stabreal);
-		return;
-	}
-#endif
+
 	mtspr(SPRN_ASR, stabreal);
 }
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 0d7fa00fcb00cd8d608b5045557a6f6f5128bb72..f6e22da2a5daa8dd27a2bcade5b12ff19599b60f 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -1650,11 +1650,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable)
 			pmac_low_i2c_close(ui2c);
 			break;
 		}
+		printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+		       enable ? "En" : "Dis");
+
 		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 		DBG("write result: %d,", rc);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index c78f2b290a73a52b90356f32f54af51569227796..2043659ea7b19cfb4fcde1ea7d2ce8ddd4768a3b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
@@ -143,10 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce, *tcep;
 	long l, limit;
 
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
-	if (npages == 1)
+	if (TCE_PAGE_FACTOR == 0 && npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
 
@@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		__get_cpu_var(tce_page) = tcep;
 	}
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index a50e5f3f396dc177da02deea3cbad714d01be4ce..cf1bc11b334685264c296363f0a0dd06ca1b38cf 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	if (!(vflags & HPTE_V_BOLTED))
 		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
 
-#if 1
-	{
-		int i;
-		for (i=0;i<8;i++) {
-			unsigned long w0, w1;
-			plpar_pte_read(0, hpte_group, &w0, &w1);
-			BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
-				&& (w0 & HPTE_V_VALID));
-		}
-	}
-#endif
-
 	/* Now fill in the actual HPTE */
 	/* Set CEC cookie to 0 */
 	/* Zero page = 0 */
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 8fa51b0a32d2d5390450d6478117ec1e7410a804..cc3f64c084c5e886b29eadd081c77edc5bcace91 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -767,14 +767,14 @@ config CPM2
 	  on it (826x, 827x, 8560).
 
 config PPC_CHRP
-	bool " Common Hardware Reference Platform (CHRP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
 	default y
 
 config PPC_PMAC
-	bool " Apple PowerMac based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_INDIRECT_PCI
 	default y
@@ -785,7 +785,7 @@ config PPC_PMAC64
 	default y
 
 config PPC_PREP
-	bool " PowerPC Reference Platform (PReP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 43b8fc2ca591c86388427c4b822eababd6991e11..becbfa397556a378c87aea3c05e8418073072fb4 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -301,6 +301,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	/* Probe platform for CPUs: always linear. */
 	num_cpus = smp_ops->probe();
+
+	if (num_cpus < 2)
+		smp_tb_synchronized = 1;
+
 	for (i = 0; i < num_cpus; ++i)
 		cpu_set(i, cpu_possible_map);
 
diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c
index 1e69b05931620a2f1420adea43a27cac67644f6b..6b7b3a150631485cbf7ce43fe763f2ab8f55748a 100644
--- a/arch/ppc/platforms/pmac_feature.c
+++ b/arch/ppc/platforms/pmac_feature.c
@@ -1606,11 +1606,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1680,6 +1688,8 @@ void pmac_tweak_clock_spreading(int enable)
 			pmac_low_i2c_close(ui2c);
 			break;
 		}
+		printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+		       enable ? "En" : "Dis");
 		pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 		rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 		DBG("write result: %d,", rc);
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index 322c74b2687f4cbe6aa30ec6b3b2a484d937e6f8..80ddf9776bde23c0a85e294e8fa76eef6ac7ee42 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -207,7 +207,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 3 */
 	{
-		.model_id	= 2,
+		.model_id	= 3,
 		.itarget	= 0x350000,
 		.gd		= 0x08e00000,
 		.gp		= 0x00566666,
@@ -219,7 +219,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 5 */
 	{
-		.model_id	= 2,
+		.model_id	= 5,
 		.itarget	= 0x3a0000,
 		.gd		= 0x15400000,
 		.gp		= 0x00233333,
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index c1b4bbabbe97f4816c26261ba9488c0acfac0ccb..29b0bb0086d3fc5f40e8848d82662c345d4a4a44 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -220,7 +220,8 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned int local);
 struct mm_struct;
 extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
-			  unsigned long ea, unsigned long vsid, int local);
+			  unsigned long ea, unsigned long vsid, int local,
+			  unsigned long trap);
 
 extern void htab_finish_init(void);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
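
The one-character numa.c change above is worth spelling out: get_region() receives
"unsigned long *start_pfn", and the old code assigned to the local pointer variable
(start_pfn = 0) instead of the value it points at, so the caller never saw the zeroed
start_pfn. A minimal userspace sketch of the same bug class (hypothetical function
names, not the kernel code):

	#include <stdio.h>

	/* Buggy: reassigns the local pointer; the caller's variable is untouched. */
	static void get_region_buggy(unsigned long *start_pfn)
	{
		if (*start_pfn == -1UL)
			start_pfn = 0;	/* only nulls the local copy of the pointer */
	}

	/* Fixed: writes through the pointer, as the patch does. */
	static void get_region_fixed(unsigned long *start_pfn)
	{
		if (*start_pfn == -1UL)
			*start_pfn = 0;
	}

	int main(void)
	{
		unsigned long pfn = -1UL;

		get_region_buggy(&pfn);
		printf("buggy: %lu\n", pfn);	/* still ULONG_MAX */

		pfn = -1UL;
		get_region_fixed(&pfn);
		printf("fixed: %lu\n", pfn);	/* 0 */
		return 0;
	}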
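
The hugetlbpage.c hunks also change how the cross-CPU flush callback gets its argument:
instead of packing the u16 area mask into the void * itself, open_low_hpage_areas() and
open_high_hpage_areas() now pass a pointer to an on-stack struct slb_flush_info, so
flush_low_segments()/flush_high_segments() can see both the mm and the new area mask and
bail out early on CPUs running a different mm. Passing a pointer to stack data appears
safe here because on_each_cpu(..., 0, 1) is called with wait=1, so the callers do not
return until every CPU has finished the callback. A rough userspace analogue of the two
argument-passing styles (illustrative names only, not the kernel API):

	#include <stdint.h>
	#include <stdio.h>

	struct flush_info {		/* analogous to slb_flush_info */
		const char *mm_name;
		uint16_t newareas;
	};

	/* Old style: the scalar is smuggled through the pointer value itself. */
	static void flush_packed(void *parm)
	{
		uint16_t areas = (uint16_t)(unsigned long)parm;
		printf("packed: areas=0x%04x (no room for a second field)\n", areas);
	}

	/* New style: pass a pointer to a struct that can carry several fields. */
	static void flush_struct(void *parm)
	{
		struct flush_info *fi = parm;
		printf("struct: mm=%s areas=0x%04x\n", fi->mm_name, fi->newareas);
	}

	int main(void)
	{
		struct flush_info fi = { .mm_name = "current", .newareas = 0x0005 };

		/* stand-ins for on_each_cpu(fn, arg, 0, 1) running fn synchronously */
		flush_packed((void *)(unsigned long)fi.newareas);
		flush_struct(&fi);
		return 0;
	}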