Commit e7bc15a9 authored by Linus Torvalds

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Fix memory leak when cpu hotplugging.
  [SPARC64]: Do not assume sun4v chips have load-twin/store-init support.
  [SPARC64]: Fix hard-coding of cpu type output in /proc/cpuinfo on sun4v.
  [SPARC]: Centralize find_in_proplist() instead of duplicating N times.
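
The last item above replaces several private copies of find_in_proplist() (removed below from mdesc.c and vio.c) with one exported of_find_in_proplist() helper, added to both the sparc and sparc64 prom.c. A minimal caller sketch, assuming <asm/prom.h> provides the prototype; the buffer contents and function name here are made up purely for illustration:

#include <asm/prom.h>

/* Illustrative only: a "compatible"-style property is a packed list of
 * NUL-terminated strings, and len is the total size of that buffer.
 */
static int example_is_sun4v_compatible(void)
{
	static const char compat[] = "SUNW,niagara\0sun4v";

	/* sizeof() counts the final NUL, which terminates the last entry. */
	return of_find_in_proplist(compat, "sun4v", sizeof(compat));
}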
......@@ -102,6 +102,21 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
}
EXPORT_SYMBOL(of_set_property);
int of_find_in_proplist(const char *list, const char *match, int len)
{
while (len > 0) {
int l;
if (!strcmp(list, match))
return 1;
l = strlen(list) + 1;
list += l;
len -= l;
}
return 0;
}
EXPORT_SYMBOL(of_find_in_proplist);
static unsigned int prom_early_allocated;
static void * __init prom_early_alloc(unsigned long size)
......
/* cpu.c: Dinky routines to look for the kind of Sparc cpu
* we are on.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
......@@ -13,6 +13,7 @@
#include <asm/fpumacro.h>
#include <asm/cpudata.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
......@@ -61,21 +62,40 @@ struct cpu_iu_info linux_sparc_chips[] = {
#define NSPARCCHIPS ARRAY_SIZE(linux_sparc_chips)
char *sparc_cpu_type = "cpu-oops";
char *sparc_fpu_type = "fpu-oops";
char *sparc_cpu_type;
char *sparc_fpu_type;
unsigned int fsr_storage;
static void __init sun4v_cpu_probe(void)
{
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
sparc_cpu_type = "UltraSparc T1 (Niagara)";
sparc_fpu_type = "UltraSparc T1 integrated FPU";
break;
case SUN4V_CHIP_NIAGARA2:
sparc_cpu_type = "UltraSparc T2 (Niagara2)";
sparc_fpu_type = "UltraSparc T2 integrated FPU";
break;
default:
printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
prom_cpu_compatible);
sparc_cpu_type = "Unknown SUN4V CPU";
sparc_fpu_type = "Unknown SUN4V FPU";
break;
}
}
void __init cpu_probe(void)
{
unsigned long ver, fpu_vers, manuf, impl, fprs;
int i;
if (tlb_type == hypervisor) {
sparc_cpu_type = "UltraSparc T1 (Niagara)";
sparc_fpu_type = "UltraSparc T1 integrated FPU";
return;
}
if (tlb_type == hypervisor)
return sun4v_cpu_probe();
fprs = fprs_read();
fprs_write(FPRS_FEF);
......
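
With sun4v_cpu_probe() above, /proc/cpuinfo on sun4v no longer hard-codes the T1 strings; it reports whatever chip the cpu node's "compatible" property identified. Assuming show_cpuinfo() keeps printing sparc_cpu_type and sparc_fpu_type as before, the output on an UltraSPARC T2 box would read roughly as follows (formatting approximate, shown only to illustrate the fix):

cpu             : UltraSparc T2 (Niagara2)
fpu             : UltraSparc T2 integrated FPU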
......@@ -97,7 +97,8 @@ sparc64_boot:
.globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
.globl prom_boot_mapped_pc, prom_boot_mapping_mode
.globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
.globl is_sun4v
.globl prom_compatible_name, prom_cpu_path, prom_cpu_compatible
.globl is_sun4v, sun4v_chip_type
prom_peer_name:
.asciz "peer"
prom_compatible_name:
......@@ -106,6 +107,8 @@ prom_finddev_name:
.asciz "finddevice"
prom_chosen_path:
.asciz "/chosen"
prom_cpu_path:
.asciz "/cpu"
prom_getprop_name:
.asciz "getprop"
prom_mmu_name:
......@@ -120,9 +123,13 @@ prom_unmap_name:
.asciz "unmap"
prom_sun4v_name:
.asciz "sun4v"
prom_niagara_prefix:
.asciz "SUNW,UltraSPARC-T"
.align 4
prom_root_compatible:
.skip 64
prom_cpu_compatible:
.skip 64
prom_root_node:
.word 0
prom_mmu_ihandle_cache:
......@@ -138,6 +145,8 @@ prom_boot_mapping_phys_low:
.xword 0
is_sun4v:
.word 0
sun4v_chip_type:
.word SUN4V_CHIP_INVALID
1:
rd %pc, %l0
......@@ -296,13 +305,13 @@ is_sun4v:
sethi %hi(prom_sun4v_name), %g7
or %g7, %lo(prom_sun4v_name), %g7
mov 5, %g3
1: ldub [%g7], %g2
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 2f
bne,pn %icc, 80f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 1b
bne,pt %xcc, 90b
add %g1, 1, %g1
sethi %hi(is_sun4v), %g1
......@@ -310,7 +319,80 @@ is_sun4v:
mov 1, %g7
stw %g7, [%g1]
2:
/* cpu_node = prom_finddevice("/cpu") */
mov (1b - prom_finddev_name), %l1
mov (1b - prom_cpu_path), %l2
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %sp, (192 + 128), %sp
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/cpu"
stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x20], %l4 ! cpu device node
mov (1b - prom_getprop_name), %l1
mov (1b - prom_compatible_name), %l2
mov (1b - prom_cpu_compatible), %l5
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l5, %l5
/* prom_getproperty(cpu_node, "compatible",
* &prom_cpu_compatible, 64)
*/
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, cpu_node
stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible"
stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_cpu_compatible
mov 64, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size
stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
add %sp, (192 + 128), %sp
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_niagara_prefix), %g7
or %g7, %lo(prom_niagara_prefix), %g7
mov 17, %g3
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 4f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 90b
add %g1, 1, %g1
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
ldub [%g1 + 17], %g2
cmp %g2, '1'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA1, %g4
cmp %g2, '2'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA2, %g4
4:
mov SUN4V_CHIP_UNKNOWN, %g4
5: sethi %hi(sun4v_chip_type), %g2
or %g2, %lo(sun4v_chip_type), %g2
stw %g4, [%g2]
80:
BRANCH_IF_SUN4V(g1, jump_to_sun4u_init)
BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
......@@ -414,6 +496,24 @@ niagara_tlb_fixup:
stw %g2, [%g1 + %lo(tlb_type)]
/* Patch copy/clear ops. */
sethi %hi(sun4v_chip_type), %g1
lduw [%g1 + %lo(sun4v_chip_type)], %g1
cmp %g1, SUN4V_CHIP_NIAGARA1
be,pt %xcc, niagara_patch
cmp %g1, SUN4V_CHIP_NIAGARA2
be,pt %xcc, niagara_patch
nop
call generic_patch_copyops
nop
call generic_patch_bzero
nop
call generic_patch_pageops
nop
ba,a,pt %xcc, 80f
niagara_patch:
call niagara_patch_copyops
nop
call niagara_patch_bzero
......@@ -421,6 +521,7 @@ niagara_tlb_fixup:
call niagara_patch_pageops
nop
80:
/* Patch TLB/cache ops. */
call hypervisor_patch_cachetlbops
nop
......
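
The niagara_tlb_fixup hunk above implements the "do not assume load-twin/store-init support" fix: only chips positively identified as Niagara 1 or 2 get the NG block-init copy/clear routines, while any other sun4v chip is patched to the new generic GEN routines. The assembly amounts to the following C-like sketch (an illustrative restatement, not kernel code):

	if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
	    sun4v_chip_type == SUN4V_CHIP_NIAGARA2) {
		niagara_patch_copyops();
		niagara_patch_bzero();
		niagara_patch_pageops();
	} else {
		/* Unknown sun4v chip: stay on the safe generic routines. */
		generic_patch_copyops();
		generic_patch_bzero();
		generic_patch_pageops();
	}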
......@@ -115,11 +115,8 @@ hv_cpu_startup:
call hard_smp_processor_id
nop
mov %o0, %o1
mov 0, %o0
mov 0, %o2
call sun4v_init_mondo_queues
mov 1, %o3
call sun4v_register_mondo_queues
nop
call init_cur_cpu_trap
mov %g6, %o0
......
......@@ -929,7 +929,7 @@ static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type
}
}
static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
struct trap_per_cpu *tb = &trap_block[this_cpu];
......@@ -943,20 +943,10 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
tb->nonresum_qmask);
}
static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
unsigned long order = get_order(size);
void *p = NULL;
if (use_bootmem) {
p = __alloc_bootmem_low(size, size, 0);
} else {
struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
if (page)
p = page_address(page);
}
void *p = __alloc_bootmem_low(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
prom_halt();
......@@ -965,19 +955,10 @@ static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask
*pa_ptr = __pa(p);
}
static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
unsigned long size = PAGE_ALIGN(qmask + 1);
unsigned long order = get_order(size);
void *p = NULL;
if (use_bootmem) {
p = __alloc_bootmem_low(size, size, 0);
} else {
struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
if (page)
p = page_address(page);
}
void *p = __alloc_bootmem_low(size, size, 0);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
......@@ -987,18 +968,14 @@ static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask,
*pa_ptr = __pa(p);
}
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
void *page;
BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
if (use_bootmem)
page = alloc_bootmem_low_pages(PAGE_SIZE);
else
page = (void *) get_zeroed_page(GFP_ATOMIC);
page = alloc_bootmem_low_pages(PAGE_SIZE);
if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
prom_halt();
......@@ -1009,30 +986,27 @@ static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_
#endif
}
/* Allocate and register the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
/* Allocate mondo and error queues for all possible cpus. */
static void __init sun4v_init_mondo_queues(void)
{
struct trap_per_cpu *tb = &trap_block[cpu];
int cpu;
if (alloc) {
alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
for_each_possible_cpu(cpu) {
struct trap_per_cpu *tb = &trap_block[cpu];
init_cpu_send_mondo_info(tb, use_bootmem);
}
alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
tb->nonresum_qmask);
if (load) {
if (cpu != hard_smp_processor_id()) {
prom_printf("SUN4V: init mondo on cpu %d not %d\n",
cpu, hard_smp_processor_id());
prom_halt();
}
sun4v_register_mondo_queues(cpu);
init_cpu_send_mondo_info(tb);
}
/* Load up the boot cpu's entries. */
sun4v_register_mondo_queues(hard_smp_processor_id());
}
static struct irqaction timer_irq_action = {
......@@ -1047,7 +1021,7 @@ void __init init_IRQ(void)
memset(&ivector_table[0], 0, sizeof(ivector_table));
if (tlb_type == hypervisor)
sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);
sun4v_init_mondo_queues();
/* We need to clear any IRQ's pending in the soft interrupt
* registers, a spurious one could be left around from the
......
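
Together with the trampoline changes above, this is the cpu-hotplug memory-leak fix: mondo and error queues are now allocated from bootmem exactly once, for every possible cpu, when init_IRQ() runs, and a cpu that is (re)started only registers its preallocated queues with the hypervisor. Roughly (call sites as in the diffs, comments illustrative):

	/* Boot cpu, init_IRQ(): allocate for all possible cpus, then
	 * register the boot cpu's own queues.
	 */
	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	/* Secondary or hotplugged cpu, hv_cpu_startup / after_lock_tlb:
	 * registration only, no per-hotplug allocation (hence no leak).
	 */
	sun4v_register_mondo_queues(hard_smp_processor_id());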
......@@ -568,20 +568,6 @@ static void __init report_platform_properties(void)
mdesc_release(hp);
}
static int inline find_in_proplist(const char *list, const char *match, int len)
{
while (len > 0) {
int l;
if (!strcmp(list, match))
return 1;
l = strlen(list) + 1;
list += l;
len -= l;
}
return 0;
}
static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
struct mdesc_handle *hp,
u64 mp)
......@@ -596,10 +582,10 @@ static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
switch (*level) {
case 1:
if (find_in_proplist(type, "instn", type_len)) {
if (of_find_in_proplist(type, "instn", type_len)) {
c->icache_size = *size;
c->icache_line_size = *line_size;
} else if (find_in_proplist(type, "data", type_len)) {
} else if (of_find_in_proplist(type, "data", type_len)) {
c->dcache_size = *size;
c->dcache_line_size = *line_size;
}
......@@ -677,7 +663,7 @@ static void __devinit set_core_ids(struct mdesc_handle *hp)
continue;
type = mdesc_get_property(hp, mp, "type", &len);
if (!find_in_proplist(type, "instn", len))
if (!of_find_in_proplist(type, "instn", len))
continue;
mark_core_ids(hp, mp, idx);
......@@ -718,8 +704,8 @@ static void __devinit __set_proc_ids(struct mdesc_handle *hp,
int len;
type = mdesc_get_property(hp, mp, "type", &len);
if (!find_in_proplist(type, "int", len) &&
!find_in_proplist(type, "integer", len))
if (!of_find_in_proplist(type, "int", len) &&
!of_find_in_proplist(type, "integer", len))
continue;
mark_proc_ids(hp, mp, idx);
......
......@@ -107,6 +107,21 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
}
EXPORT_SYMBOL(of_set_property);
int of_find_in_proplist(const char *list, const char *match, int len)
{
while (len > 0) {
int l;
if (!strcmp(list, match))
return 1;
l = strlen(list) + 1;
list += l;
len -= l;
}
return 0;
}
EXPORT_SYMBOL(of_find_in_proplist);
static unsigned int prom_early_allocated;
static void * __init prom_early_alloc(unsigned long size)
......
......@@ -334,8 +334,6 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
}
#endif
extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);
extern unsigned long sparc64_cpu_startup;
/* The OBP cpu startup callback truncates the 3rd arg cookie to
......@@ -359,9 +357,6 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
cpu_new_thread = task_thread_info(p);
if (tlb_type == hypervisor) {
/* Alloc the mondo queues, cpu will load them. */
sun4v_init_mondo_queues(0, cpu, 1, 0);
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
if (ldom_domaining_enabled)
ldom_startcpu_cpuid(cpu,
......
......@@ -168,6 +168,7 @@ EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL(tlb_type);
EXPORT_SYMBOL(sun4v_chip_type);
EXPORT_SYMBOL(get_fb_unmapped_area);
EXPORT_SYMBOL(flush_icache_range);
......
......@@ -366,11 +366,8 @@ after_lock_tlb:
call hard_smp_processor_id
nop
mov %o0, %o1
mov 0, %o0
mov 0, %o2
call sun4v_init_mondo_queues
mov 1, %o3
call sun4v_register_mondo_queues
nop
1: call init_cur_cpu_trap
ldx [%l0], %o0
......
......@@ -16,21 +16,6 @@
#include <asm/mdesc.h>
#include <asm/vio.h>
static inline int find_in_proplist(const char *list, const char *match,
int len)
{
while (len > 0) {
int l;
if (!strcmp(list, match))
return 1;
l = strlen(list) + 1;
list += l;
len -= l;
}
return 0;
}
static const struct vio_device_id *vio_match_device(
const struct vio_device_id *matches,
const struct vio_dev *dev)
......@@ -49,7 +34,7 @@ static const struct vio_device_id *vio_match_device(
if (matches->compat[0]) {
match &= len &&
find_in_proplist(compat, matches->compat, len);
of_find_in_proplist(compat, matches->compat, len);
}
if (match)
return matches;
......@@ -406,7 +391,7 @@ static int __init vio_init(void)
"property\n");
goto out_release;
}
if (!find_in_proplist(compat, channel_devices_compat, len)) {
if (!of_find_in_proplist(compat, channel_devices_compat, len)) {
printk(KERN_ERR "VIO: Channel devices node lacks (%s) "
"compat entry.\n", channel_devices_compat);
goto out_release;
......
/* GENbzero.S: Generic sparc64 memset/clear_user.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <asm/asi.h>
#define EX_ST(x,y) \
98: x,y; \
.section .fixup; \
.align 4; \
99: retl; \
mov %o1, %o0; \
.section __ex_table; \
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
.align 32
.text
.globl GENmemset
.type GENmemset, #function
GENmemset: /* %o0=buf, %o1=pat, %o2=len */
and %o1, 0xff, %o3
mov %o2, %o1
sllx %o3, 8, %g1
or %g1, %o3, %o2
sllx %o2, 16, %g1
or %g1, %o2, %o2
sllx %o2, 32, %g1
ba,pt %xcc, 1f
or %g1, %o2, %o2
.globl GENbzero
.type GENbzero, #function
GENbzero:
clr %o2
1: brz,pn %o1, GENbzero_return
mov %o0, %o3
/* %o5: saved %asi, restored at GENbzero_done
* %o4: store %asi to use
*/
rd %asi, %o5
mov ASI_P, %o4
wr %o4, 0x0, %asi
GENbzero_from_clear_user:
cmp %o1, 15
bl,pn %icc, GENbzero_tiny
andcc %o0, 0x7, %g1
be,pt %xcc, 2f
mov 8, %g2
sub %g2, %g1, %g1
sub %o1, %g1, %o1
1: EX_ST(stba %o2, [%o0 + 0x00] %asi)
subcc %g1, 1, %g1
bne,pt %xcc, 1b
add %o0, 1, %o0
2: cmp %o1, 128
bl,pn %icc, GENbzero_medium
andcc %o0, (64 - 1), %g1
be,pt %xcc, GENbzero_pre_loop
mov 64, %g2
sub %g2, %g1, %g1
sub %o1, %g1, %o1
1: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
subcc %g1, 8, %g1
bne,pt %xcc, 1b
add %o0, 8, %o0
GENbzero_pre_loop:
andn %o1, (64 - 1), %g1
sub %o1, %g1, %o1
GENbzero_loop:
EX_ST(stxa %o2, [%o0 + 0x00] %asi)
EX_ST(stxa %o2, [%o0 + 0x08] %asi)
EX_ST(stxa %o2, [%o0 + 0x10] %asi)
EX_ST(stxa %o2, [%o0 + 0x18] %asi)
EX_ST(stxa %o2, [%o0 + 0x20] %asi)
EX_ST(stxa %o2, [%o0 + 0x28] %asi)
EX_ST(stxa %o2, [%o0 + 0x30] %asi)
EX_ST(stxa %o2, [%o0 + 0x38] %asi)
subcc %g1, 64, %g1
bne,pt %xcc, GENbzero_loop
add %o0, 64, %o0
membar #Sync
wr %o4, 0x0, %asi
brz,pn %o1, GENbzero_done
GENbzero_medium:
andncc %o1, 0x7, %g1
be,pn %xcc, 2f
sub %o1, %g1, %o1
1: EX_ST(stxa %o2, [%o0 + 0x00] %asi)
subcc %g1, 8, %g1
bne,pt %xcc, 1b
add %o0, 8, %o0
2: brz,pt %o1, GENbzero_done
nop
GENbzero_tiny:
1: EX_ST(stba %o2, [%o0 + 0x00] %asi)
subcc %o1, 1, %o1
bne,pt %icc, 1b
add %o0, 1, %o0
/* fallthrough */
GENbzero_done:
wr %o5, 0x0, %asi
GENbzero_return:
retl
mov %o3, %o0
.size GENbzero, .-GENbzero
.size GENmemset, .-GENmemset
.globl GENclear_user
.type GENclear_user, #function
GENclear_user: /* %o0=buf, %o1=len */
rd %asi, %o5
brz,pn %o1, GENbzero_done
clr %o3
cmp %o5, ASI_AIUS
bne,pn %icc, GENbzero
clr %o2
ba,pt %xcc, GENbzero_from_clear_user
mov ASI_AIUS, %o4
.size GENclear_user, .-GENclear_user
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define GEN_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl generic_patch_bzero
.type generic_patch_bzero,#function
generic_patch_bzero:
GEN_DO_PATCH(memset, GENmemset)
GEN_DO_PATCH(__bzero, GENbzero)
GEN_DO_PATCH(__clear_user, GENclear_user)
retl
nop
.size generic_patch_bzero,.-generic_patch_bzero
/* GENcopy_from_user.S: Generic sparc64 copy from userspace.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#define EX_LD(x) \
98: x; \
.section .fixup; \
.align 4; \
99: retl; \
mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif
#define FUNC_NAME GENcopy_from_user
#define LOAD(type,addr,dest) type##a [addr] ASI_AIUS, dest
#define EX_RETVAL(x) 0
#ifdef __KERNEL__
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
bne,pn %icc, memcpy_user_stub; \
nop
#endif
#include "GENmemcpy.S"
/* GENcopy_to_user.S: Generic sparc64 copy to userspace.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#define EX_ST(x) \
98: x; \
.section .fixup; \
.align 4; \
99: retl; \
mov 1, %o0; \
.section __ex_table,"a";\
.align 4; \
.word 98b, 99b; \
.text; \
.align 4;
#ifndef ASI_AIUS
#define ASI_AIUS 0x11
#endif
#define FUNC_NAME GENcopy_to_user
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
#define EX_RETVAL(x) 0
#ifdef __KERNEL__
/* Writing to %asi is _expensive_ so we hardcode it.
* Reading %asi to check for KERNEL_DS is comparatively
* cheap.
*/
#define PREAMBLE \
rd %asi, %g1; \
cmp %g1, ASI_AIUS; \
bne,pn %icc, memcpy_user_stub; \
nop
#endif
#include "GENmemcpy.S"
/* GENmemcpy.S: Generic sparc64 memcpy.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#ifdef __KERNEL__
#define GLOBAL_SPARE %g7
#else
#define GLOBAL_SPARE %g5
#endif
#ifndef EX_LD
#define EX_LD(x) x
#endif
#ifndef EX_ST
#define EX_ST(x) x
#endif
#ifndef EX_RETVAL
#define EX_RETVAL(x) x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest) type [addr], dest
#endif
#ifndef STORE
#define STORE(type,src,addr) type src, [addr]
#endif
#ifndef FUNC_NAME
#define FUNC_NAME GENmemcpy
#endif
#ifndef PREAMBLE
#define PREAMBLE
#endif
#ifndef XCC
#define XCC xcc
#endif
.register %g2,#scratch
.register %g3,#scratch
.text
.align 64
.globl FUNC_NAME
.type FUNC_NAME,#function
FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
srlx %o2, 31, %g2
cmp %g2, 0
tne %XCC, 5
PREAMBLE
mov %o0, GLOBAL_SPARE
cmp %o2, 0
be,pn %XCC, 85f
or %o0, %o1, %o3
cmp %o2, 16
blu,a,pn %XCC, 80f
or %o3, %o2, %o3
xor %o0, %o1, %o4
andcc %o4, 0x7, %g0
bne,a,pn %XCC, 90f
sub %o0, %o1, %o3
and %o0, 0x7, %o4
sub %o4, 0x8, %o4
sub %g0, %o4, %o4
sub %o2, %o4, %o2
1: subcc %o4, 1, %o4
EX_LD(LOAD(ldub, %o1, %g1))
EX_ST(STORE(stb, %g1, %o0))
add %o1, 1, %o1
bne,pt %XCC, 1b
add %o0, 1, %o0
andn %o2, 0x7, %g1
sub %o2, %g1, %o2
1: subcc %g1, 0x8, %g1
EX_LD(LOAD(ldx, %o1, %g2))
EX_ST(STORE(stx, %g2, %o0))
add %o1, 0x8, %o1
bne,pt %XCC, 1b
add %o0, 0x8, %o0
brz,pt %o2, 85f
sub %o0, %o1, %o3
ba,a,pt %XCC, 90f
.align 64
80: /* 0 < len <= 16 */
andcc %o3, 0x3, %g0
bne,pn %XCC, 90f
sub %o0, %o1, %o3
1:
subcc %o2, 4, %o2
EX_LD(LOAD(lduw, %o1, %g1))
EX_ST(STORE(stw, %g1, %o1 + %o3))
bgu,pt %XCC, 1b
add %o1, 4, %o1
85: retl
mov EX_RETVAL(GLOBAL_SPARE), %o0
.align 32
90:
subcc %o2, 1, %o2
EX_LD(LOAD(ldub, %o1, %g1))
EX_ST(STORE(stb, %g1, %o1 + %o3))
bgu,pt %XCC, 90b
add %o1, 1, %o1
retl
mov EX_RETVAL(GLOBAL_SPARE), %o0
.size FUNC_NAME, .-FUNC_NAME
/* GENpage.S: Generic clear and copy page.
*
* Copyright (C) 2007 (davem@davemloft.net)
*/
#include <asm/page.h>
.text
.align 32
GENcopy_user_page:
set PAGE_SIZE, %g7
1: ldx [%o1 + 0x00], %o2
ldx [%o1 + 0x08], %o3
ldx [%o1 + 0x10], %o4
ldx [%o1 + 0x18], %o5
stx %o2, [%o0 + 0x00]
stx %o3, [%o0 + 0x08]
stx %o4, [%o0 + 0x10]
stx %o5, [%o0 + 0x18]
ldx [%o1 + 0x20], %o2
ldx [%o1 + 0x28], %o3
ldx [%o1 + 0x30], %o4
ldx [%o1 + 0x38], %o5
stx %o2, [%o0 + 0x20]
stx %o3, [%o0 + 0x28]
stx %o4, [%o0 + 0x30]
stx %o5, [%o0 + 0x38]
subcc %g7, 64, %g7
add %o1, 64, %o1
bne,pt %xcc, 1b
add %o0, 64, %o0
retl
nop
GENclear_page:
GENclear_user_page:
set PAGE_SIZE, %g7
1: stx %g0, [%o0 + 0x00]
stx %g0, [%o0 + 0x08]
stx %g0, [%o0 + 0x10]
stx %g0, [%o0 + 0x18]
stx %g0, [%o0 + 0x20]
stx %g0, [%o0 + 0x28]
stx %g0, [%o0 + 0x30]
stx %g0, [%o0 + 0x38]
subcc %g7, 64, %g7
bne,pt %xcc, 1b
add %o0, 64, %o0
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define GEN_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl generic_patch_pageops
.type generic_patch_pageops,#function
generic_patch_pageops:
GEN_DO_PATCH(copy_user_page, GENcopy_user_page)
GEN_DO_PATCH(_clear_page, GENclear_page)
GEN_DO_PATCH(clear_user_page, GENclear_user_page)
retl
nop
.size generic_patch_pageops,.-generic_patch_pageops
/* GENpatch.S: Patch Ultra-I routines with generic variant.
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
*/
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define GEN_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl generic_patch_copyops
.type generic_patch_copyops,#function
generic_patch_copyops:
GEN_DO_PATCH(memcpy, GENmemcpy)
GEN_DO_PATCH(___copy_from_user, GENcopy_from_user)
GEN_DO_PATCH(___copy_to_user, GENcopy_to_user)
retl
nop
.size generic_patch_copyops,.-generic_patch_copyops
# $Id: Makefile,v 1.25 2000/12/14 22:57:25 davem Exp $
#
# Makefile for Sparc64 library files..
#
......@@ -13,6 +13,8 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \
NGpage.o NGbzero.o \
GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o GENpatch.o \
GENpage.o GENbzero.o \
copy_in_user.o user_fixup.o memmove.o \
mcount.o ipcsum.o rwsem.o xor.o
......
......@@ -67,6 +67,7 @@ extern int of_set_property(struct device_node *node, const char *name, void *val
extern int of_getintprop_default(struct device_node *np,
const char *name,
int def);
extern int of_find_in_proplist(const char *list, const char *match, int len);
extern void prom_build_devicetree(void);
......
/* $Id: oplib.h,v 1.14 2001/12/19 00:29:51 davem Exp $
* oplib.h: Describes the interface and available routines in the
/* oplib.h: Describes the interface and available routines in the
* Linux Prom library.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
......@@ -31,8 +30,10 @@ extern int prom_chosen_node;
extern const char prom_peer_name[];
extern const char prom_compatible_name[];
extern const char prom_root_compatible[];
extern const char prom_cpu_compatible[];
extern const char prom_finddev_name[];
extern const char prom_chosen_path[];
extern const char prom_cpu_path[];
extern const char prom_getprop_name[];
extern const char prom_mmu_name[];
extern const char prom_callmethod_name[];
......
......@@ -76,6 +76,7 @@ extern int of_set_property(struct device_node *node, const char *name, void *val
extern int of_getintprop_default(struct device_node *np,
const char *name,
int def);
extern int of_find_in_proplist(const char *list, const char *match, int len);
extern void prom_build_devicetree(void);
......
......@@ -38,6 +38,11 @@
#define L1DCACHE_SIZE 0x4000
#define SUN4V_CHIP_INVALID 0x00
#define SUN4V_CHIP_NIAGARA1 0x01
#define SUN4V_CHIP_NIAGARA2 0x02
#define SUN4V_CHIP_UNKNOWN 0xff
#ifndef __ASSEMBLY__
enum ultra_tlb_layout {
......@@ -49,6 +54,8 @@ enum ultra_tlb_layout {
extern enum ultra_tlb_layout tlb_type;
extern int sun4v_chip_type;
extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);
......
......@@ -63,4 +63,8 @@ static struct xor_block_template xor_block_niagara = {
/* For VIS for everything except Niagara. */
#define XOR_SELECT_TEMPLATE(FASTEST) \
(tlb_type == hypervisor ? &xor_block_niagara : &xor_block_VIS)
((tlb_type == hypervisor && \
(sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
sun4v_chip_type == SUN4V_CHIP_NIAGARA2)) ? \
&xor_block_niagara : \
&xor_block_VIS)
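
The xor template selection applies the same conservative rule: the Niagara block-store xor routines are chosen only when the chip is known to be Niagara 1 or 2, and an unrecognized sun4v chip falls back to the VIS templates. Assuming the generic xor calibration code consumes the macro in the usual way (a sketch, not a quote of that file):

#ifdef XOR_SELECT_TEMPLATE
	/* Picks xor_block_niagara only on Niagara 1/2, else xor_block_VIS. */
	fastest = XOR_SELECT_TEMPLATE(fastest);
#endif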