Commit 4b1c46a3 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Make alignment exception always check exception table
  [POWERPC] Disallow kprobes on emulate_step and branch_taken
  [POWERPC] Make mmiowb's io_sync preempt safe
  [POWERPC] Make high hugepage areas preempt safe
  [POWERPC] Make current preempt-safe
  [POWERPC] qe_lib: qe_issue_cmd writes wrong value to CECDR
  [POWERPC] Use 4kB iommu pages even on 64kB-page systems
  [POWERPC] Fix oprofile support for e500 in arch/powerpc
  [POWERPC] Fix rmb() for e500-based machines
  [POWERPC] Fix various offb issues
@@ -38,7 +38,6 @@ obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o
obj32-$(CONFIG_MODULES) += module_32.o
obj-$(CONFIG_E500) += perfmon_fsl_booke.o
ifeq ($(CONFIG_PPC_MERGE),y)
......
@@ -182,7 +182,7 @@ int btext_initialize(struct device_node *np)
prop = get_property(np, "linux,bootx-linebytes", NULL);
if (prop == NULL)
prop = get_property(np, "linebytes", NULL);
if (prop)
if (prop && *prop != 0xffffffffu)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
......
@@ -47,6 +47,17 @@ static int novmerge = 0;
static int novmerge = 1;
#endif
static inline unsigned long iommu_num_pages(unsigned long vaddr,
unsigned long slen)
{
unsigned long npages;
npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
npages >>= IOMMU_PAGE_SHIFT;
return npages;
}
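For reference, a standalone sketch of the rounding this helper performs, assuming IOMMU_PAGE_SHIFT is 12 (the 4kB IOMMU page size this series introduces in asm/iommu.h); the constants are mirrored here so the demo compiles on its own and is not part of the patch:

#include <stdio.h>

#define IOMMU_PAGE_SHIFT	12
#define IOMMU_PAGE_SIZE		(1UL << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK		(~(IOMMU_PAGE_SIZE - 1))
#define IOMMU_PAGE_ALIGN(a)	(((a) + IOMMU_PAGE_SIZE - 1) & IOMMU_PAGE_MASK)

int main(void)
{
	/* A buffer starting 4 bytes below a page boundary and 8 bytes
	 * long straddles that boundary, so it needs two IOMMU pages. */
	unsigned long vaddr = 0x10000ffcUL, slen = 8;
	unsigned long npages =
		(IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK))
			>> IOMMU_PAGE_SHIFT;
	printf("npages = %lu\n", npages);	/* prints 2 */
	return 0;
}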
static int __init setup_iommu(char *str)
{
if (!strcmp(str, "novmerge"))
@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
}
entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << PAGE_SHIFT; /* Set the return dma address */
ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
/* Put the TCEs in the HW table */
ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
direction);
@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long entry, free_entry;
unsigned long i;
entry = dma_addr >> PAGE_SHIFT;
entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) ||
@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Init first segment length for backout at failure */
outs->dma_length = 0;
DBG("mapping %d elements:\n", nelems);
DBG("sg mapping %d elements:\n", nelems);
spin_lock_irqsave(&(tbl->it_lock), flags);
@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
}
/* Allocate iommu entries for that segment */
vaddr = (unsigned long)page_address(s->page) + s->offset;
npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
npages >>= PAGE_SHIFT;
entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
npages = iommu_num_pages(vaddr, slen);
entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Convert entry to a dma_addr_t */
entry += tbl->it_offset;
dma_addr = entry << PAGE_SHIFT;
dma_addr |= s->offset;
dma_addr = entry << IOMMU_PAGE_SHIFT;
dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n",
DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr);
/* Insert into HW table */
ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);
ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
/* If we are in an open segment, try merging */
if (segstart != s) {
@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" can't merge, new segment.\n");
} else {
outs->dma_length += s->length;
DBG(" merged, new len: %lx\n", outs->dma_length);
DBG(" merged, new len: %ux\n", outs->dma_length);
}
}
@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s->dma_length != 0) {
unsigned long vaddr, npages;
vaddr = s->dma_address & PAGE_MASK;
npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
>> PAGE_SHIFT;
vaddr = s->dma_address & IOMMU_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length);
__iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sglist->dma_length == 0)
break;
npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
- (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
npages = iommu_num_pages(dma_handle,sglist->dma_length);
__iommu_free(tbl, dma_handle, npages);
sglist++;
}
@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
BUG_ON(direction == DMA_NONE);
uaddr = (unsigned long)vaddr;
npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
npages >>= PAGE_SHIFT;
npages = iommu_num_pages(uaddr, size);
if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
mask >> PAGE_SHIFT, 0);
mask >> IOMMU_PAGE_SHIFT, 0);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, "
@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
tbl, vaddr, npages);
}
} else
dma_handle |= (uaddr & ~PAGE_MASK);
dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
}
return dma_handle;
@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction)
{
unsigned int npages;
BUG_ON(direction == DMA_NONE);
if (tbl)
iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
(dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
if (tbl) {
npages = iommu_num_pages(dma_handle, size);
iommu_free(tbl, dma_handle, npages);
}
}
/* Allocates a contiguous real buffer and creates mappings over it.
@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
{
void *ret = NULL;
dma_addr_t mapping;
unsigned int npages, order;
unsigned int order;
unsigned int nio_pages, io_order;
struct page *page;
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
order = get_order(size);
/*
@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
memset(ret, 0, size);
/* Set up tces to cover the allocated range */
mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
mask >> PAGE_SHIFT, order);
nio_pages = size >> IOMMU_PAGE_SHIFT;
io_order = get_iommu_order(size);
mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
unsigned int npages;
if (tbl) {
unsigned int nio_pages;
size = PAGE_ALIGN(size);
nio_pages = size >> IOMMU_PAGE_SHIFT;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
iommu_free(tbl, dma_handle, npages);
free_pages((unsigned long)vaddr, get_order(size));
}
}
/* arch/powerpc/kernel/perfmon_fsl_booke.c
* Freescale Book-E Performance Monitor code
*
* Author: Andy Fleming
* Copyright (c) 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/reg.h>
#include <asm/xmon.h>
#include <asm/pmc.h>
static inline u32 get_pmlca(int ctr);
static inline void set_pmlca(int ctr, u32 pmlca);
static inline u32 get_pmlca(int ctr)
{
u32 pmlca;
switch (ctr) {
case 0:
pmlca = mfpmr(PMRN_PMLCA0);
break;
case 1:
pmlca = mfpmr(PMRN_PMLCA1);
break;
case 2:
pmlca = mfpmr(PMRN_PMLCA2);
break;
case 3:
pmlca = mfpmr(PMRN_PMLCA3);
break;
default:
panic("Bad ctr number\n");
}
return pmlca;
}
static inline void set_pmlca(int ctr, u32 pmlca)
{
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
break;
default:
panic("Bad ctr number\n");
}
}
void init_pmc_stop(int ctr)
{
u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
PMLCA_FCM1 | PMLCA_FCM0);
u32 pmlcb = 0;
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
mtpmr(PMRN_PMLCB0, pmlcb);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
mtpmr(PMRN_PMLCB1, pmlcb);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
mtpmr(PMRN_PMLCB2, pmlcb);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
mtpmr(PMRN_PMLCB3, pmlcb);
break;
default:
panic("Bad ctr number!\n");
}
}
void set_pmc_event(int ctr, int event)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
((event << PMLCA_EVENT_SHIFT) &
PMLCA_EVENT_MASK);
set_pmlca(ctr, pmlca);
}
void set_pmc_user_kernel(int ctr, int user, int kernel)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
if(user)
pmlca &= ~PMLCA_FCU;
else
pmlca |= PMLCA_FCU;
if(kernel)
pmlca &= ~PMLCA_FCS;
else
pmlca |= PMLCA_FCS;
set_pmlca(ctr, pmlca);
}
void set_pmc_marked(int ctr, int mark0, int mark1)
{
u32 pmlca = get_pmlca(ctr);
if(mark0)
pmlca &= ~PMLCA_FCM0;
else
pmlca |= PMLCA_FCM0;
if(mark1)
pmlca &= ~PMLCA_FCM1;
else
pmlca |= PMLCA_FCM1;
set_pmlca(ctr, pmlca);
}
void pmc_start_ctr(int ctr, int enable)
{
u32 pmlca = get_pmlca(ctr);
pmlca &= ~PMLCA_FC;
if (enable)
pmlca |= PMLCA_CE;
else
pmlca &= ~PMLCA_CE;
set_pmlca(ctr, pmlca);
}
void pmc_start_ctrs(int enable)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 &= ~PMGC0_FAC;
pmgc0 |= PMGC0_FCECE;
if (enable)
pmgc0 |= PMGC0_PMIE;
else
pmgc0 &= ~PMGC0_PMIE;
mtpmr(PMRN_PMGC0, pmgc0);
}
void pmc_stop_ctrs(void)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 |= PMGC0_FAC;
pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
mtpmr(PMRN_PMGC0, pmgc0);
}
void dump_pmcs(void)
{
printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
printk("pmc\t\tpmlca\t\tpmlcb\n");
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
}
EXPORT_SYMBOL(init_pmc_stop);
EXPORT_SYMBOL(set_pmc_event);
EXPORT_SYMBOL(set_pmc_user_kernel);
EXPORT_SYMBOL(set_pmc_marked);
EXPORT_SYMBOL(pmc_start_ctr);
EXPORT_SYMBOL(pmc_start_ctrs);
EXPORT_SYMBOL(pmc_stop_ctrs);
EXPORT_SYMBOL(dump_pmcs);
@@ -71,7 +71,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
}
pmc_owner_caller = __builtin_return_address(0);
perf_irq = new_perf_irq ? : dummy_perf;
perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
out:
spin_unlock(&pmc_owner_lock);
......
@@ -843,7 +843,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs)
{
int fixed = 0;
int sig, code, fixed = 0;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
@@ -857,14 +857,16 @@ void alignment_exception(struct pt_regs *regs)
/* Operand address was bad */
if (fixed == -EFAULT) {
sig = SIGSEGV;
code = SEGV_ACCERR;
} else {
sig = SIGBUS;
code = BUS_ADRALN;
}
if (user_mode(regs))
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
_exception(sig, regs, code, regs->dar);
else
/* Search exception table */
bad_page_fault(regs, regs->dar, SIGSEGV);
return;
}
_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
bad_page_fault(regs, regs->dar, sig);
}
void StackOverflow(struct pt_regs *regs)
......
@@ -92,9 +92,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
&tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
tbl->it_size = size >> PAGE_SHIFT;
tbl->it_size = size >> IOMMU_PAGE_SHIFT;
/* offset for VIO should always be 0 */
tbl->it_offset = offset >> PAGE_SHIFT;
tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
......
@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/processor.h>
@@ -25,7 +26,7 @@ extern char system_call_common[];
/*
* Determine whether a conditional branch instruction would branch.
*/
static int branch_taken(unsigned int instr, struct pt_regs *regs)
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
unsigned int bo = (instr >> 21) & 0x1f;
unsigned int bi;
@@ -51,7 +52,7 @@ static int branch_taken(unsigned int instr, struct pt_regs *regs)
* or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI.
*/
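/* Both branch_taken() above and emulate_step() below are tagged
 * __kprobes, which puts them in the .kprobes.text section that the
 * kprobes core refuses to place probes in: they run while a kprobe
 * is being single-stepped, so a probe inside them would recurse. */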
int emulate_step(struct pt_regs *regs, unsigned int instr)
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
unsigned int opcode, rd;
unsigned long int imm;
......
@@ -480,9 +480,6 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
mm->context.high_htlb_areas |= newareas;
/* update the paca copy of the context struct */
get_paca()->context = mm->context;
/* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */
mb();
......
@@ -13,4 +13,4 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
oprofile-$(CONFIG_PPC32) += op_model_7450.o
oprofile-$(CONFIG_6xx) += op_model_7450.o
@@ -34,6 +34,11 @@ static void op_handle_interrupt(struct pt_regs *regs)
model->handle_interrupt(regs, ctr);
}
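/* on_each_cpu() hands its callback an opaque void *, so this shim
 * adapts that to the model's new cpu_setup(struct op_counter_config *)
 * signature, passing the file's counter config array. */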
static void op_powerpc_cpu_setup(void *dummy)
{
model->cpu_setup(ctr);
}
static int op_powerpc_setup(void)
{
int err;
@@ -47,7 +52,7 @@ static int op_powerpc_setup(void)
model->reg_setup(ctr, &sys, model->num_counters);
/* Configure the registers on all cpus. */
on_each_cpu(model->cpu_setup, NULL, 0, 1);
on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1);
return 0;
}
@@ -142,7 +147,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case PPC_OPROFILE_POWER4:
model = &op_model_power4;
break;
#else
#endif
#ifdef CONFIG_6xx
case PPC_OPROFILE_G4:
model = &op_model_7450;
break;
......
@@ -81,7 +81,7 @@ static void pmc_stop_ctrs(void)
/* Configures the counters on this CPU based on the global
* settings */
static void fsl7450_cpu_setup(void *unused)
static void fsl7450_cpu_setup(struct op_counter_config *ctr)
{
/* freeze all counters */
pmc_stop_ctrs();
......
@@ -32,42 +32,152 @@ static unsigned long reset_value[OP_MAX_COUNTER];
static int num_counters;
static int oprofile_running;
static inline unsigned int ctr_read(unsigned int i)
static void init_pmc_stop(int ctr)
{
switch(i) {
case 0:
return mfpmr(PMRN_PMC0);
case 1:
return mfpmr(PMRN_PMC1);
case 2:
return mfpmr(PMRN_PMC2);
case 3:
return mfpmr(PMRN_PMC3);
default:
return 0;
}
}
u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
PMLCA_FCM1 | PMLCA_FCM0);
u32 pmlcb = 0;
static inline void ctr_write(unsigned int i, unsigned int val)
{
switch(i) {
switch (ctr) {
case 0:
mtpmr(PMRN_PMC0, val);
mtpmr(PMRN_PMLCA0, pmlca);
mtpmr(PMRN_PMLCB0, pmlcb);
break;
case 1:
mtpmr(PMRN_PMC1, val);
mtpmr(PMRN_PMLCA1, pmlca);
mtpmr(PMRN_PMLCB1, pmlcb);
break;
case 2:
mtpmr(PMRN_PMC2, val);
mtpmr(PMRN_PMLCA2, pmlca);
mtpmr(PMRN_PMLCB2, pmlcb);
break;
case 3:
mtpmr(PMRN_PMC3, val);
mtpmr(PMRN_PMLCA3, pmlca);
mtpmr(PMRN_PMLCB3, pmlcb);
break;
default:
break;
panic("Bad ctr number!\n");
}
}
static void set_pmc_event(int ctr, int event)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
((event << PMLCA_EVENT_SHIFT) &
PMLCA_EVENT_MASK);
set_pmlca(ctr, pmlca);
}
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
if(user)
pmlca &= ~PMLCA_FCU;
else
pmlca |= PMLCA_FCU;
if(kernel)
pmlca &= ~PMLCA_FCS;
else
pmlca |= PMLCA_FCS;
set_pmlca(ctr, pmlca);
}
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
u32 pmlca = get_pmlca(ctr);
if(mark0)
pmlca &= ~PMLCA_FCM0;
else
pmlca |= PMLCA_FCM0;
if(mark1)
pmlca &= ~PMLCA_FCM1;
else
pmlca |= PMLCA_FCM1;
set_pmlca(ctr, pmlca);
}
static void pmc_start_ctr(int ctr, int enable)
{
u32 pmlca = get_pmlca(ctr);
pmlca &= ~PMLCA_FC;
if (enable)
pmlca |= PMLCA_CE;
else
pmlca &= ~PMLCA_CE;
set_pmlca(ctr, pmlca);
}
static void pmc_start_ctrs(int enable)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 &= ~PMGC0_FAC;
pmgc0 |= PMGC0_FCECE;
if (enable)
pmgc0 |= PMGC0_PMIE;
else
pmgc0 &= ~PMGC0_PMIE;
mtpmr(PMRN_PMGC0, pmgc0);
}
static void pmc_stop_ctrs(void)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 |= PMGC0_FAC;
pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
mtpmr(PMRN_PMGC0, pmgc0);
}
static void dump_pmcs(void)
{
printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
printk("pmc\t\tpmlca\t\tpmlcb\n");
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
}
static void fsl_booke_cpu_setup(struct op_counter_config *ctr)
{
int i;
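	/* This runs once on every CPU via on_each_cpu(); the performance
	 * monitor registers are per-CPU, so each core has to program its
	 * own counters here rather than in reg_setup() below. */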
/* freeze all counters */
pmc_stop_ctrs();
for (i = 0;i < num_counters;i++) {
init_pmc_stop(i);
set_pmc_event(i, ctr[i].event);
set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
}
}
static void fsl_booke_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
@@ -77,23 +187,14 @@ static void fsl_booke_reg_setup(struct op_counter_config *ctr,
num_counters = num_ctrs;
/* freeze all counters */
pmc_stop_ctrs();
/* Our counters count up, and "count" refers to
* how much before the next interrupt, and we interrupt
* on overflow. So we calculate the starting value
* which will give us "count" until overflow.
* Then we set the events on the enabled counters */
for (i = 0; i < num_counters; ++i) {
for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
init_pmc_stop(i);
set_pmc_event(i, ctr[i].event);
set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
}
}
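The comment kept above describes the start-value arithmetic; a standalone illustration (only the 0x80000000 overflow threshold comes from the code above, the rest is demo):

#include <stdio.h>

int main(void)
{
	unsigned int count = 100000;			/* events per sample */
	unsigned int reset = 0x80000000U - count;	/* reset_value[i] */

	/* The 32-bit counter starts at "reset", counts up, and raises
	 * its overflow interrupt on reaching 0x80000000, i.e. after
	 * exactly "count" increments. */
	printf("increments until overflow: %u\n", 0x80000000U - reset);
	return 0;
}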
static void fsl_booke_start(struct op_counter_config *ctr)
@@ -105,8 +206,8 @@ static void fsl_booke_start(struct op_counter_config *ctr)
for (i = 0; i < num_counters; ++i) {
if (ctr[i].enabled) {
ctr_write(i, reset_value[i]);
/* Set Each enabled counterd to only
* count when the Mark bit is not set */
/* Set each enabled counter to only
* count when the Mark bit is *not* set */
set_pmc_marked(i, 1, 0);
pmc_start_ctr(i, 1);
} else {
@@ -177,6 +278,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs,
struct op_powerpc_model op_model_fsl_booke = {
.reg_setup = fsl_booke_reg_setup,
.cpu_setup = fsl_booke_cpu_setup,
.start = fsl_booke_start,
.stop = fsl_booke_stop,
.handle_interrupt = fsl_booke_handle_interrupt,
......
@@ -82,7 +82,7 @@ static inline int mmcra_must_set_sample(void)
return 0;
}
static void power4_cpu_setup(void *unused)
static void power4_cpu_setup(struct op_counter_config *ctr)
{
unsigned int mmcr0 = mmcr0_val;
unsigned long mmcra = mmcra_val;
......
@@ -102,7 +102,7 @@ static void rs64_reg_setup(struct op_counter_config *ctr,
/* XXX setup user and kernel profiling */
}
static void rs64_cpu_setup(void *unused)
static void rs64_cpu_setup(struct op_counter_config *ctr)
{
unsigned int mmcr0;
......
@@ -43,9 +43,6 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
u64 rc;
u64 tce, rpn;
index <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
while (npages--) {
rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
@@ -75,9 +72,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{
u64 rc;
npages <<= TCE_PAGE_FACTOR;
index <<= TCE_PAGE_FACTOR;
while (npages--) {
rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
if (rc)
@@ -136,10 +130,9 @@ void iommu_table_getparms_iSeries(unsigned long busno,
panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
/* itc_size is in pages worth of table, it_size is in # of entries */
tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
TCE_ENTRY_SIZE) >> TCE_PAGE_FACTOR;
tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
tbl->it_busno = parms->itc_busno;
tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
tbl->it_offset = parms->itc_offset;
tbl->it_index = parms->itc_index;
tbl->it_blocksize = 1;
tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
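For context on the TCE_PAGE_FACTOR deletions in this and the following files: hardware TCEs are fixed at 4kB, so while the generic IOMMU layer worked in kernel-page units a 64kB-page kernel had to fan each request out into multiple TCEs; with the IOMMU layer now using 4kB pages throughout, indexes and counts already arrive in TCE units. A sketch of the old scaling:

#include <stdio.h>

int main(void)
{
	int page_shift = 16;	/* a 64kB-page kernel */
	int tce_shift = 12;	/* TCE_SHIFT: hardware TCEs are 4kB */
	int factor = page_shift - tce_shift;	/* the old TCE_PAGE_FACTOR */

	/* each 64kB kernel page used to expand into 16 TCE entries */
	printf("TCEs per kernel page: %d\n", 1 << factor);
	return 0;
}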
......
@@ -57,9 +57,6 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
u64 *tcep;
u64 rpn;
index <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
proto_tce = TCE_PCI_READ; // Read allowed
if (direction != DMA_TO_DEVICE)
@@ -82,9 +79,6 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
u64 *tcep;
npages <<= TCE_PAGE_FACTOR;
index <<= TCE_PAGE_FACTOR;
tcep = ((u64 *)tbl->it_base) + index;
while (npages--)
@@ -95,7 +89,6 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
u64 *tcep;
index <<= TCE_PAGE_FACTOR;
tcep = ((u64 *)tbl->it_base) + index;
return *tcep;
@@ -109,9 +102,6 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
u64 proto_tce, tce;
u64 rpn;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
@@ -146,7 +136,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
u64 rpn;
long l, limit;
if (TCE_PAGE_FACTOR == 0 && npages == 1)
if (npages == 1)
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction);
@@ -164,9 +154,6 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
__get_cpu_var(tce_page) = tcep;
}
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
@@ -207,9 +194,6 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
{
u64 rc;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
while (npages--) {
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
@@ -229,9 +213,6 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
{
u64 rc;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
if (rc && printk_ratelimit()) {
@@ -248,7 +229,6 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
u64 rc;
unsigned long tce_ret;
tcenum <<= TCE_PAGE_FACTOR;
rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
if (rc && printk_ratelimit()) {
@@ -289,7 +269,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_busno = phb->bus->number;
/* Units of tce entries */
tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT;
tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -300,7 +280,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */
tbl->it_size = phb->dma_window_size >> PAGE_SHIFT;
tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
tbl->it_index = 0;
tbl->it_blocksize = 16;
@@ -325,8 +305,8 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
tbl->it_base = 0;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
tbl->it_offset = offset >> PAGE_SHIFT;
tbl->it_size = size >> PAGE_SHIFT;
tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_size = size >> IOMMU_PAGE_SHIFT;
}
static void iommu_bus_setup_pSeries(struct pci_bus *bus)
@@ -522,8 +502,6 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
const void *dma_window = NULL;
struct pci_dn *pci;
DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));
/* dev setup for LPAR is a little tricky, since the device tree might
* contain the dma-window properties per-device and not necessarily
* for the bus. So we need to search upwards in the tree until we
@@ -532,6 +510,9 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
*/
dn = pci_device_to_OF_node(dev);
DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n",
dev, pci_name(dev), dn->full_name);
for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
pdn = pdn->parent) {
dma_window = get_property(pdn, "ibm,dma-window", NULL);
......
@@ -72,7 +72,6 @@
#define DART_PAGE_SHIFT 12
#define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT)
#define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT)
#endif /* _POWERPC_SYSDEV_DART_H */
@@ -156,9 +156,6 @@ static void dart_build(struct iommu_table *tbl, long index,
DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
index <<= DART_PAGE_FACTOR;
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int*)tbl->it_base) + index;
/* On U3, all memory is contiguous, so we can move this
@@ -199,9 +196,6 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
DBG("dart: free at: %lx, %lx\n", index, npages);
index <<= DART_PAGE_FACTOR;
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int *)tbl->it_base) + index;
while (npages--)
@@ -281,7 +275,7 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_busno = 0;
iommu_table_dart.it_offset = 0;
/* it_size is in number of entries */
iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
/* Initialize the common IOMMU code */
iommu_table_dart.it_base = (unsigned long)dart_vbase;
......
@@ -122,8 +122,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
}
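/* The caller already passes the value the CECDR expects in cmd_input
 * (this reading of the "wrong value" fix in the summary above is an
 * assumption), so the immrbar_virt_to_phys() translation is dropped. */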
out_be32(&qe_immr->cp.cecdr,
immrbar_virt_to_phys((void *)cmd_input));
out_be32(&qe_immr->cp.cecdr, cmd_input);
out_be32(&qe_immr->cp.cecr,
(cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
mcn_protocol << mcn_shift));
......
@@ -708,7 +708,7 @@ void single_step_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs)
{
int fixed;
int sig, code, fixed = 0;
fixed = fix_alignment(regs);
if (fixed == 1) {
@@ -717,14 +717,16 @@ void alignment_exception(struct pt_regs *regs)
return;
}
if (fixed == -EFAULT) {
/* fixed == -EFAULT means the operand address was bad */
sig = SIGSEGV;
code = SEGV_ACCERR;
} else {
sig = SIGBUS;
code = BUS_ADRALN;
}
if (user_mode(regs))
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar);
_exception(sig, regs, code, regs->dar);
else
bad_page_fault(regs, regs->dar, SIGSEGV);
return;
}
_exception(SIGBUS, regs, BUS_ADRALN, regs->dar);
bad_page_fault(regs, regs->dar, sig);
}
void StackOverflow(struct pt_regs *regs)
......
@@ -157,7 +157,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
break;
case cmap_gxt2000:
out_le32((unsigned __iomem *) par->cmap_adr + regno,
out_le32(((unsigned __iomem *) par->cmap_adr) + regno,
(red << 16 | green << 8 | blue));
break;
}
@@ -213,7 +213,7 @@ static int offb_blank(int blank, struct fb_info *info)
out_le32(par->cmap_adr + 0xb4, 0);
break;
case cmap_gxt2000:
out_le32((unsigned __iomem *) par->cmap_adr + i,
out_le32(((unsigned __iomem *) par->cmap_adr) + i,
0);
break;
}
@@ -226,13 +226,23 @@ static int offb_blank(int blank, struct fb_info *info)
static void __iomem *offb_map_reg(struct device_node *np, int index,
unsigned long offset, unsigned long size)
{
struct resource r;
if (of_address_to_resource(np, index, &r))
return 0;
if ((r.start + offset + size) > r.end)
return 0;
return ioremap(r.start + offset, size);
const u32 *addrp;
u64 asize, taddr;
unsigned int flags;
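	/* Try the node's address as a PCI address first, fall back to a
	 * plain OF address, bounds-check the requested window against
	 * the reported size, then translate to a CPU physical address
	 * for ioremap(). */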
addrp = of_get_pci_address(np, index, &asize, &flags);
if (addrp == NULL)
addrp = of_get_address(np, index, &asize, &flags);
if (addrp == NULL)
return NULL;
if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
return NULL;
if ((offset + size) > asize)
return NULL;
taddr = of_translate_address(np, addrp);
if (taddr == OF_BAD_ADDR)
return NULL;
return ioremap(taddr + offset, size);
}
static void __init offb_init_fb(const char *name, const char *full_name,
@@ -289,7 +299,6 @@ static void __init offb_init_fb(const char *name, const char *full_name,
par->cmap_type = cmap_unknown;
if (depth == 8) {
/* Palette hacks disabled for now */
if (dp && !strncmp(name, "ATY,Rage128", 11)) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr)
@@ -313,7 +322,8 @@ static void __init offb_init_fb(const char *name, const char *full_name,
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
par->cmap_data = par->cmap_adr + 1;
par->cmap_type = cmap_m64;
} else if (dp && device_is_compatible(dp, "pci1014,b7")) {
} else if (dp && (device_is_compatible(dp, "pci1014,b7") ||
device_is_compatible(dp, "pci1014,21c"))) {
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
if (par->cmap_adr)
par->cmap_type = cmap_gxt2000;
@@ -433,7 +443,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
pp = get_property(dp, "linux,bootx-linebytes", &len);
if (pp == NULL)
pp = get_property(dp, "linebytes", &len);
if (pp && len == sizeof(u32))
if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
pitch = *pp;
else
pitch = width * ((depth + 7) / 8);
@@ -496,7 +506,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
offb_init_fb(no_real_node ? "bootx" : dp->name,
no_real_node ? "display" : dp->full_name,
width, height, depth, pitch, address,
no_real_node ? dp : NULL);
no_real_node ? NULL : dp);
}
}
......
@@ -14,7 +14,17 @@ struct task_struct;
#ifdef __powerpc64__
#include <asm/paca.h>
#define current (get_paca()->__current)
static inline struct task_struct *get_current(void)
{
struct task_struct *task;
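	/* Read paca->__current in a single instruction through r13 (the
	 * per-CPU paca pointer), so no preemption point can fall between
	 * locating the paca and loading __current, as it could with a
	 * C-level get_paca()->__current dereference. */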
__asm__ __volatile__("ld %0,%1(13)"
: "=r" (task)
: "i" (offsetof(struct paca_struct, __current)));
return task;
}
#define current get_current()
#else
......
@@ -163,8 +163,11 @@ extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
static inline void mmiowb(void)
{
__asm__ __volatile__ ("sync" : : : "memory");
get_paca()->io_sync = 0;
unsigned long tmp;
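	/* r13 always points at the paca of the CPU this code is running
	 * on, so folding the sync and the io_sync clear into one asm
	 * statement leaves no window in which preemption could migrate
	 * the task to another CPU between the two. */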
__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
: "memory");
}
/*
......
@@ -22,10 +22,28 @@
#define _ASM_IOMMU_H
#ifdef __KERNEL__
#include <asm/types.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/types.h>
#include <asm/bitops.h>
#define IOMMU_PAGE_SHIFT 12
#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
#ifndef __ASSEMBLY__
/* Pure 2^n version of get_order */
static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
{
return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
}
#endif /* __ASSEMBLY__ */
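A worked check of get_iommu_order(), using a portable stand-in for the kernel's __ilog2() (floor of log2, assumed here to yield -1 for 0, which makes the exactly-one-page case come out as order 0):

#include <stdio.h>

static int ilog2_demo(unsigned long x)		/* stand-in for __ilog2() */
{
	int r = -1;
	while (x) { r++; x >>= 1; }
	return r;
}

static int iommu_order_demo(unsigned long size)
{
	return ilog2_demo((size - 1) >> 12) + 1;	/* IOMMU_PAGE_SHIFT == 12 */
}

int main(void)
{
	printf("%d\n", iommu_order_demo(0x1000));	/* 0: one 4kB page */
	printf("%d\n", iommu_order_demo(0x1001));	/* 1: rounds up to two */
	printf("%d\n", iommu_order_demo(0x3000));	/* 2: rounds up to four */
	return 0;
}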
/*
* IOMAP_MAX_ORDER defines the largest contiguous block
......
@@ -42,7 +42,7 @@ struct op_powerpc_model {
void (*reg_setup) (struct op_counter_config *,
struct op_system_config *,
int num_counters);
void (*cpu_setup) (void *);
void (*cpu_setup) (struct op_counter_config *);
void (*start) (struct op_counter_config *);
void (*stop) (void);
void (*handle_interrupt) (struct pt_regs *,
@@ -121,7 +121,90 @@ static inline void ctr_write(unsigned int i, unsigned int val)
break;
}
}
#endif /* !CONFIG_FSL_BOOKE */
#else /* CONFIG_FSL_BOOKE */
static inline u32 get_pmlca(int ctr)
{
u32 pmlca;
switch (ctr) {
case 0:
pmlca = mfpmr(PMRN_PMLCA0);
break;
case 1:
pmlca = mfpmr(PMRN_PMLCA1);
break;
case 2:
pmlca = mfpmr(PMRN_PMLCA2);
break;
case 3:
pmlca = mfpmr(PMRN_PMLCA3);
break;
default:
panic("Bad ctr number\n");
}
return pmlca;
}
static inline void set_pmlca(int ctr, u32 pmlca)
{
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
break;
default:
panic("Bad ctr number\n");
}
}
static inline unsigned int ctr_read(unsigned int i)
{
switch(i) {
case 0:
return mfpmr(PMRN_PMC0);
case 1:
return mfpmr(PMRN_PMC1);
case 2:
return mfpmr(PMRN_PMC2);
case 3:
return mfpmr(PMRN_PMC3);
default:
return 0;
}
}
static inline void ctr_write(unsigned int i, unsigned int val)
{
switch(i) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
break;
}
}
#endif /* CONFIG_FSL_BOOKE */
extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
......
@@ -32,18 +32,5 @@ void release_pmc_hardware(void);
void power4_enable_pmcs(void);
#endif
#ifdef CONFIG_FSL_BOOKE
void init_pmc_stop(int ctr);
void set_pmc_event(int ctr, int event);
void set_pmc_user_kernel(int ctr, int user, int kernel);
void set_pmc_marked(int ctr, int mark0, int mark1);
void pmc_start_ctr(int ctr, int enable);
void pmc_start_ctrs(int enable);
void pmc_stop_ctrs(void);
void dump_pmcs(void);
extern struct op_powerpc_model op_model_fsl_booke;
#endif
#endif /* __KERNEL__ */
#endif /* _POWERPC_PMC_H */
@@ -25,8 +25,8 @@
*
* We have to use the sync instructions for mb(), since lwsync doesn't
* order loads with respect to previous stores. Lwsync is fine for
* rmb(), though. Note that lwsync is interpreted as sync by
* 32-bit and older 64-bit CPUs.
* rmb(), though. Note that rmb() actually uses a sync on 32-bit
* architectures.
*
* For wmb(), we use sync since wmb is used in drivers to order
* stores to system memory with respect to writes to the device.
@@ -34,7 +34,7 @@
* SMP since it is only used to order updates to system memory.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
#define rmb() __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0)
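The LWSYNC macro introduced above has to degrade to a full sync on cores that lack the lwsync instruction; a sketch of the kind of definition this implies (an assumption, the real header may key off a different config symbol):

#ifdef CONFIG_PPC64
#define LWSYNC	lwsync
#else
#define LWSYNC	sync	/* e500/Book-E cores have no lwsync */
#endif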
......
@@ -22,6 +22,8 @@
#define _ASM_POWERPC_TCE_H
#ifdef __KERNEL__
#include <asm/iommu.h>
/*
* Tces come in two formats, one for the virtual bus and a different
* format for PCI
@@ -33,7 +35,6 @@
#define TCE_SHIFT 12
#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
#define TCE_PAGE_FACTOR (PAGE_SHIFT - TCE_SHIFT)
#define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */
......