Commit fe333321 authored by Ingo Molnar, committed by Benjamin Herrenschmidt

powerpc: Change u64/s64 to a long long integer type

Convert arch/powerpc/ over to long long based u64:

 -#ifdef __powerpc64__
 -# include <asm-generic/int-l64.h>
 -#else
 -# include <asm-generic/int-ll64.h>
 -#endif
 +#include <asm-generic/int-ll64.h>

This will avoid recurring spurious warnings in core kernel code that
come up when people test on their own hardware (i.e. x86 in ~98% of
the cases). This is what x86 uses, and it generally helps keep 64-bit
code 32-bit clean too.

[Adjusted to not impact user mode (from paulus) - sfr]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Parent 66c721e1
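
The practical effect on format strings, which is what most of the diff below touches: once u64 is unsigned long long, a %lx conversion no longer matches and gcc's -Wformat warns. A minimal sketch of the before/after (the function name here is illustrative, not part of the patch):

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: with <asm-generic/int-ll64.h>, u64 is unsigned long long. */
static void show_mask(u64 mask)
{
        /* Before: "0x%lx" expects unsigned long and now triggers -Wformat. */
        /* printk(KERN_INFO "mask: 0x%lx\n", mask); */

        /* After: %llx (or a cast to u64 plus %llx) matches on both 32- and 64-bit. */
        printk(KERN_INFO "mask: 0x%llx\n", mask);
}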
@@ -18,7 +18,7 @@
  */
 #define RTAS_UNKNOWN_SERVICE (-1)
-#define RTAS_INSTANTIATE_MAX (1UL<<30) /* Don't instantiate rtas at/above this value */
+#define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */
 /* Buffer size for ppc_rtas system call. */
 #define RTAS_RMOBUF_MAX (64 * 1024)
...
 #ifndef _ASM_POWERPC_TYPES_H
 #define _ASM_POWERPC_TYPES_H
-#ifdef __powerpc64__
+/*
+ * This is here because we used to use l64 for 64bit powerpc
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ */
+#if defined(__powerpc64__) && !defined(__KERNEL__)
 # include <asm-generic/int-l64.h>
 #else
 # include <asm-generic/int-ll64.h>
...
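
For reference, the two asm-generic headers selected above differ only in how the 64-bit types are spelled; roughly (a simplified sketch, not the verbatim headers):

/* <asm-generic/int-l64.h> - still used by 64-bit user space: */
typedef signed long __s64;
typedef unsigned long __u64;

/* <asm-generic/int-ll64.h> - now used by the kernel on both 32- and 64-bit: */
typedef signed long long __s64;
typedef unsigned long long __u64;

Both are 64 bits wide on ppc64; only the type, and therefore the printk length modifier (%lx vs %llx), differs.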
@@ -79,10 +79,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 "Warning: IOMMU offset too big for device mask\n");
 if (tbl)
 printk(KERN_INFO
-"mask: 0x%08lx, table offset: 0x%08lx\n",
+"mask: 0x%08llx, table offset: 0x%08lx\n",
 mask, tbl->it_offset);
 else
-printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
+printk(KERN_INFO "mask: 0x%08llx, table unavailable\n",
 mask);
 return 0;
 } else
...
@@ -239,12 +239,12 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 if (printk_ratelimit()) {
 printk(KERN_INFO "iommu_free: invalid entry\n");
 printk(KERN_INFO "\tentry = 0x%lx\n", entry);
-printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
-printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
-printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
-printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
-printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
-printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
+printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr);
+printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl);
+printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno);
+printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size);
+printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset);
+printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
 WARN_ON(1);
 }
 return;
...
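
The hunk above shows the casting idiom used throughout the patch: values whose underlying type varies (dma_addr_t, the iommu_table fields, even the table pointer) are cast to u64 and printed with %llx, while a genuine unsigned long such as entry keeps %lx. A minimal sketch of the idiom (hypothetical helper, not from the patch):

#include <linux/kernel.h>
#include <linux/types.h>

static void dump_iommu_args(unsigned long entry, dma_addr_t dma_addr)
{
        printk(KERN_INFO "\tentry = 0x%lx\n", entry);             /* unsigned long -> %lx */
        printk(KERN_INFO "\tdma_addr = 0x%llx\n", (u64)dma_addr); /* cast to u64 -> %llx  */
}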
@@ -240,7 +240,7 @@ static void parse_ppp_data(struct seq_file *m)
 if (rc)
 return;
-seq_printf(m, "partition_entitled_capacity=%ld\n",
+seq_printf(m, "partition_entitled_capacity=%lld\n",
 ppp_data.entitlement);
 seq_printf(m, "group=%d\n", ppp_data.group_num);
 seq_printf(m, "system_active_processors=%d\n",
@@ -265,7 +265,7 @@ static void parse_ppp_data(struct seq_file *m)
 ppp_data.unallocated_weight);
 seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
 seq_printf(m, "capped=%d\n", ppp_data.capped);
-seq_printf(m, "unallocated_capacity=%ld\n",
+seq_printf(m, "unallocated_capacity=%lld\n",
 ppp_data.unallocated_entitlement);
 }
@@ -509,10 +509,10 @@ static ssize_t update_ppp(u64 *entitlement, u8 *weight)
 } else
 return -EINVAL;
-pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
+pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
 __func__, ppp_data.entitlement, ppp_data.weight);
-pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
 __func__, new_entitled, new_weight);
 retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
@@ -558,7 +558,7 @@ static ssize_t update_mpp(u64 *entitlement, u8 *weight)
 pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
 __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
-pr_debug("%s: new_entitled = %lu, new_weight = %u\n",
+pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
 __func__, new_entitled, new_weight);
 rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
...
@@ -434,8 +434,8 @@ void __init setup_system(void)
 printk("Starting Linux PPC64 %s\n", init_utsname()->version);
 printk("-----------------------------------------------------\n");
-printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
-printk("physicalMemorySize = 0x%lx\n", lmb_phys_mem_size());
+printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+printk("physicalMemorySize = 0x%llx\n", lmb_phys_mem_size());
 if (ppc64_caches.dline_size != 0x80)
 printk("ppc64_caches.dcache_line_size = 0x%x\n",
 ppc64_caches.dline_size);
@@ -493,7 +493,7 @@ static void __init emergency_stack_init(void)
 * bringup, we need to get at them in real mode. This means they
 * must also be within the RMO region.
 */
-limit = min(0x10000000UL, lmb.rmo_size);
+limit = min(0x10000000ULL, lmb.rmo_size);
 for_each_possible_cpu(i) {
 unsigned long sp;
...
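
The emergency-stack hunk needs more than a format-string change: the kernel's min() macro type-checks its two arguments, and lmb.rmo_size is a u64 here, so the bare UL literal no longer matches and the suffix becomes ULL. A sketch of the constraint (illustrative helper, using the real min() from <linux/kernel.h>):

#include <linux/kernel.h>
#include <linux/types.h>

static u64 pick_limit(u64 rmo_size)
{
        /* min(0x10000000UL, rmo_size) would compare unsigned long with
         * unsigned long long and trip min()'s type check ("comparison of
         * distinct pointer types"); matching suffixes keep it quiet. */
        return min(0x10000000ULL, rmo_size);
}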
@@ -251,8 +251,8 @@ void __init stabs_alloc(void)
 paca[cpu].stab_addr = newstab;
 paca[cpu].stab_real = virt_to_abs(newstab);
-printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
-"virtual, 0x%lx absolute\n",
+printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
+"virtual, 0x%llx absolute\n",
 cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
 }
 }
...
@@ -132,7 +132,7 @@ static int pa6t_reg_setup(struct op_counter_config *ctr,
 for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
 /* counters are 40 bit. Move to cputable at some point? */
 reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
-pr_debug("reset_value for pmc%u inited to 0x%lx\n",
+pr_debug("reset_value for pmc%u inited to 0x%llx\n",
 pmc, reset_value[pmc]);
 }
@@ -177,7 +177,7 @@ static int pa6t_start(struct op_counter_config *ctr)
 oprofile_running = 1;
-pr_debug("start on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0);
+pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
 return 0;
 }
@@ -193,7 +193,7 @@ static void pa6t_stop(void)
 oprofile_running = 0;
-pr_debug("stop on cpu %d, mmcr0 %lx\n", smp_processor_id(), mmcr0);
+pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
 }
 /* handle the perfmon overflow vector */
...
@@ -99,7 +99,7 @@ static void beatic_end_irq(unsigned int irq_plug)
 err = beat_downcount_of_interrupt(irq_plug);
 if (err != 0) {
 if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
-panic("Failed to downcount IRQ! Error = %16lx", err);
+panic("Failed to downcount IRQ! Error = %16llx", err);
 printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug);
 }
...
@@ -405,7 +405,7 @@ static int __init celleb_setup_epci(struct device_node *node,
 hose->cfg_addr = ioremap(r.start, (r.end - r.start + 1));
 if (!hose->cfg_addr)
 goto error;
-pr_debug("EPCI: cfg_addr map 0x%016lx->0x%016lx + 0x%016lx\n",
+pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
 r.start, (unsigned long)hose->cfg_addr, (r.end - r.start + 1));
 if (of_address_to_resource(node, 2, &r))
@@ -413,7 +413,7 @@ static int __init celleb_setup_epci(struct device_node *node,
 hose->cfg_data = ioremap(r.start, (r.end - r.start + 1));
 if (!hose->cfg_data)
 goto error;
-pr_debug("EPCI: cfg_data map 0x%016lx->0x%016lx + 0x%016lx\n",
+pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
 r.start, (unsigned long)hose->cfg_data, (r.end - r.start + 1));
 hose->ops = &celleb_epci_ops;
...
@@ -855,7 +855,7 @@ static int __init cell_iommu_init_disabled(void)
 */
 if (np && size < lmb_end_of_DRAM()) {
 printk(KERN_WARNING "iommu: force-enabled, dma window"
-" (%ldMB) smaller than total memory (%ldMB)\n",
+" (%ldMB) smaller than total memory (%lldMB)\n",
 size >> 20, lmb_end_of_DRAM() >> 20);
 return -ENODEV;
 }
@@ -985,7 +985,7 @@ static void cell_dma_dev_setup_fixed(struct device *dev)
 addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
 archdata->dma_data = (void *)addr;
-dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
+dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
 }
 static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
...
@@ -38,16 +38,16 @@ static void dump_fir(int cpu)
 /* Todo: do some nicer parsing of bits and based on them go down
 * to other sub-units FIRs and not only IIC
 */
-printk(KERN_ERR "Global Checkstop FIR : 0x%016lx\n",
+printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n",
 in_be64(&pregs->checkstop_fir));
-printk(KERN_ERR "Global Recoverable FIR : 0x%016lx\n",
+printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n",
 in_be64(&pregs->checkstop_fir));
-printk(KERN_ERR "Global MachineCheck FIR : 0x%016lx\n",
+printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n",
 in_be64(&pregs->spec_att_mchk_fir));
 if (iregs == NULL)
 return;
-printk(KERN_ERR "IOC FIR : 0x%016lx\n",
+printk(KERN_ERR "IOC FIR : 0x%016llx\n",
 in_be64(&iregs->ioc_fir));
 }
...
@@ -151,7 +151,7 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 {
 struct spu_priv2 __iomem *priv2 = spu->priv2;
-pr_debug("%s: adding SLB[%d] 0x%016lx 0x%016lx\n",
+pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
 __func__, slbe, slb->vsid, slb->esid);
 out_be64(&priv2->slb_index_W, slbe);
@@ -221,7 +221,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
 int ret;
-pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);
+pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);
 /*
 * Handle kernel space hash faults immediately. User hash
...
@@ -54,7 +54,7 @@ long spu_sys_callback(struct spu_syscall_block *s)
 long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
 if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
-pr_debug("%s: invalid syscall #%ld", __func__, s->nr_ret);
+pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret);
 return -ENOSYS;
 }
...
@@ -66,7 +66,7 @@ static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
 if (rc)
-panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
+panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
 rc);
 index++;
 uaddr += TCE_PAGE_SIZE;
@@ -81,7 +81,7 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
 while (npages--) {
 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
 if (rc)
-panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
+panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
 rc);
 index++;
 }
...
@@ -127,10 +127,10 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 }
 if (rc && printk_ratelimit()) {
-printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
-printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
-printk("\ttcenum = 0x%lx\n", (u64)tcenum);
-printk("\ttce val = 0x%lx\n", tce );
+printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
+printk("\ttcenum = 0x%llx\n", (u64)tcenum);
+printk("\ttce val = 0x%llx\n", tce );
 show_stack(current, (unsigned long *)__get_SP());
 }
@@ -210,10 +210,10 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 }
 if (rc && printk_ratelimit()) {
-printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
-printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
-printk("\tnpages = 0x%lx\n", (u64)npages);
-printk("\ttce[0] val = 0x%lx\n", tcep[0]);
+printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
+printk("\tnpages = 0x%llx\n", (u64)npages);
+printk("\ttce[0] val = 0x%llx\n", tcep[0]);
 show_stack(current, (unsigned long *)__get_SP());
 }
 return ret;
@@ -227,9 +227,9 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
 rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
 if (rc && printk_ratelimit()) {
-printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
-printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
-printk("\ttcenum = 0x%lx\n", (u64)tcenum);
+printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
+printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
+printk("\ttcenum = 0x%llx\n", (u64)tcenum);
 show_stack(current, (unsigned long *)__get_SP());
 }
@@ -246,9 +246,9 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
 if (rc && printk_ratelimit()) {
 printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
-printk("\trc = %ld\n", rc);
-printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
-printk("\tnpages = 0x%lx\n", (u64)npages);
+printk("\trc = %lld\n", rc);
+printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
+printk("\tnpages = 0x%llx\n", (u64)npages);
 show_stack(current, (unsigned long *)__get_SP());
 }
 }
@@ -261,10 +261,9 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
 rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
 if (rc && printk_ratelimit()) {
-printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%ld\n",
-rc);
-printk("\tindex = 0x%lx\n", (u64)tbl->it_index);
-printk("\ttcenum = 0x%lx\n", (u64)tcenum);
+printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
+printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
+printk("\ttcenum = 0x%llx\n", (u64)tcenum);
 show_stack(current, (unsigned long *)__get_SP());
 }
...
@@ -435,7 +435,7 @@ static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
 addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
 }
-printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%lx\n",
+printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
 PCI_SLOT(devfn), PCI_FUNC(devfn),
 flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);
...
@@ -712,7 +712,7 @@ static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
 rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
 ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));
-printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
+printk(KERN_ERR "pasemi_mac: rx error. macrx %016llx, rx status %llx\n",
 macrx, *chan->status);
 printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
@@ -730,8 +730,8 @@ static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
 cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));
-printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\
-"tx status 0x%016lx\n", mactx, *chan->status);
+printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\
+"tx status 0x%016llx\n", mactx, *chan->status);
 printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
 }
...
@@ -297,7 +297,7 @@ static int __devinit electra_cf_probe(struct of_device *ofdev,
 goto fail3;
 }
-dev_info(device, "at mem 0x%lx io 0x%lx irq %d\n",
+dev_info(device, "at mem 0x%lx io 0x%llx irq %d\n",
 cf->mem_phys, io.start, cf->irq);
 cf->active = 1;
...
@@ -1061,7 +1061,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 }
 sdev_printk(KERN_INFO, cmd->device,
-"aborting command. lun 0x%lx, tag 0x%lx\n",
+"aborting command. lun 0x%llx, tag 0x%llx\n",
 (((u64) lun) << 48), (u64) found_evt);
 wait_for_completion(&evt->comp);
@@ -1082,7 +1082,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 if (rsp_rc) {
 if (printk_ratelimit())
 sdev_printk(KERN_WARNING, cmd->device,
-"abort code %d for task tag 0x%lx\n",
+"abort code %d for task tag 0x%llx\n",
 rsp_rc, tsk_mgmt->task_tag);
 return FAILED;
 }
@@ -1102,12 +1102,12 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
 if (found_evt == NULL) {
 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%lx completed\n",
+sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
 tsk_mgmt->task_tag);
 return SUCCESS;
 }
-sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%lx\n",
+sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
 tsk_mgmt->task_tag);
 cmd->result = (DID_ABORT << 16);
@@ -1182,7 +1182,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 return FAILED;
 }
-sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%lx\n",
+sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
 (((u64) lun) << 48));
 wait_for_completion(&evt->comp);
@@ -1203,7 +1203,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
 if (rsp_rc) {
 if (printk_ratelimit())
 sdev_printk(KERN_WARNING, cmd->device,
-"reset code %d for task tag 0x%lx\n",
+"reset code %d for task tag 0x%llx\n",
 rsp_rc, tsk_mgmt->task_tag);
 return FAILED;
 }
...