Commit d5b9b787 authored by Linus Torvalds

Merge branch 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Correct definition of handle_IPI
  [IA64] move SAL_CACHE_FLUSH check later in boot
  [IA64] MCA recovery: Montecito support
  [IA64] cpu-hotplug: Fixing confliction between CPU hot-add and IPI
  [IA64] don't double >> PAGE_SHIFT pointer for /dev/kmem access
@@ -434,6 +434,50 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 	return MCA_IS_GLOBAL;
 }
 
+/**
+ * get_target_identifier - Get the valid Cache or Bus check target identifier.
+ * @peidx:	pointer of index of processor error section
+ *
+ * Return value:
+ *	target address on Success / 0 on Failure
+ */
+static u64
+get_target_identifier(peidx_table_t *peidx)
+{
+	u64 target_address = 0;
+	sal_log_mod_error_info_t *smei;
+	pal_cache_check_info_t *pcci;
+	int i, level = 9;
+
+	/*
+	 * Look through the cache checks for a valid target identifier
+	 * If more than one valid target identifier, return the one
+	 * with the lowest cache level.
+	 */
+	for (i = 0; i < peidx_cache_check_num(peidx); i++) {
+		smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
+		if (smei->valid.target_identifier && smei->target_identifier) {
+			pcci = (pal_cache_check_info_t *)&(smei->check_info);
+			if (!target_address || (pcci->level < level)) {
+				target_address = smei->target_identifier;
+				level = pcci->level;
+				continue;
+			}
+		}
+	}
+	if (target_address)
+		return target_address;
+
+	/*
+	 * Look at the bus check for a valid target identifier
+	 */
+	smei = peidx_bus_check(peidx, 0);
+	if (smei && smei->valid.target_identifier)
+		return smei->target_identifier;
+
+	return 0;
+}
+
 /**
  * recover_from_read_error - Try to recover the errors which type are "read"s.
  * @slidx:	pointer of index of SAL error record
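Note: the scan above prefers the innermost cache. level starts at 9, above any real cache level, so the first valid target identifier is always taken, and a later one only replaces it if it comes from a lower (inner) cache level; failing that, the bus check is consulted. A minimal standalone sketch of the same selection rule, using simplified stand-in types rather than the real SAL record layout:

#include <stdio.h>

/* Simplified stand-ins for the SAL/PAL record types (hypothetical). */
struct check_entry {
	int valid;		/* valid.target_identifier bit */
	unsigned long target;	/* target_identifier */
	int level;		/* cache level from check_info */
};

/* Same rule as get_target_identifier(): take the first valid entry,
 * then let any valid entry from a lower cache level replace it. */
static unsigned long
pick_target(const struct check_entry *e, int n)
{
	unsigned long target = 0;
	int i, level = 9;	/* 9 > any real cache level */

	for (i = 0; i < n; i++) {
		if (e[i].valid && e[i].target &&
		    (!target || e[i].level < level)) {
			target = e[i].target;
			level = e[i].level;
		}
	}
	return target;
}

int main(void)
{
	struct check_entry checks[] = {
		{ 1, 0x1000, 2 },	/* L2 cache check */
		{ 1, 0x2000, 1 },	/* L1 cache check: inner, wins */
	};

	printf("0x%lx\n", pick_target(checks, 2));	/* prints 0x2000 */
	return 0;
}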
@@ -450,13 +494,14 @@ recover_from_read_error(slidx_table_t *slidx,
 			peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 			struct ia64_sal_os_state *sos)
 {
-	sal_log_mod_error_info_t *smei;
+	u64 target_identifier;
 	pal_min_state_area_t *pmsa;
 	struct ia64_psr *psr1, *psr2;
 	ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;
 
 	/* Is target address valid? */
-	if (!pbci->tv)
+	target_identifier = get_target_identifier(peidx);
+	if (!target_identifier)
 		return fatal_mca("target address not valid");
 
 	/*
@@ -487,32 +532,28 @@ recover_from_read_error(slidx_table_t *slidx,
 	pmsa = sos->pal_min_state;
 	if (psr1->cpl != 0 ||
 	   ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
-		smei = peidx_bus_check(peidx, 0);
-		if (smei->valid.target_identifier) {
-			/*
-			 * setup for resume to bottom half of MCA,
-			 * "mca_handler_bhhook"
-			 */
-			/* pass to bhhook as argument (gr8, ...) */
-			pmsa->pmsa_gr[8-1] = smei->target_identifier;
-			pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
-			pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
-			/* set interrupted return address (but no use) */
-			pmsa->pmsa_br0 = pmsa->pmsa_iip;
-			/* change resume address to bottom half */
-			pmsa->pmsa_iip = mca_hdlr_bh->fp;
-			pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
-			/* set cpl with kernel mode */
-			psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
-			psr2->cpl = 0;
-			psr2->ri = 0;
-			psr2->bn = 1;
-			psr2->i = 0;
-			return mca_recovered("user memory corruption. "
-				"kill affected process - recovered.");
-		}
+		/*
+		 * setup for resume to bottom half of MCA,
+		 * "mca_handler_bhhook"
+		 */
+		/* pass to bhhook as argument (gr8, ...) */
+		pmsa->pmsa_gr[8-1] = target_identifier;
+		pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
+		pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
+		/* set interrupted return address (but no use) */
+		pmsa->pmsa_br0 = pmsa->pmsa_iip;
+		/* change resume address to bottom half */
+		pmsa->pmsa_iip = mca_hdlr_bh->fp;
+		pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
+		/* set cpl with kernel mode */
+		psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
+		psr2->cpl = 0;
+		psr2->ri = 0;
+		psr2->bn = 1;
+		psr2->i = 0;
+		return mca_recovered("user memory corruption. "
+				"kill affected process - recovered.");
 	}
 
 	return fatal_mca("kernel context not recovered, iip 0x%lx\n",
......
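Note on the mca_hdlr_bh->fp / ->gp pair used above: on ia64 the address of a C function is a function descriptor, not a raw code address, so resuming in the bottom half needs both words: the entry point goes into pmsa_iip and the handler's global pointer into gr1. Roughly (the real typedef lives in the ia64 headers):

/* ia64 function descriptor (sketch): &function points at this pair,
 * not at the first instruction. */
typedef struct {
	unsigned long fp;	/* entry point */
	unsigned long gp;	/* global pointer the callee expects in r1 */
} ia64_fptr_t;

/* Hence the two assignments in the hunk:
 *	pmsa->pmsa_iip     = mca_hdlr_bh->fp;	resume address
 *	pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;	r1 for the callee
 */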
@@ -223,12 +223,13 @@ static void __init sal_desc_ap_wakeup(void *p) { }
  */
 static int sal_cache_flush_drops_interrupts;
 
-static void __init
+void __init
 check_sal_cache_flush (void)
 {
 	unsigned long flags;
 	int cpu;
-	u64 vector;
+	u64 vector, cache_type = 3;
+	struct ia64_sal_retval isrv;
 
 	cpu = get_cpu();
 	local_irq_save(flags);
@@ -243,7 +244,10 @@ check_sal_cache_flush (void)
 	while (!ia64_get_irr(IA64_TIMER_VECTOR))
 		cpu_relax();
 
-	ia64_sal_cache_flush(3);
+	SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
+	if (isrv.status)
+		printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n",
+		       isrv.status);
 
 	if (ia64_get_irr(IA64_TIMER_VECTOR)) {
 		vector = ia64_get_ivr();
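Note: SAL procedures return a four-word result, which SAL_CALL deposits in the ia64_sal_retval; the probe above then reports any non-zero status. For reference, the result shape (from the ia64 SAL header) and, if I read the SAL encoding right, the meaning of cache_type = 3:

/* A SAL procedure result: status is 0 on success, negative on error;
 * v0-v2 carry procedure-specific values. */
struct ia64_sal_retval {
	s64 status;
	u64 v0;
	u64 v1;
	u64 v2;
};

/* cache_type encoding for SAL_CACHE_FLUSH (hedged, per the SAL spec):
 *	1 = instruction caches, 2 = data caches, 3 = both. */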
@@ -331,7 +335,6 @@ ia64_sal_init (struct ia64_sal_systab *systab)
 		p += SAL_DESC_SIZE(*p);
 	}
-
-	check_sal_cache_flush();
 }
 
 int
......
@@ -457,6 +457,8 @@ setup_arch (char **cmdline_p)
 	cpu_init();	/* initialize the bootstrap CPU */
 	mmu_context_init();	/* initialize context_id bitmap */
 
+	check_sal_cache_flush();
+
 #ifdef CONFIG_ACPI
 	acpi_boot_init();
 #endif
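Note: together with the previous two hunks this moves the probe out of ia64_sal_init() and into setup_arch(), after cpu_init() has brought up the bootstrap CPU. A plausible reading, not stated in the hunks themselves, is that the probe depends on per-CPU interrupt state (it parks on IA64_TIMER_VECTOR and reads the IVR), which is not ready while the SAL tables are being parsed:

/*
 * Resulting boot order (sketch of the visible call sites):
 *
 *	ia64_sal_init(...);		no longer runs the probe
 *	...
 *	cpu_init();			bootstrap CPU initialized
 *	mmu_context_init();
 *	check_sal_cache_flush();	probe runs here, late enough to
 *					use ia64_get_irr()/ia64_get_ivr()
 */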
......
@@ -108,7 +108,7 @@ cpu_die(void)
 }
 
 irqreturn_t
-handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
+handle_IPI (int irq, void *dev_id)
 {
 	int this_cpu = get_cpu();
 	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
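Note: dropping the pt_regs argument tracks the 2.6.19-era genirq change that stopped passing register state to interrupt handlers; a handler that still needs it fetches it with get_irq_regs(). A minimal sketch with a hypothetical handler:

#include <linux/interrupt.h>
#include <asm/irq_regs.h>

/* Register state is no longer a third parameter; it is fetched from a
 * per-CPU variable maintained by the low-level IRQ entry code. */
static irqreturn_t
example_handler (int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();	/* only if actually needed */

	(void)regs;
	return IRQ_HANDLED;
}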
@@ -328,10 +328,14 @@ int
 smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 {
 	struct call_data_struct data;
-	int cpus = num_online_cpus()-1;
+	int cpus;
 
-	if (!cpus)
+	spin_lock(&call_lock);
+	cpus = num_online_cpus() - 1;
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -343,8 +347,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 	if (wait)
 		atomic_set(&data.finished, 0);
 
-	spin_lock(&call_lock);
-
 	call_data = &data;
 	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
 	send_IPI_allbutself(IPI_CALL_FUNC);
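Note: the point of the reordering in these two hunks is that the online-CPU count must be sampled under call_lock, which the CPU bring-up path also holds (via lock_ipi_calllock() in this file, if I recall the hot-plug side correctly). Otherwise a CPU coming online between the count and the broadcast leaves the caller waiting on the wrong number of acknowledgements:

/*
 * Race closed by the fix (sketch):
 *
 *	CPU A: smp_call_function()	CPU B: being hot-added
 *	cpus = num_online_cpus() - 1;
 *					comes online; may or may not
 *					receive the broadcast IPI
 *	spin_lock(&call_lock);
 *	send_IPI_allbutself(IPI_CALL_FUNC);
 *	wait for data.started == cpus;	<- target no longer matches
 *
 * Taking call_lock before reading num_online_cpus() makes the count
 * and the broadcast see the same set of online CPUs.
 */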
......
@@ -659,6 +659,7 @@ ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
 }
 
 extern s64 ia64_sal_cache_flush (u64 cache_type);
+extern void __init check_sal_cache_flush (void);
 
 /* Initialize all the processor and platform level instruction and data caches */
 static inline s64
......
@@ -389,7 +389,7 @@ xlate_dev_kmem_ptr (char * p)
 	struct page *page;
 	char * ptr;
 
-	page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
+	page = virt_to_page((unsigned long)p);
 	if (PageUncached(page))
 		ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
 	else
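Note: the bug in the removed line is a double shift. virt_to_page() already converts an address to a page frame number internally, so pre-shifting the pointer divided by the page size twice and looked up the wrong struct page. On ia64 the macro is roughly:

/* sketch of the ia64 definition: PAGE_SHIFT is applied inside the
 * macro, so callers pass the virtual address unshifted */
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)

/* hence:
 *	virt_to_page(p)			correct
 *	virt_to_page(p >> PAGE_SHIFT)	shifts twice, wrong page
 */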
......