Commit ed72df44 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  Cross-compilation between e.g. i386 -> 64bit could break -> work around it
  [IA64] Enable early console for Ski simulator
  [IA64] forbid ptrace changes psr.ri to 3
  [IA64] Failure to grow RBS
  [IA64] Fix processor_get_freq
  [IA64] SGI Altix : fix a force_interrupt bug on altix
  [IA64] Update arch/ia64/configs/* s/SLAB/SLUB/
  [IA64] get back PT_IA_64_UNWIND program header
  [IA64] need NOTES in vmlinux.lds.S
  [IA64] make unwinder stop at last frame of the bootloader
  [IA64] Clean up CPE handler registration
  [IA64] Include Kconfig.preempt
  [IA64] SN2 needs platform specific irq_to_vector() function.
  [IA64] Use atomic64_read to read an atomic64_t.
  [IA64] disable irq's and check need_resched before safe_halt
@@ -327,17 +327,7 @@ config FORCE_CPEI_RETARGET
 	  This option it useful to enable this feature on older BIOS's as well.
 	  You can also enable this by using boot command line option force_cpei=1.
 
-config PREEMPT
-	bool "Preemptible Kernel"
-	help
-	  This option reduces the latency of the kernel when reacting to
-	  real-time or interactive events by allowing a low priority process to
-	  be preempted even if it is in kernel mode executing a system call.
-	  This allows applications to run more reliably even when the system is
-	  under load.
-
-	  Say Y here if you are building a kernel for a desktop, embedded
-	  or real-time system.  Say N if you are unsure.
+source "kernel/Kconfig.preempt"
 
 source "mm/Kconfig"
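Annotation: the removed prompt now comes from the shared kernel/Kconfig.preempt, which offers the usual three preemption models (PREEMPT_NONE, PREEMPT_VOLUNTARY, PREEMPT). As a rough illustration only (this function is hypothetical, not from the commit), code that must behave differently per model keys off the resulting config macros:

	/* Illustrative sketch: the three CONFIG_* symbols are what
	 * kernel/Kconfig.preempt defines; demo_preempt_model() is made up. */
	static const char *demo_preempt_model(void)
	{
	#if defined(CONFIG_PREEMPT)
		return "preemptible kernel";
	#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		return "voluntary preemption points";
	#else /* CONFIG_PREEMPT_NONE */
		return "no forced preemption (server)";
	#endif
	}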
@@ -42,7 +42,7 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
 CONFIG_CC_ALIGN_LABELS=0
 CONFIG_CC_ALIGN_LOOPS=0
 CONFIG_CC_ALIGN_JUMPS=0
-CONFIG_SLAB=y
+CONFIG_SLUB=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -43,7 +43,7 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
 CONFIG_CC_ALIGN_LABELS=0
 CONFIG_CC_ALIGN_LOOPS=0
 CONFIG_CC_ALIGN_JUMPS=0
-CONFIG_SLAB=y
+CONFIG_SLUB=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -43,7 +43,7 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
 CONFIG_CC_ALIGN_LABELS=0
 CONFIG_CC_ALIGN_LOOPS=0
 CONFIG_CC_ALIGN_JUMPS=0
-CONFIG_SLAB=y
+CONFIG_SLUB=y
 # CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_SLOB is not set
@@ -46,7 +46,7 @@ CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
-CONFIG_SLAB=y
+CONFIG_SLUB=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -53,7 +53,7 @@ CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLAB=y
+CONFIG_SLUB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 CONFIG_RT_MUTEXES=y
@@ -48,7 +48,7 @@ CONFIG_BASE_FULL=y
 CONFIG_FUTEX=y
 CONFIG_EPOLL=y
 CONFIG_SHMEM=y
-CONFIG_SLAB=y
+CONFIG_SLUB=y
 CONFIG_VM_EVENT_COUNTERS=y
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
@@ -53,8 +53,7 @@ CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
 CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_SLAB=y
-# CONFIG_SLUB is not set
+CONFIG_SLUB=y
 # CONFIG_SLOB is not set
 CONFIG_RT_MUTEXES=y
 # CONFIG_TINY_SHMEM is not set
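Annotation: all of the defconfig hunks above make the same mechanical change, moving the ia64 configs from the SLAB allocator to SLUB. The switch is invisible to slab-API callers; a minimal sketch (illustrative, not code from this commit):

	#include <linux/slab.h>

	/* Allocator selection (SLAB/SLUB/SLOB) is a config choice behind
	 * one API, so call sites like these are unaffected by the switch. */
	struct demo_item {
		int id;
	};

	static struct demo_item *demo_alloc(void)
	{
		return kmalloc(sizeof(struct demo_item), GFP_KERNEL);
	}

	static void demo_free(struct demo_item *p)
	{
		kfree(p);
	}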
@@ -26,6 +26,7 @@ GLOBAL_ENTRY(_start)
 	movl sp = stack_mem+16384-16
 	bsw.1
 	br.call.sptk.many rp=start_bootloader
+0:	nop 0 /* dummy nop to make unwinding work */
 END(_start)
 
 /*
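Annotation: the dummy nop matters because an unwinder resolves the return address, i.e. the slot after br.call; if the call is the last instruction of _start, that address falls just past the function's unwind region and the lookup fails. A rough C sketch of the range check involved (names illustrative, not the kernel unwinder's):

	/* The return address must still fall inside the caller's unwind
	 * region, hence the nop after the final call. */
	struct demo_region {
		unsigned long start, end;	/* half-open: [start, end) */
	};

	static int demo_ip_in_region(const struct demo_region *r,
				     unsigned long ret_ip)
	{
		return ret_ip >= r->start && ret_ip < r->end;
	}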
@@ -113,10 +113,8 @@ processor_get_freq (
 	saved_mask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	if (smp_processor_id() != cpu) {
-		ret = -EAGAIN;
+	if (smp_processor_id() != cpu)
 		goto migrate_end;
-	}
 
 	/* processor_get_pstate gets the instantaneous frequency */
 	ret = processor_get_pstate(&value);
@@ -125,7 +123,7 @@ processor_get_freq (
 		set_cpus_allowed(current, saved_mask);
 		printk(KERN_WARNING "get performance failed with error %d\n",
 		       ret);
-		ret = -EAGAIN;
+		ret = 0;
 		goto migrate_end;
 	}
 	clock_freq = extract_clock(data, value, cpu);
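Annotation: the result of this helper feeds cpufreq's frequency-reporting path, which hands back an unsigned value in kHz, so a negative errno such as -EAGAIN would be misread as an enormous frequency; 0 is the conventional "could not read" value. A minimal sketch of that convention (hypothetical driver hook, not this file's code):

	/* A hypothetical cpufreq ->get()-style hook: unsigned kHz out,
	 * 0 meaning "frequency could not be determined". */
	static int demo_read_pstate(unsigned int cpu, unsigned int *khz)
	{
		(void)cpu;
		*khz = 0;
		return -1;	/* pretend the platform call failed */
	}

	static unsigned int demo_cpufreq_get(unsigned int cpu)
	{
		unsigned int khz;

		if (demo_read_pstate(cpu, &khz) != 0)
			return 0;	/* never a negative errno here */
		return khz;
	}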
@@ -33,6 +33,11 @@ void ack_bad_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_IA64_GENERIC
+ia64_vector __ia64_irq_to_vector(int irq)
+{
+	return irq_cfg[irq].vector;
+}
+
 unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
 {
 	return __get_cpu_var(vector_irq)[vec];
@@ -2018,22 +2018,26 @@ ia64_mca_late_init(void)
 	if (cpe_vector >= 0) {
 		/* If platform supports CPEI, enable the irq. */
-		cpe_poll_enabled = 0;
-		for (irq = 0; irq < NR_IRQS; ++irq)
-			if (irq_to_vector(irq) == cpe_vector) {
-				desc = irq_desc + irq;
-				desc->status |= IRQ_PER_CPU;
-				setup_irq(irq, &mca_cpe_irqaction);
-				ia64_cpe_irq = irq;
-			}
-		ia64_mca_register_cpev(cpe_vector);
-		IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
-	} else {
-		/* If platform doesn't support CPEI, get the timer going. */
-		if (cpe_poll_enabled) {
-			ia64_mca_cpe_poll(0UL);
-			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
+		irq = local_vector_to_irq(cpe_vector);
+		if (irq > 0) {
+			cpe_poll_enabled = 0;
+			desc = irq_desc + irq;
+			desc->status |= IRQ_PER_CPU;
+			setup_irq(irq, &mca_cpe_irqaction);
+			ia64_cpe_irq = irq;
+			ia64_mca_register_cpev(cpe_vector);
+			IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
+				__FUNCTION__);
+			return 0;
 		}
+		printk(KERN_ERR "%s: Failed to find irq for CPE "
+			"interrupt handler, vector %d\n",
+			__FUNCTION__, cpe_vector);
+	}
+
+	/* If platform doesn't support CPEI, get the timer going. */
+	if (cpe_poll_enabled) {
+		ia64_mca_cpe_poll(0UL);
+		IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
 	}
 }
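Annotation: the cleanup replaces a linear scan of all NR_IRQS entries (looking for the irq whose vector matches cpe_vector) with the platform's direct local_vector_to_irq() lookup, and CPE polling becomes a fallback when no irq is found rather than living in an else branch. The before/after shape, reduced to a sketch with stand-in names:

	static int demo_irq_to_vector(int irq) { return irq; }		/* stub */
	static int demo_local_vector_to_irq(int vec) { return vec; }	/* stub */

	/* Before: O(NR_IRQS) reverse lookup of vector -> irq. */
	static int demo_find_irq_scan(int vector)
	{
		for (int irq = 0; irq < 256 /* NR_IRQS */; ++irq)
			if (demo_irq_to_vector(irq) == vector)
				return irq;
		return -1;
	}

	/* After: ask the platform directly; <= 0 means "no irq, fall
	 * back to polling". */
	static int demo_find_irq_direct(int vector)
	{
		int irq = demo_local_vector_to_irq(vector);
		return irq > 0 ? irq : -1;
	}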
@@ -198,9 +198,13 @@ default_idle (void)
 {
 	local_irq_enable();
 	while (!need_resched()) {
-		if (can_do_pal_halt)
-			safe_halt();
-		else
+		if (can_do_pal_halt) {
+			local_irq_disable();
+			if (!need_resched()) {
+				safe_halt();
+			}
+			local_irq_enable();
+		} else
 			cpu_relax();
 	}
 }
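Annotation: this closes a classic lost-wakeup window. An interrupt can set need_resched() between the while test and safe_halt(), leaving the CPU halted with work pending; re-checking with interrupts disabled closes the window, assuming the halt primitive still wakes on a pending interrupt. A userspace model of the pattern (all names illustrative, not kernel API):

	#include <stdatomic.h>

	static atomic_int need_resched_flag;

	static int demo_need_resched(void) { return atomic_load(&need_resched_flag); }
	static void demo_irq_disable(void) { /* stand-in for local_irq_disable() */ }
	static void demo_irq_enable(void)  { /* stand-in for local_irq_enable() */ }
	static void demo_halt(void)        { /* stand-in for safe_halt() */ }

	static void demo_idle_loop(void)
	{
		while (!demo_need_resched()) {
			demo_irq_disable();
			if (!demo_need_resched())	/* re-check in the closed window */
				demo_halt();		/* must wake on pending irqs */
			demo_irq_enable();
		}
	}

	int main(void)
	{
		atomic_store(&need_resched_flag, 1);	/* pretend work is pending */
		demo_idle_loop();			/* returns immediately */
		return 0;
	}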
@@ -951,10 +951,14 @@ access_uarea (struct task_struct *child, unsigned long addr,
 		return 0;
 
 	      case PT_CR_IPSR:
-		if (write_access)
-			pt->cr_ipsr = ((*data & IPSR_MASK)
+		if (write_access) {
+			unsigned long tmp = *data;
+
+			/* psr.ri==3 is a reserved value: SDM 2:25 */
+			if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
+				tmp &= ~IA64_PSR_RI;
+			pt->cr_ipsr = ((tmp & IPSR_MASK)
 				       | (pt->cr_ipsr & ~IPSR_MASK));
-		else
+		} else
 			*data = (pt->cr_ipsr & IPSR_MASK);
 		return 0;
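Annotation: psr.ri is the two-bit "restart instruction" field selecting which slot (0..2) of an IA-64 bundle execution resumes at; the value 3 is architecturally reserved, so a ptrace write must not be able to plant it. A standalone model of the masking (the bit positions follow the kernel's IA64_PSR_RI, but treat them as illustrative):

	#include <stdio.h>

	/* psr.ri lives in bits 41:42 of the PSR; both bits set (ri == 3)
	 * is reserved, so clamp back to slot 0, as the ptrace fix does. */
	#define DEMO_PSR_RI	(3UL << 41)

	static unsigned long demo_sanitize_psr(unsigned long psr)
	{
		if ((psr & DEMO_PSR_RI) == DEMO_PSR_RI)
			psr &= ~DEMO_PSR_RI;
		return psr;
	}

	int main(void)
	{
		unsigned long bad = DEMO_PSR_RI;	/* ri == 3 */
		printf("ri after sanitize: %lu\n",
		       (demo_sanitize_psr(bad) >> 41) & 3);	/* prints 0 */
		return 0;
	}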
@@ -389,6 +389,13 @@ early_console_setup (char *cmdline)
 	if (!efi_setup_pcdp_console(cmdline))
 		earlycons++;
 #endif
+#ifdef CONFIG_HP_SIMSERIAL_CONSOLE
+	{
+		extern struct console hpsim_cons;
+		register_console(&hpsim_cons);
+		earlycons++;
+	}
+#endif
 
 	return (earlycons) ? 0 : -1;
 }
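Annotation: register_console() works this early because a console is just a structure of callbacks; once registered, buffered printk output is replayed to it. A minimal sketch of what a console like hpsim_cons supplies (field values illustrative; only .write is strictly required):

	#include <linux/console.h>

	static void demo_console_write(struct console *con, const char *buf,
				       unsigned int len)
	{
		/* hand the bytes to firmware/simulator output here */
	}

	static struct console demo_console = {
		.name	= "demo",
		.write	= demo_console_write,
		.flags	= CON_PRINTBUFFER,	/* replay buffered printk */
		.index	= -1,
	};

	/* register_console(&demo_console); */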
@@ -20,6 +20,8 @@ PHDRS {
   code   PT_LOAD;
   percpu PT_LOAD;
   data   PT_LOAD;
+  note   PT_NOTE;
+  unwind 0x70000001;	/* PT_IA_64_UNWIND, but ld doesn't match the name */
 }
 SECTIONS
 {
@@ -62,6 +64,9 @@ SECTIONS
 
   /* Read-only data */
 
+  NOTES :code :note	/* put .notes in text and mark in PT_NOTE */
+  code_continues : {} :code /* switch back to regular program... */
+
   /* Exception table */
   . = ALIGN(16);
   __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
@@ -99,7 +104,8 @@ SECTIONS
 	  __start_unwind = .;
 	  *(.IA_64.unwind*)
 	  __end_unwind = .;
-	}
+	} :code :unwind
+  code_continues2 : {} : code
 
   RODATA
@@ -276,10 +282,6 @@ SECTIONS
   .debug_typenames 0 : { *(.debug_typenames) }
   .debug_varnames  0 : { *(.debug_varnames) }
 
-  /* These must appear regardless of  .  */
-  /* Discard them for now since Intel SoftSDV cannot handle them.
-  .comment 0 : { *(.comment) }
-  .note 0 : { *(.note) }
-  */
+  /DISCARD/ : { *(.comment) }
+  /DISCARD/ : { *(.note) }
 }
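Annotation: the magic number 0x70000001 is PT_IA_64_UNWIND (PT_LOPROC + 1); ld only accepts well-known type names in PHDRS, so the value is spelled out. Consumers locate the unwind table by walking the program headers, along these lines (a sketch; the macro fallback covers toolchains whose elf.h lacks the define):

	#include <elf.h>
	#include <stddef.h>

	#ifndef PT_IA_64_UNWIND
	#define PT_IA_64_UNWIND 0x70000001	/* PT_LOPROC + 1 */
	#endif

	/* Find the unwind table that the ":unwind" PHDRS entry re-exposes
	 * in the ELF program headers. */
	static const Elf64_Phdr *demo_find_unwind(const Elf64_Phdr *phdr,
						  size_t n)
	{
		for (size_t i = 0; i < n; i++)
			if (phdr[i].p_type == PT_IA_64_UNWIND)
				return &phdr[i];
		return NULL;
	}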
@@ -112,11 +112,17 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	down_read(&mm->mmap_sem);
 
 	vma = find_vma_prev(mm, address, &prev_vma);
-	if (!vma)
+	if (!vma && !prev_vma )
 		goto bad_area;
 
-	/* find_vma_prev() returns vma such that address < vma->vm_end or NULL */
-	if (address < vma->vm_start)
+	/*
+	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
+	 *
+	 * May find no vma, but could be that the last vm area is the
+	 * register backing store that needs to expand upwards, in
+	 * this case vma will be null, but prev_vma will ne non-null
+	 */
+	if (( !vma && prev_vma ) || (address < vma->vm_start) )
 		goto check_expansion;
 
   good_area:
@@ -172,6 +178,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 
   check_expansion:
 	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
+		if (!vma)
+			goto bad_area;
 		if (!(vma->vm_flags & VM_GROWSDOWN))
 			goto bad_area;
 		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
@@ -256,6 +256,13 @@ struct irq_chip irq_type_sn = {
 	.set_affinity	= sn_set_affinity_irq
 };
 
+ia64_vector sn_irq_to_vector(int irq)
+{
+	if (irq >= IA64_NUM_VECTORS)
+		return 0;
+	return (ia64_vector)irq;
+}
+
 unsigned int sn_local_vector_to_irq(u8 vector)
 {
 	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
@@ -398,7 +405,10 @@ sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
 	struct sn_pcibus_provider *pci_provider;
 
 	pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
-	if (pci_provider && pci_provider->force_interrupt)
+
+	/* Don't force an interrupt if the irq has been disabled */
+	if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
+	    pci_provider && pci_provider->force_interrupt)
 		(*pci_provider->force_interrupt)(sn_irq_info);
 }
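Annotation: the Altix fix guards the forced retrigger so a disabled irq is not replayed behind the core's back; forcing the interrupt while IRQ_DISABLED is set would effectively undo disable_irq(). Reduced to its shape (stand-in types and helper, not the SN2 code):

	struct demo_provider {
		void (*force_interrupt)(int irq);
	};

	static int demo_irq_disabled(int irq)
	{
		(void)irq;
		return 0;	/* stand-in for irq_desc[irq].status & IRQ_DISABLED */
	}

	static void demo_call_force_intr(struct demo_provider *p, int irq)
	{
		if (demo_irq_disabled(irq))
			return;		/* respect disable_irq() */
		if (p && p->force_interrupt)
			p->force_interrupt(irq);
	}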
@@ -55,7 +55,7 @@ ia64_atomic64_add (__s64 i, atomic64_t *v)
 
 	do {
 		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
+		old = atomic64_read(v);
 		new = old + i;
 	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
 	return new;
@@ -83,7 +83,7 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
 
 	do {
 		CMPXCHG_BUGCHECK(v);
-		old = atomic_read(v);
+		old = atomic64_read(v);
 		new = old - i;
 	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
 	return new;
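Annotation: the loop here is the standard compare-and-swap retry — read the current value, compute, and publish only if nobody raced in between. Reading a 64-bit counter through the atomic_t accessor is type-confused at best, hence the switch to atomic64_read(). A userspace model of the same loop in C11 atomics:

	#include <stdatomic.h>
	#include <stdint.h>

	/* C11 model of ia64_atomic64_add(): retry until the CAS sees the
	 * same 64-bit value that "new_val" was computed from. */
	static int64_t demo_atomic64_add(int64_t i, _Atomic int64_t *v)
	{
		int64_t old, new_val;

		do {
			old = atomic_load(v);		/* full 64-bit read */
			new_val = old + i;
		} while (!atomic_compare_exchange_strong(v, &old, new_val));
		return new_val;
	}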
@@ -124,6 +124,11 @@ static inline void ia64_resend_irq(unsigned int vector)
 extern irq_desc_t irq_desc[NR_IRQS];
 
 #ifndef CONFIG_IA64_GENERIC
+static inline ia64_vector __ia64_irq_to_vector(int irq)
+{
+	return irq_cfg[irq].vector;
+}
+
 static inline unsigned int
 __ia64_local_vector_to_irq (ia64_vector vec)
 {
@@ -145,7 +150,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
 static inline ia64_vector
 irq_to_vector (int irq)
 {
-	return irq_cfg[irq].vector;
+	return platform_irq_to_vector(irq);
 }
 
 /*
@@ -30,6 +30,7 @@ typedef void ia64_mv_send_ipi_t (int, int, int, int);
 typedef void ia64_mv_timer_interrupt_t (int, void *);
 typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
 typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
+typedef u8 ia64_mv_irq_to_vector (int);
 typedef unsigned int ia64_mv_local_vector_to_irq (u8);
 typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
 typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
@@ -145,6 +146,7 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
 #  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
 #  define platform_dma_supported		ia64_mv.dma_supported
+#  define platform_irq_to_vector		ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq		ia64_mv.local_vector_to_irq
 #  define platform_pci_get_legacy_mem		ia64_mv.pci_get_legacy_mem
 #  define platform_pci_legacy_read		ia64_mv.pci_legacy_read
@@ -198,6 +200,7 @@ struct ia64_machine_vector {
 	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
 	ia64_mv_dma_mapping_error *dma_mapping_error;
 	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_irq_to_vector *irq_to_vector;
 	ia64_mv_local_vector_to_irq *local_vector_to_irq;
 	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
 	ia64_mv_pci_legacy_read_t *pci_legacy_read;
@@ -247,6 +250,7 @@ struct ia64_machine_vector {
 	platform_dma_sync_sg_for_device,	\
 	platform_dma_mapping_error,		\
 	platform_dma_supported,			\
+	platform_irq_to_vector,			\
 	platform_local_vector_to_irq,		\
 	platform_pci_get_legacy_mem,		\
 	platform_pci_legacy_read,		\
@@ -366,6 +370,9 @@ extern ia64_mv_dma_supported swiotlb_dma_supported;
 #ifndef platform_dma_supported
 # define platform_dma_supported		swiotlb_dma_supported
 #endif
+#ifndef platform_irq_to_vector
+# define platform_irq_to_vector		__ia64_irq_to_vector
+#endif
 #ifndef platform_local_vector_to_irq
 # define platform_local_vector_to_irq	__ia64_local_vector_to_irq
 #endif
@@ -2,6 +2,7 @@
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
+extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
 extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read;
@@ -35,6 +35,7 @@ extern ia64_mv_send_ipi_t sn2_send_IPI;
 extern ia64_mv_timer_interrupt_t sn_timer_interrupt;
 extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge;
 extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish;
+extern ia64_mv_irq_to_vector sn_irq_to_vector;
 extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem;
 extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read;
@@ -104,6 +105,7 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus;
 #define platform_readw_relaxed		__sn_readw_relaxed
 #define platform_readl_relaxed		__sn_readl_relaxed
 #define platform_readq_relaxed		__sn_readq_relaxed
+#define platform_irq_to_vector		sn_irq_to_vector
 #define platform_local_vector_to_irq	sn_local_vector_to_irq
 #define platform_pci_get_legacy_mem	sn_pci_get_legacy_mem
 #define platform_pci_legacy_read	sn_pci_legacy_read
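Annotation: taken together, the irq_to_vector hunks follow the machine-vector recipe — declare a typedef, add a slot to struct ia64_machine_vector, give generic kernels a #define default, and let each platform (here SN2) override it. The pattern in miniature (illustrative names only, not the kernel's):

	typedef unsigned char demo_vector;

	/* One slot of a machine vector: a per-platform function pointer. */
	struct demo_machvec {
		demo_vector (*irq_to_vector)(int irq);
	};

	/* Generic default, standing in for __ia64_irq_to_vector(). */
	static demo_vector demo_generic_irq_to_vector(int irq)
	{
		return (demo_vector)irq;	/* stand-in for irq_cfg[irq].vector */
	}

	/* Platform override, as SN2 does with sn_irq_to_vector(). */
	static demo_vector demo_sn_irq_to_vector(int irq)
	{
		return irq < 256 ? (demo_vector)irq : 0;
	}

	static struct demo_machvec demo_mv = {
		.irq_to_vector = demo_sn_irq_to_vector,
	};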
@@ -159,7 +159,8 @@ struct ap_device_id {
 #define AP_DEVICE_ID_MATCH_DEVICE_TYPE		0x01
 
-#define ACPI_ID_LEN	9
+#define ACPI_ID_LEN	16 /* only 9 bytes needed here, 16 bytes are used */
+			   /* to workaround crosscompile issues */
 
 struct acpi_device_id {
 	__u8 id[ACPI_ID_LEN];
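Annotation: the cross-compile issue is that mod_devicetable.h is compiled both into the target kernel and into host-side tools, so member offsets must agree between, say, a 32-bit i386 host and a 64-bit target. With id[9], a following kernel_ulong_t lands at offset 12 on the 32-bit host but 16 on the 64-bit target; padding the array to 16 makes both agree. A self-checking sketch with demo types (not the real header):

	#include <stdint.h>
	#include <stddef.h>

	/* 4 bytes on a 32-bit host, 8 on a 64-bit target -- the mismatch
	 * the workaround papers over. */
	typedef unsigned long demo_kernel_ulong_t;

	struct demo_acpi_device_id {
		uint8_t id[16];			/* was 9; 16 fixes the offset */
		demo_kernel_ulong_t driver_data;
	};

	/* With id[16], driver_data sits at offset 16 for both 4- and 8-byte
	 * alignment of demo_kernel_ulong_t; with id[9] it would be 12 vs 16. */
	_Static_assert(offsetof(struct demo_acpi_device_id, driver_data) == 16,
		       "offset must match between host and target");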