Commit 66f37673 authored by Linus Torvalds

Merge HEAD from master.kernel.org:/home/rmk/linux-2.6-arm

@@ -365,8 +365,8 @@ config NO_IDLE_HZ
 	  Please note that dynamic tick may affect the accuracy of
 	  timekeeping on some platforms depending on the implementation.
-	  Currently at least OMAP platform is known to have accurate
-	  timekeeping with dynamic tick.
+	  Currently at least OMAP, PXA2xx and SA11x0 platforms are known
+	  to have accurate timekeeping with dynamic tick.
 
 config ARCH_DISCONTIGMEM_ENABLE
 	bool
......
@@ -284,7 +284,7 @@ __syscall_start:
 		.long	sys_fstatfs64
 		.long	sys_tgkill
 		.long	sys_utimes
-/* 270 */	.long	sys_fadvise64_64
+/* 270 */	.long	sys_arm_fadvise64_64_wrapper
 		.long	sys_pciconfig_iobase
 		.long	sys_pciconfig_read
 		.long	sys_pciconfig_write
......
@@ -265,6 +265,10 @@ sys_futex_wrapper:
 		str	r5, [sp, #4]		@ push sixth arg
 		b	sys_futex
 
+sys_arm_fadvise64_64_wrapper:
+		str	r5, [sp, #4]		@ push r5 to stack
+		b	sys_arm_fadvise64_64
+
 /*
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
......
@@ -311,3 +311,13 @@ long execve(const char *filename, char **argv, char **envp)
 	return ret;
 }
 EXPORT_SYMBOL(execve);
+
+/*
+ * Since loff_t is a 64 bit type we avoid a lot of ABI hastle
+ * with a different argument ordering.
+ */
+asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
+				     loff_t offset, loff_t len)
+{
+	return sys_fadvise64_64(fd, offset, len, advice);
+}
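A note on the hunk above, since the in-tree comment is terse: the ARM syscall ABI passes 64-bit arguments in aligned register pairs, so the generic ordering (fd, offset, len, advice) would need seven argument slots (r0, a padding register, r2/r3 for offset, r4/r5 for len, plus one more for advice), one more than the six the ABI provides. Moving advice up to the second slot makes everything fit, and the kernel-side shim simply forwards to the generic ordering. The sketch below is illustrative only; the loff_demo_t type and both function names are invented for the demo, only the two argument orderings mirror the kernel code in this diff.

/*
 * Standalone, userspace-runnable sketch of the reordering shim.
 * Names and types here are stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

typedef int64_t loff_demo_t;		/* stand-in for the kernel's loff_t */

/* generic ordering, as sys_fadvise64_64() expects */
static long generic_fadvise64_64(int fd, loff_demo_t offset,
				 loff_demo_t len, int advice)
{
	printf("fd=%d offset=%lld len=%lld advice=%d\n",
	       fd, (long long)offset, (long long)len, advice);
	return 0;
}

/* ARM-friendly ordering: advice moved ahead of the 64-bit pair */
static long arm_fadvise64_64(int fd, int advice,
			     loff_demo_t offset, loff_demo_t len)
{
	return generic_fadvise64_64(fd, offset, len, advice);
}

int main(void)
{
	/* 2 == POSIX_FADV_SEQUENTIAL in <fcntl.h>, hard-coded for the demo */
	return (int)arm_fadvise64_64(3, 2, 0, 4096);
}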
@@ -70,6 +70,11 @@ static unsigned long pxa_gettimeoffset (void)
 	return usec;
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+static unsigned long initial_match;
+static int match_posponed;
+#endif
+
 static irqreturn_t
 pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
@@ -77,11 +82,19 @@ pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	write_seqlock(&xtime_lock);
 
+#ifdef CONFIG_NO_IDLE_HZ
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+	}
+#endif
+
 	/* Loop until we get ahead of the free running timer.
 	 * This ensures an exact clock tick count and time accuracy.
-	 * IRQs are disabled inside the loop to ensure coherence between
-	 * lost_ticks (updated in do_timer()) and the match reg value, so we
-	 * can use do_gettimeofday() from interrupt handlers.
+	 * Since IRQs are disabled at this point, coherence between
+	 * lost_ticks(updated in do_timer()) and the match reg value is
+	 * ensured, hence we can use do_gettimeofday() from interrupt
+	 * handlers.
 	 *
 	 * HACK ALERT: it seems that the PXA timer regs aren't updated right
 	 * away in all cases when a write occurs.  We therefore compare with
@@ -126,6 +139,42 @@ static void __init pxa_timer_init(void)
 	OSCR = 0;		/* initialize free-running timer, force first match */
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int pxa_dyn_tick_enable_disable(void)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static void pxa_dyn_tick_reprogram(unsigned long ticks)
+{
+	if (ticks > 1) {
+		initial_match = OSMR0;
+		OSMR0 = initial_match + ticks * LATCH;
+		match_posponed = 1;
+	}
+}
+
+static irqreturn_t
+pxa_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+		if ( (signed long)(initial_match - OSCR) <= 8 )
+			return pxa_timer_interrupt(irq, dev_id, regs);
+	}
+	return IRQ_NONE;
+}
+
+static struct dyn_tick_timer pxa_dyn_tick = {
+	.enable		= pxa_dyn_tick_enable_disable,
+	.disable	= pxa_dyn_tick_enable_disable,
+	.reprogram	= pxa_dyn_tick_reprogram,
+	.handler	= pxa_dyn_tick_handler,
+};
+#endif
+
 #ifdef CONFIG_PM
 static unsigned long osmr[4], oier;
@@ -161,4 +210,7 @@ struct sys_timer pxa_timer = {
 	.suspend	= pxa_timer_suspend,
 	.resume		= pxa_timer_resume,
 	.offset		= pxa_gettimeoffset,
+#ifdef CONFIG_NO_IDLE_HZ
+	.dyn_tick	= &pxa_dyn_tick,
+#endif
 };
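One detail worth spelling out for the dyn_tick handler above (the SA11x0 handler below uses the same pattern): OSCR and OSMR0 are free-running 32-bit values, so the handler decides whether the postponed deadline has effectively arrived with a wraparound-safe signed comparison of the unsigned difference rather than a plain '<'. The PXA variant accepts up to 8 cycles of slack because of the late register update noted in the HACK ALERT comment. Below is a standalone sketch of just that comparison; the function name and test values are made up for the demo.

/*
 * Wraparound-safe "has the match time passed?" test, as used by
 * pxa_dyn_tick_handler() and sa1100_dyn_tick_handler().
 */
#include <stdio.h>
#include <stdint.h>

static int deadline_passed(uint32_t match, uint32_t counter)
{
	/* A non-positive signed difference means the match time is no
	 * longer in the future, even if the counter wrapped in between. */
	return (int32_t)(match - counter) <= 0;
}

int main(void)
{
	/* match just before a wrap, counter just after it: already passed */
	printf("%d\n", deadline_passed(0xfffffff0u, 0x00000010u));	/* prints 1 */
	/* match still comfortably ahead of the counter: not yet */
	printf("%d\n", deadline_passed(0x00001000u, 0x00000010u));	/* prints 0 */
	return 0;
}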
@@ -70,15 +70,11 @@ static unsigned long sa1100_gettimeoffset (void)
 	return usec;
 }
 
-/*
- * We will be entered with IRQs enabled.
- *
- * Loop until we get ahead of the free running timer.
- * This ensures an exact clock tick count and time accuracy.
- * IRQs are disabled inside the loop to ensure coherence between
- * lost_ticks (updated in do_timer()) and the match reg value, so we
- * can use do_gettimeofday() from interrupt handlers.
- */
+#ifdef CONFIG_NO_IDLE_HZ
+static unsigned long initial_match;
+static int match_posponed;
+#endif
+
 static irqreturn_t
 sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
@@ -86,6 +82,21 @@ sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	write_seqlock(&xtime_lock);
 
+#ifdef CONFIG_NO_IDLE_HZ
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+	}
+#endif
+
+	/*
+	 * Loop until we get ahead of the free running timer.
+	 * This ensures an exact clock tick count and time accuracy.
+	 * Since IRQs are disabled at this point, coherence between
+	 * lost_ticks(updated in do_timer()) and the match reg value is
+	 * ensured, hence we can use do_gettimeofday() from interrupt
+	 * handlers.
+	 */
 	do {
 		timer_tick(regs);
 		OSSR = OSSR_M0;  /* Clear match on timer 0 */
@@ -120,6 +131,42 @@ static void __init sa1100_timer_init(void)
 	OSCR = 0;		/* initialize free-running timer, force first match */
 }
 
+#ifdef CONFIG_NO_IDLE_HZ
+static int sa1100_dyn_tick_enable_disable(void)
+{
+	/* nothing to do */
+	return 0;
+}
+
+static void sa1100_dyn_tick_reprogram(unsigned long ticks)
+{
+	if (ticks > 1) {
+		initial_match = OSMR0;
+		OSMR0 = initial_match + ticks * LATCH;
+		match_posponed = 1;
+	}
+}
+
+static irqreturn_t
+sa1100_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+	if (match_posponed) {
+		match_posponed = 0;
+		OSMR0 = initial_match;
+		if ((signed long)(initial_match - OSCR) <= 0)
+			return sa1100_timer_interrupt(irq, dev_id, regs);
+	}
+	return IRQ_NONE;
+}
+
+static struct dyn_tick_timer sa1100_dyn_tick = {
+	.enable		= sa1100_dyn_tick_enable_disable,
+	.disable	= sa1100_dyn_tick_enable_disable,
+	.reprogram	= sa1100_dyn_tick_reprogram,
+	.handler	= sa1100_dyn_tick_handler,
+};
+#endif
+
 #ifdef CONFIG_PM
 unsigned long osmr[4], oier;
@@ -156,4 +203,7 @@ struct sys_timer sa1100_timer = {
 	.suspend	= sa1100_timer_suspend,
 	.resume		= sa1100_timer_resume,
 	.offset		= sa1100_gettimeoffset,
+#ifdef CONFIG_NO_IDLE_HZ
+	.dyn_tick	= &sa1100_dyn_tick,
+#endif
 };
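Both reprogram() callbacks above only slide the pending match register forward by whole jiffies; the tick accounting itself is untouched, which is why the interrupt path restores initial_match before looping. As a rough sense of scale, the sketch below assumes the usual 3.6864 MHz OS timer on PXA2xx/SA11x0 and HZ=100, so LATCH comes out to roughly 36864 counter cycles per jiffy; neither constant appears in this diff.

/*
 * Back-of-the-envelope check of the ticks * LATCH reprogramming step.
 * CLOCK_TICK_RATE and HZ below are assumed platform defaults, not taken
 * from this diff.
 */
#include <stdio.h>
#include <stdint.h>

#define CLOCK_TICK_RATE	3686400u	/* assumed PXA2xx/SA11x0 OS timer rate */
#define HZ		100u		/* assumed kernel tick rate */
#define LATCH		(CLOCK_TICK_RATE / HZ)

int main(void)
{
	uint32_t ticks = 50;		/* jiffies the CPU expects to stay idle */
	uint32_t delta = ticks * LATCH;	/* counter cycles added to OSMR0 */

	printf("OSMR0 pushed forward by %u cycles (%.3f s)\n",
	       delta, (double)delta / CLOCK_TICK_RATE);
	return 0;
}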
@@ -295,14 +295,10 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
 	pte_t *ptep;
 
 	if (pmd_none(*pmdp)) {
-		unsigned long pmdval;
 		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
 					       sizeof(pte_t));
 
-		pmdval = __pa(ptep) | prot_l1;
-		pmdp[0] = __pmd(pmdval);
-		pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
-		flush_pmd_entry(pmdp);
+		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
 	}
 	ptep = pte_offset_kernel(pmdp, virt);
@@ -457,7 +453,7 @@ static void __init build_mem_type_table(void)
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
 
-		v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
+		v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
 		protection_map[i] = __pgprot(v);
 	}
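The one-line change in the hunk above is a logic fix, not a style tweak: in the old form the OR with user_pgprot happens inside the AND mask, so it can only stop the cacheable/bufferable bits from being cleared; it can never set a user_pgprot bit that was not already present in v. The rewritten form clears the C/B bits first and then ORs user_pgprot in. Below is a standalone illustration; the bit values are local stand-ins chosen for the demo, not the kernel's definitions.

/*
 * Demo of why "v &= ~mask | extra" differs from "v = (v & ~mask) | extra".
 * PTE_BUFFERABLE/PTE_CACHEABLE values are assumed here for illustration.
 */
#include <stdio.h>

#define PTE_BUFFERABLE	0x04
#define PTE_CACHEABLE	0x08

int main(void)
{
	unsigned long v = 0x31;				/* some prot bits, C/B clear */
	unsigned long user_pgprot = PTE_CACHEABLE;	/* want user pages cacheable */

	unsigned long old = v;
	old &= (~(PTE_BUFFERABLE | PTE_CACHEABLE)) | user_pgprot;	/* old code */

	unsigned long fixed = (v & ~(PTE_BUFFERABLE | PTE_CACHEABLE)) | user_pgprot;

	printf("old: %#lx  fixed: %#lx\n", old, fixed);	/* old: 0x31  fixed: 0x39 */
	return 0;
}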
@@ -581,23 +577,23 @@ static void __init create_mapping(struct map_desc *md)
  */
 void setup_mm_for_reboot(char mode)
 {
-	unsigned long pmdval;
+	unsigned long base_pmdval;
 	pgd_t *pgd;
-	pmd_t *pmd;
 	int i;
-	int cpu_arch = cpu_architecture();
 
 	if (current->mm && current->mm->pgd)
 		pgd = current->mm->pgd;
 	else
 		pgd = init_mm.pgd;
 
-	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++) {
-		pmdval = (i << PGDIR_SHIFT) |
-			 PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
-			 PMD_TYPE_SECT;
-		if (cpu_arch <= CPU_ARCH_ARMv5TEJ)
-			pmdval |= PMD_BIT4;
+	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
+	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
+		base_pmdval |= PMD_BIT4;
+
+	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
+		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
+		pmd_t *pmd;
 
 		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
 		pmd[0] = __pmd(pmdval);
 		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
......
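For context on the loop just above: each pgd slot covers 1 << PGDIR_SHIFT of address space and is filled with two hardware section descriptors, the second one offset by half a slot, so the loop builds a flat 1:1 section mapping for the low range before reboot. The sketch below shows the address arithmetic, assuming the ARM value PGDIR_SHIFT = 21 (2 MB per slot, split into two 1 MB sections); the constant itself does not appear in this diff.

/*
 * Illustration of the address arithmetic in setup_mm_for_reboot().
 * PGDIR_SHIFT = 21 is an assumption about the ARM page table layout.
 */
#include <stdio.h>

#define PGDIR_SHIFT 21

int main(void)
{
	for (int i = 0; i < 4; i++) {
		unsigned long base = (unsigned long)i << PGDIR_SHIFT;

		printf("pgd[%d]: pmd[0] maps %#010lx, pmd[1] maps %#010lx\n",
		       i, base, base + (1UL << (PGDIR_SHIFT - 1)));
	}
	return 0;
}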
@@ -295,7 +295,7 @@
 #define __NR_fstatfs64			(__NR_SYSCALL_BASE+267)
 #define __NR_tgkill			(__NR_SYSCALL_BASE+268)
 #define __NR_utimes			(__NR_SYSCALL_BASE+269)
-#define __NR_fadvise64_64		(__NR_SYSCALL_BASE+270)
+#define __NR_arm_fadvise64_64		(__NR_SYSCALL_BASE+270)
 #define __NR_pciconfig_iobase		(__NR_SYSCALL_BASE+271)
 #define __NR_pciconfig_read		(__NR_SYSCALL_BASE+272)
 #define __NR_pciconfig_write		(__NR_SYSCALL_BASE+273)
......