Commit 32f15dc5 authored by Linus Torvalds

Merge branch 'for-linus' of git://www.atmel.no/~hskinnemoen/linux/kernel/avr32

* 'for-linus' of git://www.atmel.no/~hskinnemoen/linux/kernel/avr32: (21 commits)
  [AVR32] Fix compile error with gcc 4.1
  avr32: remove unneeded cast in atomic.h
  AVR32: Remove useless config option "GENERIC_BUST_SPINLOCK".
  [AVR32] Optimize the TLB miss handler
  [AVR32] Board code for ATNGW100
  [AVR32] Use memcpy/memset in memcpy_{from,to}_io and memset_io
  [AVR32] Get rid of board_setup_fbmem()
  [AVR32] Reserve framebuffer memory in early_parse_fbmem()
  [AVR32] Simplify early handling of memory regions
  [AVR32] Move setup_bootmem() from mm/init.c to kernel/setup.c
  [AVR32] Make I/O access macros work with external devices
  [AVR32] Fix NMI handler
  [AVR32] Clean up exception handling code
  [AVR32] Clean up cpu identification and add features bitmap
  [AVR32] Clean up asm/sysreg.h
  [AVR32] Don't enable clocks with no users
  [AVR32] Put cpu in sleep 0 when idle.
  [AVR32] Change system timer from count-compare to Timer/Counter 0
  [AVR32] Add mach-specific Kconfig
  [AVR32] Add nwait and tdf parameters to SMC configuration
  ...
......@@ -57,9 +57,6 @@ config ARCH_HAS_ILOG2_U64
bool
default n
config GENERIC_BUST_SPINLOCK
bool
config GENERIC_HWEIGHT
bool
default y
......@@ -68,6 +65,11 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_BUG
bool
default y
depends on BUG
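(Note: GENERIC_BUG gates the generic report_bug() support in lib/bug.c; the matching arch pieces appear further down in this series: is_valid_bugaddr() in traps.c, the module_bug_finalize()/module_bug_cleanup() calls in module.c, and the BUG_TABLE section plus the .exit.text move in vmlinux.lds.c.)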
source "init/Kconfig"
menu "System Type and features"
......@@ -106,6 +108,9 @@ choice
config BOARD_ATSTK1000
bool "ATSTK1000 evaluation board"
select BOARD_ATSTK1002 if CPU_AT32AP7000
config BOARD_ATNGW100
bool "ATNGW100 Network Gateway"
endchoice
choice
......@@ -116,6 +121,8 @@ config LOADER_U_BOOT
bool "U-Boot (or similar) bootloader"
endchoice
source "arch/avr32/mach-at32ap/Kconfig"
config LOAD_ADDRESS
hex
default 0x10000000 if LOADER_U_BOOT=y && CPU_AT32AP7000=y
......
......@@ -27,6 +27,7 @@ head-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/head.o
head-y += arch/avr32/kernel/head.o
core-$(CONFIG_PLATFORM_AT32AP) += arch/avr32/mach-at32ap/
core-$(CONFIG_BOARD_ATSTK1000) += arch/avr32/boards/atstk1000/
core-$(CONFIG_BOARD_ATNGW100) += arch/avr32/boards/atngw100/
core-$(CONFIG_LOADER_U_BOOT) += arch/avr32/boot/u-boot/
core-y += arch/avr32/kernel/
core-y += arch/avr32/mm/
......
/*
* ATNGW100 board-specific flash initialization
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <asm/arch/smc.h>
static struct smc_config flash_config __initdata = {
.ncs_read_setup = 0,
.nrd_setup = 40,
.ncs_write_setup = 0,
.nwe_setup = 10,
.ncs_read_pulse = 80,
.nrd_pulse = 40,
.ncs_write_pulse = 65,
.nwe_pulse = 55,
.read_cycle = 120,
.write_cycle = 120,
.bus_width = 2,
.nrd_controlled = 1,
.nwe_controlled = 1,
.byte_write = 1,
};
static struct mtd_partition flash_parts[] = {
{
.name = "u-boot",
.offset = 0x00000000,
.size = 0x00020000, /* 128 KiB */
.mask_flags = MTD_WRITEABLE,
},
{
.name = "root",
.offset = 0x00020000,
.size = 0x007d0000,
},
{
.name = "env",
.offset = 0x007f0000,
.size = 0x00010000,
.mask_flags = MTD_WRITEABLE,
},
};
static struct physmap_flash_data flash_data = {
.width = 2,
.nr_parts = ARRAY_SIZE(flash_parts),
.parts = flash_parts,
};
static struct resource flash_resource = {
.start = 0x00000000,
.end = 0x007fffff,
.flags = IORESOURCE_MEM,
};
static struct platform_device flash_device = {
.name = "physmap-flash",
.id = 0,
.resource = &flash_resource,
.num_resources = 1,
.dev = {
.platform_data = &flash_data,
},
};
/* This needs to be called after the SMC has been initialized */
static int __init atngw100_flash_init(void)
{
int ret;
ret = smc_set_configuration(0, &flash_config);
if (ret < 0) {
printk(KERN_ERR "atngw100: failed to set NOR flash timing\n");
return ret;
}
platform_device_register(&flash_device);
return 0;
}
device_initcall(atngw100_flash_init);
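Sanity check on the partition map above: 0x20000 (u-boot) + 0x7d0000 (root) + 0x10000 (env) = 0x800000 bytes, so the three partitions exactly tile the 8 MiB window claimed by flash_resource (0x00000000-0x007fffff), with env starting at 0x7f0000 right where root ends.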
/*
* Board-specific setup code for the ATNGW100 Network Gateway
*
* Copyright (C) 2005-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/spi/spi.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/arch/at32ap7000.h>
#include <asm/arch/board.h>
#include <asm/arch/init.h>
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
struct eth_addr {
u8 addr[6];
};
static struct eth_addr __initdata hw_addr[2];
static struct eth_platform_data __initdata eth_data[2];
static struct spi_board_info spi0_board_info[] __initdata = {
{
.modalias = "mtd_dataflash",
.max_speed_hz = 10000000,
.chip_select = 0,
},
};
/*
* The next two functions should go away as the boot loader is
* supposed to initialize the macb address registers with a valid
* ethernet address. But we need to keep it around for a while until
* we can be reasonably sure the boot loader does this.
*
* The phy_id is ignored as the driver will probe for it.
*/
static int __init parse_tag_ethernet(struct tag *tag)
{
int i;
i = tag->u.ethernet.mac_index;
if (i < ARRAY_SIZE(hw_addr))
memcpy(hw_addr[i].addr, tag->u.ethernet.hw_address,
sizeof(hw_addr[i].addr));
return 0;
}
__tagtable(ATAG_ETHERNET, parse_tag_ethernet);
static void __init set_hw_addr(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
const u8 *addr;
void __iomem *regs;
struct clk *pclk;
if (!res)
return;
if (pdev->id >= ARRAY_SIZE(hw_addr))
return;
addr = hw_addr[pdev->id].addr;
if (!is_valid_ether_addr(addr))
return;
/*
* Since this is board-specific code, we'll cheat and use the
* physical address directly as we happen to know that it's
* the same as the virtual address.
*/
regs = (void __iomem __force *)res->start;
pclk = clk_get(&pdev->dev, "pclk");
if (!pclk)
return;
clk_enable(pclk);
__raw_writel((addr[3] << 24) | (addr[2] << 16)
| (addr[1] << 8) | addr[0], regs + 0x98);
__raw_writel((addr[5] << 8) | addr[4], regs + 0x9c);
clk_disable(pclk);
clk_put(pclk);
}
struct platform_device *at32_usart_map[1];
unsigned int at32_nr_usarts = 1;
void __init setup_board(void)
{
at32_map_usart(1, 0); /* USART 1: /dev/ttyS0, DB9 */
at32_setup_serial_console(0);
}
static int __init atngw100_init(void)
{
/*
* ATNGW100 uses 16-bit SDRAM interface, so we don't need to
* reserve any pins for it.
*/
at32_add_system_devices();
at32_add_device_usart(0);
set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
set_hw_addr(at32_add_device_eth(1, &eth_data[1]));
at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
return 0;
}
postcore_initcall(atngw100_init);
......@@ -33,7 +33,7 @@ struct eth_addr {
static struct eth_addr __initdata hw_addr[2];
static struct eth_platform_data __initdata eth_data[2];
extern struct lcdc_platform_data atstk1000_fb0_data;
static struct lcdc_platform_data atstk1000_fb0_data;
static struct spi_board_info spi0_board_info[] __initdata = {
{
......@@ -148,6 +148,8 @@ static int __init atstk1002_init(void)
set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info));
atstk1000_fb0_data.fbmem_start = fbmem_start;
atstk1000_fb0_data.fbmem_size = fbmem_size;
at32_add_device_lcdc(0, &atstk1000_fb0_data);
return 0;
......
......@@ -18,33 +18,3 @@
/* Initialized by bootloader-specific startup code. */
struct tag *bootloader_tags __initdata;
struct lcdc_platform_data __initdata atstk1000_fb0_data;
void __init board_setup_fbmem(unsigned long fbmem_start,
unsigned long fbmem_size)
{
if (!fbmem_size)
return;
if (!fbmem_start) {
void *fbmem;
fbmem = alloc_bootmem_low_pages(fbmem_size);
fbmem_start = __pa(fbmem);
} else {
pg_data_t *pgdat;
for_each_online_pgdat(pgdat) {
if (fbmem_start >= pgdat->bdata->node_boot_start
&& fbmem_start <= pgdat->bdata->node_low_pfn)
reserve_bootmem_node(pgdat, fbmem_start,
fbmem_size);
}
}
printk("%luKiB framebuffer memory at address 0x%08lx\n",
fbmem_size >> 10, fbmem_start);
atstk1000_fb0_data.fbmem_start = fbmem_start;
atstk1000_fb0_data.fbmem_size = fbmem_size;
}
This diff has been collapsed.
......@@ -209,16 +209,17 @@ static const char *mmu_types[] = {
void __init setup_processor(void)
{
unsigned long config0, config1;
unsigned long features;
unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type;
unsigned tmp;
config0 = sysreg_read(CONFIG0); /* 0x0000013e; */
config1 = sysreg_read(CONFIG1); /* 0x01f689a2; */
cpu_id = config0 >> 24;
cpu_rev = (config0 >> 16) & 0xff;
arch_id = (config0 >> 13) & 0x07;
arch_rev = (config0 >> 10) & 0x07;
mmu_type = (config0 >> 7) & 0x03;
config0 = sysreg_read(CONFIG0);
config1 = sysreg_read(CONFIG1);
cpu_id = SYSREG_BFEXT(PROCESSORID, config0);
cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0);
arch_id = SYSREG_BFEXT(AT, config0);
arch_rev = SYSREG_BFEXT(AR, config0);
mmu_type = SYSREG_BFEXT(MMUT, config0);
boot_cpu_data.arch_type = arch_id;
boot_cpu_data.cpu_type = cpu_id;
......@@ -226,16 +227,16 @@ void __init setup_processor(void)
boot_cpu_data.cpu_revision = cpu_rev;
boot_cpu_data.tlb_config = mmu_type;
tmp = (config1 >> 13) & 0x07;
tmp = SYSREG_BFEXT(ILSZ, config1);
if (tmp) {
boot_cpu_data.icache.ways = 1 << ((config1 >> 10) & 0x07);
boot_cpu_data.icache.sets = 1 << ((config1 >> 16) & 0x0f);
boot_cpu_data.icache.ways = 1 << SYSREG_BFEXT(IASS, config1);
boot_cpu_data.icache.sets = 1 << SYSREG_BFEXT(ISET, config1);
boot_cpu_data.icache.linesz = 1 << (tmp + 1);
}
tmp = (config1 >> 3) & 0x07;
tmp = SYSREG_BFEXT(DLSZ, config1);
if (tmp) {
boot_cpu_data.dcache.ways = 1 << (config1 & 0x07);
boot_cpu_data.dcache.sets = 1 << ((config1 >> 6) & 0x0f);
boot_cpu_data.dcache.ways = 1 << SYSREG_BFEXT(DASS, config1);
boot_cpu_data.dcache.sets = 1 << SYSREG_BFEXT(DSET, config1);
boot_cpu_data.dcache.linesz = 1 << (tmp + 1);
}
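The SYSREG_BFEXT() conversion above trades open-coded shifts for named bitfields; the old lines being replaced pin down the field parameters (for example AT sits at bit 13, 3 bits wide, in CONFIG0). The macro itself presumably follows the usual extract pattern sketched here; this is an assumption for illustration, and the real definition in asm/sysreg.h may differ in detail:

/* assumed shape of the helper, using generated *_OFFSET / *_SIZE constants */
#define SYSREG_BFEXT(field, value) \
	(((value) >> SYSREG_##field##_OFFSET) & \
	 ((1UL << SYSREG_##field##_SIZE) - 1))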
......@@ -250,16 +251,39 @@ void __init setup_processor(void)
cpu_names[cpu_id], cpu_id, cpu_rev,
arch_names[arch_id], arch_rev);
printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]);
printk ("CPU: features:");
if (config0 & (1 << 6))
printk(" fpu");
if (config0 & (1 << 5))
printk(" java");
if (config0 & (1 << 4))
printk(" perfctr");
if (config0 & (1 << 3))
features = 0;
if (config0 & SYSREG_BIT(CONFIG0_R)) {
features |= AVR32_FEATURE_RMW;
printk(" rmw");
}
if (config0 & SYSREG_BIT(CONFIG0_D)) {
features |= AVR32_FEATURE_DSP;
printk(" dsp");
}
if (config0 & SYSREG_BIT(CONFIG0_S)) {
features |= AVR32_FEATURE_SIMD;
printk(" simd");
}
if (config0 & SYSREG_BIT(CONFIG0_O)) {
features |= AVR32_FEATURE_OCD;
printk(" ocd");
}
if (config0 & SYSREG_BIT(CONFIG0_P)) {
features |= AVR32_FEATURE_PCTR;
printk(" perfctr");
}
if (config0 & SYSREG_BIT(CONFIG0_J)) {
features |= AVR32_FEATURE_JAVA;
printk(" java");
}
if (config0 & SYSREG_BIT(CONFIG0_F)) {
features |= AVR32_FEATURE_FPU;
printk(" fpu");
}
printk("\n");
boot_cpu_data.features = features;
}
#ifdef CONFIG_PROC_FS
......
......@@ -100,55 +100,49 @@ dtlb_miss_write:
.global tlb_miss_common
tlb_miss_common:
mfsr r0, SYSREG_PTBR
mfsr r1, SYSREG_TLBEAR
mfsr r0, SYSREG_TLBEAR
mfsr r1, SYSREG_PTBR
/* Is it the vmalloc space? */
bld r1, 31
bld r0, 31
brcs handle_vmalloc_miss
/* First level lookup */
pgtbl_lookup:
lsr r2, r1, PGDIR_SHIFT
ld.w r0, r0[r2 << 2]
bld r0, _PAGE_BIT_PRESENT
lsr r2, r0, PGDIR_SHIFT
ld.w r3, r1[r2 << 2]
bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
bld r3, _PAGE_BIT_PRESENT
brcc page_table_not_present
/* TODO: Check access rights on page table if necessary */
/* Translate to virtual address in P1. */
andl r0, 0xf000
sbr r0, 31
andl r3, 0xf000
sbr r3, 31
/* Second level lookup */
lsl r1, (32 - PGDIR_SHIFT)
lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
add r2, r0, r1 << 2
ld.w r1, r2[0]
bld r1, _PAGE_BIT_PRESENT
ld.w r2, r3[r1 << 2]
mfsr r0, SYSREG_TLBARLO
bld r2, _PAGE_BIT_PRESENT
brcc page_not_present
/* Mark the page as accessed */
sbr r1, _PAGE_BIT_ACCESSED
st.w r2[0], r1
sbr r2, _PAGE_BIT_ACCESSED
st.w r3[r1 << 2], r2
/* Drop software flags */
andl r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
mtsr SYSREG_TLBELO, r1
andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
mtsr SYSREG_TLBELO, r2
/* Figure out which entry we want to replace */
mfsr r0, SYSREG_TLBARLO
mfsr r1, SYSREG_MMUCR
clz r2, r0
brcc 1f
mov r1, -1 /* All entries have been accessed, */
mtsr SYSREG_TLBARLO, r1 /* so reset TLBAR */
mov r2, 0 /* and start at 0 */
1: mfsr r1, SYSREG_MMUCR
lsl r2, 14
andl r1, 0x3fff, COH
or r1, r2
mtsr SYSREG_MMUCR, r1
mov r3, -1 /* All entries have been accessed, */
mov r2, 0 /* so start at 0 */
mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */
1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
mtsr SYSREG_MMUCR, r1
tlbw
tlbmiss_restore
......@@ -156,8 +150,8 @@ pgtbl_lookup:
handle_vmalloc_miss:
/* Simply do the lookup in init's page table */
mov r0, lo(swapper_pg_dir)
orh r0, hi(swapper_pg_dir)
mov r1, lo(swapper_pg_dir)
orh r1, hi(swapper_pg_dir)
rjmp pgtbl_lookup
......@@ -340,12 +334,34 @@ do_bus_error_read:
do_nmi_ll:
sub sp, 4
stmts --sp, r0-lr
/* FIXME: Make sure RAR_NMI and RSR_NMI are pushed instead of *_EX */
rcall save_full_context_ex
mfsr r9, SYSREG_RSR_NMI
mfsr r8, SYSREG_RAR_NMI
bfextu r0, r9, MODE_SHIFT, 3
brne 2f
1: pushm r8, r9 /* PC and SR */
mfsr r12, SYSREG_ECR
mov r11, sp
rcall do_nmi
rjmp bad_return
popm r8-r9
mtsr SYSREG_RAR_NMI, r8
tst r0, r0
mtsr SYSREG_RSR_NMI, r9
brne 3f
ldmts sp++, r0-lr
sub sp, -4 /* skip r12_orig */
rete
2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
stdsp sp[4], r10 /* replace saved SP */
rjmp 1b
3: popm lr
sub sp, -4 /* skip sp */
popm r0-r12
sub sp, -4 /* skip r12_orig */
rete
handle_address_fault:
sub sp, 4
......@@ -630,9 +646,12 @@ irq_level\level:
rcall do_IRQ
lddsp r4, sp[REG_SR]
andh r4, (MODE_MASK >> 16), COH
bfextu r4, r4, SYSREG_M0_OFFSET, 3
cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
breq 2f
cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
#ifdef CONFIG_PREEMPT
brne 2f
brne 3f
#else
brne 1f
#endif
......@@ -649,9 +668,18 @@ irq_level\level:
sub sp, -4 /* ignore r12_orig */
rete
2: get_thread_info r0
ld.w r1, r0[TI_flags]
bld r1, TIF_CPU_GOING_TO_SLEEP
#ifdef CONFIG_PREEMPT
2:
get_thread_info r0
brcc 3f
#else
brcc 1b
#endif
sub r1, pc, . - cpu_idle_skip_sleep
stdsp sp[REG_PC], r1
#ifdef CONFIG_PREEMPT
3: get_thread_info r0
ld.w r2, r0[TI_preempt_count]
cp.w r2, 0
brne 1b
......@@ -662,12 +690,32 @@ irq_level\level:
bld r4, SYSREG_GM_OFFSET
brcs 1b
rcall preempt_schedule_irq
rjmp 1b
#endif
rjmp 1b
.endm
.section .irq.text,"ax",@progbits
.global cpu_idle_sleep
cpu_idle_sleep:
mask_interrupts
get_thread_info r8
ld.w r9, r8[TI_flags]
bld r9, TIF_NEED_RESCHED
brcs cpu_idle_enable_int_and_exit
sbr r9, TIF_CPU_GOING_TO_SLEEP
st.w r8[TI_flags], r9
unmask_interrupts
sleep 0
cpu_idle_skip_sleep:
mask_interrupts
ld.w r9, r8[TI_flags]
cbr r9, TIF_CPU_GOING_TO_SLEEP
st.w r8[TI_flags], r9
cpu_idle_enable_int_and_exit:
unmask_interrupts
retal r12
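Note on the idle/sleep handshake above: cpu_idle_sleep() sets TIF_CPU_GOING_TO_SLEEP while interrupts are still masked, then unmasks and executes sleep 0. The irq_level\level return path checks that flag whenever it is about to return to supervisor mode and, if it is set, rewrites the saved PC to cpu_idle_skip_sleep, which clears the flag again. This closes the window where an interrupt arrives between unmask_interrupts and the sleep instruction and would otherwise put the CPU to sleep despite a pending need_resched().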
.global irq_level0
.global irq_level1
.global irq_level2
......
......@@ -12,10 +12,11 @@
* published by the Free Software Foundation.
*/
#include <linux/moduleloader.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
void *module_alloc(unsigned long size)
......@@ -315,10 +316,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
vfree(module->arch.syminfo);
module->arch.syminfo = NULL;
return 0;
return module_bug_finalize(hdr, sechdrs, module);
}
void module_arch_cleanup(struct module *module)
{
module_bug_cleanup(module);
}
......@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/unistd.h>
#include <asm/sysreg.h>
......@@ -19,6 +20,8 @@
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
extern void cpu_idle_sleep(void);
/*
* This file handles the architecture-dependent parts of process handling..
*/
......@@ -27,9 +30,8 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
/* TODO: Enter sleep mode */
while (!need_resched())
cpu_relax();
cpu_idle_sleep();
preempt_enable_no_resched();
schedule();
preempt_disable();
......@@ -114,39 +116,178 @@ void release_thread(struct task_struct *dead_task)
/* do nothing */
}
static void dump_mem(const char *str, const char *log_lvl,
unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%s%04lx: ", log_lvl, p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
printk(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
printk("\n");
goto out;
}
printk("%08x ", val);
}
}
printk("\n");
}
out:
return;
}
static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
{
return (p > (unsigned long)tinfo)
&& (p < (unsigned long)tinfo + THREAD_SIZE - 3);
}
#ifdef CONFIG_FRAME_POINTER
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs, const char *log_lvl)
{
unsigned long lr, fp;
struct thread_info *tinfo;
if (regs)
fp = regs->r7;
else if (tsk == current)
asm("mov %0, r7" : "=r"(fp));
else
fp = tsk->thread.cpu_context.r7;
/*
* Walk the stack as long as the frame pointer (a) is within
* the kernel stack of the task, and (b) it doesn't move
* downwards.
*/
tinfo = task_thread_info(tsk);
printk("%sCall trace:\n", log_lvl);
while (valid_stack_ptr(tinfo, fp)) {
unsigned long new_fp;
lr = *(unsigned long *)fp;
#ifdef CONFIG_KALLSYMS
printk("%s [<%08lx>] ", log_lvl, lr);
#else
printk(" [<%08lx>] ", lr);
#endif
print_symbol("%s\n", lr);
new_fp = *(unsigned long *)(fp + 4);
if (new_fp <= fp)
break;
fp = new_fp;
}
printk("\n");
}
#else
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs, const char *log_lvl)
{
unsigned long addr;
printk("%sCall trace:\n", log_lvl);
while (!kstack_end(sp)) {
addr = *sp++;
if (kernel_text_address(addr)) {
#ifdef CONFIG_KALLSYMS
printk("%s [<%08lx>] ", log_lvl, addr);
#else
printk(" [<%08lx>] ", addr);
#endif
print_symbol("%s\n", addr);
}
}
printk("\n");
}
#endif
void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
struct pt_regs *regs, const char *log_lvl)
{
struct thread_info *tinfo;
if (sp == 0) {
if (tsk)
sp = tsk->thread.cpu_context.ksp;
else
sp = (unsigned long)&tinfo;
}
if (!tsk)
tsk = current;
tinfo = task_thread_info(tsk);
if (valid_stack_ptr(tinfo, sp)) {
dump_mem("Stack: ", log_lvl, sp,
THREAD_SIZE + (unsigned long)tinfo);
show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl);
}
}
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
}
void dump_stack(void)
{
unsigned long stack;
show_trace_log_lvl(current, &stack, NULL, "");
}
EXPORT_SYMBOL(dump_stack);
static const char *cpu_modes[] = {
"Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
"Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
};
void show_regs(struct pt_regs *regs)
void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
{
unsigned long sp = regs->sp;
unsigned long lr = regs->lr;
unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;
if (!user_mode(regs))
if (!user_mode(regs)) {
sp = (unsigned long)regs + FRAME_SIZE_FULL;
print_symbol("PC is at %s\n", instruction_pointer(regs));
print_symbol("LR is at %s\n", lr);
printk("pc : [<%08lx>] lr : [<%08lx>] %s\n"
"sp : %08lx r12: %08lx r11: %08lx\n",
instruction_pointer(regs),
lr, print_tainted(), sp, regs->r12, regs->r11);
printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
regs->r10, regs->r9, regs->r8);
printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
regs->r7, regs->r6, regs->r5, regs->r4);
printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
regs->r3, regs->r2, regs->r1, regs->r0);
printk("Flags: %c%c%c%c%c\n",
printk("%s", log_lvl);
print_symbol("PC is at %s\n", instruction_pointer(regs));
printk("%s", log_lvl);
print_symbol("LR is at %s\n", lr);
}
printk("%spc : [<%08lx>] lr : [<%08lx>] %s\n"
"%ssp : %08lx r12: %08lx r11: %08lx\n",
log_lvl, instruction_pointer(regs), lr, print_tainted(),
log_lvl, sp, regs->r12, regs->r11);
printk("%sr10: %08lx r9 : %08lx r8 : %08lx\n",
log_lvl, regs->r10, regs->r9, regs->r8);
printk("%sr7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
log_lvl, regs->r7, regs->r6, regs->r5, regs->r4);
printk("%sr3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
log_lvl, regs->r3, regs->r2, regs->r1, regs->r0);
printk("%sFlags: %c%c%c%c%c\n", log_lvl,
regs->sr & SR_Q ? 'Q' : 'q',
regs->sr & SR_V ? 'V' : 'v',
regs->sr & SR_N ? 'N' : 'n',
regs->sr & SR_Z ? 'Z' : 'z',
regs->sr & SR_C ? 'C' : 'c');
printk("Mode bits: %c%c%c%c%c%c%c%c%c\n",
printk("%sMode bits: %c%c%c%c%c%c%c%c%c\n", log_lvl,
regs->sr & SR_H ? 'H' : 'h',
regs->sr & SR_R ? 'R' : 'r',
regs->sr & SR_J ? 'J' : 'j',
......@@ -156,9 +297,21 @@ void show_regs(struct pt_regs *regs)
regs->sr & SR_I1M ? '1' : '.',
regs->sr & SR_I0M ? '0' : '.',
regs->sr & SR_GM ? 'G' : 'g');
printk("CPU Mode: %s\n", cpu_modes[mode]);
printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
printk("%sProcess: %s [%d] (task: %p thread: %p)\n",
log_lvl, current->comm, current->pid, current,
task_thread_info(current));
}
void show_regs(struct pt_regs *regs)
{
unsigned long sp = regs->sp;
if (!user_mode(regs))
sp = (unsigned long)regs + FRAME_SIZE_FULL;
show_trace(NULL, (unsigned long *)sp, regs);
show_regs_log_lvl(regs, "");
show_trace_log_lvl(current, (unsigned long *)sp, regs, "");
}
EXPORT_SYMBOL(show_regs);
......
......@@ -8,12 +8,14 @@
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/sched.h>
#include <linux/console.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
......@@ -29,13 +31,6 @@
extern int root_mountflags;
/*
* Bootloader-provided information about physical memory
*/
struct tag_mem_range *mem_phys;
struct tag_mem_range *mem_reserved;
struct tag_mem_range *mem_ramdisk;
/*
* Initialize loops_per_jiffy as 5000000 (500MIPS).
* Better make it too large than too small...
......@@ -48,48 +43,193 @@ EXPORT_SYMBOL(boot_cpu_data);
static char __initdata command_line[COMMAND_LINE_SIZE];
/*
* Should be more than enough, but if you have a _really_ complex
* setup, you might need to increase the size of this...
* Standard memory resources
*/
static struct tag_mem_range __initdata mem_range_cache[32];
static unsigned mem_range_next_free;
static struct resource __initdata kernel_data = {
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM,
};
static struct resource __initdata kernel_code = {
.name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM,
.sibling = &kernel_data,
};
/*
* Standard memory resources
* Available system RAM and reserved regions as singly linked
* lists. These lists are traversed using the sibling pointer in
* struct resource and are kept sorted at all times.
*/
static struct resource mem_res[] = {
{
.name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
},
{
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM,
},
};
static struct resource *__initdata system_ram;
static struct resource *__initdata reserved = &kernel_code;
/*
* We need to allocate these before the bootmem allocator is up and
* running, so we need this "cache". 32 entries are probably enough
* for all but the most insanely complex systems.
*/
static struct resource __initdata res_cache[32];
static unsigned int __initdata res_cache_next_free;
static void __init resource_init(void)
{
struct resource *mem, *res;
struct resource *new;
kernel_code.start = __pa(init_mm.start_code);
for (mem = system_ram; mem; mem = mem->sibling) {
new = alloc_bootmem_low(sizeof(struct resource));
memcpy(new, mem, sizeof(struct resource));
new->sibling = NULL;
if (request_resource(&iomem_resource, new))
printk(KERN_WARNING "Bad RAM resource %08x-%08x\n",
mem->start, mem->end);
}
for (res = reserved; res; res = res->sibling) {
new = alloc_bootmem_low(sizeof(struct resource));
memcpy(new, res, sizeof(struct resource));
new->sibling = NULL;
if (insert_resource(&iomem_resource, new))
printk(KERN_WARNING
"Bad reserved resource %s (%08x-%08x)\n",
res->name, res->start, res->end);
}
}
static void __init
add_physical_memory(resource_size_t start, resource_size_t end)
{
struct resource *new, *next, **pprev;
for (pprev = &system_ram, next = system_ram; next;
pprev = &next->sibling, next = next->sibling) {
if (end < next->start)
break;
if (start <= next->end) {
printk(KERN_WARNING
"Warning: Physical memory map is broken\n");
printk(KERN_WARNING
"Warning: %08x-%08x overlaps %08x-%08x\n",
start, end, next->start, next->end);
return;
}
}
if (res_cache_next_free >= ARRAY_SIZE(res_cache)) {
printk(KERN_WARNING
"Warning: Failed to add physical memory %08x-%08x\n",
start, end);
return;
}
new = &res_cache[res_cache_next_free++];
new->start = start;
new->end = end;
new->name = "System RAM";
new->flags = IORESOURCE_MEM;
*pprev = new;
}
static int __init
add_reserved_region(resource_size_t start, resource_size_t end,
const char *name)
{
struct resource *new, *next, **pprev;
if (end < start)
return -EINVAL;
if (res_cache_next_free >= ARRAY_SIZE(res_cache))
return -ENOMEM;
for (pprev = &reserved, next = reserved; next;
pprev = &next->sibling, next = next->sibling) {
if (end < next->start)
break;
if (start <= next->end)
return -EBUSY;
}
new = &res_cache[res_cache_next_free++];
new->start = start;
new->end = end;
new->name = name;
new->flags = IORESOURCE_MEM;
*pprev = new;
return 0;
}
static unsigned long __init
find_free_region(const struct resource *mem, resource_size_t size,
resource_size_t align)
{
struct resource *res;
unsigned long target;
target = ALIGN(mem->start, align);
for (res = reserved; res; res = res->sibling) {
if ((target + size) <= res->start)
break;
if (target <= res->end)
target = ALIGN(res->end + 1, align);
}
if ((target + size) > (mem->end + 1))
return mem->end + 1;
return target;
}
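A worked pass through find_free_region(), with hypothetical numbers: for a bank 0x10000000-0x10ffffff whose reserved list already holds the kernel image at 0x10000000-0x101fffff, a 1 MiB request with 1 MiB alignment starts at target = 0x10000000, overlaps the kernel entry (target <= res->end), is bumped to ALIGN(0x10200000, 1 MiB) = 0x10200000, and since 0x10200000 + 0x100000 still fits below mem->end + 1, that address is returned.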
static int __init
alloc_reserved_region(resource_size_t *start, resource_size_t size,
resource_size_t align, const char *name)
{
struct resource *mem;
resource_size_t target;
int ret;
for (mem = system_ram; mem; mem = mem->sibling) {
target = find_free_region(mem, size, align);
if (target <= mem->end) {
ret = add_reserved_region(target, target + size - 1,
name);
if (!ret)
*start = target;
return ret;
}
}
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
return -ENOMEM;
}
/*
* Early framebuffer allocation. Works as follows:
* - If fbmem_size is zero, nothing will be allocated or reserved.
* - If fbmem_start is zero when setup_bootmem() is called,
* fbmem_size bytes will be allocated from the bootmem allocator.
* a block of fbmem_size bytes will be reserved before bootmem
* initialization. It will be aligned to the largest page size
* that fbmem_size is a multiple of.
* - If fbmem_start is nonzero, an area of size fbmem_size will be
* reserved at the physical address fbmem_start if necessary. If
* the area isn't in a memory region known to the kernel, it will
* be left alone.
* reserved at the physical address fbmem_start if possible. If
* it collides with other reserved memory, a different block of
* same size will be allocated, just as if fbmem_start was zero.
*
* Board-specific code may use these variables to set up platform data
* for the framebuffer driver if fbmem_size is nonzero.
*/
static unsigned long __initdata fbmem_start;
static unsigned long __initdata fbmem_size;
resource_size_t __initdata fbmem_start;
resource_size_t __initdata fbmem_size;
/*
* "fbmem=xxx[kKmM]" allocates the specified amount of boot memory for
......@@ -103,48 +243,42 @@ static unsigned long __initdata fbmem_size;
*/
static int __init early_parse_fbmem(char *p)
{
int ret;
unsigned long align;
fbmem_size = memparse(p, &p);
if (*p == '@')
if (*p == '@') {
fbmem_start = memparse(p, &p);
return 0;
}
early_param("fbmem", early_parse_fbmem);
static inline void __init resource_init(void)
{
struct tag_mem_range *region;
kernel_code.start = __pa(init_mm.start_code);
kernel_code.end = __pa(init_mm.end_code - 1);
kernel_data.start = __pa(init_mm.end_code);
kernel_data.end = __pa(init_mm.brk - 1);
for (region = mem_phys; region; region = region->next) {
struct resource *res;
unsigned long phys_start, phys_end;
if (region->size == 0)
continue;
phys_start = region->addr;
phys_end = phys_start + region->size - 1;
res = alloc_bootmem_low(sizeof(*res));
res->name = "System RAM";
res->start = phys_start;
res->end = phys_end;
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource (&iomem_resource, res);
ret = add_reserved_region(fbmem_start,
fbmem_start + fbmem_size - 1,
"Framebuffer");
if (ret) {
printk(KERN_WARNING
"Failed to reserve framebuffer memory\n");
fbmem_start = 0;
}
}
if (kernel_code.start >= res->start &&
kernel_code.end <= res->end)
request_resource (res, &kernel_code);
if (kernel_data.start >= res->start &&
kernel_data.end <= res->end)
request_resource (res, &kernel_data);
if (!fbmem_start) {
if ((fbmem_size & 0x000fffffUL) == 0)
align = 0x100000; /* 1 MiB */
else if ((fbmem_size & 0x0000ffffUL) == 0)
align = 0x10000; /* 64 KiB */
else
align = 0x1000; /* 4 KiB */
ret = alloc_reserved_region(&fbmem_start, fbmem_size,
align, "Framebuffer");
if (ret) {
printk(KERN_WARNING
"Failed to allocate framebuffer memory\n");
fbmem_size = 0;
}
}
return 0;
}
early_param("fbmem", early_parse_fbmem);
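For example, booting with fbmem=2M and no fixed address: 0x200000 is a multiple of 1 MiB, so align is set to 0x100000 and alloc_reserved_region() carves a 1 MiB-aligned, 2 MiB "Framebuffer" region out of the first bank with enough free space. A request like fbmem=600k (0x96000 bytes) is a multiple of neither 1 MiB nor 64 KiB and falls through to 4 KiB alignment.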
static int __init parse_tag_core(struct tag *tag)
{
......@@ -157,11 +291,9 @@ static int __init parse_tag_core(struct tag *tag)
}
__tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem_range(struct tag *tag,
struct tag_mem_range **root)
static int __init parse_tag_mem(struct tag *tag)
{
struct tag_mem_range *cur, **pprev;
struct tag_mem_range *new;
unsigned long start, end;
/*
* Ignore zero-sized entries. If we're running standalone, the
......@@ -171,34 +303,53 @@ static int __init parse_tag_mem_range(struct tag *tag,
if (tag->u.mem_range.size == 0)
return 0;
/*
* Copy the data so the bootmem init code doesn't need to care
* about it.
*/
if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache))
panic("Physical memory map too complex!\n");
start = tag->u.mem_range.addr;
end = tag->u.mem_range.addr + tag->u.mem_range.size - 1;
add_physical_memory(start, end);
return 0;
}
__tagtable(ATAG_MEM, parse_tag_mem);
static int __init parse_tag_rdimg(struct tag *tag)
{
#ifdef CONFIG_INITRD
struct tag_mem_range *mem = &tag->u.mem_range;
int ret;
new = &mem_range_cache[mem_range_next_free++];
*new = tag->u.mem_range;
if (initrd_start) {
printk(KERN_WARNING
"Warning: Only the first initrd image will be used\n");
return 0;
}
pprev = root;
cur = *root;
while (cur) {
pprev = &cur->next;
cur = cur->next;
ret = add_reserved_region(mem->start, mem->start + mem->size - 1,
"initrd");
if (ret) {
printk(KERN_WARNING
"Warning: Failed to reserve initrd memory\n");
return ret;
}
*pprev = new;
new->next = NULL;
initrd_start = (unsigned long)__va(mem->addr);
initrd_end = initrd_start + mem->size;
#else
printk(KERN_WARNING "RAM disk image present, but "
"no initrd support in kernel, ignoring\n");
#endif
return 0;
}
__tagtable(ATAG_RDIMG, parse_tag_rdimg);
static int __init parse_tag_mem(struct tag *tag)
static int __init parse_tag_rsvd_mem(struct tag *tag)
{
return parse_tag_mem_range(tag, &mem_phys);
struct tag_mem_range *mem = &tag->u.mem_range;
return add_reserved_region(mem->addr, mem->addr + mem->size - 1,
"Reserved");
}
__tagtable(ATAG_MEM, parse_tag_mem);
__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
static int __init parse_tag_cmdline(struct tag *tag)
{
......@@ -207,12 +358,6 @@ static int __init parse_tag_cmdline(struct tag *tag)
}
__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
static int __init parse_tag_rdimg(struct tag *tag)
{
return parse_tag_mem_range(tag, &mem_ramdisk);
}
__tagtable(ATAG_RDIMG, parse_tag_rdimg);
static int __init parse_tag_clock(struct tag *tag)
{
/*
......@@ -223,12 +368,6 @@ static int __init parse_tag_clock(struct tag *tag)
}
__tagtable(ATAG_CLOCK, parse_tag_clock);
static int __init parse_tag_rsvd_mem(struct tag *tag)
{
return parse_tag_mem_range(tag, &mem_reserved);
}
__tagtable(ATAG_RSVD_MEM, parse_tag_rsvd_mem);
/*
* Scan the tag table for this tag, and call its parse function. The
* tag table is built by the linker from all the __tagtable
......@@ -260,10 +399,137 @@ static void __init parse_tags(struct tag *t)
t->hdr.tag);
}
/*
* Find a free memory region large enough for storing the
* bootmem bitmap.
*/
static unsigned long __init
find_bootmap_pfn(const struct resource *mem)
{
unsigned long bootmap_pages, bootmap_len;
unsigned long node_pages = PFN_UP(mem->end - mem->start + 1);
unsigned long bootmap_start;
bootmap_pages = bootmem_bootmap_pages(node_pages);
bootmap_len = bootmap_pages << PAGE_SHIFT;
/*
* Find a large enough region without reserved pages for
* storing the bootmem bitmap. We can take advantage of the
* fact that all lists have been sorted.
*
* We have to check that we don't collide with any reserved
* regions, which includes the kernel image and any RAMDISK
* images.
*/
bootmap_start = find_free_region(mem, bootmap_len, PAGE_SIZE);
return bootmap_start >> PAGE_SHIFT;
}
#define MAX_LOWMEM HIGHMEM_START
#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
static void __init setup_bootmem(void)
{
unsigned bootmap_size;
unsigned long first_pfn, bootmap_pfn, pages;
unsigned long max_pfn, max_low_pfn;
unsigned node = 0;
struct resource *res;
printk(KERN_INFO "Physical memory:\n");
for (res = system_ram; res; res = res->sibling)
printk(" %08x-%08x\n", res->start, res->end);
printk(KERN_INFO "Reserved memory:\n");
for (res = reserved; res; res = res->sibling)
printk(" %08x-%08x: %s\n",
res->start, res->end, res->name);
nodes_clear(node_online_map);
if (system_ram->sibling)
printk(KERN_WARNING "Only using first memory bank\n");
for (res = system_ram; res; res = NULL) {
first_pfn = PFN_UP(res->start);
max_low_pfn = max_pfn = PFN_DOWN(res->end + 1);
bootmap_pfn = find_bootmap_pfn(res);
if (bootmap_pfn > max_pfn)
panic("No space for bootmem bitmap!\n");
if (max_low_pfn > MAX_LOWMEM_PFN) {
max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
/*
* Lowmem is memory that can be addressed
* directly through P1/P2
*/
printk(KERN_WARNING
"Node %u: Only %ld MiB of memory will be used.\n",
node, MAX_LOWMEM >> 20);
printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
}
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
first_pfn, max_low_pfn);
/*
* Register fully available RAM pages with the bootmem
* allocator.
*/
pages = max_low_pfn - first_pfn;
free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
PFN_PHYS(pages));
/* Reserve space for the bootmem bitmap... */
reserve_bootmem_node(NODE_DATA(node),
PFN_PHYS(bootmap_pfn),
bootmap_size);
/* ...and any other reserved regions. */
for (res = reserved; res; res = res->sibling) {
if (res->start > PFN_PHYS(max_pfn))
break;
/*
* resource_init will complain about partial
* overlaps, so we'll just ignore such
* resources for now.
*/
if (res->start >= PFN_PHYS(first_pfn)
&& res->end < PFN_PHYS(max_pfn))
reserve_bootmem_node(
NODE_DATA(node), res->start,
res->end - res->start + 1);
}
node_set_online(node);
}
}
void __init setup_arch (char **cmdline_p)
{
struct clk *cpu_clk;
init_mm.start_code = (unsigned long)_text;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
/*
* Include .init section to make allocations easier. It will
* be removed before the resource is actually requested.
*/
kernel_code.start = __pa(__init_begin);
kernel_code.end = __pa(init_mm.end_code - 1);
kernel_data.start = __pa(init_mm.end_code);
kernel_data.end = __pa(init_mm.brk - 1);
parse_tags(bootloader_tags);
setup_processor();
......@@ -289,24 +555,16 @@ void __init setup_arch (char **cmdline_p)
((cpu_hz + 500) / 1000) % 1000);
}
init_mm.start_code = (unsigned long) &_text;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
parse_early_param();
setup_bootmem();
board_setup_fbmem(fbmem_start, fbmem_size);
#ifdef CONFIG_VT
conswitchp = &dummy_con;
#endif
paging_init();
resource_init();
}
/*
* Copyright (C) 2004-2006 Atmel Corporation
* Copyright (C) 2004-2007 Atmel Corporation
*
* Based on MIPS implementation arch/mips/kernel/time.c
* Copyright 2001 MontaVista Software Inc.
......@@ -20,18 +20,25 @@
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/sysdev.h>
#include <linux/err.h>
#include <asm/div64.h>
#include <asm/sysreg.h>
#include <asm/io.h>
#include <asm/sections.h>
static cycle_t read_cycle_count(void)
/* how many counter cycles in a jiffy? */
static u32 cycles_per_jiffy;
/* the count value for the next timer interrupt */
static u32 expirelo;
cycle_t __weak read_cycle_count(void)
{
return (cycle_t)sysreg_read(COUNT);
}
static struct clocksource clocksource_avr32 = {
struct clocksource __weak clocksource_avr32 = {
.name = "avr32",
.rating = 350,
.read = read_cycle_count,
......@@ -40,12 +47,20 @@ static struct clocksource clocksource_avr32 = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
irqreturn_t __weak timer_interrupt(int irq, void *dev_id);
struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
.name = "timer",
};
/*
* By default we provide the null RTC ops
*/
static unsigned long null_rtc_get_time(void)
{
return mktime(2004, 1, 1, 0, 0, 0);
return mktime(2007, 1, 1, 0, 0, 0);
}
static int null_rtc_set_time(unsigned long sec)
......@@ -56,23 +71,14 @@ static int null_rtc_set_time(unsigned long sec)
static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
/* how many counter cycles in a jiffy? */
static unsigned long cycles_per_jiffy;
/* cycle counter value at the previous timer interrupt */
static unsigned int timerhi, timerlo;
/* the count value for the next timer interrupt */
static unsigned int expirelo;
static void avr32_timer_ack(void)
{
unsigned int count;
u32 count;
/* Ack this timer interrupt and set the next one */
expirelo += cycles_per_jiffy;
/* setting COMPARE to 0 stops the COUNT-COMPARE */
if (expirelo == 0) {
printk(KERN_DEBUG "expirelo == 0\n");
sysreg_write(COMPARE, expirelo + 1);
} else {
sysreg_write(COMPARE, expirelo);
......@@ -86,27 +92,56 @@ static void avr32_timer_ack(void)
}
}
static unsigned int avr32_hpt_read(void)
int __weak avr32_hpt_init(void)
{
return sysreg_read(COUNT);
int ret;
unsigned long mult, shift, count_hz;
count_hz = clk_get_rate(boot_cpu_data.clk);
shift = clocksource_avr32.shift;
mult = clocksource_hz2mult(count_hz, shift);
clocksource_avr32.mult = mult;
{
u64 tmp;
tmp = TICK_NSEC;
tmp <<= shift;
tmp += mult / 2;
do_div(tmp, mult);
cycles_per_jiffy = tmp;
}
ret = setup_irq(0, &timer_irqaction);
if (ret) {
pr_debug("timer: could not request IRQ 0: %d\n", ret);
return -ENODEV;
}
printk(KERN_INFO "timer: AT32AP COUNT-COMPARE at irq 0, "
"%lu.%03lu MHz\n",
((count_hz + 500) / 1000) / 1000,
((count_hz + 500) / 1000) % 1000);
return 0;
}
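The mult/shift arithmetic above boils down to cycles_per_jiffy being roughly count_hz / HZ (TICK_NSEC scaled by the clocksource parameters). Assuming, say, a 200 MHz CPU clock and HZ=100, that is about 200,000,000 / 100 = 2,000,000 COUNT cycles per tick.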
/*
* Taken from MIPS c0_hpt_timer_init().
*
* Why is it so complicated, and what is "count"? My assumption is
* that `count' specifies the "reference cycle", i.e. the cycle since
* reset that should mean "zero". The reason COUNT is written twice is
* probably to make sure we don't get any timer interrupts while we
* are messing with the counter.
* The reason COUNT is written twice is probably to make sure we don't get any
* timer interrupts while we are messing with the counter.
*/
static void avr32_hpt_init(unsigned int count)
int __weak avr32_hpt_start(void)
{
count = sysreg_read(COUNT) - count;
u32 count = sysreg_read(COUNT);
expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
sysreg_write(COUNT, expirelo - cycles_per_jiffy);
sysreg_write(COMPARE, expirelo);
sysreg_write(COUNT, count);
return 0;
}
/*
......@@ -115,26 +150,18 @@ static void avr32_hpt_init(unsigned int count)
*
* In UP mode, it is invoked from the (global) timer_interrupt.
*/
static void local_timer_interrupt(int irq, void *dev_id)
void local_timer_interrupt(int irq, void *dev_id)
{
if (current->pid)
profile_tick(CPU_PROFILING);
update_process_times(user_mode(get_irq_regs()));
}
static irqreturn_t
timer_interrupt(int irq, void *dev_id)
irqreturn_t __weak timer_interrupt(int irq, void *dev_id)
{
unsigned int count;
/* ack timer interrupt and try to set next interrupt */
count = avr32_hpt_read();
avr32_timer_ack();
/* Update timerhi/timerlo for intra-jiffy calibration */
timerhi += count < timerlo; /* Wrap around */
timerlo = count;
/*
* Call the generic timer interrupt handler
*/
......@@ -153,60 +180,37 @@ timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED,
.name = "timer",
};
void __init time_init(void)
{
unsigned long mult, shift, count_hz;
int ret;
/*
* Make sure we don't get any COMPARE interrupts before we can
* handle them.
*/
sysreg_write(COMPARE, 0);
xtime.tv_sec = rtc_get_time();
xtime.tv_nsec = 0;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
printk("Before time_init: count=%08lx, compare=%08lx\n",
(unsigned long)sysreg_read(COUNT),
(unsigned long)sysreg_read(COMPARE));
count_hz = clk_get_rate(boot_cpu_data.clk);
shift = clocksource_avr32.shift;
mult = clocksource_hz2mult(count_hz, shift);
clocksource_avr32.mult = mult;
printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift);
{
u64 tmp;
tmp = TICK_NSEC;
tmp <<= shift;
tmp += mult / 2;
do_div(tmp, mult);
cycles_per_jiffy = tmp;
ret = avr32_hpt_init();
if (ret) {
pr_debug("timer: failed setup: %d\n", ret);
return;
}
/* This sets up the high precision timer for the first interrupt. */
avr32_hpt_init(avr32_hpt_read());
printk("After time_init: count=%08lx, compare=%08lx\n",
(unsigned long)sysreg_read(COUNT),
(unsigned long)sysreg_read(COMPARE));
ret = clocksource_register(&clocksource_avr32);
if (ret)
printk(KERN_ERR
"timer: could not register clocksource: %d\n", ret);
pr_debug("timer: could not register clocksource: %d\n", ret);
ret = setup_irq(0, &timer_irqaction);
if (ret)
printk("timer: could not request IRQ 0: %d\n", ret);
ret = avr32_hpt_start();
if (ret) {
pr_debug("timer: failed starting: %d\n", ret);
return;
}
}
static struct sysdev_class timer_class = {
......
......@@ -5,158 +5,25 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/traps.h>
#include <asm/sysreg.h>
#include <asm/addrspace.h>
#include <asm/ocd.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%04lx: ", p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
printk(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
printk("\n");
goto out;
}
printk("%08x ", val);
}
}
printk("\n");
}
out:
return;
}
static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
{
return (p > (unsigned long)tinfo)
&& (p < (unsigned long)tinfo + THREAD_SIZE - 3);
}
#ifdef CONFIG_FRAME_POINTER
static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs)
{
unsigned long lr, fp;
struct thread_info *tinfo;
tinfo = (struct thread_info *)
((unsigned long)sp & ~(THREAD_SIZE - 1));
if (regs)
fp = regs->r7;
else if (tsk == current)
asm("mov %0, r7" : "=r"(fp));
else
fp = tsk->thread.cpu_context.r7;
/*
* Walk the stack as long as the frame pointer (a) is within
* the kernel stack of the task, and (b) it doesn't move
* downwards.
*/
while (valid_stack_ptr(tinfo, fp)) {
unsigned long new_fp;
lr = *(unsigned long *)fp;
printk(" [<%08lx>] ", lr);
print_symbol("%s\n", lr);
new_fp = *(unsigned long *)(fp + 4);
if (new_fp <= fp)
break;
fp = new_fp;
}
printk("\n");
}
#else
static inline void __show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs)
{
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (kernel_text_address(addr)) {
printk(" [<%08lx>] ", addr);
print_symbol("%s\n", addr);
}
}
}
#endif
void show_trace(struct task_struct *tsk, unsigned long *sp,
struct pt_regs *regs)
{
if (regs &&
(((regs->sr & MODE_MASK) == MODE_EXCEPTION) ||
((regs->sr & MODE_MASK) == MODE_USER)))
return;
printk ("Call trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
__show_trace(tsk, sp, regs);
printk("\n");
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
if (!tsk)
tsk = current;
if (sp == 0) {
if (tsk == current) {
register unsigned long *real_sp __asm__("sp");
sp = real_sp;
} else {
sp = (unsigned long *)tsk->thread.cpu_context.ksp;
}
}
stack = (unsigned long)sp;
dump_mem("Stack: ", stack,
THREAD_SIZE + (unsigned long)tsk->thread_info);
show_trace(tsk, sp, NULL);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
#include <asm/ocd.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
ATOMIC_NOTIFIER_HEAD(avr32_die_chain);
int register_die_notifier(struct notifier_block *nb)
{
pr_debug("register_die_notifier: %p\n", nb);
return atomic_notifier_chain_register(&avr32_die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);
......@@ -169,93 +36,103 @@ EXPORT_SYMBOL(unregister_die_notifier);
static DEFINE_SPINLOCK(die_lock);
void __die(const char *str, struct pt_regs *regs, unsigned long err,
const char *file, const char *func, unsigned long line)
void NORET_TYPE die(const char *str, struct pt_regs *regs, long err)
{
struct task_struct *tsk = current;
static int die_counter;
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk(KERN_ALERT "%s", str);
if (file && func)
printk(" in %s:%s, line %ld", file, func, line);
printk("[#%d]:\n", ++die_counter);
print_modules();
show_regs(regs);
printk("Process %s (pid: %d, stack limit = 0x%p)\n",
tsk->comm, tsk->pid, tsk->thread_info + 1);
if (!user_mode(regs) || in_interrupt()) {
dump_mem("Stack: ", regs->sp,
THREAD_SIZE + (unsigned long)tsk->thread_info);
printk(KERN_ALERT "Oops: %s, sig: %ld [#%d]\n" KERN_EMERG,
str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
printk("PREEMPT ");
#endif
#ifdef CONFIG_FRAME_POINTER
printk("FRAME_POINTER ");
#endif
if (current_cpu_data.features & AVR32_FEATURE_OCD) {
unsigned long did = __mfdr(DBGREG_DID);
printk("chip: 0x%03lx:0x%04lx rev %lu\n",
(did >> 1) & 0x7ff,
(did >> 12) & 0x7fff,
(did >> 28) & 0xf);
} else {
printk("cpu: arch %u r%u / core %u r%u\n",
current_cpu_data.arch_type,
current_cpu_data.arch_revision,
current_cpu_data.cpu_type,
current_cpu_data.cpu_revision);
}
print_modules();
show_regs_log_lvl(regs, KERN_EMERG);
show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
do_exit(err);
}
void __die_if_kernel(const char *str, struct pt_regs *regs, unsigned long err,
const char *file, const char *func, unsigned long line)
void _exception(long signr, struct pt_regs *regs, int code,
unsigned long addr)
{
siginfo_t info;
if (!user_mode(regs))
__die(str, regs, err, file, func, line);
}
die("Unhandled exception in kernel mode", regs, signr);
memset(&info, 0, sizeof(info));
info.si_signo = signr;
info.si_code = code;
info.si_addr = (void __user *)addr;
force_sig_info(signr, &info, current);
asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
{
#ifdef CONFIG_SUBARCH_AVR32B
/*
* The exception entry always saves RSR_EX. For NMI, this is
* wrong; it should be RSR_NMI
* Init gets no signals that it doesn't have a handler for.
* That's all very well, but if it has caused a synchronous
* exception and we ignore the resulting signal, it will just
* generate the same exception over and over again and we get
* nowhere. Better to kill it and let the kernel panic.
*/
regs->sr = sysreg_read(RSR_NMI);
#endif
if (is_init(current)) {
__sighandler_t handler;
spin_lock_irq(&current->sighand->siglock);
handler = current->sighand->action[signr-1].sa.sa_handler;
spin_unlock_irq(&current->sighand->siglock);
if (handler == SIG_DFL) {
/* init has generated a synchronous exception
and it doesn't have a handler for the signal */
printk(KERN_CRIT "init has generated signal %ld "
"but has no handler for it\n", signr);
do_exit(signr);
}
}
}
printk("NMI taken!!!!\n");
die("NMI", regs, ecr);
BUG();
asmlinkage void do_nmi(unsigned long ecr, struct pt_regs *regs)
{
printk(KERN_ALERT "Got Non-Maskable Interrupt, dumping regs\n");
show_regs_log_lvl(regs, KERN_ALERT);
show_stack_log_lvl(current, regs->sp, regs, KERN_ALERT);
}
asmlinkage void do_critical_exception(unsigned long ecr, struct pt_regs *regs)
{
printk("Unable to handle critical exception %lu at pc = %08lx!\n",
ecr, regs->pc);
die("Oops", regs, ecr);
BUG();
die("Critical exception", regs, SIGKILL);
}
asmlinkage void do_address_exception(unsigned long ecr, struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Oops: Address exception in kernel mode", regs, ecr);
#ifdef DEBUG
if (ecr == ECR_ADDR_ALIGN_X)
pr_debug("Instruction Address Exception at pc = %08lx\n",
regs->pc);
else if (ecr == ECR_ADDR_ALIGN_R)
pr_debug("Data Address Exception (Read) at pc = %08lx\n",
regs->pc);
else if (ecr == ECR_ADDR_ALIGN_W)
pr_debug("Data Address Exception (Write) at pc = %08lx\n",
regs->pc);
else
BUG();
show_regs(regs);
#endif
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)regs->pc;
force_sig_info(SIGBUS, &info, current);
_exception(SIGBUS, regs, BUS_ADRALN, regs->pc);
}
/* This way of handling undefined instructions is stolen from ARM */
......@@ -280,7 +157,8 @@ static int do_cop_absent(u32 insn)
{
int cop_nr;
u32 cpucr;
if ( (insn & 0xfdf00000) == 0xf1900000 )
if ((insn & 0xfdf00000) == 0xf1900000)
/* LDC0 */
cop_nr = 0;
else
......@@ -292,136 +170,91 @@ static int do_cop_absent(u32 insn)
sysreg_write(CPUCR, cpucr);
cpucr = sysreg_read(CPUCR);
if ( !(cpucr & (1 << (24 + cop_nr))) ){
printk("Coprocessor #%i not found!\n", cop_nr);
return -1;
}
if (!(cpucr & (1 << (24 + cop_nr))))
return -ENODEV;
return 0;
}
#ifdef CONFIG_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
{
char *file;
u16 line;
char c;
if (__get_user(line, (u16 __user *)(regs->pc + 2)))
return;
if (__get_user(file, (char * __user *)(regs->pc + 4))
|| (unsigned long)file < PAGE_OFFSET
|| __get_user(c, file))
file = "<bad filename>";
printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
}
#else
static inline void do_bug_verbose(struct pt_regs *regs, u32 insn)
int is_valid_bugaddr(unsigned long pc)
{
unsigned short opcode;
if (pc < PAGE_OFFSET)
return 0;
if (probe_kernel_address((u16 *)pc, opcode))
return 0;
return opcode == AVR32_BUG_OPCODE;
}
#endif
#endif
asmlinkage void do_illegal_opcode(unsigned long ecr, struct pt_regs *regs)
{
u32 insn;
struct undef_hook *hook;
siginfo_t info;
void __user *pc;
long code;
if (!user_mode(regs))
goto kernel_trap;
if (!user_mode(regs) && (ecr == ECR_ILLEGAL_OPCODE)) {
enum bug_trap_type type;
type = report_bug(regs->pc);
switch (type) {
case BUG_TRAP_TYPE_NONE:
break;
case BUG_TRAP_TYPE_WARN:
regs->pc += 2;
return;
case BUG_TRAP_TYPE_BUG:
die("Kernel BUG", regs, SIGKILL);
}
}
local_irq_enable();
pc = (void __user *)instruction_pointer(regs);
if (__get_user(insn, (u32 __user *)pc))
goto invalid_area;
if (user_mode(regs)) {
pc = (void __user *)instruction_pointer(regs);
if (get_user(insn, (u32 __user *)pc))
goto invalid_area;
if (ecr == ECR_COPROC_ABSENT) {
if (do_cop_absent(insn) == 0)
if (ecr == ECR_COPROC_ABSENT && !do_cop_absent(insn))
return;
}
spin_lock_irq(&undef_lock);
list_for_each_entry(hook, &undef_hook, node) {
if ((insn & hook->insn_mask) == hook->insn_val) {
if (hook->fn(regs, insn) == 0) {
spin_unlock_irq(&undef_lock);
return;
spin_lock_irq(&undef_lock);
list_for_each_entry(hook, &undef_hook, node) {
if ((insn & hook->insn_mask) == hook->insn_val) {
if (hook->fn(regs, insn) == 0) {
spin_unlock_irq(&undef_lock);
return;
}
}
}
spin_unlock_irq(&undef_lock);
}
spin_unlock_irq(&undef_lock);
invalid_area:
#ifdef DEBUG
printk("Illegal instruction at pc = %08lx\n", regs->pc);
if (regs->pc < TASK_SIZE) {
unsigned long ptbr, pgd, pte, *p;
ptbr = sysreg_read(PTBR);
p = (unsigned long *)ptbr;
pgd = p[regs->pc >> 22];
p = (unsigned long *)((pgd & 0x1ffff000) | 0x80000000);
pte = p[(regs->pc >> 12) & 0x3ff];
printk("page table: 0x%08lx -> 0x%08lx -> 0x%08lx\n", ptbr, pgd, pte);
}
#endif
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_addr = (void __user *)regs->pc;
switch (ecr) {
case ECR_ILLEGAL_OPCODE:
case ECR_UNIMPL_INSTRUCTION:
info.si_code = ILL_ILLOPC;
break;
case ECR_PRIVILEGE_VIOLATION:
info.si_code = ILL_PRVOPC;
code = ILL_PRVOPC;
break;
case ECR_COPROC_ABSENT:
info.si_code = ILL_COPROC;
code = ILL_COPROC;
break;
default:
BUG();
code = ILL_ILLOPC;
break;
}
force_sig_info(SIGILL, &info, current);
_exception(SIGILL, regs, code, regs->pc);
return;
kernel_trap:
#ifdef CONFIG_BUG
if (__kernel_text_address(instruction_pointer(regs))) {
insn = *(u16 *)instruction_pointer(regs);
if (insn == AVR32_BUG_OPCODE) {
do_bug_verbose(regs, insn);
die("Kernel BUG", regs, 0);
return;
}
}
#endif
die("Oops: Illegal instruction in kernel code", regs, ecr);
invalid_area:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->pc);
}
asmlinkage void do_fpe(unsigned long ecr, struct pt_regs *regs)
{
siginfo_t info;
printk("Floating-point exception at pc = %08lx\n", regs->pc);
/* We have no FPU... */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_addr = (void __user *)regs->pc;
info.si_code = ILL_COPROC;
force_sig_info(SIGILL, &info, current);
/* We have no FPU yet */
_exception(SIGILL, regs, ILL_COPROC, regs->pc);
}
......
......@@ -26,6 +26,12 @@ SECTIONS
_sinittext = .;
*(.text.reset)
*(.init.text)
/*
* .exit.text is discarded at runtime, not
* link time, to deal with references from
* __bug_table
*/
*(.exit.text)
_einittext = .;
. = ALIGN(4);
__tagtable_begin = .;
......@@ -86,6 +92,8 @@ SECTIONS
__stop___ex_table = .;
}
BUG_TABLE
RODATA
. = ALIGN(8192);
......@@ -126,7 +134,6 @@ SECTIONS
* thrown away, as cleanup code is never called unless it's a module.
*/
/DISCARD/ : {
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
}
......
if PLATFORM_AT32AP
menu "Atmel AVR32 AP options"
choice
prompt "AT32AP7000 static memory bus width"
depends on CPU_AT32AP7000
default AP7000_16_BIT_SMC
help
Define the width of the AP7000 external static memory interface.
This is used to determine how to mangle the address and/or data
when doing little-endian port access.
The current code can only support a single external memory bus
width for all chip selects, excluding the flash (which is using
raw access and is thus not affected by any of this.)
config AP7000_32_BIT_SMC
bool "32 bit"
config AP7000_16_BIT_SMC
bool "16 bit"
config AP7000_8_BIT_SMC
bool "8 bit"
endchoice
endmenu
endif # PLATFORM_AT32AP
obj-y += at32ap.o clock.o intc.o extint.o pio.o hsmc.o
obj-$(CONFIG_CPU_AT32AP7000) += at32ap7000.o
obj-$(CONFIG_CPU_AT32AP7000) += time-tc.o
......@@ -18,6 +18,7 @@
#include <asm/arch/sm.h>
#include "clock.h"
#include "hmatrix.h"
#include "pio.h"
#include "sm.h"
......@@ -416,7 +417,15 @@ struct platform_device at32_sm_device = {
.resource = sm_resource,
.num_resources = ARRAY_SIZE(sm_resource),
};
DEV_CLK(pclk, at32_sm, pbb, 0);
static struct clk at32_sm_pclk = {
.name = "pclk",
.dev = &at32_sm_device.dev,
.parent = &pbb_clk,
.mode = pbb_clk_mode,
.get_rate = pbb_clk_get_rate,
.users = 1,
.index = 0,
};
static struct resource intc0_resource[] = {
PBMEM(0xfff00400),
......@@ -442,6 +451,7 @@ static struct clk hramc_clk = {
.mode = hsb_clk_mode,
.get_rate = hsb_clk_get_rate,
.users = 1,
.index = 3,
};
static struct resource smc0_resource[] = {
......@@ -466,6 +476,57 @@ static struct clk pico_clk = {
.users = 1,
};
/* --------------------------------------------------------------------
* HMATRIX
* -------------------------------------------------------------------- */
static struct clk hmatrix_clk = {
.name = "hmatrix_clk",
.parent = &pbb_clk,
.mode = pbb_clk_mode,
.get_rate = pbb_clk_get_rate,
.index = 2,
.users = 1,
};
#define HMATRIX_BASE ((void __iomem *)0xfff00800)
#define hmatrix_readl(reg) \
__raw_readl((HMATRIX_BASE) + HMATRIX_##reg)
#define hmatrix_writel(reg,value) \
__raw_writel((value), (HMATRIX_BASE) + HMATRIX_##reg)
/*
* Set bits in the HMATRIX Special Function Register (SFR) used by the
* External Bus Interface (EBI). This can be used to enable special
* features like CompactFlash support, NAND Flash support, etc. on
* certain chipselects.
*/
static inline void set_ebi_sfr_bits(u32 mask)
{
u32 sfr;
clk_enable(&hmatrix_clk);
sfr = hmatrix_readl(SFR4);
sfr |= mask;
hmatrix_writel(SFR4, sfr);
clk_disable(&hmatrix_clk);
}
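/*
 * Usage sketch (illustrative, not part of this patch): a board or
 * chip-select setup path could route chip select 3 to the SMC NAND
 * logic by setting the CS3A bit in SFR4 before registering a NAND
 * device. The helper name below is hypothetical; CS3A comes from the
 * SFR4 bitfields in hmatrix.h.
 */
static inline void at32_enable_nand_on_cs3(void)
{
	set_ebi_sfr_bits(HMATRIX_BIT(CS3A));
}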
/* --------------------------------------------------------------------
* System Timer/Counter (TC)
* -------------------------------------------------------------------- */
static struct resource at32_systc0_resource[] = {
PBMEM(0xfff00c00),
IRQ(22),
};
struct platform_device at32_systc0_device = {
.name = "systc",
.id = 0,
.resource = at32_systc0_resource,
.num_resources = ARRAY_SIZE(at32_systc0_resource),
};
DEV_CLK(pclk, at32_systc0, pbb, 3);
/* --------------------------------------------------------------------
* PIO
* -------------------------------------------------------------------- */
......@@ -514,6 +575,8 @@ void __init at32_add_system_devices(void)
platform_device_register(&smc0_device);
platform_device_register(&pdc_device);
platform_device_register(&at32_systc0_device);
platform_device_register(&pio0_device);
platform_device_register(&pio1_device);
platform_device_register(&pio2_device);
......@@ -950,6 +1013,7 @@ struct clk *at32_clock_list[] = {
&pbb_clk,
&at32_sm_pclk,
&at32_intc0_pclk,
&hmatrix_clk,
&ebi_clk,
&hramc_clk,
&smc0_pclk,
......@@ -962,6 +1026,7 @@ struct clk *at32_clock_list[] = {
&pio2_mck,
&pio3_mck,
&pio4_mck,
&at32_systc0_pclk,
&atmel_usart0_usart,
&atmel_usart1_usart,
&atmel_usart2_usart,
......@@ -1024,6 +1089,9 @@ void __init at32_clock_init(void)
for (i = 0; i < ARRAY_SIZE(at32_clock_list); i++) {
struct clk *clk = at32_clock_list[i];
if (clk->users == 0)
continue;
if (clk->mode == &cpu_clk_mode)
cpu_mask |= 1 << clk->index;
else if (clk->mode == &hsb_clk_mode)
......
/*
* Register definitions for High-Speed Bus Matrix
*/
#ifndef __HMATRIX_H
#define __HMATRIX_H
/* HMATRIX register offsets */
#define HMATRIX_MCFG0 0x0000
#define HMATRIX_MCFG1 0x0004
#define HMATRIX_MCFG2 0x0008
#define HMATRIX_MCFG3 0x000c
#define HMATRIX_MCFG4 0x0010
#define HMATRIX_MCFG5 0x0014
#define HMATRIX_MCFG6 0x0018
#define HMATRIX_MCFG7 0x001c
#define HMATRIX_MCFG8 0x0020
#define HMATRIX_MCFG9 0x0024
#define HMATRIX_MCFG10 0x0028
#define HMATRIX_MCFG11 0x002c
#define HMATRIX_MCFG12 0x0030
#define HMATRIX_MCFG13 0x0034
#define HMATRIX_MCFG14 0x0038
#define HMATRIX_MCFG15 0x003c
#define HMATRIX_SCFG0 0x0040
#define HMATRIX_SCFG1 0x0044
#define HMATRIX_SCFG2 0x0048
#define HMATRIX_SCFG3 0x004c
#define HMATRIX_SCFG4 0x0050
#define HMATRIX_SCFG5 0x0054
#define HMATRIX_SCFG6 0x0058
#define HMATRIX_SCFG7 0x005c
#define HMATRIX_SCFG8 0x0060
#define HMATRIX_SCFG9 0x0064
#define HMATRIX_SCFG10 0x0068
#define HMATRIX_SCFG11 0x006c
#define HMATRIX_SCFG12 0x0070
#define HMATRIX_SCFG13 0x0074
#define HMATRIX_SCFG14 0x0078
#define HMATRIX_SCFG15 0x007c
#define HMATRIX_PRAS0 0x0080
#define HMATRIX_PRBS0 0x0084
#define HMATRIX_PRAS1 0x0088
#define HMATRIX_PRBS1 0x008c
#define HMATRIX_PRAS2 0x0090
#define HMATRIX_PRBS2 0x0094
#define HMATRIX_PRAS3 0x0098
#define HMATRIX_PRBS3 0x009c
#define HMATRIX_PRAS4 0x00a0
#define HMATRIX_PRBS4 0x00a4
#define HMATRIX_PRAS5 0x00a8
#define HMATRIX_PRBS5 0x00ac
#define HMATRIX_PRAS6 0x00b0
#define HMATRIX_PRBS6 0x00b4
#define HMATRIX_PRAS7 0x00b8
#define HMATRIX_PRBS7 0x00bc
#define HMATRIX_PRAS8 0x00c0
#define HMATRIX_PRBS8 0x00c4
#define HMATRIX_PRAS9 0x00c8
#define HMATRIX_PRBS9 0x00cc
#define HMATRIX_PRAS10 0x00d0
#define HMATRIX_PRBS10 0x00d4
#define HMATRIX_PRAS11 0x00d8
#define HMATRIX_PRBS11 0x00dc
#define HMATRIX_PRAS12 0x00e0
#define HMATRIX_PRBS12 0x00e4
#define HMATRIX_PRAS13 0x00e8
#define HMATRIX_PRBS13 0x00ec
#define HMATRIX_PRAS14 0x00f0
#define HMATRIX_PRBS14 0x00f4
#define HMATRIX_PRAS15 0x00f8
#define HMATRIX_PRBS15 0x00fc
#define HMATRIX_MRCR 0x0100
#define HMATRIX_SFR0 0x0110
#define HMATRIX_SFR1 0x0114
#define HMATRIX_SFR2 0x0118
#define HMATRIX_SFR3 0x011c
#define HMATRIX_SFR4 0x0120
#define HMATRIX_SFR5 0x0124
#define HMATRIX_SFR6 0x0128
#define HMATRIX_SFR7 0x012c
#define HMATRIX_SFR8 0x0130
#define HMATRIX_SFR9 0x0134
#define HMATRIX_SFR10 0x0138
#define HMATRIX_SFR11 0x013c
#define HMATRIX_SFR12 0x0140
#define HMATRIX_SFR13 0x0144
#define HMATRIX_SFR14 0x0148
#define HMATRIX_SFR15 0x014c
/* Bitfields in MCFGx */
#define HMATRIX_ULBT_OFFSET 0
#define HMATRIX_ULBT_SIZE 3
/* Bitfields in SCFGx */
#define HMATRIX_SLOT_CYCLE_OFFSET 0
#define HMATRIX_SLOT_CYCLE_SIZE 8
#define HMATRIX_DEFMSTR_TYPE_OFFSET 16
#define HMATRIX_DEFMSTR_TYPE_SIZE 2
#define HMATRIX_FIXED_DEFMSTR_OFFSET 18
#define HMATRIX_FIXED_DEFMSTR_SIZE 4
#define HMATRIX_ARBT_OFFSET 24
#define HMATRIX_ARBT_SIZE 2
/* Bitfields in PRASx */
#define HMATRIX_M0PR_OFFSET 0
#define HMATRIX_M0PR_SIZE 4
#define HMATRIX_M1PR_OFFSET 4
#define HMATRIX_M1PR_SIZE 4
#define HMATRIX_M2PR_OFFSET 8
#define HMATRIX_M2PR_SIZE 4
#define HMATRIX_M3PR_OFFSET 12
#define HMATRIX_M3PR_SIZE 4
#define HMATRIX_M4PR_OFFSET 16
#define HMATRIX_M4PR_SIZE 4
#define HMATRIX_M5PR_OFFSET 20
#define HMATRIX_M5PR_SIZE 4
#define HMATRIX_M6PR_OFFSET 24
#define HMATRIX_M6PR_SIZE 4
#define HMATRIX_M7PR_OFFSET 28
#define HMATRIX_M7PR_SIZE 4
/* Bitfields in PRBSx */
#define HMATRIX_M8PR_OFFSET 0
#define HMATRIX_M8PR_SIZE 4
#define HMATRIX_M9PR_OFFSET 4
#define HMATRIX_M9PR_SIZE 4
#define HMATRIX_M10PR_OFFSET 8
#define HMATRIX_M10PR_SIZE 4
#define HMATRIX_M11PR_OFFSET 12
#define HMATRIX_M11PR_SIZE 4
#define HMATRIX_M12PR_OFFSET 16
#define HMATRIX_M12PR_SIZE 4
#define HMATRIX_M13PR_OFFSET 20
#define HMATRIX_M13PR_SIZE 4
#define HMATRIX_M14PR_OFFSET 24
#define HMATRIX_M14PR_SIZE 4
#define HMATRIX_M15PR_OFFSET 28
#define HMATRIX_M15PR_SIZE 4
/* Bitfields in SFR4 */
#define HMATRIX_CS1A_OFFSET 1
#define HMATRIX_CS1A_SIZE 1
#define HMATRIX_CS3A_OFFSET 3
#define HMATRIX_CS3A_SIZE 1
#define HMATRIX_CS4A_OFFSET 4
#define HMATRIX_CS4A_SIZE 1
#define HMATRIX_CS5A_OFFSET 5
#define HMATRIX_CS5A_SIZE 1
#define HMATRIX_DBPUC_OFFSET 8
#define HMATRIX_DBPUC_SIZE 1
/* Constants for ULBT */
#define HMATRIX_ULBT_INFINITE 0
#define HMATRIX_ULBT_SINGLE 1
#define HMATRIX_ULBT_FOUR_BEAT 2
#define HMATRIX_ULBT_EIGHT_BEAT 3
#define HMATRIX_ULBT_SIXTEEN_BEAT 4
/* Constants for DEFMSTR_TYPE */
#define HMATRIX_DEFMSTR_TYPE_NO_DEFAULT 0
#define HMATRIX_DEFMSTR_TYPE_LAST_DEFAULT 1
#define HMATRIX_DEFMSTR_TYPE_FIXED_DEFAULT 2
/* Constants for ARBT */
#define HMATRIX_ARBT_ROUND_ROBIN 0
#define HMATRIX_ARBT_FIXED_PRIORITY 1
/* Bit manipulation macros */
#define HMATRIX_BIT(name) \
(1 << HMATRIX_##name##_OFFSET)
#define HMATRIX_BF(name,value) \
(((value) & ((1 << HMATRIX_##name##_SIZE) - 1)) \
<< HMATRIX_##name##_OFFSET)
#define HMATRIX_BFEXT(name,value) \
(((value) >> HMATRIX_##name##_OFFSET) \
& ((1 << HMATRIX_##name##_SIZE) - 1))
#define HMATRIX_BFINS(name,value,old) \
(((old) & ~(((1 << HMATRIX_##name##_SIZE) - 1) \
<< HMATRIX_##name##_OFFSET)) \
| HMATRIX_BF(name,value))
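/*
 * Usage sketch (illustrative): composing a slave configuration word
 * with a 16-cycle slot, a fixed default master and round-robin
 * arbitration, then extracting the arbitration type back out:
 *
 *	u32 scfg = HMATRIX_BF(SLOT_CYCLE, 16)
 *		| HMATRIX_BF(DEFMSTR_TYPE, HMATRIX_DEFMSTR_TYPE_FIXED_DEFAULT)
 *		| HMATRIX_BF(ARBT, HMATRIX_ARBT_ROUND_ROBIN);
 *	unsigned int arbt = HMATRIX_BFEXT(ARBT, scfg);
 */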
#endif /* __HMATRIX_H */
......@@ -75,12 +75,35 @@ int smc_set_configuration(int cs, const struct smc_config *config)
return -EINVAL;
}
switch (config->nwait_mode) {
case 0:
mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_DISABLED);
break;
case 1:
mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_RESERVED);
break;
case 2:
mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_FROZEN);
break;
case 3:
mode |= HSMC_BF(EXNW_MODE, HSMC_EXNW_MODE_READY);
break;
default:
return -EINVAL;
}
if (config->tdf_cycles) {
mode |= HSMC_BF(TDF_CYCLES, config->tdf_cycles);
}
if (config->nrd_controlled)
mode |= HSMC_BIT(READ_MODE);
if (config->nwe_controlled)
mode |= HSMC_BIT(WRITE_MODE);
if (config->byte_write)
mode |= HSMC_BIT(BAT);
if (config->tdf_mode)
mode |= HSMC_BIT(TDF_MODE);
pr_debug("smc cs%d: setup/%08x pulse/%08x cycle/%08x mode/%08x\n",
cs, setup, pulse, cycle, mode);
......
/*
* Copyright (C) 2004-2007 Atmel Corporation
*
* Based on MIPS implementation arch/mips/kernel/time.c
* Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/sysdev.h>
#include <linux/err.h>
#include <asm/div64.h>
#include <asm/sysreg.h>
#include <asm/io.h>
#include <asm/sections.h>
#include <asm/arch/time.h>
/* how many counter cycles in a jiffy? */
static u32 cycles_per_jiffy;
/* the count value for the next timer interrupt */
static u32 expirelo;
/* the I/O registers of the TC module */
static void __iomem *ioregs;
cycle_t read_cycle_count(void)
{
return (cycle_t)timer_read(ioregs, 0, CV);
}
struct clocksource clocksource_avr32 = {
.name = "avr32",
.rating = 342,
.read = read_cycle_count,
.mask = CLOCKSOURCE_MASK(16),
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void avr32_timer_ack(void)
{
u16 count = expirelo;
/* Ack this timer interrupt and set the next one; use a u16
* variable so it wraps around correctly */
count += cycles_per_jiffy;
expirelo = count;
timer_write(ioregs, 0, RC, expirelo);
/* Check to see if we have missed any timer interrupts */
count = timer_read(ioregs, 0, CV);
if ((count - expirelo) < 0x7fff) {
expirelo = count + cycles_per_jiffy;
timer_write(ioregs, 0, RC, expirelo);
}
}
u32 avr32_hpt_read(void)
{
return timer_read(ioregs, 0, CV);
}
static int avr32_timer_calc_div_and_set_jiffies(struct clk *pclk)
{
unsigned int cycles_max = (clocksource_avr32.mask + 1) / 2;
unsigned int divs[] = { 4, 8, 16, 32 };
int divs_size = sizeof(divs) / sizeof(*divs);
int i = 0;
unsigned long count_hz;
unsigned long shift;
unsigned long mult;
int clock_div = -1;
u64 tmp;
shift = clocksource_avr32.shift;
do {
count_hz = clk_get_rate(pclk) / divs[i];
mult = clocksource_hz2mult(count_hz, shift);
clocksource_avr32.mult = mult;
tmp = TICK_NSEC;
tmp <<= shift;
tmp += mult / 2;
do_div(tmp, mult);
cycles_per_jiffy = tmp;
} while (cycles_per_jiffy > cycles_max && ++i < divs_size);
clock_div = i + 1;
if (clock_div > divs_size) {
pr_debug("timer: could not calculate clock divider\n");
return -EFAULT;
}
/* Set the clock divider */
timer_write(ioregs, 0, CMR, TIMER_BF(CMR_TCCLKS, clock_div));
return 0;
}
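/*
 * Worked example (illustrative, assuming pclk ~= 60 MHz and HZ = 100):
 * cycles_max is 32768 (half of the 16-bit counter range) and
 * cycles_per_jiffy ~= (pclk / div) / HZ, so the loop tries
 * div 4 -> 150000, div 8 -> 75000, div 16 -> 37500, div 32 -> 18750.
 * Only the divide-by-32 input fits, so clock_div ends up as 4 and that
 * value is written to the TCCLKS field of CMR.
 */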
int avr32_hpt_init(unsigned int count)
{
struct resource *regs;
struct clk *pclk;
int irq = -1;
int ret = 0;
ret = -ENXIO;
irq = platform_get_irq(&at32_systc0_device, 0);
if (irq < 0) {
pr_debug("timer: could not get irq\n");
goto out_error;
}
pclk = clk_get(&at32_systc0_device.dev, "pclk");
if (IS_ERR(pclk)) {
pr_debug("timer: could not get clk: %ld\n", PTR_ERR(pclk));
goto out_error;
}
clk_enable(pclk);
regs = platform_get_resource(&at32_systc0_device, IORESOURCE_MEM, 0);
if (!regs) {
pr_debug("timer: could not get resource\n");
goto out_error_clk;
}
ioregs = ioremap(regs->start, regs->end - regs->start + 1);
if (!ioregs) {
pr_debug("timer: could not get ioregs\n");
goto out_error_clk;
}
ret = avr32_timer_calc_div_and_set_jiffies(pclk);
if (ret)
goto out_error_io;
ret = setup_irq(irq, &timer_irqaction);
if (ret) {
pr_debug("timer: could not request irq %d: %d\n",
irq, ret);
goto out_error_io;
}
expirelo = (timer_read(ioregs, 0, CV) / cycles_per_jiffy + 1)
* cycles_per_jiffy;
/* Enable clock and interrupts on RC compare */
timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_CLKEN));
timer_write(ioregs, 0, IER, TIMER_BIT(IER_CPCS));
/* Set cycles to first interrupt */
timer_write(ioregs, 0, RC, expirelo);
printk(KERN_INFO "timer: AT32AP system timer/counter at 0x%p irq %d\n",
ioregs, irq);
return 0;
out_error_io:
iounmap(ioregs);
out_error_clk:
clk_put(pclk);
out_error:
return ret;
}
int avr32_hpt_start(void)
{
timer_write(ioregs, 0, CCR, TIMER_BIT(CCR_SWTRG));
return 0;
}
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned int sr = timer_read(ioregs, 0, SR);
if (sr & TIMER_BIT(SR_CPCS)) {
/* ack timer interrupt and try to set next interrupt */
avr32_timer_ack();
/*
* Call the generic timer interrupt handler
*/
write_seqlock(&xtime_lock);
do_timer(1);
write_sequnlock(&xtime_lock);
/*
* In UP mode, we call local_timer_interrupt() to do profiling
* and process accounting.
*
* SMP is not supported yet.
*/
local_timer_interrupt(irq, dev_id);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
......@@ -16,26 +16,8 @@
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/sysreg.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#ifdef DEBUG
static void dump_code(unsigned long pc)
{
char *p = (char *)pc;
char val;
int i;
printk(KERN_DEBUG "Code:");
for (i = 0; i < 16; i++) {
if (__get_user(val, p + i))
break;
printk(" %02x", val);
}
printk("\n");
}
#endif
#include <asm/uaccess.h>
#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
......@@ -68,17 +50,19 @@ static inline int notify_page_fault(enum die_val val, struct pt_regs *regs,
}
#endif
int exception_trace = 1;
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate routines.
*
* ecr is the Exception Cause Register. Possible values are:
* 5: Page not found (instruction access)
* 6: Protection fault (instruction access)
* 12: Page not found (read access)
* 13: Page not found (write access)
* 14: Protection fault (read access)
* 15: Protection fault (write access)
* 15: Protection fault (read access)
* 16: Protection fault (write access)
* 20: Page not found (instruction access)
* 24: Page not found (read access)
* 28: Page not found (write access)
*/
asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
{
......@@ -88,7 +72,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
const struct exception_table_entry *fixup;
unsigned long address;
unsigned long page;
int writeaccess = 0;
int writeaccess;
long signr;
int code;
if (notify_page_fault(DIE_PAGE_FAULT, regs,
ecr, SIGSEGV) == NOTIFY_STOP)
......@@ -99,6 +85,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
tsk = current;
mm = tsk->mm;
signr = SIGSEGV;
code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user context, we must
* not take the fault...
......@@ -125,7 +114,9 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
* can handle it...
*/
good_area:
//pr_debug("good area: vm_flags = 0x%lx\n", vma->vm_flags);
code = SEGV_ACCERR;
writeaccess = 0;
switch (ecr) {
case ECR_PROTECTION_X:
case ECR_TLB_MISS_X:
......@@ -176,46 +167,24 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
* map. Fix it, but check if it's kernel or user first...
*/
bad_area:
pr_debug("Bad area [%s:%u]: addr %08lx, ecr %lu\n",
tsk->comm, tsk->pid, address, ecr);
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
/* Hmm...we have to pass address and ecr somehow... */
/* tsk->thread.address = address;
tsk->thread.error_code = ecr; */
#ifdef DEBUG
show_regs(regs);
dump_code(regs->pc);
page = sysreg_read(PTBR);
printk("ptbr = %08lx", page);
if (page) {
page = ((unsigned long *)page)[address >> 22];
printk(" pgd = %08lx", page);
if (page & _PAGE_PRESENT) {
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
printk(" pte = %08lx\n", page);
}
}
#endif
pr_debug("Sending SIGSEGV to PID %d...\n",
tsk->pid);
force_sig(SIGSEGV, tsk);
if (exception_trace)
printk("%s%s[%d]: segfault at %08lx pc %08lx "
"sp %08lx ecr %lu\n",
is_init(tsk) ? KERN_EMERG : KERN_INFO,
tsk->comm, tsk->pid, address, regs->pc,
regs->sp, ecr);
_exception(SIGSEGV, regs, code, address);
return;
}
no_context:
pr_debug("No context\n");
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
pr_debug("Found fixup at %08lx\n", fixup->fixup);
return;
}
......@@ -230,7 +199,6 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
printk(KERN_ALERT
"Unable to handle kernel paging request");
printk(" at virtual address %08lx\n", address);
printk(KERN_ALERT "pc = %08lx\n", regs->pc);
page = sysreg_read(PTBR);
printk(KERN_ALERT "ptbr = %08lx", page);
......@@ -241,20 +209,20 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
page &= PAGE_MASK;
address &= 0x003ff000;
page = ((unsigned long *)__va(page))[address >> PAGE_SHIFT];
printk(" pte = %08lx\n", page);
printk(" pte = %08lx", page);
}
}
die("\nOops", regs, ecr);
do_exit(SIGKILL);
printk("\n");
die("Kernel access of bad area", regs, signr);
return;
/*
* We ran out of memory, or some other thing happened to us
* that made us unable to handle the page fault gracefully.
*/
out_of_memory:
printk("Out of memory\n");
up_read(&mm->mmap_sem);
if (current->pid == 1) {
if (is_init(current)) {
yield();
down_read(&mm->mmap_sem);
goto survive;
......@@ -267,21 +235,20 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
do_sigbus:
up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel or
* user mode.
*/
/* address, error_code, trap_no, ... */
#ifdef DEBUG
show_regs(regs);
dump_code(regs->pc);
#endif
pr_debug("Sending SIGBUS to PID %d...\n", tsk->pid);
force_sig(SIGBUS, tsk);
/* Kernel mode? Handle exceptions or die */
signr = SIGBUS;
code = BUS_ADRERR;
if (!user_mode(regs))
goto no_context;
if (exception_trace)
printk("%s%s[%d]: bus error at %08lx pc %08lx "
"sp %08lx ecr %lu\n",
is_init(tsk) ? KERN_EMERG : KERN_INFO,
tsk->comm, tsk->pid, address, regs->pc,
regs->sp, ecr);
_exception(SIGBUS, regs, BUS_ADRERR, address);
}
asmlinkage void do_bus_error(unsigned long addr, int write_access,
......@@ -292,8 +259,7 @@ asmlinkage void do_bus_error(unsigned long addr, int write_access,
addr, write_access ? "write" : "read");
printk(KERN_INFO "DTLB dump:\n");
dump_dtlb();
die("Bus Error", regs, write_access);
do_exit(SIGKILL);
die("Bus Error", regs, SIGKILL);
}
/*
......
......@@ -10,11 +10,9 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/nodemask.h>
#include <asm/page.h>
......@@ -78,242 +76,6 @@ void show_mem(void)
printk ("%d pages swap cached\n", cached);
}
static void __init print_memory_map(const char *what,
struct tag_mem_range *mem)
{
printk ("%s:\n", what);
for (; mem; mem = mem->next) {
printk (" %08lx - %08lx\n",
(unsigned long)mem->addr,
(unsigned long)(mem->addr + mem->size));
}
}
#define MAX_LOWMEM HIGHMEM_START
#define MAX_LOWMEM_PFN PFN_DOWN(MAX_LOWMEM)
/*
* Sort a list of memory regions in-place by ascending address.
*
* We're using bubble sort because we only have singly linked lists
* with few elements.
*/
static void __init sort_mem_list(struct tag_mem_range **pmem)
{
int done;
struct tag_mem_range **a, **b;
if (!*pmem)
return;
do {
done = 1;
a = pmem, b = &(*pmem)->next;
while (*b) {
if ((*a)->addr > (*b)->addr) {
struct tag_mem_range *tmp;
tmp = (*b)->next;
(*b)->next = *a;
*a = *b;
*b = tmp;
done = 0;
}
a = &(*a)->next;
b = &(*a)->next;
}
} while (!done);
}
/*
* Find a free memory region large enough for storing the
* bootmem bitmap.
*/
static unsigned long __init
find_bootmap_pfn(const struct tag_mem_range *mem)
{
unsigned long bootmap_pages, bootmap_len;
unsigned long node_pages = PFN_UP(mem->size);
unsigned long bootmap_addr = mem->addr;
struct tag_mem_range *reserved = mem_reserved;
struct tag_mem_range *ramdisk = mem_ramdisk;
unsigned long kern_start = virt_to_phys(_stext);
unsigned long kern_end = virt_to_phys(_end);
bootmap_pages = bootmem_bootmap_pages(node_pages);
bootmap_len = bootmap_pages << PAGE_SHIFT;
/*
* Find a large enough region without reserved pages for
* storing the bootmem bitmap. We can take advantage of the
* fact that all lists have been sorted.
*
* We have to check explicitly reserved regions as well as the
* kernel image and any RAMDISK images...
*
* Oh, and we have to make sure we don't overwrite the taglist
* since we're going to use it until the bootmem allocator is
* fully up and running.
*/
while (1) {
if ((bootmap_addr < kern_end) &&
((bootmap_addr + bootmap_len) > kern_start))
bootmap_addr = kern_end;
while (reserved &&
(bootmap_addr >= (reserved->addr + reserved->size)))
reserved = reserved->next;
if (reserved &&
((bootmap_addr + bootmap_len) >= reserved->addr)) {
bootmap_addr = reserved->addr + reserved->size;
continue;
}
while (ramdisk &&
(bootmap_addr >= (ramdisk->addr + ramdisk->size)))
ramdisk = ramdisk->next;
if (!ramdisk ||
((bootmap_addr + bootmap_len) < ramdisk->addr))
break;
bootmap_addr = ramdisk->addr + ramdisk->size;
}
if ((PFN_UP(bootmap_addr) + bootmap_len) >= (mem->addr + mem->size))
return ~0UL;
return PFN_UP(bootmap_addr);
}
void __init setup_bootmem(void)
{
unsigned bootmap_size;
unsigned long first_pfn, bootmap_pfn, pages;
unsigned long max_pfn, max_low_pfn;
unsigned long kern_start = virt_to_phys(_stext);
unsigned long kern_end = virt_to_phys(_end);
unsigned node = 0;
struct tag_mem_range *bank, *res;
sort_mem_list(&mem_phys);
sort_mem_list(&mem_reserved);
print_memory_map("Physical memory", mem_phys);
print_memory_map("Reserved memory", mem_reserved);
nodes_clear(node_online_map);
if (mem_ramdisk) {
#ifdef CONFIG_BLK_DEV_INITRD
initrd_start = (unsigned long)__va(mem_ramdisk->addr);
initrd_end = initrd_start + mem_ramdisk->size;
print_memory_map("RAMDISK images", mem_ramdisk);
if (mem_ramdisk->next)
printk(KERN_WARNING
"Warning: Only the first RAMDISK image "
"will be used\n");
sort_mem_list(&mem_ramdisk);
#else
printk(KERN_WARNING "RAM disk image present, but "
"no initrd support in kernel!\n");
#endif
}
if (mem_phys->next)
printk(KERN_WARNING "Only using first memory bank\n");
for (bank = mem_phys; bank; bank = NULL) {
first_pfn = PFN_UP(bank->addr);
max_low_pfn = max_pfn = PFN_DOWN(bank->addr + bank->size);
bootmap_pfn = find_bootmap_pfn(bank);
if (bootmap_pfn > max_pfn)
panic("No space for bootmem bitmap!\n");
if (max_low_pfn > MAX_LOWMEM_PFN) {
max_low_pfn = MAX_LOWMEM_PFN;
#ifndef CONFIG_HIGHMEM
/*
* Lowmem is memory that can be addressed
* directly through P1/P2
*/
printk(KERN_WARNING
"Node %u: Only %ld MiB of memory will be used.\n",
node, MAX_LOWMEM >> 20);
printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#else
#error HIGHMEM is not supported by AVR32 yet
#endif
}
/* Initialize the boot-time allocator with low memory only. */
bootmap_size = init_bootmem_node(NODE_DATA(node), bootmap_pfn,
first_pfn, max_low_pfn);
printk("Node %u: bdata = %p, bdata->node_bootmem_map = %p\n",
node, NODE_DATA(node)->bdata,
NODE_DATA(node)->bdata->node_bootmem_map);
/*
* Register fully available RAM pages with the bootmem
* allocator.
*/
pages = max_low_pfn - first_pfn;
free_bootmem_node (NODE_DATA(node), PFN_PHYS(first_pfn),
PFN_PHYS(pages));
/*
* Reserve space for the kernel image (if present in
* this node)...
*/
if ((kern_start >= PFN_PHYS(first_pfn)) &&
(kern_start < PFN_PHYS(max_pfn))) {
printk("Node %u: Kernel image %08lx - %08lx\n",
node, kern_start, kern_end);
reserve_bootmem_node(NODE_DATA(node), kern_start,
kern_end - kern_start);
}
/* ...the bootmem bitmap... */
reserve_bootmem_node(NODE_DATA(node),
PFN_PHYS(bootmap_pfn),
bootmap_size);
/* ...any RAMDISK images... */
for (res = mem_ramdisk; res; res = res->next) {
if (res->addr > PFN_PHYS(max_pfn))
break;
if (res->addr >= PFN_PHYS(first_pfn)) {
printk("Node %u: RAMDISK %08lx - %08lx\n",
node,
(unsigned long)res->addr,
(unsigned long)(res->addr + res->size));
reserve_bootmem_node(NODE_DATA(node),
res->addr, res->size);
}
}
/* ...and any other reserved regions. */
for (res = mem_reserved; res; res = res->next) {
if (res->addr > PFN_PHYS(max_pfn))
break;
if (res->addr >= PFN_PHYS(first_pfn)) {
printk("Node %u: Reserved %08lx - %08lx\n",
node,
(unsigned long)res->addr,
(unsigned long)(res->addr + res->size));
reserve_bootmem_node(NODE_DATA(node),
res->addr, res->size);
}
}
node_set_online(node);
}
}
/*
* paging_init() sets up the page tables
*
......
#ifndef __ASM_AVR32_ARCH_AT32AP_IO_H
#define __ASM_AVR32_ARCH_AT32AP_IO_H
/* For "bizarre" halfword swapping */
#include <linux/byteorder/swabb.h>
#if defined(CONFIG_AP7000_32_BIT_SMC)
# define __swizzle_addr_b(addr) (addr ^ 3UL)
# define __swizzle_addr_w(addr) (addr ^ 2UL)
# define __swizzle_addr_l(addr) (addr)
# define ioswabb(a, x) (x)
# define ioswabw(a, x) (x)
# define ioswabl(a, x) (x)
# define __mem_ioswabb(a, x) (x)
# define __mem_ioswabw(a, x) swab16(x)
# define __mem_ioswabl(a, x) swab32(x)
#elif defined(CONFIG_AP7000_16_BIT_SMC)
# define __swizzle_addr_b(addr) (addr ^ 1UL)
# define __swizzle_addr_w(addr) (addr)
# define __swizzle_addr_l(addr) (addr)
# define ioswabb(a, x) (x)
# define ioswabw(a, x) (x)
# define ioswabl(a, x) swahw32(x)
# define __mem_ioswabb(a, x) (x)
# define __mem_ioswabw(a, x) swab16(x)
# define __mem_ioswabl(a, x) swahb32(x)
#else
# define __swizzle_addr_b(addr) (addr)
# define __swizzle_addr_w(addr) (addr)
# define __swizzle_addr_l(addr) (addr)
# define ioswabb(a, x) (x)
# define ioswabw(a, x) swab16(x)
# define ioswabl(a, x) swab32(x)
# define __mem_ioswabb(a, x) (x)
# define __mem_ioswabw(a, x) (x)
# define __mem_ioswabl(a, x) (x)
#endif
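/*
 * Worked example (illustrative): with CONFIG_AP7000_16_BIT_SMC, a byte
 * access to external address 0x04000000 is swizzled to 0x04000001
 * (__swizzle_addr_b XORs bit 0) so the byte lands on the expected lane
 * of the 16-bit bus, while 32-bit port accesses are halfword swapped
 * by ioswabl()/swahw32().
 */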
#endif /* __ASM_AVR32_ARCH_AT32AP_IO_H */
......@@ -47,11 +47,33 @@ struct smc_config {
*/
unsigned int nwe_controlled:1;
/*
* 0: NWAIT is disabled
* 1: Reserved
* 2: NWAIT in frozen mode
* 3: NWAIT in ready mode
*/
unsigned int nwait_mode:2;
/*
* 0: Byte select access type
* 1: Byte write access type
*/
unsigned int byte_write:1;
/*
* Number of clock cycles before data is released after
* the rising edge of the read controlling signal
*
* The total number of cycles inserted by the SMC is tdf_cycles + 1
*/
unsigned int tdf_cycles:4;
/*
* 0: TDF optimization disabled
* 1: TDF optimization enabled
*/
unsigned int tdf_mode:1;
};
extern int smc_set_configuration(int cs, const struct smc_config *config);
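/*
 * Usage sketch (illustrative, all values made up): a board driving a
 * slow external peripheral on chip select 2 could combine the new
 * NWAIT and data-float fields roughly like this:
 *
 *	static struct smc_config periph_config __initdata = {
 *		.bus_width	= 2,
 *		.nwait_mode	= 3,	(NWAIT in ready mode)
 *		.tdf_cycles	= 2,	(3 cycles of data float time)
 *		.tdf_mode	= 1,	(TDF optimization enabled)
 *		(plus the usual setup/pulse/cycle timings)
 *	};
 *	smc_set_configuration(2, &periph_config);
 */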
......
/*
* Copyright (C) 2007 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASM_AVR32_ARCH_AT32AP_TIME_H
#define _ASM_AVR32_ARCH_AT32AP_TIME_H
#include <linux/platform_device.h>
extern struct irqaction timer_irqaction;
extern struct platform_device at32_systc0_device;
extern void local_timer_interrupt(int irq, void *dev_id);
#define TIMER_BCR 0x000000c0
#define TIMER_BCR_SYNC 0
#define TIMER_BMR 0x000000c4
#define TIMER_BMR_TC0XC0S 0
#define TIMER_BMR_TC1XC1S 2
#define TIMER_BMR_TC2XC2S 4
#define TIMER_CCR 0x00000000
#define TIMER_CCR_CLKDIS 1
#define TIMER_CCR_CLKEN 0
#define TIMER_CCR_SWTRG 2
#define TIMER_CMR 0x00000004
#define TIMER_CMR_ABETRG 10
#define TIMER_CMR_ACPA 16
#define TIMER_CMR_ACPC 18
#define TIMER_CMR_AEEVT 20
#define TIMER_CMR_ASWTRG 22
#define TIMER_CMR_BCPB 24
#define TIMER_CMR_BCPC 26
#define TIMER_CMR_BEEVT 28
#define TIMER_CMR_BSWTRG 30
#define TIMER_CMR_BURST 4
#define TIMER_CMR_CLKI 3
#define TIMER_CMR_CPCDIS 7
#define TIMER_CMR_CPCSTOP 6
#define TIMER_CMR_CPCTRG 14
#define TIMER_CMR_EEVT 10
#define TIMER_CMR_EEVTEDG 8
#define TIMER_CMR_ENETRG 12
#define TIMER_CMR_ETRGEDG 8
#define TIMER_CMR_LDBDIS 7
#define TIMER_CMR_LDBSTOP 6
#define TIMER_CMR_LDRA 16
#define TIMER_CMR_LDRB 18
#define TIMER_CMR_TCCLKS 0
#define TIMER_CMR_WAVE 15
#define TIMER_CMR_WAVSEL 13
#define TIMER_CV 0x00000010
#define TIMER_CV_CV 0
#define TIMER_IDR 0x00000028
#define TIMER_IDR_COVFS 0
#define TIMER_IDR_CPAS 2
#define TIMER_IDR_CPBS 3
#define TIMER_IDR_CPCS 4
#define TIMER_IDR_ETRGS 7
#define TIMER_IDR_LDRAS 5
#define TIMER_IDR_LDRBS 6
#define TIMER_IDR_LOVRS 1
#define TIMER_IER 0x00000024
#define TIMER_IER_COVFS 0
#define TIMER_IER_CPAS 2
#define TIMER_IER_CPBS 3
#define TIMER_IER_CPCS 4
#define TIMER_IER_ETRGS 7
#define TIMER_IER_LDRAS 5
#define TIMER_IER_LDRBS 6
#define TIMER_IER_LOVRS 1
#define TIMER_IMR 0x0000002c
#define TIMER_IMR_COVFS 0
#define TIMER_IMR_CPAS 2
#define TIMER_IMR_CPBS 3
#define TIMER_IMR_CPCS 4
#define TIMER_IMR_ETRGS 7
#define TIMER_IMR_LDRAS 5
#define TIMER_IMR_LDRBS 6
#define TIMER_IMR_LOVRS 1
#define TIMER_RA 0x00000014
#define TIMER_RA_RA 0
#define TIMER_RB 0x00000018
#define TIMER_RB_RB 0
#define TIMER_RC 0x0000001c
#define TIMER_RC_RC 0
#define TIMER_SR 0x00000020
#define TIMER_SR_CLKSTA 16
#define TIMER_SR_COVFS 0
#define TIMER_SR_CPAS 2
#define TIMER_SR_CPBS 3
#define TIMER_SR_CPCS 4
#define TIMER_SR_ETRGS 7
#define TIMER_SR_LDRAS 5
#define TIMER_SR_LDRBS 6
#define TIMER_SR_LOVRS 1
#define TIMER_SR_MTIOA 17
#define TIMER_SR_MTIOB 18
/* Bit manipulation macros */
#define TIMER_BIT(name) (1 << TIMER_##name)
#define TIMER_BF(name,value) ((value) << TIMER_##name)
/* Register access macros */
#define timer_read(port,instance,reg) \
__raw_readl(port + (0x40 * instance) + TIMER_##reg)
#define timer_write(port,instance,reg,value) \
__raw_writel((value), port + (0x40 * instance) + TIMER_##reg)
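/*
 * Expansion sketch (illustrative): timer_write(ioregs, 0, RC, expirelo)
 * becomes __raw_writel(expirelo, ioregs + (0x40 * 0) + 0x1c), i.e. a
 * write to channel 0's RC register; channel 1's registers live another
 * 0x40 bytes up.
 */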
#endif /* _ASM_AVR32_ARCH_AT32AP_TIME_H */
......@@ -173,7 +173,7 @@ static inline int atomic_sub_if_positive(int i, atomic_t *v)
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_sub(i, v) (void)atomic_sub_return(i, v)
#define atomic_add(i, v) (void)atomic_add_return(i, v)
......
......@@ -18,27 +18,53 @@
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define BUG() \
do { \
asm volatile(".hword %0\n\t" \
".hword %1\n\t" \
".long %2" \
: \
: "n"(AVR32_BUG_OPCODE), \
"i"(__LINE__), "X"(__FILE__)); \
} while (0)
#define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
" .section __bug_table,\"a\",@progbits\n" \
"2: .long 1b\n" \
" .long %1\n" \
" .short %2\n" \
" .short %3\n" \
" .org 2b + %4\n" \
" .previous" \
: \
: "i"(AVR32_BUG_OPCODE), "i"(__FILE__), \
"i"(__LINE__), "i"(flags), \
"i"(sizeof(struct bug_entry)))
#else
#define _BUG_OR_WARN(flags) \
asm volatile( \
"1: .hword %0\n" \
" .section __bug_table,\"a\",@progbits\n" \
"2: .long 1b\n" \
" .short %1\n" \
" .org 2b + %2\n" \
" .previous" \
: \
: "i"(AVR32_BUG_OPCODE), "i"(flags), \
"i"(sizeof(struct bug_entry)))
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define BUG() \
do { \
asm volatile(".hword %0\n\t" \
: : "n"(AVR32_BUG_OPCODE)); \
_BUG_OR_WARN(0); \
for (;;); \
} while (0)
#endif /* CONFIG_DEBUG_BUGVERBOSE */
#define WARN_ON(condition) \
({ \
typeof(condition) __ret_warn_on = (condition); \
if (unlikely(__ret_warn_on)) \
_BUG_OR_WARN(BUGFLAG_WARNING); \
unlikely(__ret_warn_on); \
})
#define HAVE_ARCH_BUG
#define HAVE_ARCH_WARN_ON
#endif /* CONFIG_BUG */
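/*
 * Usage sketch (illustrative): WARN_ON() evaluates its condition once
 * and hands the result back, so callers can warn and bail out in one
 * step:
 *
 *	if (WARN_ON(!ioregs))
 *		return -ENODEV;
 *
 * Both BUG() and WARN_ON() emit the same trapping opcode; the
 * BUGFLAG_WARNING flag in the __bug_table entry tells the trap handler
 * to print a warning and continue instead of killing the kernel.
 */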
......
......@@ -40,6 +40,14 @@ enum tlb_config {
TLB_INVALID
};
#define AVR32_FEATURE_RMW (1 << 0)
#define AVR32_FEATURE_DSP (1 << 1)
#define AVR32_FEATURE_SIMD (1 << 2)
#define AVR32_FEATURE_OCD (1 << 3)
#define AVR32_FEATURE_PCTR (1 << 4)
#define AVR32_FEATURE_JAVA (1 << 5)
#define AVR32_FEATURE_FPU (1 << 6)
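/*
 * Usage sketch (assumption: the boot_cpu_data instance of
 * struct avr32_cpuinfo is where these bits are collected):
 *
 *	if (boot_cpu_data.features & AVR32_FEATURE_DSP)
 *		... enable DSP-specific code paths ...
 */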
struct avr32_cpuinfo {
struct clk *clk;
unsigned long loops_per_jiffy;
......@@ -48,6 +56,7 @@ struct avr32_cpuinfo {
unsigned short arch_revision;
unsigned short cpu_revision;
enum tlb_config tlb_config;
unsigned long features;
struct cache_info icache;
struct cache_info dcache;
......@@ -125,10 +134,10 @@ extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
#define thread_saved_pc(tsk) ((tsk)->thread.cpu_context.pc)
struct pt_regs;
void show_trace(struct task_struct *task, unsigned long *stack,
struct pt_regs *regs);
extern unsigned long get_wchan(struct task_struct *p);
extern void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl);
extern void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
struct pt_regs *regs, const char *log_lvl);
#define KSTK_EIP(tsk) ((tsk)->thread.cpu_context.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.cpu_context.ksp)
......
......@@ -124,19 +124,12 @@ struct tagtable {
#define for_each_tag(t,base) \
for (t = base; t->hdr.size; t = tag_next(t))
extern struct tag_mem_range *mem_phys;
extern struct tag_mem_range *mem_reserved;
extern struct tag_mem_range *mem_ramdisk;
extern struct tag *bootloader_tags;
extern void setup_bootmem(void);
extern void setup_processor(void);
extern void board_setup_fbmem(unsigned long fbmem_start,
unsigned long fbmem_size);
extern resource_size_t fbmem_start;
extern resource_size_t fbmem_size;
/* Chip-specific hook to enable the use of SDRAM */
void chip_enable_sdram(void);
void setup_processor(void);
#endif /* !__ASSEMBLY__ */
......
......@@ -9,6 +9,7 @@
#define __ASM_AVR32_SYSTEM_H
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <asm/ptrace.h>
......@@ -140,15 +141,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
sizeof(*(ptr))))
struct pt_regs;
extern void __die(const char *, struct pt_regs *, unsigned long,
const char *, const char *, unsigned long);
extern void __die_if_kernel(const char *, struct pt_regs *, unsigned long,
const char *, const char *, unsigned long);
#define die(msg, regs, err) \
__die(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs, err) \
__die_if_kernel(msg, regs, err, __FILE__ ":", __FUNCTION__, __LINE__)
void NORET_TYPE die(const char *str, struct pt_regs *regs, long err);
void _exception(long signr, struct pt_regs *regs, int code,
unsigned long addr);
#define arch_align_stack(x) (x)
......
......@@ -83,6 +83,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SINGLE_STEP 6 /* single step after next break */
#define TIF_MEMDIE 7
#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal */
#define TIF_CPU_GOING_TO_SLEEP 9 /* CPU is entering sleep 0 mode */
#define TIF_USERSPACE 31 /* true if FS sets userspace */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
......@@ -94,6 +95,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SINGLE_STEP (1 << TIF_SINGLE_STEP)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
/* XXX: These two masks must never span more than 16 bits! */
/* work to do on interrupt/exception return */
......
......@@ -181,24 +181,23 @@ extern int __put_user_bad(void);
#define __get_user_nocheck(x, ptr, size) \
({ \
typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \
unsigned long __gu_val = 0; \
int __gu_err = 0; \
\
switch (size) { \
case 1: __get_user_asm("ub", __gu_val, ptr, __gu_err); break; \
case 2: __get_user_asm("uh", __gu_val, ptr, __gu_err); break; \
case 4: __get_user_asm("w", __gu_val, ptr, __gu_err); break; \
case 8: __get_user_asm("d", __gu_val, ptr, __gu_err); break; \
default: __gu_err = __get_user_bad(); break; \
} \
\
x = __gu_val; \
x = (typeof(*(ptr)))__gu_val; \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
typeof(*(ptr)) __gu_val = (typeof(*(ptr)) __force)0; \
unsigned long __gu_val = 0; \
const typeof(*(ptr)) __user * __gu_addr = (ptr); \
int __gu_err = 0; \
\
......@@ -216,10 +215,6 @@ extern int __put_user_bad(void);
__get_user_asm("w", __gu_val, __gu_addr, \
__gu_err); \
break; \
case 8: \
__get_user_asm("d", __gu_val, __gu_addr, \
__gu_err); \
break; \
default: \
__gu_err = __get_user_bad(); \
break; \
......@@ -227,7 +222,7 @@ extern int __put_user_bad(void);
} else { \
__gu_err = -EFAULT; \
} \
x = __gu_val; \
x = (typeof(*(ptr)))__gu_val; \
__gu_err; \
})
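/*
 * Usage sketch (illustrative): reading a 32-bit word from user space
 * with the checked variant:
 *
 *	u32 word;
 *	if (get_user(word, (u32 __user *)usp))
 *		return -EFAULT;
 *
 * The intermediate __gu_val is an unsigned long and is cast back to
 * typeof(*(ptr)) only on the final assignment to x.
 */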
......