Commit 173d6681 authored by Chris Zankel, committed by Linus Torvalds

[PATCH] xtensa: remove extra header files

The Xtensa port contained many header files that were never needed.  This
rather lengthy patch removes all those files.  Unfortunately, there were
many dependencies that needed to be updated, so this patch touches quite a
few source files.
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent fd43fe19
......@@ -48,25 +48,10 @@ menu "Processor type and features"
choice
prompt "Xtensa Processor Configuration"
default XTENSA_CPU_LINUX_BE
default XTENSA_VARIANT_FSF
config XTENSA_CPU_LINUX_BE
bool "linux_be"
---help---
The linux_be processor configuration is the baseline Xtensa
configurations included in this kernel and also used by
binutils, gcc, and gdb. It contains no TIE, no coprocessors,
and the following configuration options:
Code Density Option                2 Misc Special Registers
NSA/NSAU Instructions              128-bit Data Bus Width
Processor ID                       8K, 2-way I and D Caches
Zero-Overhead Loops                2 Inst Address Break Registers
Big Endian                         2 Data Address Break Registers
64 General-Purpose Registers       JTAG Interface and Trace Port
17 Interrupts                      MMU w/ TLBs and Autorefill
3 Interrupt Levels                 8 Autorefill Ways (I/D TLBs)
3 Timers                           Unaligned Exceptions
config XTENSA_VARIANT_FSF
bool "fsf"
endchoice
config MMU
......
......@@ -11,13 +11,13 @@
# this architecture
# Core configuration.
# (Use CPU=<xtensa_config> to use another default compiler.)
# (Use VAR=<xtensa_config> to use another default compiler.)
cpu-$(CONFIG_XTENSA_CPU_LINUX_BE) := linux_be
cpu-$(CONFIG_XTENSA_CPU_LINUX_CUSTOM) := linux_custom
variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf
variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom
CPU = $(cpu-y)
export CPU
VARIANT = $(variant-y)
export VARIANT
# Platform configuration
......@@ -27,8 +27,6 @@ platform-$(CONFIG_XTENSA_PLATFORM_ISS) := iss
PLATFORM = $(platform-y)
export PLATFORM
CPPFLAGS += $(if $(KBUILD_SRC),-I$(srctree)/include/asm-xtensa/)
CPPFLAGS += -Iinclude/asm
CFLAGS += -pipe -mlongcalls
KBUILD_DEFCONFIG := iss_defconfig
......@@ -41,12 +39,12 @@ core-$(CONFIG_EMBEDDED_RAMDISK) += arch/xtensa/boot/ramdisk/
# Test for cross compiling
ifneq ($(CPU),)
ifneq ($(VARIANT),)
COMPILE_ARCH = $(shell uname -m)
ifneq ($(COMPILE_ARCH), xtensa)
ifndef CROSS_COMPILE
CROSS_COMPILE = xtensa_$(CPU)-
CROSS_COMPILE = xtensa_$(VARIANT)-
endif
endif
endif
......@@ -68,14 +66,13 @@ archinc := include/asm-xtensa
archprepare: $(archinc)/.platform
# Update machine cpu and platform symlinks if something which affects
# Update processor variant and platform symlinks if something which affects
# them changed.
$(archinc)/.platform: $(wildcard include/config/arch/*.h) include/config/auto.conf
@echo ' SYMLINK $(archinc)/xtensa/config -> $(archinc)/xtensa/config-$(CPU)'
@echo ' SYMLINK $(archinc)/variant -> $(archinc)/variant-$(VARIANT)'
$(Q)mkdir -p $(archinc)
$(Q)mkdir -p $(archinc)/xtensa
$(Q)ln -fsn $(srctree)/$(archinc)/xtensa/config-$(CPU) $(archinc)/xtensa/config
$(Q)ln -fsn $(srctree)/$(archinc)/variant-$(VARIANT) $(archinc)/variant
@echo ' SYMLINK $(archinc)/platform -> $(archinc)/platform-$(PLATFORM)'
$(Q)ln -fsn $(srctree)/$(archinc)/platform-$(PLATFORM) $(archinc)/platform
@touch $@
......@@ -89,7 +86,7 @@ zImage zImage.initrd: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
CLEAN_FILES += arch/xtensa/vmlinux.lds \
$(archinc)/platform $(archinc)/xtensa/config \
$(archinc)/platform $(archinc)/variant \
$(archinc)/.platform
define archhelp
......
#include <xtensa/config/specreg.h>
#include <xtensa/config/core.h>
#include <asm/bootparam.h>
......
#define _ASMLANGUAGE
#include <xtensa/config/specreg.h>
#include <xtensa/config/core.h>
#include <xtensa/cacheasm.h>
#include <asm/variant/core.h>
#include <asm/regs.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
/*
* RB-Data: RedBoot data/bss
* P: Boot-Parameters
......@@ -77,8 +75,14 @@ _start:
/* Note: The assembler cannot relax "addi a0, a0, ..." to an
l32r, so we load to a4 first. */
addi a4, a0, __start - __start_a0
mov a0, a4
# addi a4, a0, __start - __start_a0
# mov a0, a4
movi a4, __start
movi a5, __start_a0
add a4, a0, a4
sub a0, a4, a5
movi a4, __start
movi a5, __reloc_end
......@@ -106,9 +110,13 @@ _start:
/* We have to flush and invalidate the caches here before we jump. */
#if XCHAL_DCACHE_IS_WRITEBACK
dcache_writeback_all a5, a6
___flush_dcache_all a5 a6
#endif
icache_invalidate_all a5, a6
___invalidate_icache_all a5 a6
isync
movi a11, _reloc
jx a11
......@@ -209,9 +217,14 @@ _reloc:
/* jump to the kernel */
2:
#if XCHAL_DCACHE_IS_WRITEBACK
dcache_writeback_all a5, a6
___flush_dcache_all a5 a6
#endif
icache_invalidate_all a5, a6
___invalidate_icache_all a5 a6
isync
movi a5, __start
movi a3, boot_initrd_start
......
......@@ -53,11 +53,7 @@ CONFIG_CC_ALIGN_JUMPS=0
#
# Processor type and features
#
CONFIG_XTENSA_ARCH_LINUX_BE=y
# CONFIG_XTENSA_ARCH_LINUX_LE is not set
# CONFIG_XTENSA_ARCH_LINUX_TEST is not set
# CONFIG_XTENSA_ARCH_S5 is not set
# CONFIG_XTENSA_CUSTOM is not set
CONFIG_XTENSA_VARIANT_FSF=y
CONFIG_MMU=y
# CONFIG_XTENSA_UNALIGNED_USER is not set
# CONFIG_PREEMPT is not set
......
......@@ -16,14 +16,9 @@
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
......@@ -216,7 +211,7 @@ ENTRY(fast_unaligned)
extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
#if XCHAL_HAVE_NARROW
#if XCHAL_HAVE_DENSITY
_beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
addi a6, a5, -OP0_S32I_N
_beqz a6, .Lstore # S32I.N, do a store
......@@ -251,7 +246,7 @@ ENTRY(fast_unaligned)
#endif
__src_b a3, a5, a6 # a3 has the data word
#if XCHAL_HAVE_NARROW
#if XCHAL_HAVE_DENSITY
addi a7, a7, 2 # increment PC (assume 16-bit insn)
extui a5, a4, INSN_OP0, 4
......@@ -279,14 +274,14 @@ ENTRY(fast_unaligned)
1:
#if XCHAL_HAVE_LOOP
rsr a3, LEND # check if we reached LEND
bne a7, a3, 1f
rsr a3, LCOUNT # and LCOUNT != 0
beqz a3, 1f
addi a3, a3, -1 # decrement LCOUNT and set
#if XCHAL_HAVE_LOOPS
rsr a5, LEND # check if we reached LEND
bne a7, a5, 1f
rsr a5, LCOUNT # and LCOUNT != 0
beqz a5, 1f
addi a5, a5, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a3, LCOUNT
wsr a5, LCOUNT
#endif
1: wsr a7, EPC_1 # skip load instruction
......@@ -336,7 +331,7 @@ ENTRY(fast_unaligned)
movi a6, 0 # mask: ffffffff:00000000
#if XCHAL_HAVE_NARROW
#if XCHAL_HAVE_DENSITY
addi a7, a7, 2 # incr. PC,assume 16-bit instruction
extui a5, a4, INSN_OP0, 4 # extract OP0
......@@ -359,14 +354,14 @@ ENTRY(fast_unaligned)
/* Get memory address */
1:
#if XCHAL_HAVE_LOOP
rsr a3, LEND # check if we reached LEND
bne a7, a3, 1f
rsr a3, LCOUNT # and LCOUNT != 0
beqz a3, 1f
addi a3, a3, -1 # decrement LCOUNT and set
#if XCHAL_HAVE_LOOPS
rsr a4, LEND # check if we reached LEND
bne a7, a4, 1f
rsr a4, LCOUNT # and LCOUNT != 0
beqz a4, 1f
addi a4, a4, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a3, LCOUNT
wsr a4, LCOUNT
#endif
1: wsr a7, EPC_1 # skip store instruction
......@@ -416,6 +411,7 @@ ENTRY(fast_unaligned)
/* Restore working register */
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
......@@ -446,7 +442,7 @@ ENTRY(fast_unaligned)
mov a1, a2
rsr a0, PS
bbsi.l a2, PS_UM_SHIFT, 1f # jump if user mode
bbsi.l a2, PS_UM_BIT, 1f # jump if user mode
movi a0, _kernel_exception
jx a0
......
......@@ -90,7 +90,6 @@ ENTRY(enable_coprocessor)
rsync
retw
#endif
ENTRY(save_coprocessor_extra)
entry sp, 16
......@@ -197,4 +196,5 @@ _xtensa_reginfo_tables:
XCHAL_CP7_SA_CONTENTS_LIBDB
.word 0xFC000000 /* invalid register number,marks end of table*/
_xtensa_reginfo_table_end:
#endif
......@@ -24,7 +24,7 @@
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <xtensa/coreasm.h>
#include <asm/tlbflush.h>
/* Unimplemented features. */
......@@ -364,7 +364,7 @@ common_exception:
movi a2, 1
extui a3, a3, 0, 1 # a3 = PS.INTLEVEL[0]
moveqz a3, a2, a0 # a3 = 1 iff interrupt exception
movi a2, PS_WOE_MASK
movi a2, 1 << PS_WOE_BIT
or a3, a3, a2
rsr a0, EXCCAUSE
xsr a3, PS
......@@ -399,7 +399,7 @@ common_exception_return:
/* Jump if we are returning from kernel exceptions. */
1: l32i a3, a1, PT_PS
_bbsi.l a3, PS_UM_SHIFT, 2f
_bbsi.l a3, PS_UM_BIT, 2f
j kernel_exception_exit
/* Specific to a user exception exit:
......@@ -422,7 +422,7 @@ common_exception_return:
* (Hint: There is only one user exception frame on stack)
*/
movi a3, PS_WOE_MASK
movi a3, 1 << PS_WOE_BIT
_bbsi.l a4, TIF_NEED_RESCHED, 3f
_bbci.l a4, TIF_SIGPENDING, 4f
......@@ -694,7 +694,7 @@ common_exception_exit:
ENTRY(debug_exception)
rsr a0, EPS + XCHAL_DEBUGLEVEL
bbsi.l a0, PS_EXCM_SHIFT, 1f # exception mode
bbsi.l a0, PS_EXCM_BIT, 1f # exception mode
/* Set EPC_1 and EXCCAUSE */
......@@ -707,7 +707,7 @@ ENTRY(debug_exception)
/* Restore PS to the value before the debug exc but with PS.EXCM set.*/
movi a2, 1 << PS_EXCM_SHIFT
movi a2, 1 << PS_EXCM_BIT
or a2, a0, a2
movi a0, debug_exception # restore a3, debug jump vector
wsr a2, PS
......@@ -715,7 +715,7 @@ ENTRY(debug_exception)
/* Switch to kernel/user stack, restore jump vector, and save a0 */
bbsi.l a2, PS_UM_SHIFT, 2f # jump if user mode
bbsi.l a2, PS_UM_BIT, 2f # jump if user mode
addi a2, a1, -16-PT_SIZE # assume kernel stack
s32i a0, a2, PT_AREG0
......@@ -778,7 +778,7 @@ ENTRY(unrecoverable_exception)
wsr a1, WINDOWBASE
rsync
movi a1, PS_WOE_MASK | 1
movi a1, (1 << PS_WOE_BIT) | 1
wsr a1, PS
rsync
......@@ -1491,7 +1491,7 @@ ENTRY(_spill_registers)
*/
rsr a0, PS
_bbci.l a0, PS_UM_SHIFT, 1f
_bbci.l a0, PS_UM_BIT, 1f
/* User space: Setup a dummy frame and kill application.
* Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
......@@ -1510,7 +1510,7 @@ ENTRY(_spill_registers)
l32i a1, a3, EXC_TABLE_KSTK
wsr a3, EXCSAVE_1
movi a4, PS_WOE_MASK | 1
movi a4, (1 << PS_WOE_BIT) | 1
wsr a4, PS
rsync
......@@ -1612,7 +1612,7 @@ ENTRY(fast_second_level_miss)
rsr a1, PTEVADDR
srli a1, a1, PAGE_SHIFT
slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK
addi a1, a1, DTLB_WAY_PGTABLE # ... + way_number
addi a1, a1, DTLB_WAY_PGD # ... + way_number
wdtlb a0, a1
dsync
......@@ -1654,7 +1654,7 @@ ENTRY(fast_second_level_miss)
mov a1, a2
rsr a2, PS
bbsi.l a2, PS_UM_SHIFT, 1f
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
......@@ -1753,7 +1753,7 @@ ENTRY(fast_store_prohibited)
mov a1, a2
rsr a2, PS
bbsi.l a2, PS_UM_SHIFT, 1f
bbsi.l a2, PS_UM_BIT, 1f
j _kernel_exception
1: j _user_exception
......@@ -1924,7 +1924,7 @@ ENTRY(_switch_to)
/* Disable ints while we manipulate the stack pointer; spill regs. */
movi a5, PS_EXCM_MASK | LOCKLEVEL
movi a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
xsr a5, PS
rsr a3, EXCSAVE_1
rsync
......
......@@ -15,9 +15,9 @@
* Kevin Chea
*/
#include <xtensa/cacheasm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cacheasm.h>
/*
* This module contains the entry code for kernel images. It performs the
......@@ -32,13 +32,6 @@
*
*/
.macro iterate from, to , cmd
.ifeq ((\to - \from) & ~0xfff)
\cmd \from
iterate "(\from+1)", \to, \cmd
.endif
.endm
/*
* _start
*
......@@ -64,7 +57,7 @@ _startup:
/* Disable interrupts and exceptions. */
movi a0, XCHAL_PS_EXCM_MASK
movi a0, LOCKLEVEL
wsr a0, PS
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
......@@ -91,11 +84,11 @@ _startup:
movi a1, 15
wsr a0, ICOUNTLEVEL
.macro reset_dbreak num
wsr a0, DBREAKC + \num
.endm
iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
.set _index, 0
.rept XCHAL_NUM_DBREAK - 1
wsr a0, DBREAKC + _index
.set _index, _index + 1
.endr
#endif
/* Clear CCOUNT (not really necessary, but nice) */
......@@ -110,10 +103,11 @@ _startup:
/* Disable all timers. */
.macro reset_timer num
wsr a0, CCOMPARE_0 + \num
.endm
iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
.set _index, 0
.rept XCHAL_NUM_TIMERS - 1
wsr a0, CCOMPARE + _index
.set _index, _index + 1
.endr
/* Interrupt initialization. */
......@@ -139,12 +133,21 @@ _startup:
rsync
/* Initialize the caches.
* Does not include flushing writeback d-cache.
* a6, a7 are just working registers (clobbered).
* a2, a3 are just working registers (clobbered).
*/
icache_reset a2, a3
dcache_reset a2, a3
#if XCHAL_DCACHE_LINE_LOCKABLE
___unlock_dcache_all a2 a3
#endif
#if XCHAL_ICACHE_LINE_LOCKABLE
___unlock_icache_all a2 a3
#endif
___invalidate_dcache_all a2 a3
___invalidate_icache_all a2 a3
isync
/* Unpack data sections
*
......@@ -181,9 +184,9 @@ _startup:
movi a2, _bss_start # start of BSS
movi a3, _bss_end # end of BSS
1: addi a2, a2, 4
__loopt a2, a3, a4, 2
s32i a0, a2, 0
blt a2, a3, 1b
__endla a2, a4, 4
#if XCHAL_DCACHE_IS_WRITEBACK
......@@ -191,7 +194,7 @@ _startup:
* instructions/data are available.
*/
dcache_writeback_all a2, a3
___flush_dcache_all a2 a3
#endif
/* Setup stack and enable window exceptions (keep irqs disabled) */
......
/*
* arch/xtensa/kernel/pci-dma.c
* arch/xtensa/pci-dma.c
*
* DMA coherent memory allocation.
*
......@@ -29,28 +29,48 @@
*/
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
{
void *ret;
unsigned long ret;
unsigned long uncached = 0;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (ret != NULL) {
memset(ret, 0, size);
*handle = virt_to_bus(ret);
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
flag |= GFP_DMA;
ret = (unsigned long)__get_free_pages(flag, get_order(size));
if (ret == 0)
return NULL;
/* We currently don't support coherent memory outside KSEG */
if (ret < XCHAL_KSEG_CACHED_VADDR
|| ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE)
BUG();
if (ret != 0) {
memset((void*) ret, 0, size);
uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
*handle = virt_to_bus((void*)ret);
__flush_invalidate_dcache_range(ret, size);
}
return (void*) BYPASS_ADDR((unsigned long)ret);
return (void*)uncached;
}
void dma_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR;
if (addr < 0 || addr >= XCHAL_KSEG_SIZE)
BUG();
free_pages(addr, get_order(size));
}
......
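For orientation, a minimal driver-side usage sketch of the API reworked above (hypothetical function and variable names; the calls match the signatures in the diff). The rewritten allocator returns an uncached KSEG-bypass alias of the pages, so CPU accesses bypass the data cache instead of requiring an explicit flush on every transfer:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* Hypothetical example: one page of coherent DMA memory.
 * 'buf' is the uncached CPU alias; 'buf_bus' is what the device sees. */
static void *buf;
static dma_addr_t buf_bus;

static int example_dma_setup(struct device *dev)
{
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &buf_bus, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... program the device with buf_bus, exchange data through buf ... */
	return 0;
}

static void example_dma_teardown(struct device *dev)
{
	dma_free_coherent(dev, PAGE_SIZE, buf, buf_bus);
}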
// TODO verify coprocessor handling
/*
* arch/xtensa/kernel/process.c
*
......@@ -43,7 +42,7 @@
#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/coprocessor.h>
#include <asm/regs.h>
extern void ret_from_fork(void);
......@@ -67,25 +66,6 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
#if XCHAL_CP_NUM > 0
/*
* Coprocessor ownership.
*/
coprocessor_info_t coprocessor_info[] = {
{ 0, XTENSA_CPE_CP0_OFFSET },
{ 0, XTENSA_CPE_CP1_OFFSET },
{ 0, XTENSA_CPE_CP2_OFFSET },
{ 0, XTENSA_CPE_CP3_OFFSET },
{ 0, XTENSA_CPE_CP4_OFFSET },
{ 0, XTENSA_CPE_CP5_OFFSET },
{ 0, XTENSA_CPE_CP6_OFFSET },
{ 0, XTENSA_CPE_CP7_OFFSET },
};
#endif
/*
* Powermanagement idle function, if any is provided by the platform.
*/
......@@ -110,12 +90,10 @@ void cpu_idle(void)
void exit_thread(void)
{
release_coprocessors(current); /* Empty macro if no CPs are defined */
}
void flush_thread(void)
{
release_coprocessors(current); /* Empty macro if no CPs are defined */
}
/*
......@@ -275,7 +253,7 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
*/
elfregs->pc = regs->pc;
elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
elfregs->exccause = regs->exccause;
elfregs->excvaddr = regs->excvaddr;
elfregs->windowbase = regs->windowbase;
......@@ -325,7 +303,7 @@ void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
*/
regs->pc = elfregs->pc;
regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
regs->ps = (elfregs->ps | (1 << PS_EXCM_BIT));
regs->exccause = elfregs->exccause;
regs->excvaddr = elfregs->excvaddr;
regs->windowbase = elfregs->windowbase;
......@@ -459,16 +437,7 @@ int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
/* see asm/coprocessor.h for this magic number 16 */
#if XTENSA_CP_EXTRA_SIZE > 16
do_save_fpregs (r, regs, task);
/* For now, bit 16 means some extra state may be present: */
// FIXME!! need to track to return more accurate mask
return 0x10000 | XCHAL_CP_MASK;
#else
return 0; /* no coprocessors active on this processor */
#endif
}
/*
......
......@@ -96,7 +96,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Note: PS.EXCM is not set while user task is running;
* its being set in regs is for exception handling
* convenience. */
tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
break;
case REG_WB:
tmp = regs->windowbase;
......
......@@ -42,8 +42,6 @@
#include <asm/page.h>
#include <asm/setup.h>
#include <xtensa/config/system.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
#endif
......@@ -336,7 +334,7 @@ c_show(struct seq_file *f, void *slot)
/* high-level stuff */
seq_printf(f,"processor\t: 0\n"
"vendor_id\t: Tensilica\n"
"model\t\t: Xtensa " XCHAL_HW_RELEASE_NAME "\n"
"model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
"core ID\t\t: " XCHAL_CORE_ID "\n"
"build ID\t: 0x%x\n"
"byte order\t: %s\n"
......@@ -420,25 +418,6 @@ c_show(struct seq_file *f, void *slot)
XCHAL_NUM_TIMERS,
XCHAL_DEBUGLEVEL);
/* Coprocessors */
#if XCHAL_HAVE_CP
seq_printf(f, "coprocessors\t: %d\n", XCHAL_CP_NUM);
#else
seq_printf(f, "coprocessors\t: none\n");
#endif
/* {I,D}{RAM,ROM} and XLMI */
seq_printf(f,"inst ROMs\t: %d\n"
"inst RAMs\t: %d\n"
"data ROMs\t: %d\n"
"data RAMs\t: %d\n"
"XLMI ports\t: %d\n",
XCHAL_NUM_IROM,
XCHAL_NUM_IRAM,
XCHAL_NUM_DROM,
XCHAL_NUM_DRAM,
XCHAL_NUM_XLMI);
/* Cache */
seq_printf(f,"icache line size: %d\n"
"icache ways\t: %d\n"
......@@ -466,24 +445,6 @@ c_show(struct seq_file *f, void *slot)
XCHAL_DCACHE_WAYS,
XCHAL_DCACHE_SIZE);
/* MMU */
seq_printf(f,"ASID bits\t: %d\n"
"ASID invalid\t: %d\n"
"ASID kernel\t: %d\n"
"rings\t\t: %d\n"
"itlb ways\t: %d\n"
"itlb AR ways\t: %d\n"
"dtlb ways\t: %d\n"
"dtlb AR ways\t: %d\n",
XCHAL_MMU_ASID_BITS,
XCHAL_MMU_ASID_INVALID,
XCHAL_MMU_ASID_KERNEL,
XCHAL_MMU_RINGS,
XCHAL_ITLB_WAYS,
XCHAL_ITLB_ARF_WAYS,
XCHAL_DTLB_WAYS,
XCHAL_DTLB_ARF_WAYS);
return 0;
}
......
......@@ -12,8 +12,8 @@
*
*/
#include <xtensa/config/core.h>
#include <xtensa/hal.h>
#include <asm/variant/core.h>
#include <asm/coprocessor.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
......@@ -216,8 +216,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
* handler, or the user mode value doesn't matter (e.g. PS.OWB).
*/
err |= __get_user(ps, &sc->sc_ps);
regs->ps = (regs->ps & ~XCHAL_PS_CALLINC_MASK)
| (ps & XCHAL_PS_CALLINC_MASK);
regs->ps = (regs->ps & ~PS_CALLINC_MASK)
| (ps & PS_CALLINC_MASK);
/* Additional corruption checks */
......@@ -280,7 +280,7 @@ flush_my_cpstate(struct task_struct *tsk)
static int
save_cpextra (struct _cpstate *buf)
{
#if (XCHAL_EXTRA_SA_SIZE == 0) && (XCHAL_CP_NUM == 0)
#if XCHAL_CP_NUM == 0
return 0;
#else
......@@ -497,8 +497,10 @@ gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
/* Flush generated code out of the data cache */
if (err == 0)
__flush_invalidate_cache_range((unsigned long)codemem, 6UL);
if (err == 0) {
__invalidate_icache_range((unsigned long)codemem, 6UL);
__flush_invalidate_dcache_range((unsigned long)codemem, 6UL);
}
return err;
}
......
......@@ -175,8 +175,8 @@ void system_call (struct pt_regs *regs)
* interrupts in the first place:
*/
local_save_flags (ps);
local_irq_restore((ps & ~XCHAL_PS_INTLEVEL_MASK) |
(regs->ps & XCHAL_PS_INTLEVEL_MASK) );
local_irq_restore((ps & ~PS_INTLEVEL_MASK) |
(regs->ps & PS_INTLEVEL_MASK) );
if (syscallnr > __NR_Linux_syscalls) {
regs->areg[2] = -ENOSYS;
......
......@@ -75,7 +75,7 @@ extern void system_call (struct pt_regs*);
#define USER 0x02
#define COPROCESSOR(x) \
{ XCHAL_EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }
typedef struct {
int cause;
......@@ -85,38 +85,38 @@ typedef struct {
dispatch_init_table_t __init dispatch_init_table[] = {
{ XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
{ XCHAL_EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
{ XCHAL_EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ XCHAL_EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* XCHAL_EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* XCHAL_EXCCAUSE_LOAD_STORE_ERROR unhandled*/
{ XCHAL_EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
{ XCHAL_EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
/* XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* XCHAL_EXCCAUSE_PRIVILEGED unhandled */
{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL, KRNL, fast_syscall_kernel },
{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled*/
{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_UNALIGNED_USER
{ XCHAL_EXCCAUSE_UNALIGNED, USER, fast_unaligned },
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#else
{ XCHAL_EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
#endif
{ XCHAL_EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
{ XCHAL_EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ XCHAL_EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
{ XCHAL_EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
/* XCHAL_EXCCAUSE_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
{ XCHAL_EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ XCHAL_EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
{ XCHAL_EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
/* XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
/* XCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1)
COPROCESSOR(0),
......
......@@ -53,6 +53,8 @@
#include <asm/thread_info.h>
#include <asm/processor.h>
#define WINDOW_VECTORS_SIZE 0x180
/*
* User exception vector. (Exceptions with PS.UM == 1, PS.EXCM == 0)
......@@ -210,7 +212,7 @@ ENTRY(_DoubleExceptionVector)
/* Check for kernel double exception (usually fatal). */
rsr a3, PS
_bbci.l a3, PS_UM_SHIFT, .Lksp
_bbci.l a3, PS_UM_BIT, .Lksp
/* Check if we are currently handling a window exception. */
/* Note: We don't need to indicate that we enter a critical section. */
......@@ -219,7 +221,7 @@ ENTRY(_DoubleExceptionVector)
movi a3, XCHAL_WINDOW_VECTORS_VADDR
_bltu a0, a3, .Lfixup
addi a3, a3, XSHAL_WINDOW_VECTORS_SIZE
addi a3, a3, WINDOW_VECTORS_SIZE
_bgeu a0, a3, .Lfixup
/* Window overflow/underflow exception. Get stack pointer. */
......@@ -245,7 +247,7 @@ ENTRY(_DoubleExceptionVector)
wsr a2, DEPC # save stack pointer temporarily
rsr a0, PS
extui a0, a0, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS
extui a0, a0, PS_OWB_SHIFT, 4
wsr a0, WINDOWBASE
rsync
......@@ -312,8 +314,8 @@ ENTRY(_DoubleExceptionVector)
.Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */
rsr a3, EXCCAUSE
beqi a3, XCHAL_EXCCAUSE_ITLB_MISS, 1f
addi a3, a3, -XCHAL_EXCCAUSE_DTLB_MISS
beqi a3, EXCCAUSE_ITLB_MISS, 1f
addi a3, a3, -EXCCAUSE_DTLB_MISS
bnez a3, .Lunrecoverable
1: movi a3, fast_second_level_miss_double_kernel
jx a3
......
......@@ -16,20 +16,17 @@
#include <asm-generic/vmlinux.lds.h>
#define _NOCLANGUAGE
#undef __ASSEMBLER__
#include <xtensa/config/core.h>
#include <xtensa/config/system.h>
#include <asm/variant/core.h>
OUTPUT_ARCH(xtensa)
ENTRY(_start)
#if XCHAL_MEMORY_ORDER == XTHAL_BIGENDIAN
#ifdef __XTENSA_EB__
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
#define KERNELOFFSET 0x1000
#define KERNELOFFSET 0xd0001000
/* Note: In the following macros, it would be nice to specify only the
vector name and section kind and construct "sym" and "section" using
......@@ -76,7 +73,7 @@ jiffies = jiffies_64;
SECTIONS
{
. = XCHAL_KSEG_CACHED_VADDR + KERNELOFFSET;
. = KERNELOFFSET;
/* .text section */
_text = .;
......@@ -160,7 +157,7 @@ SECTIONS
/* Initialization code and data: */
. = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
. = ALIGN(1 << 12);
__init_begin = .;
.init.text : {
_sinittext = .;
......@@ -224,32 +221,32 @@ SECTIONS
.dummy)
SECTION_VECTOR (_DebugInterruptVector_literal,
.DebugInterruptVector.literal,
XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) - 4,
XCHAL_DEBUG_VECTOR_VADDR - 4,
SIZEOF(.WindowVectors.text),
.WindowVectors.text)
SECTION_VECTOR (_DebugInterruptVector_text,
.DebugInterruptVector.text,
XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL),
XCHAL_DEBUG_VECTOR_VADDR,
4,
.DebugInterruptVector.literal)
SECTION_VECTOR (_KernelExceptionVector_literal,
.KernelExceptionVector.literal,
XCHAL_KERNELEXC_VECTOR_VADDR - 4,
XCHAL_KERNEL_VECTOR_VADDR - 4,
SIZEOF(.DebugInterruptVector.text),
.DebugInterruptVector.text)
SECTION_VECTOR (_KernelExceptionVector_text,
.KernelExceptionVector.text,
XCHAL_KERNELEXC_VECTOR_VADDR,
XCHAL_KERNEL_VECTOR_VADDR,
4,
.KernelExceptionVector.literal)
SECTION_VECTOR (_UserExceptionVector_literal,
.UserExceptionVector.literal,
XCHAL_USEREXC_VECTOR_VADDR - 4,
XCHAL_USER_VECTOR_VADDR - 4,
SIZEOF(.KernelExceptionVector.text),
.KernelExceptionVector.text)
SECTION_VECTOR (_UserExceptionVector_text,
.UserExceptionVector.text,
XCHAL_USEREXC_VECTOR_VADDR,
XCHAL_USER_VECTOR_VADDR,
4,
.UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal,
......@@ -264,7 +261,7 @@ SECTIONS
.DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
. = ALIGN(1<<XCHAL_MMU_MIN_PTE_PAGE_SIZE);
. = ALIGN(1 << 12);
__init_end = .;
......
......@@ -16,8 +16,7 @@
#include <asm/errno.h>
#include <linux/linkage.h>
#define _ASMLANGUAGE
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
......
......@@ -9,7 +9,7 @@
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
.macro src_b r, w0, w1
#ifdef __XTENSA_EB__
......
......@@ -11,7 +11,7 @@
* Copyright (C) 2002 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
/*
* void *memset(void *dst, int c, size_t length)
......
......@@ -11,7 +11,7 @@
* Copyright (C) 2002 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
#include <linux/errno.h>
/* Load or store instructions that may cause exceptions use the EX macro. */
......
......@@ -11,7 +11,7 @@
* Copyright (C) 2002 Tensilica Inc.
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
/* Load or store instructions that may cause exceptions use the EX macro. */
......
......@@ -53,7 +53,7 @@
* a11/ original length
*/
#include <xtensa/coreasm.h>
#include <asm/variant/core.h>
#ifdef __XTENSA_EB__
#define ALIGN(R, W0, W1) src R, W0, W1
......
......@@ -21,7 +21,7 @@
#include <asm/system.h>
#include <asm/pgalloc.h>
unsigned long asid_cache = ASID_FIRST_VERSION;
unsigned long asid_cache = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);
/*
......@@ -58,10 +58,10 @@ void do_page_fault(struct pt_regs *regs)
return;
}
is_write = (exccause == XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
is_exec = (exccause == XCHAL_EXCCAUSE_ITLB_PRIVILEGE ||
exccause == XCHAL_EXCCAUSE_ITLB_MISS ||
exccause == XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
exccause == EXCCAUSE_ITLB_MISS ||
exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;
#if 0
printk("[%s:%d:%08x:%d:%08x:%s%s]\n", current->comm, current->pid,
......
......@@ -141,8 +141,8 @@ void __init bootmem_init(void)
if (min_low_pfn > max_pfn)
panic("No memory found!\n");
max_low_pfn = max_pfn < MAX_LOW_MEMORY >> PAGE_SHIFT ?
max_pfn : MAX_LOW_MEMORY >> PAGE_SHIFT;
max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;
/* Find an area to use for the bootmem bitmap. */
......@@ -215,7 +215,7 @@ void __init init_mmu (void)
/* Set rasid register to a known value. */
set_rasid_register (ASID_ALL_RESERVED);
set_rasid_register (ASID_USER_FIRST);
/* Set PTEVADDR special register to the start of the page
* table, which is in kernel mappable space (ie. not
......
......@@ -19,9 +19,8 @@
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <xtensa/cacheasm.h>
#include <xtensa/cacheattrasm.h>
#include <asm/asmmacro.h>
#include <asm/cacheasm.h>
/* clear_page (page) */
......@@ -74,104 +73,66 @@ ENTRY(copy_page)
retw
/*
* void __flush_invalidate_cache_all(void)
* void __invalidate_icache_page(ulong start)
*/
ENTRY(__flush_invalidate_cache_all)
ENTRY(__invalidate_icache_page)
entry sp, 16
dcache_writeback_inv_all a2, a3
icache_invalidate_all a2, a3
retw
/*
* void __invalidate_icache_all(void)
*/
___invalidate_icache_page a2 a3
isync
ENTRY(__invalidate_icache_all)
entry sp, 16
icache_invalidate_all a2, a3
retw
/*
* void __flush_invalidate_dcache_all(void)
* void __invalidate_dcache_page(ulong start)
*/
ENTRY(__flush_invalidate_dcache_all)
ENTRY(__invalidate_dcache_page)
entry sp, 16
dcache_writeback_inv_all a2, a3
retw
/*
* void __flush_invalidate_cache_range(ulong start, ulong size)
*/
___invalidate_dcache_page a2 a3
dsync
ENTRY(__flush_invalidate_cache_range)
entry sp, 16
mov a4, a2
mov a5, a3
dcache_writeback_inv_region a4, a5, a6
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_icache_page(ulong start)
* void __flush_invalidate_dcache_page(ulong start)
*/
ENTRY(__invalidate_icache_page)
ENTRY(__flush_invalidate_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_dcache_page(ulong start)
*/
___flush_invalidate_dcache_page a2 a3
ENTRY(__invalidate_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
dcache_invalidate_region a2, a3, a4
dsync
retw
/*
* void __invalidate_icache_range(ulong start, ulong size)
* void __flush_dcache_page(ulong start)
*/
ENTRY(__invalidate_icache_range)
ENTRY(__flush_dcache_page)
entry sp, 16
icache_invalidate_region a2, a3, a4
retw
/*
* void __invalidate_dcache_range(ulong start, ulong size)
*/
___flush_dcache_page a2 a3
ENTRY(__invalidate_dcache_range)
entry sp, 16
dcache_invalidate_region a2, a3, a4
dsync
retw
/*
* void __flush_dcache_page(ulong start)
*/
ENTRY(__flush_dcache_page)
entry sp, 16
movi a3, PAGE_SIZE
dcache_writeback_region a2, a3, a4
retw
/*
* void __flush_invalidate_dcache_page(ulong start)
* void __invalidate_icache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_page)
ENTRY(__invalidate_icache_range)
entry sp, 16
movi a3, PAGE_SIZE
dcache_writeback_inv_region a2, a3, a4
___invalidate_icache_range a2 a3 a4
isync
retw
/*
......@@ -180,195 +141,69 @@ ENTRY(__flush_invalidate_dcache_page)
ENTRY(__flush_invalidate_dcache_range)
entry sp, 16
dcache_writeback_inv_region a2, a3, a4
retw
/*
* void __invalidate_dcache_all(void)
*/
___flush_invalidate_dcache_range a2 a3 a4
dsync
ENTRY(__invalidate_dcache_all)
entry sp, 16
dcache_invalidate_all a2, a3
retw
/*
* void __flush_invalidate_dcache_page_phys(ulong start)
* void _flush_dcache_range(ulong start, ulong size)
*/
ENTRY(__flush_invalidate_dcache_page_phys)
ENTRY(__flush_dcache_range)
entry sp, 16
movi a3, XCHAL_DCACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
___flush_dcache_range a2 a3 a4
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
2: diwbi a3, 0
bgeui a3, 2, 1b
retw
ENTRY(check_dcache_low0)
entry sp, 16
movi a3, XCHAL_DCACHE_SIZE / 4
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
2: j 2b
ENTRY(check_dcache_high0)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE / 2
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
/*
* void _invalidate_dcache_range(ulong start, ulong size)
*/
ENTRY(check_dcache_low1)
ENTRY(__invalidate_dcache_range)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE * 3 / 4
movi a4, PAGE_MASK | 1
addi a2, a2, 1
___invalidate_dcache_range a2 a3 a4
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
/*
* void _invalidate_icache_all(void)
*/
ENTRY(check_dcache_high1)
ENTRY(__invalidate_icache_all)
entry sp, 16
movi a5, XCHAL_DCACHE_SIZE / 4
movi a3, XCHAL_DCACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_DCACHE_LINESIZE
addi a5, a5, -XCHAL_DCACHE_LINESIZE
___invalidate_icache_all a2 a3
isync
ldct a6, a3
dsync
and a6, a6, a4
beq a6, a2, 2f
bgeui a5, 2, 1b
retw
2: j 2b
/*
* void __invalidate_icache_page_phys(ulong start)
* void _flush_invalidate_dcache_all(void)
*/
ENTRY(__invalidate_icache_page_phys)
ENTRY(__flush_invalidate_dcache_all)
entry sp, 16
movi a3, XCHAL_ICACHE_SIZE
movi a4, PAGE_MASK | 1
addi a2, a2, 1
1: addi a3, a3, -XCHAL_ICACHE_LINESIZE
lict a6, a3
isync
and a6, a6, a4
beq a6, a2, 2f
bgeui a3, 2, 1b
retw
___flush_invalidate_dcache_all a2 a3
dsync
2: iii a3, 0
bgeui a3, 2, 1b
retw
/*
* void _invalidate_dcache_all(void)
*/
#if 0
movi a3, XCHAL_DCACHE_WAYS - 1
movi a4, PAGE_SIZE
1: mov a5, a2
add a6, a2, a4
2: diwbi a5, 0
diwbi a5, XCHAL_DCACHE_LINESIZE
diwbi a5, XCHAL_DCACHE_LINESIZE * 2
diwbi a5, XCHAL_DCACHE_LINESIZE * 3
addi a5, a5, XCHAL_DCACHE_LINESIZE * 4
blt a5, a6, 2b
addi a3, a3, -1
addi a2, a2, XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS
bgez a3, 1b
retw
ENTRY(__invalidate_icache_page_index)
ENTRY(__invalidate_dcache_all)
entry sp, 16
movi a3, XCHAL_ICACHE_WAYS - 1
movi a4, PAGE_SIZE
1: mov a5, a2
add a6, a2, a4
2: iii a5, 0
iii a5, XCHAL_ICACHE_LINESIZE
iii a5, XCHAL_ICACHE_LINESIZE * 2
iii a5, XCHAL_ICACHE_LINESIZE * 3
addi a5, a5, XCHAL_ICACHE_LINESIZE * 4
blt a5, a6, 2b
addi a3, a3, -1
addi a2, a2, XCHAL_ICACHE_SIZE / XCHAL_ICACHE_WAYS
bgez a3, 2b
___invalidate_dcache_all a2 a3
dsync
retw
#endif
......@@ -24,12 +24,12 @@
static inline void __flush_itlb_all (void)
{
int way, index;
int w, i;
for (way = 0; way < XCHAL_ITLB_ARF_WAYS; way++) {
for (index = 0; index < ITLB_ENTRIES_PER_ARF_WAY; index++) {
int entry = way + (index << PAGE_SHIFT);
invalidate_itlb_entry_no_isync (entry);
for (w = 0; w < ITLB_ARF_WAYS; w++) {
for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
int e = w + (i << PAGE_SHIFT);
invalidate_itlb_entry_no_isync(e);
}
}
asm volatile ("isync\n");
......@@ -37,12 +37,12 @@ static inline void __flush_itlb_all (void)
static inline void __flush_dtlb_all (void)
{
int way, index;
int w, i;
for (way = 0; way < XCHAL_DTLB_ARF_WAYS; way++) {
for (index = 0; index < DTLB_ENTRIES_PER_ARF_WAY; index++) {
int entry = way + (index << PAGE_SHIFT);
invalidate_dtlb_entry_no_isync (entry);
for (w = 0; w < DTLB_ARF_WAYS; w++) {
for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
int e = w + (i << PAGE_SHIFT);
invalidate_dtlb_entry_no_isync(e);
}
}
asm volatile ("isync\n");
......@@ -63,21 +63,25 @@ void flush_tlb_all (void)
void flush_tlb_mm(struct mm_struct *mm)
{
#if 0
printk("[tlbmm<%lx>]\n", (unsigned long)mm->context);
#endif
if (mm == current->active_mm) {
int flags;
local_save_flags(flags);
get_new_mmu_context(mm, asid_cache);
set_rasid_register(ASID_INSERT(mm->context));
__get_new_mmu_context(mm);
__load_mmu_context(mm);
local_irq_restore(flags);
}
else
mm->context = 0;
}
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
void flush_tlb_range (struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
......@@ -93,7 +97,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
#endif
local_save_flags(flags);
if (end-start + (PAGE_SIZE-1) <= SMALLEST_NTLB_ENTRIES << PAGE_SHIFT) {
if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
int oldpid = get_rasid_register();
set_rasid_register (ASID_INSERT(mm->context));
start &= PAGE_MASK;
......@@ -111,9 +115,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
set_rasid_register(oldpid);
} else {
get_new_mmu_context(mm, asid_cache);
if (mm == current->active_mm)
set_rasid_register(ASID_INSERT(mm->context));
flush_tlb_mm(mm);
}
local_irq_restore(flags);
}
......@@ -123,10 +125,6 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
struct mm_struct* mm = vma->vm_mm;
unsigned long flags;
int oldpid;
#if 0
printk("[tlbpage<%02lx,%08lx>]\n",
(unsigned long)mm->context, page);
#endif
if(mm->context == NO_CONTEXT)
return;
......@@ -142,404 +140,5 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
set_rasid_register(oldpid);
local_irq_restore(flags);
#if 0
flush_tlb_all();
return;
#endif
}
#ifdef DEBUG_TLB
#define USE_ITLB 0
#define USE_DTLB 1
struct way_config_t {
int indicies;
int indicies_log2;
int pgsz_log2;
int arf;
};
static struct way_config_t itlb[XCHAL_ITLB_WAYS] =
{
{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES_LOG2),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, PAGESZ_LOG2_MIN),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ARF)
},
{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES_LOG2),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, PAGESZ_LOG2_MIN),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ARF)
},
{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES_LOG2),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, PAGESZ_LOG2_MIN),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ARF)
},
{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES_LOG2),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, PAGESZ_LOG2_MIN),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ARF)
},
{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES_LOG2),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, PAGESZ_LOG2_MIN),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ARF)
},
{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES_LOG2),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, PAGESZ_LOG2_MIN),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ARF)
},
{ XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES_LOG2),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, PAGESZ_LOG2_MIN),
XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ARF)
}
};
static struct way_config_t dtlb[XCHAL_DTLB_WAYS] =
{
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ARF)
},
{ XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES_LOG2),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, PAGESZ_LOG2_MIN),
XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ARF)
}
};
/* Total number of entries: */
#define ITLB_TOTAL_ENTRIES \
XCHAL_ITLB_SET(XCHAL_ITLB_WAY0_SET, ENTRIES) + \
XCHAL_ITLB_SET(XCHAL_ITLB_WAY1_SET, ENTRIES) + \
XCHAL_ITLB_SET(XCHAL_ITLB_WAY2_SET, ENTRIES) + \
XCHAL_ITLB_SET(XCHAL_ITLB_WAY3_SET, ENTRIES) + \
XCHAL_ITLB_SET(XCHAL_ITLB_WAY4_SET, ENTRIES) + \
XCHAL_ITLB_SET(XCHAL_ITLB_WAY5_SET, ENTRIES) + \
XCHAL_ITLB_SET(XCHAL_ITLB_WAY6_SET, ENTRIES)
#define DTLB_TOTAL_ENTRIES \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY0_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY1_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY2_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY3_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY4_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY5_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY6_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY7_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY8_SET, ENTRIES) + \
XCHAL_DTLB_SET(XCHAL_DTLB_WAY9_SET, ENTRIES)
typedef struct {
unsigned va;
unsigned pa;
unsigned char asid;
unsigned char ca;
unsigned char way;
unsigned char index;
unsigned char pgsz_log2; /* 0 .. 32 */
unsigned char type; /* 0=ITLB 1=DTLB */
} tlb_dump_entry_t;
/* Return -1 if a precedes b, +1 if a follows b, 0 if same: */
int cmp_tlb_dump_info( tlb_dump_entry_t *a, tlb_dump_entry_t *b )
{
if (a->asid < b->asid) return -1;
if (a->asid > b->asid) return 1;
if (a->va < b->va) return -1;
if (a->va > b->va) return 1;
if (a->pa < b->pa) return -1;
if (a->pa > b->pa) return 1;
if (a->ca < b->ca) return -1;
if (a->ca > b->ca) return 1;
if (a->way < b->way) return -1;
if (a->way > b->way) return 1;
if (a->index < b->index) return -1;
if (a->index > b->index) return 1;
return 0;
}
void sort_tlb_dump_info( tlb_dump_entry_t *t, int n )
{
int i, j;
/* Simple O(n*n) sort: */
for (i = 0; i < n-1; i++)
for (j = i+1; j < n; j++)
if (cmp_tlb_dump_info(t+i, t+j) > 0) {
tlb_dump_entry_t tmp = t[i];
t[i] = t[j];
t[j] = tmp;
}
}
static tlb_dump_entry_t itlb_dump_info[ITLB_TOTAL_ENTRIES];
static tlb_dump_entry_t dtlb_dump_info[DTLB_TOTAL_ENTRIES];
static inline char *way_type (int type)
{
return type ? "autorefill" : "non-autorefill";
}
void print_entry (struct way_config_t *way_info,
unsigned int way,
unsigned int index,
unsigned int virtual,
unsigned int translation)
{
char valid_chr;
unsigned int va, pa, asid, ca;
va = virtual &
~((1 << (way_info->pgsz_log2 + way_info->indicies_log2)) - 1);
asid = virtual & ((1 << XCHAL_MMU_ASID_BITS) - 1);
pa = translation & ~((1 << way_info->pgsz_log2) - 1);
ca = translation & ((1 << XCHAL_MMU_CA_BITS) - 1);
valid_chr = asid ? 'V' : 'I';
/* Compute and incorporate the effect of the index bits on the
* va. It's more useful for kernel debugging, since we always
* want to know the effective va anyway. */
va += index << way_info->pgsz_log2;
printk ("\t[%d,%d] (%c) vpn 0x%.8x ppn 0x%.8x asid 0x%.2x am 0x%x\n",
way, index, valid_chr, va, pa, asid, ca);
}
void print_itlb_entry (struct way_config_t *way_info, int way, int index)
{
print_entry (way_info, way, index,
read_itlb_virtual (way + (index << way_info->pgsz_log2)),
read_itlb_translation (way + (index << way_info->pgsz_log2)));
}
void print_dtlb_entry (struct way_config_t *way_info, int way, int index)
{
print_entry (way_info, way, index,
read_dtlb_virtual (way + (index << way_info->pgsz_log2)),
read_dtlb_translation (way + (index << way_info->pgsz_log2)));
}
void dump_itlb (void)
{
int way, index;
printk ("\nITLB: ways = %d\n", XCHAL_ITLB_WAYS);
for (way = 0; way < XCHAL_ITLB_WAYS; way++) {
printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
way, itlb[way].indicies,
itlb[way].pgsz_log2, way_type(itlb[way].arf));
for (index = 0; index < itlb[way].indicies; index++) {
print_itlb_entry(&itlb[way], way, index);
}
}
}
void dump_dtlb (void)
{
int way, index;
printk ("\nDTLB: ways = %d\n", XCHAL_DTLB_WAYS);
for (way = 0; way < XCHAL_DTLB_WAYS; way++) {
printk ("\nWay: %d, Entries: %d, MinPageSize: %d, Type: %s\n",
way, dtlb[way].indicies,
dtlb[way].pgsz_log2, way_type(dtlb[way].arf));
for (index = 0; index < dtlb[way].indicies; index++) {
print_dtlb_entry(&dtlb[way], way, index);
}
}
}
void dump_tlb (tlb_dump_entry_t *tinfo, struct way_config_t *config,
int entries, int ways, int type, int show_invalid)
{
tlb_dump_entry_t *e = tinfo;
int way, i;
/* Gather all info: */
for (way = 0; way < ways; way++) {
struct way_config_t *cfg = config + way;
for (i = 0; i < cfg->indicies; i++) {
unsigned wayindex = way + (i << cfg->pgsz_log2);
unsigned vv = (type ? read_dtlb_virtual (wayindex)
: read_itlb_virtual (wayindex));
unsigned pp = (type ? read_dtlb_translation (wayindex)
: read_itlb_translation (wayindex));
/* Compute and incorporate the effect of the index bits on the
* va. It's more useful for kernel debugging, since we always
* want to know the effective va anyway. */
e->va = (vv & ~((1 << (cfg->pgsz_log2 + cfg->indicies_log2)) - 1));
e->va += (i << cfg->pgsz_log2);
e->pa = (pp & ~((1 << cfg->pgsz_log2) - 1));
e->asid = (vv & ((1 << XCHAL_MMU_ASID_BITS) - 1));
e->ca = (pp & ((1 << XCHAL_MMU_CA_BITS) - 1));
e->way = way;
e->index = i;
e->pgsz_log2 = cfg->pgsz_log2;
e->type = type;
e++;
}
}
#if 1
/* Sort by ASID and VADDR: */
sort_tlb_dump_info (tinfo, entries);
#endif
/* Display all sorted info: */
printk ("\n%cTLB dump:\n", (type ? 'D' : 'I'));
for (e = tinfo, i = 0; i < entries; i++, e++) {
#if 0
if (e->asid == 0 && !show_invalid)
continue;
#endif
printk ("%c way=%d i=%d ASID=%02X V=%08X -> P=%08X CA=%X (%d %cB)\n",
(e->type ? 'D' : 'I'), e->way, e->index,
e->asid, e->va, e->pa, e->ca,
(1 << (e->pgsz_log2 % 10)),
" kMG"[e->pgsz_log2 / 10]
);
}
}
void dump_tlbs2 (int showinv)
{
dump_tlb (itlb_dump_info, itlb, ITLB_TOTAL_ENTRIES, XCHAL_ITLB_WAYS, 0, showinv);
dump_tlb (dtlb_dump_info, dtlb, DTLB_TOTAL_ENTRIES, XCHAL_DTLB_WAYS, 1, showinv);
}
void dump_all_tlbs (void)
{
dump_tlbs2 (1);
}
void dump_valid_tlbs (void)
{
dump_tlbs2 (0);
}
void dump_tlbs (void)
{
dump_itlb();
dump_dtlb();
}
void dump_cache_tag(int dcache, int idx)
{
int w, i, s, e;
unsigned long tag, index;
unsigned long num_lines, num_ways, cache_size, line_size;
num_ways = dcache ? XCHAL_DCACHE_WAYS : XCHAL_ICACHE_WAYS;
cache_size = dcache ? XCHAL_DCACHE_SIZE : XCHAL_ICACHE_SIZE;
line_size = dcache ? XCHAL_DCACHE_LINESIZE : XCHAL_ICACHE_LINESIZE;
num_lines = cache_size / num_ways;
s = 0; e = num_lines;
if (idx >= 0)
e = (s = idx * line_size) + 1;
for (i = s; i < e; i+= line_size) {
printk("\nline %#08x:", i);
for (w = 0; w < num_ways; w++) {
index = w * num_lines + i;
if (dcache)
__asm__ __volatile__("ldct %0, %1\n\t"
: "=a"(tag) : "a"(index));
else
__asm__ __volatile__("lict %0, %1\n\t"
: "=a"(tag) : "a"(index));
printk(" %#010lx", tag);
}
}
printk ("\n");
}
void dump_icache(int index)
{
unsigned long data, addr;
int w, i;
const unsigned long num_ways = XCHAL_ICACHE_WAYS;
const unsigned long cache_size = XCHAL_ICACHE_SIZE;
const unsigned long line_size = XCHAL_ICACHE_LINESIZE;
const unsigned long num_lines = cache_size / num_ways / line_size;
for (w = 0; w < num_ways; w++) {
printk ("\nWay %d", w);
for (i = 0; i < line_size; i+= 4) {
addr = w * num_lines + index * line_size + i;
__asm__ __volatile__("licw %0, %1\n\t"
: "=a"(data) : "a"(addr));
printk(" %#010lx", data);
}
}
printk ("\n");
}
void dump_cache_tags(void)
{
printk("Instruction cache\n");
dump_cache_tag(0, -1);
printk("Data cache\n");
dump_cache_tag(1, -1);
}
#endif
......@@ -25,11 +25,15 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <xtensa/simcall.h>
#include <asm/platform/simcall.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#ifdef SERIAL_INLINE
#define _INLINE_ inline
#endif
#define SERIAL_MAX_NUM_LINES 1
#define SERIAL_TIMER_VALUE (20 * HZ)
......@@ -191,7 +195,7 @@ static int rs_read_proc(char *page, char **start, off_t off, int count,
}
static const struct tty_operations serial_ops = {
static struct tty_operations serial_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
......
......@@ -34,7 +34,7 @@
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <xtensa/simcall.h>
#include <asm/platform/simcall.h>
#define DRIVER_NAME "iss-netdev"
#define ETH_MAX_PACKET 1500
......
/*
* include/asm-xtensa/asmmacro.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ASMMACRO_H
#define _XTENSA_ASMMACRO_H
#include <asm/variant/core.h>
/*
* Some little helpers for loops. Use zero-overhead-loops
* where applicable and if supported by the processor.
*
* __loopi ar, at, size, incr
* ar register initialized with the start address
* at scratch register used by macro
* size size immediate value
* incr increment
*
* __loops ar, as, at, incr_log2[, mask_log2][, cond][, ncond]
* ar register initialized with the start address
* as register initialized with the size
* at scratch register used by macro
* incr_log2 increment [in log2]
* mask_log2 mask [in log2]
* cond true condition (used in loop'cond')
* ncond false condition (used in b'ncond')
*
* __loop as
* restart loop. 'as' register must not have been modified!
*
* __endla ar, as, incr
* ar start address (modified)
* as scratch register used by macro
* incr increment
*/
/*
* loop for given size as immediate
*/
.macro __loopi ar, at, size, incr
#if XCHAL_HAVE_LOOPS
movi \at, ((\size + \incr - 1) / (\incr))
loop \at, 99f
#else
addi \at, \ar, \size
98:
#endif
.endm
/*
* loop for given size in register
*/
.macro __loops ar, as, at, incr_log2, mask_log2, cond, ncond
#if XCHAL_HAVE_LOOPS
.ifgt \incr_log2 - 1
addi \at, \as, (1 << \incr_log2) - 1
.ifnc \mask_log2,
extui \at, \at, \incr_log2, \mask_log2
.else
srli \at, \at, \incr_log2
.endif
.endif
loop\cond \at, 99f
#else
.ifnc \mask_log2,
extui \at, \as, \incr_log2, \mask_log2
.else
.ifnc \ncond,
srli \at, \as, \incr_log2
.endif
.endif
.ifnc \ncond,
b\ncond \at, 99f
.endif
.ifnc \mask_log2,
slli \at, \at, \incr_log2
add \at, \ar, \at
.else
add \at, \ar, \as
.endif
#endif
98:
.endm
/*
* loop from ar to ax
*/
.macro __loopt ar, as, at, incr_log2
#if XCHAL_HAVE_LOOPS
sub \at, \as, \ar
.ifgt \incr_log2 - 1
addi \at, \at, (1 << \incr_log2) - 1
srli \at, \at, \incr_log2
.endif
loop \at, 99f
#else
98:
#endif
.endm
/*
* restart loop. registers must be unchanged
*/
.macro __loop as
#if XCHAL_HAVE_LOOPS
loop \as, 99f
#else
98:
#endif
.endm
/*
* end of loop with no increment of the address.
*/
.macro __endl ar, as
#if !XCHAL_HAVE_LOOPS
bltu \ar, \as, 98b
#endif
99:
.endm
/*
* end of loop with increment of the address.
*/
.macro __endla ar, as, incr
addi \ar, \ar, \incr
__endl \ar \as
.endm
#endif /* _XTENSA_ASMMACRO_H */
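As a mental model for these macros (a sketch, not the literal expansion): on a core without zero-overhead loops, a __loopi/__endla pair reduces to an end-address computation plus a backward branch, roughly:

#include <stddef.h>
#include <stdint.h>

/* Rough C model of:  __loopi ar, at, size, incr ... body ... __endla ar, at, incr
 * on the !XCHAL_HAVE_LOOPS path. With zero-overhead loops, the backward
 * branch is replaced by a 'loop' instruction executing size/incr iterations. */
static void loopi_model(uintptr_t ar, size_t size, size_t incr,
			void (*body)(uintptr_t))
{
	uintptr_t at = ar + size;	/* __loopi: compute the end address */

	do {
		body(ar);		/* e.g. one cache-line operation */
		ar += incr;		/* __endla: advance the address... */
	} while (ar < at);		/* ...and branch back while below end */
}

The cacheasm.h macros further down build on exactly this pattern, issuing four cache-line instructions per iteration to amortize the loop overhead.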
......@@ -11,7 +11,6 @@
#ifndef _XTENSA_BYTEORDER_H
#define _XTENSA_BYTEORDER_H
#include <asm/processor.h>
#include <asm/types.h>
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
......
......@@ -4,7 +4,6 @@
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* 2 of the License, or (at your option) any later version.
*
* (C) 2001 - 2005 Tensilica Inc.
*/
......@@ -12,21 +11,14 @@
#ifndef _XTENSA_CACHE_H
#define _XTENSA_CACHE_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#if XCHAL_ICACHE_SIZE > 0
# if (XCHAL_ICACHE_SIZE % (XCHAL_ICACHE_LINESIZE*XCHAL_ICACHE_WAYS*4)) != 0
# error cache configuration outside expected/supported range!
# endif
#endif
#define L1_CACHE_SHIFT XCHAL_DCACHE_LINEWIDTH
#define L1_CACHE_BYTES XCHAL_DCACHE_LINESIZE
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#if XCHAL_DCACHE_SIZE > 0
# if (XCHAL_DCACHE_SIZE % (XCHAL_DCACHE_LINESIZE*XCHAL_DCACHE_WAYS*4)) != 0
# error cache configuration outside expected/supported range!
# endif
#endif
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS)
#define ICACHE_WAY_SIZE (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS)
#define L1_CACHE_SHIFT XCHAL_CACHE_LINEWIDTH_MAX
#define L1_CACHE_BYTES XCHAL_CACHE_LINESIZE_MAX
#endif /* _XTENSA_CACHE_H */
/*
* include/asm-xtensa/cacheasm.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Tensilica Inc.
*/
#include <asm/cache.h>
#include <asm/asmmacro.h>
#include <linux/stringify.h>
/*
* Define cache functions as macros here so that they can be used
* by the kernel and boot loader. We should consider moving them to a
* library that can be linked by both.
*
* Locking
*
* ___unlock_dcache_all
* ___unlock_icache_all
*
* Flushing and invalidating
*
* ___flush_invalidate_dcache_{all|range|page}
* ___flush_dcache_{all|range|page}
* ___invalidate_dcache_{all|range|page}
* ___invalidate_icache_{all|range|page}
*
*/
.macro __loop_cache_all ar at insn size line_width
movi \ar, 0
__loopi \ar, \at, \size, (4 << (\line_width))
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
.macro __loop_cache_range ar as at insn line_width
extui \at, \ar, 0, \line_width
add \as, \as, \at
__loops \ar, \as, \at, \line_width
\insn \ar, 0
__endla \ar, \at, (1 << (\line_width))
.endm
.macro __loop_cache_page ar at insn line_width
__loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.endm
#if XCHAL_DCACHE_LINE_LOCKABLE
.macro ___unlock_dcache_all ar at
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
#endif
#if XCHAL_ICACHE_LINE_LOCKABLE
.macro ___unlock_icache_all ar at
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
.endm
#endif
.macro ___flush_invalidate_dcache_all ar at
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_all ar at
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_all ar at
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_all ar at
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
.endm
.macro ___flush_invalidate_dcache_range ar as at
__loop_cache_range \ar \as \at dhwbi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_range ar as at
__loop_cache_range \ar \as \at dhwb XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_range ar as at
__loop_cache_range \ar \as \at dhi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_range ar as at
__loop_cache_range \ar \as \at ihi XCHAL_ICACHE_LINEWIDTH
.endm
.macro ___flush_invalidate_dcache_page ar as
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___flush_dcache_page ar as
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_dcache_page ar as
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
.endm
.macro ___invalidate_icache_page ar as
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
.endm
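A C model of the unrolled traversal may help when reading these macros; this is an illustration only (the real code is assembly), with a hypothetical cache_op() standing in for the per-line instruction (diwbi, dii, and so on):
#include <stddef.h>
/* Illustrative stand-ins; the real values come from <asm/variant/core.h>. */
#define CACHE_SIZE	8192	/* e.g. XCHAL_DCACHE_SIZE */
#define LINE_WIDTH	4	/* e.g. XCHAL_DCACHE_LINEWIDTH, log2(line size) */
static void cache_op(size_t addr)	/* hypothetical: one diwbi/dii/... */
{
	(void)addr;
}
/* What __loop_cache_all amounts to: step through the whole cache,
 * touching four lines per iteration (matching the 4x unrolling above). */
static void loop_cache_all(void)
{
	size_t addr;
	for (addr = 0; addr < CACHE_SIZE; addr += 4 << LINE_WIDTH) {
		cache_op(addr + (0 << LINE_WIDTH));
		cache_op(addr + (1 << LINE_WIDTH));
		cache_op(addr + (2 << LINE_WIDTH));
		cache_op(addr + (3 << LINE_WIDTH));
	}
}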
......@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2005 Tensilica Inc.
* (C) 2001 - 2006 Tensilica Inc.
*/
#ifndef _XTENSA_CACHEFLUSH_H
......
......@@ -12,7 +12,7 @@
#define _XTENSA_CHECKSUM_H
#include <linux/in6.h>
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
/*
* computes the checksum of a memory block at buff, length len,
......
......@@ -11,7 +11,16 @@
#ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#include <asm/variant/tie.h>
#if !XCHAL_HAVE_CP
#define XTENSA_CP_EXTRA_OFFSET 0
#define XTENSA_CP_EXTRA_ALIGN 1 /* must be a power of 2 */
#define XTENSA_CP_EXTRA_SIZE 0
#else
#define XTOFS(last_start,last_size,align) \
((last_start+last_size+align-1) & -align)
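XTOFS() rounds the end of the previous save-area entry up to the next alignment boundary. A standalone check (argument parentheses added here for safety; they are not in the patch):
#include <stdio.h>
#define XTOFS(last_start, last_size, align) \
	(((last_start) + (last_size) + (align) - 1) & -(align))
int main(void)
{
	/* An entry of 6 bytes starting at offset 0 ends at offset 6;
	 * the next 4-byte-aligned offset is 8. */
	printf("%d\n", XTOFS(0, 6, 4));	/* prints 8 */
	return 0;
}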
......@@ -67,4 +76,6 @@ extern void save_coprocessor_registers(void*, int);
# endif
#endif
#endif
#endif /* _XTENSA_COPROCESSOR_H */
......@@ -12,7 +12,6 @@
#define _XTENSA_DMA_H
#include <asm/io.h> /* need byte IO */
#include <xtensa/config/core.h>
/*
* This is only to be defined if we have PC-like DMA.
......@@ -44,7 +43,9 @@
* enters another area, and virt_to_phys() may not return
* the value desired).
*/
#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KSEG_CACHED_SIZE - 1)
#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KIO_SIZE - 1)
/* Reserve and release a DMA channel */
extern int request_dma(unsigned int dmanr, const char * device_id);
......
......@@ -13,9 +13,8 @@
#ifndef _XTENSA_ELF_H
#define _XTENSA_ELF_H
#include <asm/variant/core.h>
#include <asm/ptrace.h>
#include <asm/coprocessor.h>
#include <xtensa/config/core.h>
/* Xtensa processor ELF architecture-magic number */
......@@ -118,11 +117,15 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
* using memcpy(). But we do allow space for such alignment,
* to allow optimizations of layout and copying.
*/
#if 0
#define TOTAL_FPREGS_SIZE \
(4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE)
#define ELF_NFPREG \
((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t))
#else
#define TOTAL_FPREGS_SIZE 0
#define ELF_NFPREG 0
#endif
typedef unsigned int elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
......
/*
* include/asm-xtensa/fixmap.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_FIXMAP_H
#define _XTENSA_FIXMAP_H
#include <asm/processor.h>
#ifdef CONFIG_MMU
/*
* Here we define all the compile-time virtual addresses.
*/
#if XCHAL_SEG_MAPPABLE_VADDR != 0
# error "Current port requires virtual user space starting at 0"
#endif
#if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000
# error "Current port requires at least 0x8000000 bytes for user space"
#endif
/* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */
#define __IN_VMALLOC(addr) \
(((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END))
#define __SPAN_VMALLOC(start,end) \
(((start) < VMALLOC_START) && ((end) >= VMALLOC_END))
#define INSIDE_VMALLOC(start,end) \
(__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end)))
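The three macros only pay off together: a range that fully covers vmalloc space has neither endpoint inside it, which is exactly what the __SPAN_VMALLOC() term catches. A standalone check with assumed (illustrative) vmalloc bounds:
#include <stdio.h>
#define VMALLOC_START	0xc0000000u	/* assumption: illustrative bounds */
#define VMALLOC_END	0xc8000000u
#define __IN_VMALLOC(addr) \
	(((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END))
#define __SPAN_VMALLOC(start,end) \
	(((start) < VMALLOC_START) && ((end) >= VMALLOC_END))
#define INSIDE_VMALLOC(start,end) \
	(__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end)))
int main(void)
{
	/* Neither endpoint lies inside vmalloc space, yet the range covers
	 * all of it; only the __SPAN_VMALLOC() term reports the overlap. */
	printf("%d\n", INSIDE_VMALLOC(0xb0000000u, 0xd0000000u));	/* 1 */
	return 0;
}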
#if XCHAL_NUM_INSTROM
# if XCHAL_NUM_INSTROM == 1
# if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE)
# error vmalloc range conflicts with instrom0
# endif
# endif
# if XCHAL_NUM_INSTROM == 2
# if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE)
# error vmalloc range conflicts with instrom1
# endif
# endif
#endif
#if XCHAL_NUM_INSTRAM
# if XCHAL_NUM_INSTRAM == 1
# if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE)
# error vmalloc range conflicts with instram0
# endif
# endif
# if XCHAL_NUM_INSTRAM == 2
# if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE)
# error vmalloc range conflicts with instram1
# endif
# endif
#endif
#if XCHAL_NUM_DATAROM
# if XCHAL_NUM_DATAROM == 1
# if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE)
# error vmalloc range conflicts with datarom0
# endif
# endif
# if XCHAL_NUM_DATAROM == 2
# if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE)
# error vmalloc range conflicts with datarom1
# endif
# endif
#endif
#if XCHAL_NUM_DATARAM
# if XCHAL_NUM_DATARAM == 1
# if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE)
# error vmalloc range conflicts with dataram0
# endif
# endif
# if XCHAL_NUM_DATARAM == 2
# if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE)
# error vmalloc range conflicts with dataram1
# endif
# endif
#endif
#if XCHAL_NUM_XLMI
# if XCHAL_NUM_XLMI == 1
# if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE)
# error vmalloc range conflicts with xlmi0
# endif
# endif
# if XCHAL_NUM_XLMI == 2
# if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE)
# error vmalloc range conflicts with xlmi1
# endif
# endif
#endif
#if (XCHAL_NUM_INSTROM > 2) || \
(XCHAL_NUM_INSTRAM > 2) || \
(XCHAL_NUM_DATARAM > 2) || \
(XCHAL_NUM_DATAROM > 2) || \
(XCHAL_NUM_XLMI > 2)
# error Insufficient checks on vmalloc above for more than 2 devices
#endif
/*
* USER_VM_SIZE does not necessarily equal TASK_SIZE. We bumped
* TASK_SIZE down to 0x40000000 to simplify the handling of windowed
* call instructions (currently limited to a range of 1 GByte). User
* tasks may very well reclaim the VM space from 0x40000000 to
* 0x7fffffff in the future, so we do not want the kernel becoming
* accustomed to having any of its stuff (e.g., page tables) in this
* region. This VM region is no-man's land for now.
*/
#define USER_VM_START XCHAL_SEG_MAPPABLE_VADDR
#define USER_VM_SIZE 0x80000000
/* Size of page table: */
#define PGTABLE_SIZE_BITS (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2)
#define PGTABLE_SIZE (1L << PGTABLE_SIZE_BITS)
/* All kernel-mappable space: */
#define KERNEL_ALLMAP_START (USER_VM_START + USER_VM_SIZE)
#define KERNEL_ALLMAP_SIZE (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START)
/* Carve out page table at start of kernel-mappable area: */
#if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE
#error "Gimme some space for page table!"
#endif
#define PGTABLE_START KERNEL_ALLMAP_START
/* Remaining kernel-mappable space: */
#define KERNEL_MAPPED_START (KERNEL_ALLMAP_START + PGTABLE_SIZE)
#define KERNEL_MAPPED_SIZE (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE)
#if KERNEL_MAPPED_SIZE < 0x01000000 /* 16 MB is arbitrary for now */
# error "Shouldn't the kernel have at least *some* mappable space?"
#endif
#define MAX_LOW_MEMORY XCHAL_KSEG_CACHED_SIZE
#endif
/*
* Some constants used elsewhere, but perhaps only in Xtensa header
* files, so maybe we can get rid of some and access compile-time HAL
* directly...
*
* Note: We assume that system RAM is located at the very start of the
* kernel segments !!
*/
#define KERNEL_VM_LOW XCHAL_KSEG_CACHED_VADDR
#define KERNEL_VM_HIGH XCHAL_KSEG_BYPASS_VADDR
#define KERNEL_SPACE XCHAL_KSEG_CACHED_VADDR
/*
* Returns the physical/virtual addresses of the kernel space
* (works with the cached kernel segment only, which is the
* one normally used for kernel operation).
*/
/* Input address	PHYSADDR()	BYPASS_ADDR()	CACHED_ADDR()
 *
 * bypass vaddr		bypass paddr	* (unchanged)	cached vaddr
 * cached vaddr		cached paddr	bypass vaddr	* (unchanged)
 * bypass paddr		* (unchanged)	bypass vaddr	cached vaddr
 * cached paddr		* (unchanged)	bypass vaddr	cached vaddr
 * other		* (unchanged)	* (unchanged)	* (unchanged)
 */
#define PHYSADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \
(unsigned)(a))
#define BYPASS_ADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_CACHED_SIZE)? \
(unsigned)(a) - XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_BYPASS_VADDR: \
(unsigned)(a))
#define CACHED_ADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_CACHED_VADDR : \
(unsigned)(a))
#define PHYSADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR : \
(unsigned)(a))
#define BYPASS_ADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_BYPASS_VADDR : \
(unsigned)(a))
#define CACHED_ADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_CACHED_VADDR : \
(unsigned)(a))
#endif /* _XTENSA_ADDRSPACE_H */
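Before their removal these macros could be sanity-checked in isolation. A standalone sketch of PHYSADDR(), using this port's kseg constants (two 128 MB windows onto physical address 0):
#include <stdio.h>
#define XCHAL_KSEG_CACHED_VADDR	0xd0000000u
#define XCHAL_KSEG_CACHED_PADDR	0x00000000u
#define XCHAL_KSEG_CACHED_SIZE	0x08000000u
#define XCHAL_KSEG_BYPASS_VADDR	0xd8000000u
#define XCHAL_KSEG_BYPASS_PADDR	0x00000000u
#define XCHAL_KSEG_BYPASS_SIZE	0x08000000u
#define PHYSADDR(a) \
	(((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
	  && (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
	 (unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \
	 ((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
	  && (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \
	 (unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \
	 (unsigned)(a))
int main(void)
{
	printf("%08x\n", PHYSADDR(0xd0001000u));	/* 00001000: cached kseg */
	printf("%08x\n", PHYSADDR(0xd8001000u));	/* 00001000: bypass kseg */
	printf("%08x\n", PHYSADDR(0x12345678u));	/* unchanged: outside kseg */
	return 0;
}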
/*
* linux/include/asm-xtensa/io.h
* include/asm-xtensa/io.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
......@@ -15,10 +15,11 @@
#include <asm/byteorder.h>
#include <linux/types.h>
#include <asm/fixmap.h>
#define _IO_BASE 0
#define XCHAL_KIO_CACHED_VADDR 0xf0000000
#define XCHAL_KIO_BYPASS_VADDR 0xf8000000
#define XCHAL_KIO_PADDR 0xf0000000
#define XCHAL_KIO_SIZE 0x08000000
/*
* swap functions to change byte order from little-endian to big-endian and
......@@ -42,40 +43,43 @@ static inline unsigned int _swapl (unsigned int v)
static inline unsigned long virt_to_phys(volatile void * address)
{
return PHYSADDR((unsigned long)address);
return __pa(address);
}
static inline void * phys_to_virt(unsigned long address)
{
return (void*) CACHED_ADDR(address);
return __va(address);
}
/*
* IO bus memory addresses are also 1:1 with the physical address
* virt_to_bus and bus_to_virt are deprecated.
*/
static inline unsigned long virt_to_bus(volatile void * address)
{
return PHYSADDR((unsigned long)address);
}
static inline void * bus_to_virt (unsigned long address)
{
return (void *) CACHED_ADDR(address);
}
#define virt_to_bus(x) virt_to_phys(x)
#define bus_to_virt(x) phys_to_virt(x)
/*
* Change "struct page" to physical address.
* Return the virtual (cached) address for the specified bus memory.
* Note that we currently don't support any address outside the KIO segment.
*/
static inline void *ioremap(unsigned long offset, unsigned long size)
{
return (void *) CACHED_ADDR_IO(offset);
if (offset >= XCHAL_KIO_PADDR
&& offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
else
BUG();
}
static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
return (void *) BYPASS_ADDR_IO(offset);
if (offset >= XCHAL_KIO_PADDR
&& offset < XCHAL_KIO_PADDR + XCHAL_KIO_SIZE)
return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
else
BUG();
}
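A usage sketch (the device address is hypothetical, chosen to fall inside the KIO window; not from the patch). As written above, ioremap() hands back an address in the bypass static map, and iounmap() presumably has nothing to undo:
/* Read a 32-bit register of a hypothetical device at physical 0xf0080000. */
static unsigned int read_device_reg(void)
{
	void *regs = ioremap(0xf0080000, 0x1000);
	unsigned int val = *(volatile unsigned int *)regs;
	iounmap(regs);
	return val;
}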
static inline void iounmap(void *addr)
......@@ -121,9 +125,6 @@ static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
*(__force volatile __u32 *)(addr) = b;
}
/* These are the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl, the "string" versions
* insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
......@@ -131,11 +132,11 @@ static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
* The macros don't do byte-swapping.
*/
#define inb(port) readb((u8 *)((port)+_IO_BASE))
#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)+_IO_BASE))
#define inw(port) readw((u16 *)((port)+_IO_BASE))
#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)+_IO_BASE))
#define inl(port) readl((u32 *)((port)+_IO_BASE))
#define inb(port) readb((u8 *)((port)))
#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)))
#define inw(port) readw((u16 *)((port)))
#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)))
#define inl(port) readl((u32 *)((port)))
#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)))
#define inb_p(port) inb((port))
......@@ -180,14 +181,13 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
/*
* * Convert a physical pointer to a virtual kernel pointer for /dev/mem
* * access
* */
* Convert a physical pointer to a virtual kernel pointer for /dev/mem access
*/
#define xlate_dev_mem_ptr(p) __va(p)
/*
* * Convert a virtual cached pointer to an uncached pointer
* */
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
......
......@@ -12,8 +12,7 @@
#define _XTENSA_IRQ_H
#include <asm/platform/hardware.h>
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
......@@ -27,10 +26,5 @@ static __inline__ int irq_canonicalize(int irq)
}
struct irqaction;
#if 0 // FIXME
extern void disable_irq_nosync(unsigned int);
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
#endif
#endif /* _XTENSA_IRQ_H */
......@@ -16,187 +16,32 @@
#include <linux/stringify.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
* Linux was ported to Xtensa assuming all auto-refill ways in set 0
* had the same properties (a very likely assumption). Multiple sets
* of auto-refill ways will still work properly, but not as optimally
* as the Xtensa designer may have assumed.
*
* We make this case a hard #error, killing the kernel build, to alert
* the developer to this condition (which is more likely an error).
* You super-duper clever developers can change it to a warning or
* remove it altogether if you think you know what you're doing. :)
*/
#define XCHAL_MMU_ASID_BITS 8
#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif
#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
# error "MMU must have auto-refill ways"
#endif
#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
# error Linux may not use all auto-refill ways as efficiently as you think
#endif
#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
# error Only one page size allowed!
#endif
extern unsigned long asid_cache;
extern pgd_t *current_pgd;
/*
* Define the number of entries per auto-refill way in set 0 of both I and D
* TLBs. We deal only with set 0 here (an assumption further explained in
* assertions.h). Also, define the total number of ARF entries in both TLBs.
*/
#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))
#define ITLB_ENTRIES \
(ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
#define DTLB_ENTRIES \
(DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))
/*
* SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
* In practice, they are probably equal. This macro simplifies function
* flush_tlb_range().
*/
#if (DTLB_ENTRIES < ITLB_ENTRIES)
# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES
#else
# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES
#endif
/*
* asid_cache tracks only the ASID[USER_RING] field of the RASID special
* register, which is the current user-task asid allocation value.
* mm->context has the same meaning. When it comes time to write the
* asid_cache or mm->context values to the RASID special register, we first
* shift the value left by 8, then insert the value.
* ASID[0] always contains the kernel's asid value, and we reserve three
* other asid values that we never assign to user tasks.
*/
#define ASID_INC 0x1
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
/*
* XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
* indicating invalid address space. XCHAL_MMU_ASID_KERNEL is a configurable
* Xtensa processor constant indicating the kernel address space. They can
* be arbitrary values.
*
* We identify three more unique, reserved ASID values to use in the unused
* ring positions. No other user process will be assigned these reserved
* ASID values.
*
* For example, given that
*
* XCHAL_MMU_ASID_INVALID == 0
* XCHAL_MMU_ASID_KERNEL == 1
*
* the following maze of #if statements would generate
*
* ASID_RESERVED_1 == 2
* ASID_RESERVED_2 == 3
* ASID_RESERVED_3 == 4
* ASID_FIRST_NONRESERVED == 5
*/
#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
#else
# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
#endif
#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 2) & ASID_MASK)
#endif
#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 2) & ASID_MASK)
#endif
#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK)
#else
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK)
#endif
#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
((ASID_RESERVED_2) << 16) + \
((ASID_RESERVED_3) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
/*
* NO_CONTEXT is the invalid ASID value that we don't ever assign to
* any user or kernel context. NO_CONTEXT is a better mnemonic than
* XCHAL_MMU_ASID_INVALID, so we use it in code instead.
*/
#define NO_CONTEXT XCHAL_MMU_ASID_INVALID
#if (KERNEL_RING != 0)
# error The KERNEL_RING really should be zero.
#endif
#if (USER_RING >= XCHAL_MMU_RINGS)
# error USER_RING cannot be greater than the highest numbered ring.
#endif
#if (USER_RING == KERNEL_RING)
# error The user and kernel rings really should not be equal.
#endif
#if (USER_RING == 1)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
((ASID_RESERVED_2) << 16) + \
(((x) & (ASID_MASK)) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
#elif (USER_RING == 2)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
(((x) & (ASID_MASK)) << 16) + \
((ASID_RESERVED_2) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
#elif (USER_RING == 3)
#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
((ASID_RESERVED_1) << 16) + \
((ASID_RESERVED_2) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
#else
#error Goofy value for USER_RING
#endif /* USER_RING == 1 */
/*
* All unused by hardware upper bits will be considered
* as a software asid extension.
* any user or kernel context.
*
* 0 invalid
* 1 kernel
* 2 reserved
* 3 reserved
* 4...255 available
*/
#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION \
((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
#define NO_CONTEXT 0
#define ASID_USER_FIRST 4
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
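A worked example of the new packing (standalone, not kernel code): the kernel ASID 1 occupies ring 0, the reserved ASIDs 2 and 3 occupy rings 2 and 3, and the user ASID lands in byte 1, which corresponds to ring 1:
#include <stdio.h>
#define ASID_MASK	((1 << 8) - 1)		/* XCHAL_MMU_ASID_BITS == 8 */
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
int main(void)
{
	/* 4 is the first user ASID handed out after a flush cycle. */
	printf("RASID = %08lx\n", (unsigned long)ASID_INSERT(4));
	/* prints: RASID = 03020401 */
	return 0;
}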
static inline void set_rasid_register (unsigned long val)
{
......@@ -207,67 +52,28 @@ static inline void set_rasid_register (unsigned long val)
static inline unsigned long get_rasid_register (void)
{
unsigned long tmp;
__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
return tmp;
}
#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
__get_new_mmu_context(struct mm_struct *mm)
{
extern void flush_tlb_all(void);
if (! ((asid += ASID_INC) & ASID_MASK) ) {
if (! (++asid_cache & ASID_MASK) ) {
flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
asid += ASID_FIRST_NONRESERVED;
asid_cache += ASID_USER_FIRST;
}
mm->context = asid_cache = asid;
}
#else
#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation
/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL ==1 are
really the best, but if you insist... */
static inline int validate_asid (unsigned long asid)
{
switch (asid) {
case XCHAL_MMU_ASID_INVALID:
case XCHAL_MMU_ASID_KERNEL:
case ASID_RESERVED_1:
case ASID_RESERVED_2:
case ASID_RESERVED_3:
return 0; /* can't use these values as ASIDs */
}
return 1; /* valid */
mm->context = asid_cache;
}
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
__load_mmu_context(struct mm_struct *mm)
{
extern void flush_tlb_all(void);
while (1) {
asid += ASID_INC;
if ( ! (asid & ASID_MASK) ) {
flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
asid += ASID_FIRST_NONRESERVED;
break; /* no need to validate here */
}
if (validate_asid (asid & ASID_MASK))
break;
}
mm->context = asid_cache = asid;
set_rasid_register(ASID_INSERT(mm->context));
invalidate_page_directory();
}
#endif
/*
* Initialize the context related info for a new mm_struct
* instance.
......@@ -280,6 +86,20 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
/* Unconditionally get a new ASID. */
__get_new_mmu_context(next);
__load_mmu_context(next);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
......@@ -287,11 +107,10 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Check if our ASID is of an older version and thus invalid */
if ((next->context ^ asid) & ASID_VERSION_MASK)
get_new_mmu_context(next, asid);
if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
__get_new_mmu_context(next);
set_rasid_register (ASID_INSERT(next->context));
invalidate_page_directory();
__load_mmu_context(next);
}
#define deactivate_mm(tsk, mm) do { } while(0)
......@@ -302,20 +121,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Nothing to do. */
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, asid_cache);
set_rasid_register (ASID_INSERT(next->context));
invalidate_page_directory();
}
......
......@@ -15,18 +15,24 @@
#include <asm/processor.h>
#define XCHAL_KSEG_CACHED_VADDR 0xd0000000
#define XCHAL_KSEG_BYPASS_VADDR 0xd8000000
#define XCHAL_KSEG_PADDR 0x00000000
#define XCHAL_KSEG_SIZE 0x08000000
/*
* PAGE_SHIFT determines the page size
* PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
*/
#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE
#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN XCHAL_KSEG_SIZE
#define PGTABLE_START 0x80000000
#ifdef __ASSEMBLY__
......
......@@ -11,7 +11,7 @@
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
#ifdef __KERNEL__
# define HZ 100 /* internal timer frequency */
......
......@@ -14,45 +14,6 @@
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
/* Assertions. */
#ifdef CONFIG_MMU
#if (XCHAL_MMU_RINGS < 2)
# error Linux build assumes at least 2 ring levels.
#endif
#if (XCHAL_MMU_CA_BITS != 4)
# error We assume exactly four bits for CA.
#endif
#if (XCHAL_MMU_SR_BITS != 0)
# error We have no room for SR bits.
#endif
/*
* Use the first min-wired way for mapping page-table pages.
* Page coloring requires a second min-wired way.
*/
#if (XCHAL_DTLB_MINWIRED_SETS == 0)
# error Need a min-wired way for mapping page-table pages
#endif
#define DTLB_WAY_PGTABLE XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAY)
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
# if XCHAL_DTLB_SET(XCHAL_DTLB_MINWIRED_SET0, WAYS) >= 2
# define DTLB_WAY_DCACHE_ALIAS0 (DTLB_WAY_PGTABLE + 1)
# define DTLB_WAY_DCACHE_ALIAS1 (DTLB_WAY_PGTABLE + 2)
# else
# error Page coloring requires its own wired dtlb way!
# endif
#endif
#endif /* CONFIG_MMU */
/*
* We only use two ring levels, user and kernel space.
*/
......@@ -97,7 +58,7 @@
#define PGD_ORDER 0
#define PMD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS XCHAL_SEG_MAPPABLE_VADDR
#define FIRST_USER_ADDRESS 0
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
/* virtual memory area. We keep a distance to other memory regions to be
......
......@@ -12,18 +12,18 @@
* This file contains the default configuration of ISS.
*/
#ifndef __ASM_XTENSA_ISS_HARDWARE
#define __ASM_XTENSA_ISS_HARDWARE
#ifndef _XTENSA_PLATFORM_ISS_HARDWARE_H
#define _XTENSA_PLATFORM_ISS_HARDWARE_H
/*
* Memory configuration.
*/
#define PLATFORM_DEFAULT_MEM_START XSHAL_RAM_PADDR
#define PLATFORM_DEFAULT_MEM_SIZE XSHAL_RAM_VSIZE
#define PLATFORM_DEFAULT_MEM_START 0x00000000
#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
/*
* Interrupt configuration.
*/
#endif /* __ASM_XTENSA_ISS_HARDWARE */
#endif /* _XTENSA_PLATFORM_ISS_HARDWARE_H */
#ifndef SIMCALL_INCLUDED
#define SIMCALL_INCLUDED
/*
* THIS FILE IS GENERATED -- DO NOT MODIFY BY HAND
*
* include/asm-xtensa/xtensa/simcall.h - Simulator call numbers
* include/asm-xtensa/platform-iss/hardware.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002 Tensilica Inc.
* Copyright (C) 2001 Tensilica Inc.
*/
#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_H
#define _XTENSA_PLATFORM_ISS_SIMCALL_H
/*
* System call like services offered by the simulator host.
* These are modeled after the Linux 2.4 kernel system calls
* for Xtensa processors. However not all system calls and
* not all functionality of a given system call are implemented,
* or necessarily have well defined or equivalent semantics in
* the context of a simulation (as opposed to a Unix kernel).
*
* These services behave largely as if they had been invoked
* as a task in the simulator host's operating system
* (eg. files accessed are those of the simulator host).
* However, these SIMCALLs model a virtual operating system
* so that various definitions, bit assignments etc
* (eg. open mode bits, errno values, etc) are independent
* of the host operating system used to run the simulation.
* Rather these definitions are specific to the Xtensa ISS.
* This way Xtensa ISA code written to use these SIMCALLs
* can (in principle) be simulated on any host.
*
* Up to 6 parameters are passed in registers a3 to a8
* (note the 6th parameter isn't passed on the stack,
* unlike windowed function calling conventions).
* The return value is in a2. A negative value in the
* range -4096 to -1 indicates a negated error code to be
* reported in errno with a return value of -1, otherwise
* the value in a2 is returned as is.
*/
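As a concrete illustration of this convention (a sketch, not part of the patch), a wrapper might bind the arguments to a3..a5 and retrieve the result from a2; SYS_write is assumed to be defined elsewhere in this header:
/* Issue SYS_write(fd, buf, len) to the simulator host.
 * a2 carries the simcall number in and the result (or -errno) out. */
static inline int simc_write(int fd, const void *buf, int len)
{
	register int a2 __asm__("a2") = SYS_write;
	register int a3 __asm__("a3") = fd;
	register int a4 __asm__("a4") = (int)buf;	/* 32-bit Xtensa */
	register int a5 __asm__("a5") = len;
	__asm__ __volatile__("simcall"
			     : "+r" (a2)
			     : "r" (a3), "r" (a4), "r" (a5)
			     : "memory");
	return a2;	/* result, or a negated errno in -4096..-1 */
}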
/* These #defines need to match what's in Xtensa/OS/vxworks/xtiss/simcalls.c */
#define SYS_nop 0 /* n/a - setup; used to flush register windows */
#define SYS_nop 0 /* unused */
#define SYS_exit 1 /*x*/
#define SYS_fork 2
#define SYS_read 3 /*x*/
......@@ -77,54 +49,14 @@
#define SYS_bind 30
#define SYS_ioctl 31
/*
* Other...
*/
#define SYS_iss_argc 1000 /* returns value of argc */
#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
/*
* SIMCALLs for the ferret memory debugger. All are invoked by
* libferret.a ... ( Xtensa/Target-Libs/ferret )
*/
#define SYS_ferret 1010
#define SYS_malloc 1011
#define SYS_free 1012
#define SYS_more_heap 1013
#define SYS_no_heap 1014
/*
* Extra SIMCALLs for GDB:
*/
#define SYS_gdb_break -1 /* invoked by XTOS on user exceptions if EPC points
to a break.n/break, regardless of cause! */
#define SYS_xmon_out -2 /* invoked by XMON: ... */
#define SYS_xmon_in -3 /* invoked by XMON: ... */
#define SYS_xmon_flush -4 /* invoked by XMON: ... */
#define SYS_gdb_abort -5 /* invoked by XTOS in _xtos_panic() */
#define SYS_gdb_illegal_inst -6 /* invoked by XTOS for illegal instructions (too deeply) */
#define SYS_xmon_init -7 /* invoked by XMON: ... */
#define SYS_gdb_enter_sktloop -8 /* invoked by XTOS on debug exceptions */
/*
* SIMCALLs for vxWorks xtiss BSP:
*/
#define SYS_setup_ppp_pipes -83
#define SYS_log_msg -84
/*
* Test SIMCALLs:
*/
#define SYS_test_write_state -100
#define SYS_test_read_state -101
/*
* SYS_select_one specifiers
*/
#define XTISS_SELECT_ONE_READ 1
#define XTISS_SELECT_ONE_WRITE 2
#define XTISS_SELECT_ONE_EXCEPT 3
#endif /* !SIMCALL_INCLUDED */
#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
......@@ -11,24 +11,18 @@
#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H
#ifdef __ASSEMBLY__
#define _ASMLANGUAGE
#endif
#include <xtensa/config/core.h>
#include <xtensa/config/specreg.h>
#include <xtensa/config/tie.h>
#include <xtensa/config/system.h>
#include <asm/variant/core.h>
#include <asm/coprocessor.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/coprocessor.h>
#include <asm/regs.h>
/* Assertions. */
#if (XCHAL_HAVE_WINDOWED != 1)
#error Linux requires the Xtensa Windowed Registers Option.
# error Linux requires the Xtensa Windowed Registers Option.
#endif
/*
......@@ -145,11 +139,11 @@ struct thread_struct {
* Note: We set up PS as if we did a call4 to the new pc.
* set_thread_state in signal.c depends on it.
*/
#define USER_PS_VALUE ( (1 << XCHAL_PS_WOE_SHIFT) + \
(1 << XCHAL_PS_CALLINC_SHIFT) + \
(USER_RING << XCHAL_PS_RING_SHIFT) + \
(1 << XCHAL_PS_PROGSTACK_SHIFT) + \
(1 << XCHAL_PS_EXCM_SHIFT) )
#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
(1 << PS_CALLINC_SHIFT) | \
(USER_RING << PS_RING_SHIFT) | \
(1 << PS_UM_BIT) | \
(1 << PS_EXCM_BIT))
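With the PS field definitions from the new <asm/regs.h>, and assuming USER_RING == 1 (consistent with the ASID packing used by this port), the constant can be checked standalone:
#include <stdio.h>
#define PS_WOE_BIT		18
#define PS_CALLINC_SHIFT	16
#define PS_RING_SHIFT		6
#define PS_UM_BIT		5
#define PS_EXCM_BIT		4
#define USER_RING		1	/* assumption: user ring is 1 */
int main(void)
{
	unsigned long ps = (1 << PS_WOE_BIT) | (1 << PS_CALLINC_SHIFT) |
			   (USER_RING << PS_RING_SHIFT) |
			   (1 << PS_UM_BIT) | (1 << PS_EXCM_BIT);
	printf("USER_PS_VALUE = 0x%08lx\n", ps);	/* 0x00050070 */
	return 0;
}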
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
......
......@@ -11,7 +11,7 @@
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
#include <xtensa/config/core.h>
#include <asm/variant/core.h>
/*
* Kernel stack
......
/*
* Xtensa Special Register symbolic names
*/
/* $Id: specreg.h,v 1.2 2003/03/07 19:15:18 joetaylor Exp $ */
/*
* Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
* Copyright (c) 2006 Tensilica, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2.1 of the GNU Lesser General Public
......@@ -28,18 +22,20 @@
* USA.
*/
#ifndef XTENSA_SPECREG_H
#define XTENSA_SPECREG_H
#ifndef _XTENSA_REGS_H
#define _XTENSA_REGS_H
/* Include these special register bitfield definitions, for historical reasons: */
#include <xtensa/corebits.h>
/* Special registers. */
/* Special registers: */
#define LBEG 0
#define LEND 1
#define LCOUNT 2
#define SAR 3
#define BR 4
#define SCOMPARE1 12
#define ACCHI 16
#define ACCLO 17
#define MR 32
#define WINDOWBASE 72
#define WINDOWSTART 73
#define PTEVADDR 83
......@@ -48,52 +44,95 @@
#define DTLBCFG 92
#define IBREAKENABLE 96
#define DDR 104
#define IBREAKA_0 128
#define IBREAKA_1 129
#define DBREAKA_0 144
#define DBREAKA_1 145
#define DBREAKC_0 160
#define DBREAKC_1 161
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPC_1 177
#define EPC_2 178
#define EPC_3 179
#define EPC_4 180
#define DEPC 192
#define EPS_2 194
#define EPS_3 195
#define EPS_4 196
#define EPS 192
#define EPS_1 193
#define EXCSAVE 208
#define EXCSAVE_1 209
#define EXCSAVE_2 210
#define EXCSAVE_3 211
#define EXCSAVE_4 212
#define INTERRUPT 226
#define INTENABLE 228
#define PS 230
#define THREADPTR 231
#define EXCCAUSE 232
#define DEBUGCAUSE 233
#define CCOUNT 234
#define PRID 235
#define ICOUNT 236
#define ICOUNTLEVEL 237
#define EXCVADDR 238
#define CCOMPARE_0 240
#define CCOMPARE_1 241
#define CCOMPARE_2 242
#define MISC_REG_0 244
#define MISC_REG_1 245
/* Special cases (bases of special register series): */
#define IBREAKA 128
#define DBREAKA 144
#define DBREAKC 160
#define EPC 176
#define EPS 192
#define EXCSAVE 208
#define CCOMPARE 240
#define MISC 244
/* Special names for read-only and write-only interrupt registers. */
/* Special names for read-only and write-only interrupt registers: */
#define INTREAD 226
#define INTSET 226
#define INTCLEAR 227
#endif /* XTENSA_SPECREG_H */
/* EXCCAUSE register fields */
#define EXCCAUSE_EXCCAUSE_SHIFT 0
#define EXCCAUSE_EXCCAUSE_MASK 0x3F
#define EXCCAUSE_ILLEGAL_INSTRUCTION 0
#define EXCCAUSE_SYSTEM_CALL 1
#define EXCCAUSE_INSTRUCTION_FETCH_ERROR 2
#define EXCCAUSE_LOAD_STORE_ERROR 3
#define EXCCAUSE_LEVEL1_INTERRUPT 4
#define EXCCAUSE_ALLOCA 5
#define EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6
#define EXCCAUSE_SPECULATION 7
#define EXCCAUSE_PRIVILEGED 8
#define EXCCAUSE_UNALIGNED 9
#define EXCCAUSE_ITLB_MISS 16
#define EXCCAUSE_ITLB_MULTIHIT 17
#define EXCCAUSE_ITLB_PRIVILEGE 18
#define EXCCAUSE_ITLB_SIZE_RESTRICTION 19
#define EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20
#define EXCCAUSE_DTLB_MISS 24
#define EXCCAUSE_DTLB_MULTIHIT 25
#define EXCCAUSE_DTLB_PRIVILEGE 26
#define EXCCAUSE_DTLB_SIZE_RESTRICTION 27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE 29
#define EXCCAUSE_FLOATING_POINT 40
/* PS register fields. */
#define PS_WOE_BIT 18
#define PS_CALLINC_SHIFT 16
#define PS_CALLINC_MASK 0x00030000
#define PS_OWB_SHIFT 8
#define PS_OWB_MASK 0x00000F00
#define PS_RING_SHIFT 6
#define PS_RING_MASK 0x000000C0
#define PS_UM_BIT 5
#define PS_EXCM_BIT 4
#define PS_INTLEVEL_SHIFT 0
#define PS_INTLEVEL_MASK 0x0000000F
/* DBREAKCn register fields. */
#define DBREAKC_MASK_BIT 0
#define DBREAKC_MASK_MASK 0x0000003F
#define DBREAKC_LOAD_BIT 30
#define DBREAKC_LOAD_MASK 0x40000000
#define DBREAKC_STOR_BIT 31
#define DBREAKC_STOR_MASK 0x80000000
/* DEBUGCAUSE register fields. */
#define DEBUGCAUSE_DEBUGINT_BIT 5 /* External debug interrupt */
#define DEBUGCAUSE_BREAKN_BIT 4 /* BREAK.N instruction */
#define DEBUGCAUSE_BREAK_BIT 3 /* BREAK instruction */
#define DEBUGCAUSE_DBREAK_BIT 2 /* DBREAK match */
#define DEBUGCAUSE_IBREAK_BIT 1 /* IBREAK match */
#define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
#endif /* _XTENSA_REGS_H */
......@@ -25,7 +25,7 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#if XCHAL_HAVE_LE
#ifdef __XTENSA_EL__
__kernel_time_t sem_otime; /* last semop time */
unsigned long __unused1;
__kernel_time_t sem_ctime; /* last change time */
......
......@@ -213,7 +213,7 @@ static inline void spill_registers(void)
unsigned int a0, ps;
__asm__ __volatile__ (
"movi a14," __stringify (PS_EXCM_MASK) " | 1\n\t"
"movi a14," __stringify (PS_EXCM_BIT) " | 1\n\t"
"mov a12, a0\n\t"
"rsr a13," __stringify(SAR) "\n\t"
"xsr a14," __stringify(PS) "\n\t"
......
......@@ -16,17 +16,22 @@
#include <asm/processor.h>
#include <linux/stringify.h>
#if XCHAL_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) == 1
#define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x) _INTLEVEL(x)
#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) == 1
# define LINUX_TIMER 0
#elif XCHAL_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) == 1
# define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) == 1
# define LINUX_TIMER 1
#elif XCHAL_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) == 1
# define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) == 1
# define LINUX_TIMER 2
# define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
#else
# error "Bad timer number for Linux configurations!"
#endif
#define LINUX_TIMER_INT XCHAL_TIMER_INTERRUPT(LINUX_TIMER)
#define LINUX_TIMER_MASK (1L << LINUX_TIMER_INT)
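The two-level macro forces the timer-interrupt number to expand before token pasting. For the fsf core, XCHAL_TIMER0_INTERRUPT is 10 and XCHAL_INT10_LEVEL is 1, so the first branch is taken; a standalone demonstration:
#include <stdio.h>
#define _INTLEVEL(x)	XCHAL_INT ## x ## _LEVEL
#define INTLEVEL(x)	_INTLEVEL(x)
#define XCHAL_TIMER0_INTERRUPT	10	/* from the fsf core definitions */
#define XCHAL_INT10_LEVEL	1
int main(void)
{
	/* INTLEVEL(XCHAL_TIMER0_INTERRUPT) -> _INTLEVEL(10)
	 * -> XCHAL_INT10_LEVEL -> 1, so LINUX_TIMER becomes 0. */
	printf("%d\n", INTLEVEL(XCHAL_TIMER0_INTERRUPT));	/* prints 1 */
	return 0;
}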
#define CLOCK_TICK_RATE 1193180 /* (everyone is using this value) */
......@@ -60,8 +65,8 @@ extern cycles_t cacheflush_time;
#define WSR_CCOUNT(r) __asm__("wsr %0,"__stringify(CCOUNT) :: "a" (r))
#define RSR_CCOUNT(r) __asm__("rsr %0,"__stringify(CCOUNT) : "=a" (r))
#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE_0)"+"__stringify(x) : "=a"(r))
#define WSR_CCOMPARE(x,r) __asm__("wsr %0,"__stringify(CCOMPARE)"+"__stringify(x) :: "a"(r))
#define RSR_CCOMPARE(x,r) __asm__("rsr %0,"__stringify(CCOMPARE)"+"__stringify(x) : "=a"(r))
static inline unsigned long get_ccount (void)
{
......
......@@ -11,12 +11,20 @@
#ifndef _XTENSA_TLBFLUSH_H
#define _XTENSA_TLBFLUSH_H
#define DEBUG_TLB
#ifdef __KERNEL__
#include <asm/processor.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#define DTLB_WAY_PGD 7
#define ITLB_ARF_WAYS 4
#define DTLB_ARF_WAYS 4
#define ITLB_HIT_BIT 3
#define DTLB_HIT_BIT 4
#ifndef __ASSEMBLY__
/* TLB flushing:
*
......@@ -46,11 +54,6 @@ static inline void flush_tlb_pgtables(struct mm_struct *mm,
/* TLB operations. */
#define ITLB_WAYS_LOG2 XCHAL_ITLB_WAY_BITS
#define DTLB_WAYS_LOG2 XCHAL_DTLB_WAY_BITS
#define ITLB_PROBE_SUCCESS (1 << ITLB_WAYS_LOG2)
#define DTLB_PROBE_SUCCESS (1 << DTLB_WAYS_LOG2)
static inline unsigned long itlb_probe(unsigned long addr)
{
unsigned long tmp;
......@@ -131,29 +134,30 @@ static inline void write_itlb_entry (pte_t entry, int way)
static inline void invalidate_page_directory (void)
{
invalidate_dtlb_entry (DTLB_WAY_PGTABLE);
invalidate_dtlb_entry (DTLB_WAY_PGD);
invalidate_dtlb_entry (DTLB_WAY_PGD+1);
invalidate_dtlb_entry (DTLB_WAY_PGD+2);
}
static inline void invalidate_itlb_mapping (unsigned address)
{
unsigned long tlb_entry;
while ((tlb_entry = itlb_probe (address)) & ITLB_PROBE_SUCCESS)
invalidate_itlb_entry (tlb_entry);
if (((tlb_entry = itlb_probe(address)) & (1 << ITLB_HIT_BIT)) != 0)
invalidate_itlb_entry(tlb_entry);
}
static inline void invalidate_dtlb_mapping (unsigned address)
{
unsigned long tlb_entry;
while ((tlb_entry = dtlb_probe (address)) & DTLB_PROBE_SUCCESS)
invalidate_dtlb_entry (tlb_entry);
if (((tlb_entry = dtlb_probe(address)) & (1 << DTLB_HIT_BIT)) != 0)
invalidate_dtlb_entry(tlb_entry);
}
#define check_pgt_cache() do { } while (0)
#ifdef DEBUG_TLB
/* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
/*
* DO NOT USE THESE FUNCTIONS. These instructions aren't part of the Xtensa
* ISA and exist only for test purposes.
* You may find them helpful for MMU debugging, however.
*
......@@ -193,8 +197,6 @@ static inline unsigned long read_itlb_translation (int way)
return tmp;
}
#endif /* DEBUG_TLB */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
#endif /* _XTENSA_TLBFLUSH_H */
/*
* Xtensa processor core configuration information.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2006 Tensilica Inc.
*/
#ifndef _XTENSA_CORE_H
#define _XTENSA_CORE_H
/****************************************************************************
Parameters Useful for Any Code, USER or PRIVILEGED
****************************************************************************/
/*
* Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
* configured, and a value of 0 otherwise. These macros are always defined.
*/
/*----------------------------------------------------------------------
ISA
----------------------------------------------------------------------*/
#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
#define XCHAL_HAVE_DEBUG 1 /* debug option */
#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */
#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */
#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
#define XCHAL_HAVE_MUL16 0 /* MUL16S/MUL16U instructions */
#define XCHAL_HAVE_MUL32 0 /* MULL instruction */
#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
#define XCHAL_HAVE_L32R 1 /* L32R instruction */
#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
#define XCHAL_HAVE_ABS 1 /* ABS instruction */
/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
#define XCHAL_HAVE_SPECULATION 0 /* speculation */
#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
#define XCHAL_NUM_CONTEXTS 1 /* */
#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
#define XCHAL_HAVE_PRID 1 /* processor ID register */
#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
#define XCHAL_HAVE_FP 0 /* floating point pkg */
#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
/*----------------------------------------------------------------------
MISC
----------------------------------------------------------------------*/
#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* size of write buffer */
#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
/* In T1050, applies to selected core load and store instructions (see ISA): */
#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
#define XCHAL_CORE_ID "fsf" /* alphanum core name
(CoreID) set in the Xtensa
Processor Generator */
#define XCHAL_BUILD_UNIQUE_ID 0x00006700 /* 22-bit sw build ID */
/*
* These definitions describe the hardware targeted by this software.
*/
#define XCHAL_HW_CONFIGID0 0xC103C3FF /* ConfigID hi 32 bits*/
#define XCHAL_HW_CONFIGID1 0x0C006700 /* ConfigID lo 32 bits*/
#define XCHAL_HW_VERSION_NAME "LX2.0.0" /* full version name */
#define XCHAL_HW_VERSION_MAJOR 2200 /* major ver# of targeted hw */
#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
#define XTHAL_HW_REL_LX2 1
#define XTHAL_HW_REL_LX2_0 1
#define XTHAL_HW_REL_LX2_0_0 1
#define XCHAL_HW_CONFIGID_RELIABLE 1
/* If software targets a *range* of hardware versions, these are the bounds: */
#define XCHAL_HW_MIN_VERSION_MAJOR 2200 /* major v of earliest tgt hw */
#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
#define XCHAL_HW_MAX_VERSION_MAJOR 2200 /* major v of latest tgt hw */
#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
/*----------------------------------------------------------------------
CACHE
----------------------------------------------------------------------*/
#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
/****************************************************************************
Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
****************************************************************************/
#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
/*----------------------------------------------------------------------
CACHE
----------------------------------------------------------------------*/
#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
/* Number of cache sets in log2(lines per way): */
#define XCHAL_ICACHE_SETWIDTH 8
#define XCHAL_DCACHE_SETWIDTH 8
/* Cache set associativity (number of ways): */
#define XCHAL_ICACHE_WAYS 2
#define XCHAL_DCACHE_WAYS 2
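These figures are internally consistent with the sizes given above: size == ways * 2^(linewidth + setwidth). A quick standalone check:
#include <stdio.h>
int main(void)
{
	/* fsf core: 2 ways, linewidth 4, setwidth 8, for both caches. */
	printf("%d\n", 2 * (1 << (4 + 8)));	/* 8192 == XCHAL_ICACHE_SIZE */
	return 0;
}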
/* Cache features: */
#define XCHAL_ICACHE_LINE_LOCKABLE 0
#define XCHAL_DCACHE_LINE_LOCKABLE 0
#define XCHAL_ICACHE_ECC_PARITY 0
#define XCHAL_DCACHE_ECC_PARITY 0
/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
#define XCHAL_CA_BITS 4
/*----------------------------------------------------------------------
INTERNAL I/D RAM/ROMs and XLMI
----------------------------------------------------------------------*/
#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
/*----------------------------------------------------------------------
INTERRUPTS and TIMERS
----------------------------------------------------------------------*/
#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
#define XCHAL_NUM_EXTINTERRUPTS 10 /* num of external interrupts */
#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
(not including level zero) */
#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
/* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
/* Masks of interrupts at each interrupt level: */
#define XCHAL_INTLEVEL1_MASK 0x000064F9
#define XCHAL_INTLEVEL2_MASK 0x00008902
#define XCHAL_INTLEVEL3_MASK 0x00011204
#define XCHAL_INTLEVEL4_MASK 0x00000000
#define XCHAL_INTLEVEL5_MASK 0x00000000
#define XCHAL_INTLEVEL6_MASK 0x00000000
#define XCHAL_INTLEVEL7_MASK 0x00000000
/* Masks of interrupts at each range 1..n of interrupt levels: */
#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
/* Level of each interrupt: */
#define XCHAL_INT0_LEVEL 1
#define XCHAL_INT1_LEVEL 2
#define XCHAL_INT2_LEVEL 3
#define XCHAL_INT3_LEVEL 1
#define XCHAL_INT4_LEVEL 1
#define XCHAL_INT5_LEVEL 1
#define XCHAL_INT6_LEVEL 1
#define XCHAL_INT7_LEVEL 1
#define XCHAL_INT8_LEVEL 2
#define XCHAL_INT9_LEVEL 3
#define XCHAL_INT10_LEVEL 1
#define XCHAL_INT11_LEVEL 2
#define XCHAL_INT12_LEVEL 3
#define XCHAL_INT13_LEVEL 1
#define XCHAL_INT14_LEVEL 1
#define XCHAL_INT15_LEVEL 2
#define XCHAL_INT16_LEVEL 3
#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
#define XCHAL_HAVE_DEBUG_EXTERN_INT 0 /* OCD external db interrupt */
/* Type of each interrupt: */
#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
/* Masks of interrupts for each type of interrupt: */
#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
#define XCHAL_INTTYPE_MASK_NMI 0x00000000
#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
/* Interrupt numbers assigned to specific interrupt sources: */
#define XCHAL_TIMER0_INTERRUPT 10 /* CCOMPARE0 */
#define XCHAL_TIMER1_INTERRUPT 11 /* CCOMPARE1 */
#define XCHAL_TIMER2_INTERRUPT 12 /* CCOMPARE2 */
#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
/* Interrupt numbers for levels at which only one interrupt is configured: */
/* (There are many interrupts each at level(s) 1, 2, 3.) */
/*
* External interrupt vectors/levels.
* These macros describe how Xtensa processor interrupt numbers
* (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
* map to external BInterrupt<n> pins, for those interrupts
* configured as external (level-triggered, edge-triggered, or NMI).
* See the Xtensa processor databook for more details.
*/
/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
/*----------------------------------------------------------------------
EXCEPTIONS and VECTORS
----------------------------------------------------------------------*/
#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
number: 1 == XEA1 (old)
2 == XEA2 (new)
0 == XEAX (extern) */
#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
#define XCHAL_USER_VECTOR_VADDR 0xD0000220
#define XCHAL_USER_VECTOR_PADDR 0x00000220
#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
/*----------------------------------------------------------------------
DEBUG
----------------------------------------------------------------------*/
#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
/*----------------------------------------------------------------------
MMU
----------------------------------------------------------------------*/
/* See <xtensa/config/core-matmap.h> header file for more details. */
#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
[autorefill] and protection)
usable for an MMU-based OS */
/* If none of the above last 4 are set, it's a custom TLB configuration. */
#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
#endif /* _XTENSA_CORE_H */
/*
* Xtensa processor core configuration information.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1999-2006 Tensilica Inc.
*/
#ifndef XTENSA_TIE_H
#define XTENSA_TIE_H
/*----------------------------------------------------------------------
COPROCESSORS and EXTRA STATE
----------------------------------------------------------------------*/
#define XCHAL_CP_NUM 0 /* number of coprocessors */
#define XCHAL_CP_MASK 0x00
#endif /* XTENSA_TIE_H */
This diff is collapsed.
This diff is collapsed.
/*
* xtensa/config/system.h -- HAL definitions that are dependent on SYSTEM configuration
*
* NOTE: The location and contents of this file are highly subject to change.
*
* Source for configuration-independent binaries (which link in a
* configuration-specific HAL library) must NEVER include this file.
* The HAL itself has historically included this file in some instances,
* but this is not appropriate either, because the HAL is meant to be
* core-specific but system independent.
*/
/*
* Copyright (c) 2003 Tensilica, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2.1 of the GNU Lesser General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
* USA.
*/
#ifndef XTENSA_CONFIG_SYSTEM_H
#define XTENSA_CONFIG_SYSTEM_H
/*#include <xtensa/hal.h>*/
/*----------------------------------------------------------------------
DEVICE ADDRESSES
----------------------------------------------------------------------*/
/*
* Strange place to find these, but the configuration GUI
* allows moving these around to account for various core
* configurations. Specific boards (and their BSP software)
* will have specific meanings for these components.
*/
/* I/O Block areas: */
#define XSHAL_IOBLOCK_CACHED_VADDR 0xE0000000
#define XSHAL_IOBLOCK_CACHED_PADDR 0xF0000000
#define XSHAL_IOBLOCK_CACHED_SIZE 0x0E000000
#define XSHAL_IOBLOCK_BYPASS_VADDR 0xF0000000
#define XSHAL_IOBLOCK_BYPASS_PADDR 0xF0000000
#define XSHAL_IOBLOCK_BYPASS_SIZE 0x0E000000
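/*
 * Editorial sketch (hypothetical helpers, not in the original header):
 * a device register at physical address P inside the I/O block can be
 * reached through either static map; the cached window sits 0x10000000
 * below physical, and the bypass window is identity-mapped:
 */
#define XSHAL_IO_CACHED_ADDR(paddr) \
	((paddr) - XSHAL_IOBLOCK_CACHED_PADDR + XSHAL_IOBLOCK_CACHED_VADDR)
#define XSHAL_IO_BYPASS_ADDR(paddr) \
	((paddr) - XSHAL_IOBLOCK_BYPASS_PADDR + XSHAL_IOBLOCK_BYPASS_VADDR)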
/* System ROM: */
#define XSHAL_ROM_VADDR 0xEE000000
#define XSHAL_ROM_PADDR 0xFE000000
#define XSHAL_ROM_SIZE 0x00400000
/* Largest available area (free of vectors): */
#define XSHAL_ROM_AVAIL_VADDR 0xEE00052C
#define XSHAL_ROM_AVAIL_VSIZE 0x003FFAD4
/* System RAM: */
#define XSHAL_RAM_VADDR 0xD0000000
#define XSHAL_RAM_PADDR 0x00000000
#define XSHAL_RAM_VSIZE 0x08000000
#define XSHAL_RAM_PSIZE 0x10000000
#define XSHAL_RAM_SIZE XSHAL_RAM_PSIZE
/* Largest available area (free of vectors): */
#define XSHAL_RAM_AVAIL_VADDR 0xD0000370
#define XSHAL_RAM_AVAIL_VSIZE 0x07FFFC90
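/*
 * Editorial check (derived from the values above, not in the original
 * header): the available window starts just past the double-exception
 * vector (0x290 + 0xE0 = 0x370) and runs to the end of the cached RAM
 * view:
 *   XSHAL_RAM_AVAIL_VADDR == XSHAL_RAM_VADDR + 0x370
 *   XSHAL_RAM_AVAIL_VSIZE == XSHAL_RAM_VSIZE - 0x370  (= 0x07FFFC90)
 */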
/*
 * Shadow system RAM (same device as system RAM, at a different address).
 * (Emulation boards need this for the SONIC Ethernet driver
 * when data caches are configured for writeback mode.)
 * NOTE: on full MMU configs, this points to the BYPASS virtual address
 * of system RAM, i.e. it is the same as XSHAL_RAM_* except that virtual
 * addresses are viewed through the BYPASS static map rather than
 * the CACHED static map.
 */
#define XSHAL_RAM_BYPASS_VADDR 0xD8000000
#define XSHAL_RAM_BYPASS_PADDR 0x00000000
#define XSHAL_RAM_BYPASS_PSIZE 0x08000000
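/*
 * Editorial sketch (hypothetical helper): the bypass view of system RAM
 * sits 0x08000000 above the cached view, so a driver needing uncached
 * access (e.g. the SONIC case above) can re-derive a pointer:
 */
#define XSHAL_RAM_CACHED_TO_BYPASS(vaddr) \
	((vaddr) - XSHAL_RAM_VADDR + XSHAL_RAM_BYPASS_VADDR)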
/* Alternate system RAM (different device than system RAM): */
#define XSHAL_ALTRAM_VADDR 0xCEE00000
#define XSHAL_ALTRAM_PADDR 0xC0000000
#define XSHAL_ALTRAM_SIZE 0x00200000
/*----------------------------------------------------------------------
* DEVICE-ADDRESS DEPENDENT...
*
* Values written to CACHEATTR special register (or its equivalent)
* to enable and disable caches in various modes.
*----------------------------------------------------------------------*/
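/*
 * Editorial sketch (assumption): CACHEATTR packs eight 4-bit attribute
 * fields, one per 512 MB region, with field N covering virtual
 * addresses starting at N * 0x20000000.  Comparing the XSHAL_ISS_*
 * constants below, 0x2 denotes bypass, 0x1 writeback, and 0xF an
 * invalid region.  Hypothetical decoder:
 */
#define XSHAL_CACHEATTR_FIELD(attr, vaddr) \
	(((attr) >> (((unsigned)(vaddr) >> 29) * 4)) & 0xF)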
/*----------------------------------------------------------------------
BACKWARD COMPATIBILITY ...
----------------------------------------------------------------------*/
/*
 * NOTE: the following two macros are DEPRECATED.  Use the board-specific
 * macros defined later in this file instead; they are specially tuned for
 * the particular target environments' memory maps.
 */
#define XSHAL_CACHEATTR_BYPASS XSHAL_XT2000_CACHEATTR_BYPASS /* disable caches in bypass mode */
#define XSHAL_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_DEFAULT /* default setting to enable caches (no writeback!) */
/*----------------------------------------------------------------------
ISS (Instruction Set Simulator) SPECIFIC ...
----------------------------------------------------------------------*/
#define XSHAL_ISS_CACHEATTR_WRITEBACK 0x1122222F /* enable caches in write-back mode */
#define XSHAL_ISS_CACHEATTR_WRITEALLOC 0x1122222F /* enable caches in write-allocate mode */
#define XSHAL_ISS_CACHEATTR_WRITETHRU 0x1122222F /* enable caches in write-through mode */
#define XSHAL_ISS_CACHEATTR_BYPASS 0x2222222F /* disable caches in bypass mode */
#define XSHAL_ISS_CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_WRITEBACK /* default setting to enable caches */
/* For CoWare only: */
#define XSHAL_COWARE_CACHEATTR_WRITEBACK 0x11222222 /* enable caches in write-back mode */
#define XSHAL_COWARE_CACHEATTR_WRITEALLOC 0x11222222 /* enable caches in write-allocate mode */
#define XSHAL_COWARE_CACHEATTR_WRITETHRU 0x11222222 /* enable caches in write-through mode */
#define XSHAL_COWARE_CACHEATTR_BYPASS 0x22222222 /* disable caches in bypass mode */
#define XSHAL_COWARE_CACHEATTR_DEFAULT XSHAL_COWARE_CACHEATTR_WRITEBACK /* default setting to enable caches */
/* For BFM and other purposes: */
#define XSHAL_ALLVALID_CACHEATTR_WRITEBACK 0x11222222 /* enable caches without any invalid regions */
#define XSHAL_ALLVALID_CACHEATTR_DEFAULT XSHAL_ALLVALID_CACHEATTR_WRITEBACK /* default setting for caches without any invalid regions */
#define XSHAL_ISS_PIPE_REGIONS 0
#define XSHAL_ISS_SDRAM_REGIONS 0
/*----------------------------------------------------------------------
XT2000 BOARD SPECIFIC ...
----------------------------------------------------------------------*/
#define XSHAL_XT2000_CACHEATTR_WRITEBACK 0x22FFFFFF /* enable caches in write-back mode */
#define XSHAL_XT2000_CACHEATTR_WRITEALLOC 0x22FFFFFF /* enable caches in write-allocate mode */
#define XSHAL_XT2000_CACHEATTR_WRITETHRU 0x22FFFFFF /* enable caches in write-through mode */
#define XSHAL_XT2000_CACHEATTR_BYPASS 0x22FFFFFF /* disable caches in bypass mode */
#define XSHAL_XT2000_CACHEATTR_DEFAULT XSHAL_XT2000_CACHEATTR_WRITEBACK /* default setting to enable caches */
#define XSHAL_XT2000_PIPE_REGIONS 0x00001000 /* BusInt pipeline regions */
#define XSHAL_XT2000_SDRAM_REGIONS 0x00000005 /* BusInt SDRAM regions */
/*----------------------------------------------------------------------
VECTOR SIZES
----------------------------------------------------------------------*/
/*
 * Sizes allocated to vectors by the system (memory map) configuration.
 * These sizes are constrained by the core configuration (e.g. one
 * vector's code cannot overflow into another vector) but depend on the
 * system or board (or LSP) memory map configuration.
 *
 * Whether each vector happens to reside in a system ROM is also a
 * system-configuration matter; it is sometimes useful, so it is
 * included here as well:
 */
#define XSHAL_RESET_VECTOR_SIZE 0x000004E0
#define XSHAL_RESET_VECTOR_ISROM 1
#define XSHAL_USER_VECTOR_SIZE 0x0000001C
#define XSHAL_USER_VECTOR_ISROM 0
#define XSHAL_PROGRAMEXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_USEREXC_VECTOR_SIZE XSHAL_USER_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_KERNEL_VECTOR_SIZE 0x0000001C
#define XSHAL_KERNEL_VECTOR_ISROM 0
#define XSHAL_STACKEDEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_KERNELEXC_VECTOR_SIZE XSHAL_KERNEL_VECTOR_SIZE /* for backward compatibility */
#define XSHAL_DOUBLEEXC_VECTOR_SIZE 0x000000E0
#define XSHAL_DOUBLEEXC_VECTOR_ISROM 0
#define XSHAL_WINDOW_VECTORS_SIZE 0x00000180
#define XSHAL_WINDOW_VECTORS_ISROM 0
#define XSHAL_INTLEVEL2_VECTOR_SIZE 0x0000000C
#define XSHAL_INTLEVEL2_VECTOR_ISROM 0
#define XSHAL_INTLEVEL3_VECTOR_SIZE 0x0000000C
#define XSHAL_INTLEVEL3_VECTOR_ISROM 0
#define XSHAL_INTLEVEL4_VECTOR_SIZE 0x0000000C
#define XSHAL_INTLEVEL4_VECTOR_ISROM 1
#define XSHAL_DEBUG_VECTOR_SIZE XSHAL_INTLEVEL4_VECTOR_SIZE
#define XSHAL_DEBUG_VECTOR_ISROM XSHAL_INTLEVEL4_VECTOR_ISROM
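/*
 * Editorial sketch (not in the original header): the sizes above can be
 * sanity-checked against the core's vector spacing at compile time,
 * e.g. that the level-2 vector cannot overflow into the level-3 vector:
 */
#if (XCHAL_INTLEVEL3_VECTOR_VADDR - XCHAL_INTLEVEL2_VECTOR_VADDR) \
	< XSHAL_INTLEVEL2_VECTOR_SIZE
# error "level-2 interrupt vector allocation overlaps the level-3 vector"
#endif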
#endif /*XTENSA_CONFIG_SYSTEM_H*/