Commit 5a0015d6 authored by Chris Zankel, committed by Linus Torvalds

[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 3

The attached patch provides part 3 of an architecture implementation for the
Tensilica Xtensa CPU series.
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 4bedea94
#
# Makefile for the Linux/Xtensa kernel.
#
extra-y := head.o vmlinux.lds
obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
setup.o signal.o syscalls.o time.o traps.o vectors.o platform.o \
pci-dma.o
## windowspill.o
obj-$(CONFIG_KGDB) += xtensa-stub.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
/*
* arch/xtensa/kernel/align.S
*
* Handle unalignment exceptions in kernel space.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2001 - 2005 Tensilica, Inc.
*
* Rewritten by Chris Zankel <chris@zankel.net>
*
* Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* and Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
/* First-level exception handler for unaligned exceptions.
*
* Note: This handler works only for kernel exceptions. Unaligned user
* access should get a seg fault.
*/
/* Big and little endian 16-bit values are located in
* different halves of a register. HWORD_START helps to
* abstract the notion of extracting a 16-bit value from a
* register.
* We also have to define new shifting instructions because
* lsb and msb are on 'opposite' ends in a register for
* different endian machines.
*
* Assume a memory region in ascending address:
* 0 1 2 3|4 5 6 7
*
* When loading one word into a register, the content of that register is:
* LE 3 2 1 0, 7 6 5 4
* BE 0 1 2 3, 4 5 6 7
*
* Masking the bits of the higher/lower address means:
* LE X X 0 0, 0 0 X X
* BE 0 0 X X, X X 0 0
*
* Shifting to higher/lower addresses, means:
* LE shift left / shift right
* BE shift right / shift left
*
* Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
* LE mask 0 0 X X / shift left
* BE shift left / mask 0 0 X X
*/
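/* Purely illustrative, not part of the original file: the same
 * "two aligned loads plus funnel shift" technique, sketched in C for a
 * little-endian configuration ('addr' need not be word-aligned):
 *
 *	unsigned int load32_unaligned(unsigned long addr)
 *	{
 *		unsigned int *base = (unsigned int *)(addr & ~3UL);
 *		unsigned int shift = (addr & 3) * 8;
 *		unsigned int lo = base[0], hi = base[1];
 *		return shift ? (lo >> shift) | (hi << (32 - shift)) : lo;
 *	}
 *
 * The __ssa8/__src_b macros defined below perform exactly this
 * shift-and-merge step in hardware: ssa8l/ssa8b set the shift amount
 * in SAR, and src funnel-shifts a register pair.
 */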
#define UNALIGNED_USER_EXCEPTION
#if XCHAL_HAVE_BE
#define HWORD_START 16
#define INSN_OP0 28
#define INSN_T 24
#define INSN_OP1 16
.macro __src_b r, w0, w1; src \r, \w0, \w1; .endm
.macro __ssa8 r; ssa8b \r; .endm
.macro __ssa8r r; ssa8l \r; .endm
.macro __sh r, s; srl \r, \s; .endm
.macro __sl r, s; sll \r, \s; .endm
.macro __exth r, s; extui \r, \s, 0, 16; .endm
.macro __extl r, s; slli \r, \s, 16; .endm
#else
#define HWORD_START 0
#define INSN_OP0 0
#define INSN_T 4
#define INSN_OP1 12
.macro __src_b r, w0, w1; src \r, \w1, \w0; .endm
.macro __ssa8 r; ssa8l \r; .endm
.macro __ssa8r r; ssa8b \r; .endm
.macro __sh r, s; sll \r, \s; .endm
.macro __sl r, s; srl \r, \s; .endm
.macro __exth r, s; slli \r, \s, 16; .endm
.macro __extl r, s; extui \r, \s, 0, 16; .endm
#endif
/*
* xxxx xxxx = imm8 field
* yyyy = imm4 field
* ssss = s field
* tttt = t field
*
* 16 0
* -------------------
* L32I.N yyyy ssss tttt 1000
* S32I.N yyyy ssss tttt 1001
*
* 23 0
* -----------------------------
* res 0000 0010
* L16UI xxxx xxxx 0001 ssss tttt 0010
* L32I xxxx xxxx 0010 ssss tttt 0010
* XXX 0011 ssss tttt 0010
* XXX 0100 ssss tttt 0010
* S16I xxxx xxxx 0101 ssss tttt 0010
* S32I xxxx xxxx 0110 ssss tttt 0010
* XXX 0111 ssss tttt 0010
* XXX 1000 ssss tttt 0010
* L16SI xxxx xxxx 1001 ssss tttt 0010
* XXX 1010 0010
* **L32AI xxxx xxxx 1011 ssss tttt 0010 unsupported
* XXX 1100 0010
* XXX 1101 0010
* XXX 1110 0010
* **S32RI xxxx xxxx 1111 ssss tttt 0010 unsupported
* -----------------------------
* ^ ^ ^
* sub-opcode (NIBBLE_R) -+ | |
* t field (NIBBLE_T) -----------+ |
* major opcode (NIBBLE_OP0) --------------+
*/
#define OP0_L32I_N 0x8 /* load immediate narrow */
#define OP0_S32I_N 0x9 /* store immediate narrow */
#define OP1_SI_MASK 0x4 /* OP1 bit set for stores */
#define OP1_SI_BIT 2 /* OP1 bit number for stores */
#define OP1_L32I 0x2
#define OP1_L16UI 0x1
#define OP1_L16SI 0x9
#define OP1_L32AI 0xb
#define OP1_S32I 0x6
#define OP1_S16I 0x5
#define OP1_S32RI 0xf
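/* Illustrative only: the same field extraction written in C, using the
 * little-endian field positions (INSN_OP0 = 0, INSN_T = 4,
 * INSN_OP1 = 12) and the masks defined above:
 *
 *	unsigned int op0 = (insn >> INSN_OP0) & 0xf;	-- major opcode
 *	unsigned int t   = (insn >> INSN_T)   & 0xf;	-- src/target register
 *	unsigned int op1 = (insn >> INSN_OP1) & 0xf;	-- sub-opcode
 *	int is_store = (op1 & OP1_SI_MASK) != 0;	-- bit 2 set for stores
 *
 * This mirrors the extui-based decoding in fast_unaligned below.
 */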
/*
* Entry condition:
*
* a0: trashed, original value saved on stack (PT_AREG0)
* a1: a1
* a2: new stack pointer, original in DEPC
* a3: dispatch table
* depc: a2, original value saved on stack (PT_DEPC)
* excsave_1: a3
*
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
*/
ENTRY(fast_unaligned)
/* Note: We don't expect the address to be aligned on a word
* boundary. After all, the processor generated that exception
* and it would be a hardware fault.
*/
/* Save some working registers */
s32i a4, a2, PT_AREG4
s32i a5, a2, PT_AREG5
s32i a6, a2, PT_AREG6
s32i a7, a2, PT_AREG7
s32i a8, a2, PT_AREG8
rsr a0, DEPC
xsr a3, EXCSAVE_1
s32i a0, a2, PT_AREG2
s32i a3, a2, PT_AREG3
/* Keep value of SAR in a0 */
rsr a0, SAR
rsr a8, EXCVADDR # load unaligned memory address
/* Now, identify one of the following load/store instructions.
*
* The only possible danger of a double exception on the
* following l32i instructions is kernel code in vmalloc
* memory. The processor was just executing at the EPC_1
* address, and indeed, already fetched the instruction. That
* guarantees a TLB mapping, which hasn't been replaced by
* this unaligned exception handler that uses only static TLB
* mappings. However, high-level interrupt handlers might
* modify TLB entries, so for the generic case, we register a
* TABLE_FIXUP handler here, too.
*/
/* a3...a8 saved on stack, a2 = SP */
/* Extract the instruction that caused the unaligned access. */
rsr a7, EPC_1 # load exception address
movi a3, ~3
and a3, a3, a7 # mask lower bits
l32i a4, a3, 0 # load 2 words
l32i a5, a3, 4
__ssa8 a7
__src_b a4, a4, a5 # a4 has the instruction
/* Analyze the instruction (load or store?). */
extui a5, a4, INSN_OP0, 4 # get insn.op0 nibble
#if XCHAL_HAVE_NARROW
_beqi a5, OP0_L32I_N, .Lload # L32I.N, jump
addi a6, a5, -OP0_S32I_N
_beqz a6, .Lstore # S32I.N, do a store
#endif
/* 'store indicator bit' not set, jump */
_bbci.l a4, OP1_SI_BIT + INSN_OP1, .Lload
/* Store: Jump to table entry to get the value in the source register.*/
.Lstore:movi a5, .Lstore_table # table
extui a6, a4, INSN_T, 4 # get source register
addx8 a5, a6, a5
jx a5 # jump into table
/* Invalid instruction, CRITICAL! */
.Linvalid_instruction_load:
j .Linvalid_instruction
/* Load: Load memory address. */
.Lload: movi a3, ~3
and a3, a3, a8 # align memory address
__ssa8 a8
#ifdef UNALIGNED_USER_EXCEPTION
addi a3, a3, 8
l32e a5, a3, -8
l32e a6, a3, -4
#else
l32i a5, a3, 0
l32i a6, a3, 4
#endif
__src_b a3, a5, a6 # a3 has the data word
#if XCHAL_HAVE_NARROW
addi a7, a7, 2 # increment PC (assume 16-bit insn)
extui a5, a4, INSN_OP0, 4
_beqi a5, OP0_L32I_N, 1f # l32i.n: jump
addi a7, a7, 1
#else
addi a7, a7, 3
#endif
extui a5, a4, INSN_OP1, 4
_beqi a5, OP1_L32I, 1f # l32i: jump
extui a3, a3, 0, 16 # extract lower 16 bits
_beqi a5, OP1_L16UI, 1f
addi a5, a5, -OP1_L16SI
_bnez a5, .Linvalid_instruction_load
/* sign extend value */
slli a3, a3, 16
srai a3, a3, 16
/* Set target register. */
1:
#if XCHAL_HAVE_LOOP
rsr a3, LEND # check if we reached LEND
bne a7, a3, 1f
rsr a3, LCOUNT # and LCOUNT != 0
beqz a3, 1f
addi a3, a3, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a3, LCOUNT
#endif
1: wsr a7, EPC_1 # skip load instruction
extui a4, a4, INSN_T, 4 # extract target register
movi a5, .Lload_table
addx8 a4, a4, a5
jx a4 # jump to entry for target register
.align 8
.Lload_table:
s32i a3, a2, PT_AREG0; _j .Lexit; .align 8
mov a1, a3; _j .Lexit; .align 8 # fishy??
s32i a3, a2, PT_AREG2; _j .Lexit; .align 8
s32i a3, a2, PT_AREG3; _j .Lexit; .align 8
s32i a3, a2, PT_AREG4; _j .Lexit; .align 8
s32i a3, a2, PT_AREG5; _j .Lexit; .align 8
s32i a3, a2, PT_AREG6; _j .Lexit; .align 8
s32i a3, a2, PT_AREG7; _j .Lexit; .align 8
s32i a3, a2, PT_AREG8; _j .Lexit; .align 8
mov a9, a3 ; _j .Lexit; .align 8
mov a10, a3 ; _j .Lexit; .align 8
mov a11, a3 ; _j .Lexit; .align 8
mov a12, a3 ; _j .Lexit; .align 8
mov a13, a3 ; _j .Lexit; .align 8
mov a14, a3 ; _j .Lexit; .align 8
mov a15, a3 ; _j .Lexit; .align 8
.Lstore_table:
l32i a3, a2, PT_AREG0; _j 1f; .align 8
mov a3, a1; _j 1f; .align 8 # fishy??
l32i a3, a2, PT_AREG2; _j 1f; .align 8
l32i a3, a2, PT_AREG3; _j 1f; .align 8
l32i a3, a2, PT_AREG4; _j 1f; .align 8
l32i a3, a2, PT_AREG5; _j 1f; .align 8
l32i a3, a2, PT_AREG6; _j 1f; .align 8
l32i a3, a2, PT_AREG7; _j 1f; .align 8
l32i a3, a2, PT_AREG8; _j 1f; .align 8
mov a3, a9 ; _j 1f; .align 8
mov a3, a10 ; _j 1f; .align 8
mov a3, a11 ; _j 1f; .align 8
mov a3, a12 ; _j 1f; .align 8
mov a3, a13 ; _j 1f; .align 8
mov a3, a14 ; _j 1f; .align 8
mov a3, a15 ; _j 1f; .align 8
1: # a7: instruction pointer, a4: instruction, a3: value
movi a6, 0 # mask: ffffffff:00000000
#if XCHAL_HAVE_NARROW
addi a7, a7, 2 # incr. PC, assume 16-bit instruction
extui a5, a4, INSN_OP0, 4 # extract OP0
addi a5, a5, -OP0_S32I_N
_beqz a5, 1f # s32i.n: jump
addi a7, a7, 1 # increment PC, 32-bit instruction
#else
addi a7, a7, 3 # increment PC, 32-bit instruction
#endif
extui a5, a4, INSN_OP1, 4 # extract OP1
_beqi a5, OP1_S32I, 1f # jump if 32 bit store
_bnei a5, OP1_S16I, .Linvalid_instruction_store
movi a5, -1
__extl a3, a3 # get 16-bit value
__exth a6, a5 # get 16-bit mask ffffffff:ffff0000
/* Get memory address */
1:
#if XCHAL_HAVE_LOOP
rsr a3, LEND # check if we reached LEND
bne a7, a3, 1f
rsr a3, LCOUNT # and LCOUNT != 0
beqz a3, 1f
addi a3, a3, -1 # decrement LCOUNT and set
rsr a7, LBEG # set PC to LBEGIN
wsr a3, LCOUNT
#endif
1: wsr a7, EPC_1 # skip store instruction
movi a4, ~3
and a4, a4, a8 # align memory address
/* Insert value into memory */
movi a5, -1 # mask: ffffffff:XXXX0000
#ifdef UNALIGNED_USER_EXCEPTION
addi a4, a4, 8
#endif
__ssa8r a8
__src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE)
__src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE)
#ifdef UNALIGNED_USER_EXCEPTION
l32e a5, a4, -8
#else
l32i a5, a4, 0 # load lower address word
#endif
and a5, a5, a7 # mask
__sh a7, a3 # shift value
or a5, a5, a7 # or with original value
#ifdef UNALIGNED_USER_EXCEPTION
s32e a5, a4, -8
l32e a7, a4, -4
#else
s32i a5, a4, 0 # store
l32i a7, a4, 4 # same for upper address word
#endif
__sl a5, a3
and a6, a7, a6
or a6, a6, a5
#ifdef UNALIGNED_USER_EXCEPTION
s32e a6, a4, -4
#else
s32i a6, a4, 4
#endif
/* Done. restore stack and return */
.Lexit:
movi a4, 0
rsr a3, EXCSAVE_1
s32i a4, a3, EXC_TABLE_FIXUP
/* Restore working register */
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
l32i a4, a2, PT_AREG4
l32i a3, a2, PT_AREG3
/* restore SAR and return */
wsr a0, SAR
l32i a0, a2, PT_AREG0
l32i a2, a2, PT_AREG2
rfe
/* We cannot handle this exception. */
.extern _kernel_exception
.Linvalid_instruction_store:
.Linvalid_instruction:
/* Restore a4...a8 and SAR, set SP, and jump to default exception. */
l32i a8, a2, PT_AREG8
l32i a7, a2, PT_AREG7
l32i a6, a2, PT_AREG6
l32i a5, a2, PT_AREG5
l32i a4, a2, PT_AREG4
wsr a0, SAR
mov a1, a2
rsr a0, PS
bbsi.l a0, PS_UM_SHIFT, 1f # jump if user mode
movi a0, _kernel_exception
jx a0
1: movi a0, _user_exception
jx a0
#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
/*
* arch/xtensa/kernel/asm-offsets.c
*
* Generates definitions from c-type structures used by assembly sources.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <asm/processor.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
int main(void)
{
/* struct pt_regs */
DEFINE(PT_PC, offsetof (struct pt_regs, pc));
DEFINE(PT_PS, offsetof (struct pt_regs, ps));
DEFINE(PT_DEPC, offsetof (struct pt_regs, depc));
DEFINE(PT_EXCCAUSE, offsetof (struct pt_regs, exccause));
DEFINE(PT_EXCVADDR, offsetof (struct pt_regs, excvaddr));
DEFINE(PT_DEBUGCAUSE, offsetof (struct pt_regs, debugcause));
DEFINE(PT_WMASK, offsetof (struct pt_regs, wmask));
DEFINE(PT_LBEG, offsetof (struct pt_regs, lbeg));
DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG1, offsetof (struct pt_regs, areg[1]));
DEFINE(PT_AREG2, offsetof (struct pt_regs, areg[2]));
DEFINE(PT_AREG3, offsetof (struct pt_regs, areg[3]));
DEFINE(PT_AREG4, offsetof (struct pt_regs, areg[4]));
DEFINE(PT_AREG5, offsetof (struct pt_regs, areg[5]));
DEFINE(PT_AREG6, offsetof (struct pt_regs, areg[6]));
DEFINE(PT_AREG7, offsetof (struct pt_regs, areg[7]));
DEFINE(PT_AREG8, offsetof (struct pt_regs, areg[8]));
DEFINE(PT_AREG9, offsetof (struct pt_regs, areg[9]));
DEFINE(PT_AREG10, offsetof (struct pt_regs, areg[10]));
DEFINE(PT_AREG11, offsetof (struct pt_regs, areg[11]));
DEFINE(PT_AREG12, offsetof (struct pt_regs, areg[12]));
DEFINE(PT_AREG13, offsetof (struct pt_regs, areg[13]));
DEFINE(PT_AREG14, offsetof (struct pt_regs, areg[14]));
DEFINE(PT_AREG15, offsetof (struct pt_regs, areg[15]));
DEFINE(PT_WINDOWBASE, offsetof (struct pt_regs, windowbase));
DEFINE(PT_WINDOWSTART, offsetof(struct pt_regs, windowstart));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
BLANK();
/* struct task_struct */
DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));
DEFINE(TASK_MM, offsetof (struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof (struct task_struct, active_mm));
DEFINE(TASK_PID, offsetof (struct task_struct, pid));
DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, thread_info));
DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
BLANK();
/* struct thread_info (offset from start_struct) */
DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
BLANK();
/* struct mm_struct */
DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
DEFINE(MM_PGD, offsetof (struct mm_struct, pgd));
DEFINE(MM_CONTEXT, offsetof (struct mm_struct, context));
BLANK();
DEFINE(PT_SINGLESTEP_BIT, PT_SINGLESTEP_BIT);
return 0;
}
/*
* arch/xtensa/kernel/coprocessor.S
*
* Xtensa processor configuration-specific table of coprocessor and
* other custom register layout information.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*
* Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
*/
/*
* This module contains a table that describes the layout of the various
* custom registers and states associated with each coprocessor, as well
* as those not associated with any coprocessor ("extra state").
* This table is included with core dumps and is available via the ptrace
* interface, allowing the layout of such register/state information to
* be modified in the kernel without affecting the debugger. Each
* register or state is identified using a 32-bit "libdb target number"
* assigned when the Xtensa processor is generated.
*/
#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#if XCHAL_HAVE_CP
#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
ENTRY(release_coprocessors)
entry a1, 16
# a2: task
movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
movi a4, coprocessor_info+CP_LAST # a4: owner-table
# a5: tmp
movi a6, 0 # a6: 0
rsil a7, LOCKLEVEL # a7: PS
1: /* Check if task is coprocessor owner of coprocessor[i]. */
l32i a5, a4, COPROCESSOR_INFO_OWNER
srli a3, a3, 1
beqz a3, 1f
addi a4, a4, -8
beq a2, a5, 1b
/* Found an entry: Clear entry CPENABLE bit to disable CP. */
rsr a5, CPENABLE
s32i a6, a4, COPROCESSOR_INFO_OWNER
xor a5, a3, a5
wsr a5, CPENABLE
bnez a3, 1b
1: wsr a7, PS
rsync
retw
ENTRY(disable_coprocessor)
entry sp, 16
rsil a7, LOCKLEVEL
rsr a3, CPENABLE
movi a4, 1
ssl a2
sll a4, a4
and a4, a3, a4
xor a3, a3, a4
wsr a3, CPENABLE
wsr a7, PS
rsync
retw
ENTRY(enable_coprocessor)
entry sp, 16
rsil a7, LOCKLEVEL
rsr a3, CPENABLE
movi a4, 1
ssl a2
sll a4, a4
or a3, a3, a4
wsr a3, CPENABLE
wsr a7, PS
rsync
retw
#endif
ENTRY(save_coprocessor_extra)
entry sp, 16
xchal_extra_store_funcbody
retw
ENTRY(restore_coprocessor_extra)
entry sp, 16
xchal_extra_load_funcbody
retw
ENTRY(save_coprocessor_registers)
entry sp, 16
xchal_cpi_store_funcbody
retw
ENTRY(restore_coprocessor_registers)
entry sp, 16
xchal_cpi_load_funcbody
retw
/*
* The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
* describe the contents of coprocessor & extra save areas in terms of
* undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
* latter macros here; they expand into a table of the format we want.
* The general format is:
*
* CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
* bitmask, rsv2, rsv3)
* CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
* bitmask, rsv2, rsv3)
* CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
* numentries, contentsize, regname_base,
* regfile_name, rsv2, rsv3)
*
* For this table, we only care about the <libdbnum>, <offset> and <size>
* fields.
*/
/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
bitmask, rsv2, rsv3) \
reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
bitmask, rsv2, rsv3) \
reg_entry libdbnum, offset, size ;
#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
numentries, contentsize, regname_base, \
regfile_name, rsv2, rsv3) \
reg_entry libdbnum, offset, size ;
/* A single table entry: */
.macro reg_entry libdbnum, offset, size
.ifne (__last_offset-(__last_group_offset+\offset))
/* padding entry */
.word (0xFC000000+__last_offset-(__last_group_offset+\offset))
.endif
.word \libdbnum /* actual entry */
.set __last_offset, __last_group_offset+\offset+\size
.endm /* reg_entry */
/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
.macro reg_group cpnum, num_entries, align
.set __last_group_offset, (__last_offset + \align - 1) & -\align
.ifne \num_entries
.word 0xFD000000+(\cpnum<<16)+\num_entries
.endif
.endm /* reg_group */
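/* For illustration (hypothetical values; the real words come from the
 * XCHAL_*_CONTENTS_LIBDB macros expanded below): a group of two
 * entries is laid out roughly as
 *
 *	.word 0xFD000000 + (cpnum << 16) + 2	group header
 *	.word 0xFC000000 + delta		padding marker, emitted only
 *						when an entry's offset skips bytes
 *	.word libdbnum0				first register entry
 *	.word libdbnum1				second register entry
 *
 * and the whole table is terminated by the lone 0xFC000000 word at the
 * end of this file.
 */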
/*
* Register info tables.
*/
.section .rodata, "a"
.globl _xtensa_reginfo_tables
.globl _xtensa_reginfo_table_size
.align 4
_xtensa_reginfo_table_size:
.word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
_xtensa_reginfo_tables:
.set __last_offset, 0
reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
XCHAL_EXTRA_SA_CONTENTS_LIBDB
reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
XCHAL_CP0_SA_CONTENTS_LIBDB
reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
XCHAL_CP1_SA_CONTENTS_LIBDB
reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
XCHAL_CP2_SA_CONTENTS_LIBDB
reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
XCHAL_CP3_SA_CONTENTS_LIBDB
reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
XCHAL_CP4_SA_CONTENTS_LIBDB
reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
XCHAL_CP5_SA_CONTENTS_LIBDB
reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
XCHAL_CP6_SA_CONTENTS_LIBDB
reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
XCHAL_CP7_SA_CONTENTS_LIBDB
.word 0xFC000000 /* invalid register number, marks end of table */
_xtensa_reginfo_table_end:
This diff is collapsed.
/*
* arch/xtensa/kernel/head.S
*
* Xtensa Processor startup code.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Kevin Chea
*/
#include <xtensa/cacheasm.h>
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
* This module contains the entry code for kernel images. It performs the
* minimal setup needed to call the generic C routines.
*
* Prerequisites:
*
* - The kernel image has been loaded to the actual address where it was
* compiled to.
* - a2 contains either 0 or a pointer to a list of boot parameters.
* (see setup.c for more details)
*
*/
.macro iterate from, to , cmd
.ifeq ((\to - \from) & ~0xfff)
\cmd \from
iterate "(\from+1)", \to, \cmd
.endif
.endm
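/* For illustration: "iterate 0, 2, reset_timer" expands recursively to
 *
 *	reset_timer 0
 *	reset_timer 1
 *	reset_timer 2
 *
 * The .ifeq test assembles the body while (\to - \from) is a small
 * non-negative value; once \from has passed \to the difference goes
 * negative, the ~0xfff mask leaves it non-zero, and recursion stops.
 */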
/*
* _start
*
* The bootloader passes a pointer to a list of boot parameters in a2.
*/
/* The first bytes of the kernel image must be an instruction, so we
* manually allocate and define the literal constant we need for a jx
* instruction.
*/
.section .head.text, "ax"
.globl _start
_start: _j 2f
.align 4
1: .word _startup
2: l32r a0, 1b
jx a0
.text
.align 4
_startup:
/* Disable interrupts and exceptions. */
movi a0, XCHAL_PS_EXCM_MASK
wsr a0, PS
/* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
wsr a2, EXCSAVE_1
/* Start with a fresh windowbase and windowstart. */
movi a1, 1
movi a0, 0
wsr a1, WINDOWSTART
wsr a0, WINDOWBASE
rsync
/* Set a0 to 0 for the remaining initialization. */
movi a0, 0
/* Clear debugging registers. */
#if XCHAL_HAVE_DEBUG
wsr a0, IBREAKENABLE
wsr a0, ICOUNT
movi a1, 15
wsr a0, ICOUNTLEVEL
.macro reset_dbreak num
wsr a0, DBREAKC + \num
.endm
iterate 0, XCHAL_NUM_IBREAK-1, reset_dbreak
#endif
/* Clear CCOUNT (not really necessary, but nice) */
wsr a0, CCOUNT
/* Disable zero-loops. */
#if XCHAL_HAVE_LOOPS
wsr a0, LCOUNT
#endif
/* Disable all timers. */
.macro reset_timer num
wsr a0, CCOMPARE_0 + \num
.endm
iterate 0, XCHAL_NUM_TIMERS-1, reset_timer
/* Interrupt initialization. */
movi a2, XCHAL_INTTYPE_MASK_SOFTWARE | XCHAL_INTTYPE_MASK_EXTERN_EDGE
wsr a0, INTENABLE
wsr a2, INTCLEAR
/* Disable coprocessors. */
#if XCHAL_CP_NUM > 0
wsr a0, CPENABLE
#endif
/* Set PS.INTLEVEL=1, PS.WOE=0, kernel stack, PS.EXCM=0
*
* Note: PS.EXCM must be cleared before using any loop
* instructions; otherwise, they are silently disabled, and
* at most one iteration of the loop is executed.
*/
movi a1, 1
wsr a1, PS
rsync
/* Initialize the caches.
* Does not include flushing writeback d-cache.
* a6, a7 are just working registers (clobbered).
*/
icache_reset a2, a3
dcache_reset a2, a3
/* Unpack data sections
*
* The linker script used to build the Linux kernel image
* creates a table located at __boot_reloc_table_start
* that contains information about which data needs to be unpacked.
*
* Uses a2-a7.
*/
movi a2, __boot_reloc_table_start
movi a3, __boot_reloc_table_end
1: beq a2, a3, 3f # no more entries?
l32i a4, a2, 0 # start destination (in RAM)
l32i a5, a2, 4 # end destination (in RAM)
l32i a6, a2, 8 # start source (in ROM)
addi a2, a2, 12 # next entry
beq a4, a5, 1b # skip, empty entry
beq a4, a6, 1b # skip, source and dest. are the same
2: l32i a7, a6, 0 # load word
addi a6, a6, 4
s32i a7, a4, 0 # store word
addi a4, a4, 4
bltu a4, a5, 2b
j 1b
3:
/* All code and initialized data segments have been copied.
* Now clear the BSS segment.
*/
movi a2, _bss_start # start of BSS
movi a3, _bss_end # end of BSS
1: s32i a0, a2, 0
addi a2, a2, 4
blt a2, a3, 1b
#if XCHAL_DCACHE_IS_WRITEBACK
/* After unpacking, flush the writeback cache to memory so the
* instructions/data are available.
*/
dcache_writeback_all a2, a3
#endif
/* Setup stack and enable window exceptions (keep irqs disabled) */
movi a1, init_thread_union
addi a1, a1, KERNEL_STACK_SIZE
movi a2, 0x00040001 # WOE=1, INTLEVEL=1, UM=0
wsr a2, PS # (enable reg-windows; progmode stack)
rsync
/* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
movi a2, debug_exception
wsr a2, EXCSAVE + XCHAL_DEBUGLEVEL
/* Set up EXCSAVE[1] to point to the exc_table. */
movi a6, exc_table
xsr a6, EXCSAVE_1
/* init_arch kick-starts the linux kernel */
movi a4, init_arch
callx4 a4
movi a4, start_kernel
callx4 a4
should_never_return:
j should_never_return
/* Define some common data structures here. We define them
* here in this assembly file due to their unusual alignment
* requirements.
*/
.comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
.comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
.comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
.comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
/*
* linux/arch/xtensa/kernel/irq.c
*
* Xtensa built-in interrupt controller and some generic functions copied
* from i386.
*
* Copyright (C) 2002 - 2005 Tensilica, Inc.
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
*
* Chris Zankel <chris@zankel.net>
* Kevin Chea
*
*/
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <asm/uaccess.h>
#include <asm/platform.h>
static void enable_xtensa_irq(unsigned int irq);
static void disable_xtensa_irq(unsigned int irq);
static void mask_and_ack_xtensa(unsigned int irq);
static void end_xtensa_irq(unsigned int irq);
static unsigned int cached_irq_mask;
atomic_t irq_err_count;
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* Each architecture has to answer this itself.
*/
void ack_bad_irq(unsigned int irq)
{
printk("unexpected IRQ trap at vector %02x\n", irq);
}
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
unsigned long sp;
__asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
sp &= THREAD_SIZE - 1;
if (unlikely(sp < (sizeof(struct thread_info) + 1024)))
printk("Stack overflow in do_IRQ: %ld\n",
sp - sizeof(struct thread_info));
}
#endif
__do_IRQ(irq, regs);
irq_exit();
return 1;
}
/*
* Generic, controller-independent functions:
*/
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
struct irqaction * action;
unsigned long flags;
if (i == 0) {
seq_printf(p, " ");
for (j=0; j<NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "CPU%d ",j);
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
seq_printf(p, "%10u ", kstat_irqs(i));
#else
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
seq_printf(p, " %14s", irq_desc[i].handler->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "NMI: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
seq_printf(p, "%10u ", nmi_count(j));
seq_putc(p, '\n');
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
}
return 0;
}
/* shutdown is same as "disable" */
#define shutdown_xtensa_irq disable_xtensa_irq
static unsigned int startup_xtensa_irq(unsigned int irq)
{
enable_xtensa_irq(irq);
return 0; /* never anything pending */
}
static struct hw_interrupt_type xtensa_irq_type = {
"Xtensa-IRQ",
startup_xtensa_irq,
shutdown_xtensa_irq,
enable_xtensa_irq,
disable_xtensa_irq,
mask_and_ack_xtensa,
end_xtensa_irq
};
static inline void mask_irq(unsigned int irq)
{
cached_irq_mask &= ~(1 << irq);
set_sr (cached_irq_mask, INTENABLE);
}
static inline void unmask_irq(unsigned int irq)
{
cached_irq_mask |= 1 << irq;
set_sr (cached_irq_mask, INTENABLE);
}
static void disable_xtensa_irq(unsigned int irq)
{
unsigned long flags;
local_save_flags(flags);
mask_irq(irq);
local_irq_restore(flags);
}
static void enable_xtensa_irq(unsigned int irq)
{
unsigned long flags;
local_save_flags(flags);
unmask_irq(irq);
local_irq_restore(flags);
}
static void mask_and_ack_xtensa(unsigned int irq)
{
disable_xtensa_irq(irq);
}
static void end_xtensa_irq(unsigned int irq)
{
enable_xtensa_irq(irq);
}
void __init init_IRQ(void)
{
int i;
for (i=0; i < XTENSA_NR_IRQS; i++)
irq_desc[i].handler = &xtensa_irq_type;
cached_irq_mask = 0;
platform_init_irq();
}
/*
* arch/xtensa/kernel/platform.c
*
* Module support.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cache.h>
LIST_HEAD(module_buf_list);
void *module_alloc(unsigned long size)
{
panic("module_alloc not implemented");
}
void module_free(struct module *mod, void *module_region)
{
panic("module_free not implemented");
}
int module_frob_arch_sections(Elf32_Ehdr *hdr,
Elf32_Shdr *sechdrs,
char *secstrings,
struct module *me)
{
panic("module_frob_arch_sections not implemented");
}
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *module)
{
panic ("apply_relocate not implemented");
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *module)
{
panic("apply_relocate_add not implemented");
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
panic ("module_finalize not implemented");
}
void module_arch_cleanup(struct module *mod)
{
panic("module_arch_cleanup not implemented");
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
{
panic("module_find_bug not implemented");
}
/*
* arch/xtensa/kernel/pci-dma.c
*
* DMA coherent memory allocation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*
* Based on version for i386.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
/*
* Note: We assume that the full memory space is always mapped to 'kseg'.
* Otherwise we have to use page attributes (not implemented).
*/
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp)
{
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dev == NULL || (*dev->dma_mask < 0xffffffff))
gfp |= GFP_DMA;
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
*handle = virt_to_bus(ret);
}
return (void*) BYPASS_ADDR((unsigned long)ret);
}
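/* A hedged usage sketch (driver side; 'pdev' stands for some PCI
 * device and is not defined here):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(&pdev->dev, 4096, &dma, GFP_KERNEL);
 *	if (buf) {
 *		... hand 'dma' to the device, access 'buf' from the CPU ...
 *		dma_free_coherent(&pdev->dev, 4096, buf, dma);
 *	}
 *
 * The returned pointer lies in the uncached (BYPASS) region, so CPU
 * and device see a consistent view without explicit flushes.
 */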
void dma_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
free_pages(CACHED_ADDR((unsigned long)vaddr), get_order(size));
}
void consistent_sync(void *vaddr, size_t size, int direction)
{
switch (direction) {
case PCI_DMA_NONE:
BUG();
case PCI_DMA_FROMDEVICE: /* invalidate only */
__invalidate_dcache_range((unsigned long)vaddr,
(unsigned long)size);
break;
case PCI_DMA_TODEVICE: /* writeback only */
case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */
__flush_invalidate_dcache_range((unsigned long)vaddr,
(unsigned long)size);
break;
}
}
/*
* arch/xtensa/kernel/pci.c
*
* PCI bios-type initialisation for PCI machines
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Copyright (C) 2001-2005 Tensilica Inc.
*
* Based largely on work from Cort (ppc/kernel/pci.c)
* IO functions copied from sparc.
*
* Chris Zankel <chris@zankel.net>
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <asm/pci-bridge.h>
#include <asm/platform.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
/* PCI Controller */
/*
* pcibios_alloc_controller
* pcibios_enable_device
* pcibios_fixups
* pcibios_align_resource
* pcibios_fixup_bus
* pcibios_setup
* pci_bus_add_device
* pci_mmap_page_range
*/
struct pci_controller* pci_ctrl_head;
struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;
static int pci_bus_count;
static void pcibios_fixup_resources(struct pci_dev* dev);
#if 0 // FIXME
struct pci_fixup pcibios_fixups[] = {
{ DECLARE_PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources },
{ 0 }
};
#endif
void
pcibios_update_resource(struct pci_dev *dev, struct resource *root,
struct resource *res, int resource)
{
u32 new, check, mask;
int reg;
struct pci_controller* pci_ctrl = dev->sysdata;
new = res->start;
if (pci_ctrl && res->flags & IORESOURCE_IO) {
new -= pci_ctrl->io_space.base;
}
new |= (res->flags & PCI_REGION_FLAG_MASK);
if (resource < 6) {
reg = PCI_BASE_ADDRESS_0 + 4*resource;
} else if (resource == PCI_ROM_RESOURCE) {
res->flags |= PCI_ROM_ADDRESS_ENABLE;
reg = dev->rom_base_reg;
} else {
/* Somebody might have asked for allocation of a non-standard resource */
return;
}
pci_write_config_dword(dev, reg, new);
pci_read_config_dword(dev, reg, &check);
mask = (new & PCI_BASE_ADDRESS_SPACE_IO) ?
PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK;
if ((new ^ check) & mask) {
printk(KERN_ERR "PCI: Error while updating region "
"%s/%d (%08x != %08x)\n", dev->slot_name, resource,
new, check);
}
}
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff.
*/
void
pcibios_align_resource(void *data, struct resource *res, unsigned long size,
unsigned long align)
{
struct pci_dev *dev = data;
if (res->flags & IORESOURCE_IO) {
unsigned long start = res->start;
if (size > 0x100) {
printk(KERN_ERR "PCI: I/O Region %s/%d too large"
" (%ld bytes)\n", dev->slot_name,
dev->resource - res, size);
}
if (start & 0x300) {
start = (start + 0x3ff) & ~0x3ff;
res->start = start;
}
}
}
int
pcibios_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for(idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
printk (KERN_ERR "PCI: Device %s not available because "
"of resource collisions\n", dev->slot_name);
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n",
dev->slot_name, old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
struct pci_controller * __init pcibios_alloc_controller(void)
{
struct pci_controller *pci_ctrl;
pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
memset(pci_ctrl, 0, sizeof(struct pci_controller));
*pci_ctrl_tail = pci_ctrl;
pci_ctrl_tail = &pci_ctrl->next;
return pci_ctrl;
}
static int __init pcibios_init(void)
{
struct pci_controller *pci_ctrl;
struct pci_bus *bus;
int next_busno = 0, i;
printk("PCI: Probing PCI hardware\n");
/* Scan all of the recorded PCI controllers. */
for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
pci_ctrl->last_busno = 0xff;
bus = pci_scan_bus(pci_ctrl->first_busno, pci_ctrl->ops,
pci_ctrl);
if (pci_ctrl->io_resource.flags) {
unsigned long offs;
offs = (unsigned long)pci_ctrl->io_space.base;
pci_ctrl->io_resource.start += offs;
pci_ctrl->io_resource.end += offs;
bus->resource[0] = &pci_ctrl->io_resource;
}
for (i = 0; i < 3; ++i)
if (pci_ctrl->mem_resources[i].flags)
bus->resource[i+1] =&pci_ctrl->mem_resources[i];
pci_ctrl->bus = bus;
pci_ctrl->last_busno = bus->subordinate;
if (next_busno <= pci_ctrl->last_busno)
next_busno = pci_ctrl->last_busno+1;
}
pci_bus_count = next_busno;
return platform_pcibios_fixup();
}
subsys_initcall(pcibios_init);
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_controller *pci_ctrl = bus->sysdata;
struct resource *res;
unsigned long io_offset;
int i;
io_offset = (unsigned long)pci_ctrl->io_space.base;
if (bus->parent == NULL) {
/* this is a host bridge - fill in its resources */
pci_ctrl->bus = bus;
bus->resource[0] = res = &pci_ctrl->io_resource;
if (!res->flags) {
if (io_offset)
printk (KERN_ERR "I/O resource not set for host"
" bridge %d\n", pci_ctrl->index);
res->start = 0;
res->end = IO_SPACE_LIMIT;
res->flags = IORESOURCE_IO;
}
res->start += io_offset;
res->end += io_offset;
for (i = 0; i < 3; i++) {
res = &pci_ctrl->mem_resources[i];
if (!res->flags) {
if (i > 0)
continue;
printk(KERN_ERR "Memory resource not set for "
"host bridge %d\n", pci_ctrl->index);
res->start = 0;
res->end = ~0U;
res->flags = IORESOURCE_MEM;
}
bus->resource[i+1] = res;
}
} else {
/* This is a subordinate bridge */
pci_read_bridge_bases(bus);
for (i = 0; i < 4; i++) {
if ((res = bus->resource[i]) == NULL || !res->flags)
continue;
if (io_offset && (res->flags & IORESOURCE_IO)) {
res->start += io_offset;
res->end += io_offset;
}
}
}
}
char __init *pcibios_setup(char *str)
{
return str;
}
/* the next one is stolen from the alpha port... */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
int idx;
struct resource *r;
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
for (idx=0; idx<6; idx++) {
r = &dev->resource[idx];
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because "
"of resource collisions\n", dev->slot_name);
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n",
dev->slot_name, old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
return 0;
}
#ifdef CONFIG_PROC_FS
/*
* Return the index of the PCI controller for device pdev.
*/
int
pci_controller_num(struct pci_dev *dev)
{
struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
return pci_ctrl->index;
}
#endif /* CONFIG_PROC_FS */
static void
pcibios_fixup_resources(struct pci_dev *dev)
{
struct pci_controller* pci_ctrl = (struct pci_controller *)dev->sysdata;
int i;
unsigned long offset;
if (!pci_ctrl) {
printk(KERN_ERR "No pci_ctrl for PCI dev %s!\n",dev->slot_name);
return;
}
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
if (!res->start || !res->flags)
continue;
if (res->end == 0xffffffff) {
DBG("PCI:%s Resource %d [%08lx-%08lx] is unassigned\n",
dev->slot_name, i, res->start, res->end);
res->end -= res->start;
res->start = 0;
continue;
}
offset = 0;
if (res->flags & IORESOURCE_IO)
offset = (unsigned long) pci_ctrl->io_space.base;
else if (res->flags & IORESOURCE_MEM)
offset = (unsigned long) pci_ctrl->mem_space.base;
if (offset != 0) {
res->start += offset;
res->end += offset;
#ifdef DEBUG
printk("Fixup res %d (%lx) of dev %s: %lx -> %lx\n",
i, res->flags, dev->slot_name,
res->start - offset, res->start);
#endif
}
}
}
/*
* Platform support for /proc/bus/pci/X/Y mmap()s,
* modelled on the sparc64 implementation by Dave Miller.
* -- paulus.
*/
/*
* Adjust vm_pgoff of VMA such that it is the physical page offset
* corresponding to the 32-bit pci bus offset for DEV requested by the user.
*
* Basically, the user finds the base address for the device they wish
* to mmap. They read the 32-bit value from the config space base register,
* add whatever PAGE_SIZE multiple offset they wish, and feed this into the
* offset parameter of mmap on /proc/bus/pci/XXX for that device.
*
* Returns negative error code on failure, zero on success.
*/
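/*
 * A hedged userspace sketch of that procedure (the file name, the BAR
 * choice, and 'n' are illustrative only):
 *
 *	int fd = open("/proc/bus/pci/00/01.0", O_RDWR);
 *	unsigned int bar0;
 *	pread(fd, &bar0, 4, PCI_BASE_ADDRESS_0);
 *	off_t off = (bar0 & PCI_BASE_ADDRESS_MEM_MASK) + n * PAGE_SIZE;
 *	void *p = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, off);
 *
 * __pci_mmap_make_offset() below converts that user-supplied bus
 * offset into the physical page offset the final mapping needs.
 */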
static __inline__ int
__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long io_offset = 0;
int i, res_bit;
if (pci_ctrl == 0)
return -EINVAL; /* should never happen */
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
res_bit = IORESOURCE_MEM;
} else {
io_offset = (unsigned long)pci_ctrl->io_space.base;
offset += io_offset;
res_bit = IORESOURCE_IO;
}
/*
* Check that the offset requested corresponds to one of the
* resources of the device.
*/
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
struct resource *rp = &dev->resource[i];
int flags = rp->flags;
/* treat ROM as memory (should be already) */
if (i == PCI_ROM_RESOURCE)
flags |= IORESOURCE_MEM;
/* Active and same type? */
if ((flags & res_bit) == 0)
continue;
/* In the range of this resource? */
if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
continue;
/* found it! construct the final physical address */
if (mmap_state == pci_mmap_io)
offset += pci_ctrl->io_space.start - io_offset;
vma->vm_pgoff = offset >> PAGE_SHIFT;
return 0;
}
return -EINVAL;
}
/*
* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
* mapping.
*/
static __inline__ void
__pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
}
/*
* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
* device mapping.
*/
static __inline__ void
__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
int prot = pgprot_val(vma->vm_page_prot);
/* Set to write-through */
prot &= ~_PAGE_NO_CACHE;
#if 0
if (!write_combine)
prot |= _PAGE_WRITETHRU;
#endif
vma->vm_page_prot = __pgprot(prot);
}
/*
* Perform the actual remap of the pages for a PCI device mapping, as
* appropriate for this architecture. The region in the process to map
* is described by vm_start and vm_end members of VMA, the base physical
* address is found in vm_pgoff.
* The pci device structure is provided so that architectures may make mapping
* decisions on a per-device or per-bus basis.
*
* Returns a negative error code on failure, zero on success.
*/
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
int ret;
ret = __pci_mmap_make_offset(dev, vma, mmap_state);
if (ret < 0)
return ret;
__pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
return ret;
}
/*
* This probably belongs here rather than ioport.c because
* we do not want this crud linked into SBus kernels.
* Also, think for a moment about the likes of floppy.c that
* include architecture specific parts. They may want to redefine ins/outs.
*
* We do not use horrible macros here because we want to
* advance the pointer by sizeof(size).
*/
void outsb(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 1;
writeb(*(const char *)src, addr);
src += 1;
addr += 1;
}
}
void outsw(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 2;
writew(*(const short *)src, addr);
src += 2;
addr += 2;
}
}
void outsl(unsigned long addr, const void *src, unsigned long count) {
while (count) {
count -= 4;
writel(*(const long *)src, addr);
src += 4;
addr += 4;
}
}
void insb(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 1;
*(unsigned char *)dst = readb(addr);
dst += 1;
addr += 1;
}
}
void insw(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 2;
*(unsigned short *)dst = readw(addr);
dst += 2;
addr += 2;
}
}
void insl(unsigned long addr, void *dst, unsigned long count) {
while (count) {
count -= 4;
/*
* XXX I am sure we are in for an unaligned trap here.
*/
*(unsigned long *)dst = readl(addr);
dst += 4;
addr += 4;
}
}
/*
* arch/xtensa/kernel/platform.c
*
* Default platform functions.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*
* Chris Zankel <chris@zankel.net>
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <asm/platform.h>
#include <asm/timex.h>
#define _F(r,f,a,b) \
r __platform_##f a b; \
r platform_##f a __attribute__((weak, alias("__platform_"#f)))
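/*
 * For example, _F(void, setup, (char** cmd), { }) expands to
 *
 *	void __platform_setup (char** cmd) { };
 *	void platform_setup (char** cmd)
 *		__attribute__((weak, alias("__platform_setup")));
 *
 * so a platform can override any of these hooks simply by providing a
 * strong definition of platform_setup() etc. in its own code.
 */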
/*
* Default functions that are used if no platform specific function is defined.
* (Please, refer to include/asm-xtensa/platform.h for more information)
*/
_F(void, setup, (char** cmd), { });
_F(void, init_irq, (void), { });
_F(void, restart, (void), { while(1); });
_F(void, halt, (void), { while(1); });
_F(void, power_off, (void), { while(1); });
_F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); });
_F(void, heartbeat, (void), { });
_F(int, pcibios_fixup, (void), { return 0; });
_F(int, get_rtc_time, (time_t* t), { return 0; });
_F(int, set_rtc_time, (time_t t), { return 0; });
#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void),
{
printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
ccount_per_jiffy = 100 * (1000000UL/HZ);
});
#endif
/*
* arch/xtensa/kernel/process.c
*
* Xtensa Processor version.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
* Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
* Kevin Chea
*/
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/offsets.h>
#include <asm/coprocessor.h>
extern void ret_from_fork(void);
static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
#if XCHAL_CP_NUM > 0
/*
* Coprocessor ownership.
*/
coprocessor_info_t coprocessor_info[] = {
{ 0, XTENSA_CPE_CP0_OFFSET },
{ 0, XTENSA_CPE_CP1_OFFSET },
{ 0, XTENSA_CPE_CP2_OFFSET },
{ 0, XTENSA_CPE_CP3_OFFSET },
{ 0, XTENSA_CPE_CP4_OFFSET },
{ 0, XTENSA_CPE_CP5_OFFSET },
{ 0, XTENSA_CPE_CP6_OFFSET },
{ 0, XTENSA_CPE_CP7_OFFSET },
};
#endif
/*
* Powermanagement idle function, if any is provided by the platform.
*/
void cpu_idle(void)
{
local_irq_enable();
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched())
platform_idle();
preempt_enable();
schedule();
}
}
/*
* Free current thread data structures etc..
*/
void exit_thread(void)
{
release_coprocessors(current); /* Empty macro if no CPs are defined */
}
void flush_thread(void)
{
release_coprocessors(current); /* Empty macro if no CPs are defined */
}
/*
* Copy thread.
*
* The stack layout for the new thread looks like this:
*
* +------------------------+ <- sp in childregs (= tos)
* | childregs |
* +------------------------+ <- thread.sp = sp in dummy-frame
* | dummy-frame | (saved in dummy-frame spill-area)
* +------------------------+
*
* We create a dummy frame to return to ret_from_fork:
* a0 points to ret_from_fork (simulating a call4)
* sp points to itself (thread.sp)
* a2, a3 are unused.
*
* Note: This is a pristine frame, so we don't need any spill region on top of
* childregs.
*/
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
struct pt_regs *childregs;
unsigned long tos;
int user_mode = user_mode(regs);
/* Set up new TSS. */
tos = (unsigned long)p->thread_info + THREAD_SIZE;
if (user_mode)
childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
else
childregs = (struct pt_regs*)tos - 1;
*childregs = *regs;
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
*((int*)childregs - 3) = (unsigned long)childregs;
*((int*)childregs - 4) = 0;
childregs->areg[1] = tos;
childregs->areg[2] = 0;
p->set_child_tid = p->clear_child_tid = NULL;
p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
p->thread.sp = (unsigned long)childregs;
if (user_mode(regs)) {
int len = childregs->wmask & ~0xf;
childregs->areg[1] = usp;
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
&regs->areg[XCHAL_NUM_AREGS - len/4], len);
if (clone_flags & CLONE_SETTLS)
childregs->areg[2] = childregs->areg[6];
} else {
/* In kernel space, we start a new thread with a new stack. */
childregs->wmask = 1;
}
return 0;
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
long retval;
__asm__ __volatile__
("mov a5, %4\n\t" /* preserve fn in a5 */
"mov a6, %3\n\t" /* preserve and setup arg in a6 */
"movi a2, %1\n\t" /* load __NR_clone for syscall*/
"mov a3, sp\n\t" /* sp check and sys_clone */
"mov a4, %5\n\t" /* load flags for syscall */
"syscall\n\t"
"beq a3, sp, 1f\n\t" /* branch if parent */
"callx4 a5\n\t" /* call fn */
"movi a2, %2\n\t" /* load __NR_exit for syscall */
"mov a3, a6\n\t" /* load fn return value */
"syscall\n"
"1:\n\t"
"mov %0, a2\n\t" /* parent returns zero */
:"=r" (retval)
:"i" (__NR_clone), "i" (__NR_exit),
"r" (arg), "r" (fn),
"r" (flags | CLONE_VM)
: "a2", "a3", "a4", "a5", "a6" );
return retval;
}
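/* A hedged usage sketch ('my_thread_fn' is illustrative):
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		... do work ...
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *
 * The parent gets clone()'s return value back; the child never returns
 * from kernel_thread(): it calls fn and then exits via __NR_exit, as
 * the inline assembly above arranges.
 */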
/*
* These bracket the sleeping functions..
*/
unsigned long get_wchan(struct task_struct *p)
{
unsigned long sp, pc;
unsigned long stack_page = (unsigned long) p->thread_info;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
sp = p->thread.sp;
pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
do {
if (sp < stack_page + sizeof(struct task_struct) ||
sp >= (stack_page + THREAD_SIZE) ||
pc == 0)
return 0;
if (!in_sched_functions(pc))
return pc;
/* Stack layout: sp-4: ra, sp-3: sp' */
pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
sp = *((unsigned long *)sp - 3);
} while (count++ < 16);
return 0;
}
/*
* do_copy_regs() gathers information from 'struct pt_regs' and
* 'current->thread.areg[]' to fill in the xtensa_gregset_t
* structure.
*
* xtensa_gregset_t and 'struct pt_regs' are vastly different formats
* of processor registers. Besides different ordering,
* xtensa_gregset_t contains non-live register information that
* 'struct pt_regs' does not. Exception handling (primarily) uses
* 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
*
*/
void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
struct task_struct *tsk)
{
int i, n, wb_offset;
elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;
__asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
elfregs->cpux = i;
__asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
elfregs->cpuy = i;
/* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience.
*/
elfregs->pc = regs->pc;
elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
elfregs->exccause = regs->exccause;
elfregs->excvaddr = regs->excvaddr;
elfregs->windowbase = regs->windowbase;
elfregs->windowstart = regs->windowstart;
elfregs->lbeg = regs->lbeg;
elfregs->lend = regs->lend;
elfregs->lcount = regs->lcount;
elfregs->sar = regs->sar;
elfregs->syscall = regs->syscall;
/* Copy register file.
* The layout looks like this:
*
* | a0 ... a15 | Z ... Z | arX ... arY |
* current window unused saved frames
*/
memset (elfregs->ar, 0, sizeof(elfregs->ar));
wb_offset = regs->windowbase * 4;
n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
for (i = 0; i < n; i++)
elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
n = (regs->wmask >> 4) * 4;
for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
}
void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
}
/* The inverse of do_copy_regs(). No error or sanity checking. */
void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
struct task_struct *tsk)
{
int i, n, wb_offset;
/* Note: PS.EXCM is not set while user task is running; it
* needs to be set in regs->ps for exception handling convenience.
*/
regs->pc = elfregs->pc;
regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
regs->exccause = elfregs->exccause;
regs->excvaddr = elfregs->excvaddr;
regs->windowbase = elfregs->windowbase;
regs->windowstart = elfregs->windowstart;
regs->lbeg = elfregs->lbeg;
regs->lend = elfregs->lend;
regs->lcount = elfregs->lcount;
regs->sar = elfregs->sar;
regs->syscall = elfregs->syscall;
/* Clear everything. */
memset (regs->areg, 0, sizeof(regs->areg));
/* Copy regs from live window frame. */
wb_offset = regs->windowbase * 4;
n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;
for (i = 0; i < n; i++)
regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
n = (regs->wmask >> 4) * 4;
for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
regs->areg[(wb_offset+i) % XCHAL_NUM_AREGS] = elfregs->ar[i];
}
/*
* do_save_fpregs() gathers information from 'struct pt_regs' and
* 'current->thread' to fill in the elf_fpregset_t structure.
*
* Core files and ptrace use elf_fpregset_t.
*/
void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
struct task_struct *tsk)
{
#if XCHAL_HAVE_CP
extern unsigned char _xtensa_reginfo_tables[];
extern unsigned _xtensa_reginfo_table_size;
int i;
unsigned long flags;
/* Before dumping coprocessor state from memory,
* ensure any live coprocessor contents for this
* task are first saved to memory:
*/
local_irq_save(flags);
for (i = 0; i < XCHAL_CP_MAX; i++) {
if (tsk == coprocessor_info[i].owner) {
enable_coprocessor(i);
save_coprocessor_registers(
tsk->thread.cp_save+coprocessor_info[i].offset,i);
disable_coprocessor(i);
}
}
local_irq_restore(flags);
/* Now dump coprocessor & extra state: */
memcpy((unsigned char*)fpregs,
_xtensa_reginfo_tables, _xtensa_reginfo_table_size);
memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
}
/*
* The inverse of do_save_fpregs().
* Copies coprocessor and extra state from fpregs into regs and tsk->thread.
* Returns 0 on success, non-zero if layout doesn't match.
*/
int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
struct task_struct *tsk)
{
#if XCHAL_HAVE_CP
extern unsigned char _xtensa_reginfo_tables[];
extern unsigned _xtensa_reginfo_table_size;
int i;
unsigned long flags;
/* Make sure save area layouts match.
* FIXME: in the future we could allow restoring from
* a different layout of the same registers, by comparing
* fpregs' table with _xtensa_reginfo_tables and matching
* entries and copying registers one at a time.
* Not too sure yet whether that's very useful.
*/
if( memcmp((unsigned char*)fpregs,
_xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
return -1;
}
/* Before restoring coprocessor state from memory,
* ensure any live coprocessor contents for this
* task are first invalidated.
*/
local_irq_save(flags);
for (i = 0; i < XCHAL_CP_MAX; i++) {
if (tsk == coprocessor_info[i].owner) {
enable_coprocessor(i);
save_coprocessor_registers(
tsk->thread.cp_save+coprocessor_info[i].offset,i);
coprocessor_info[i].owner = 0;
disable_coprocessor(i);
}
}
local_irq_restore(flags);
/* Now restore coprocessor & extra state: */
memcpy(tsk->thread.cp_save,
(unsigned char*)fpregs + _xtensa_reginfo_table_size,
XTENSA_CP_EXTRA_SIZE);
#endif
return 0;
}
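/* Save-area layout assumed above (illustrative; reconstructed from
 * the memcpy() offsets in do_save_fpregs()/do_restore_fpregs()):
 *
 *   elf_fpregset_t buffer:
 *   | reginfo table                 | coprocessor & extra state |
 *   | _xtensa_reginfo_table_size B  | XTENSA_CP_EXTRA_SIZE B    |
 *
 * The leading table doubles as a layout signature: do_restore_fpregs()
 * rejects any buffer whose table does not byte-match this kernel's
 * _xtensa_reginfo_tables.
 */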
/*
* Fill in the CP structure for a core dump for a particular task.
*/
int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
/* see asm/coprocessor.h for this magic number 16 */
#if TOTAL_CPEXTRA_SIZE > 16
do_save_fpregs (r, regs, task);
/* For now, bit 16 means some extra state may be present: */
// FIXME: track coprocessor use so we can return a more accurate mask
return 0x10000 | XCHAL_CP_MASK;
#else
return 0; /* no coprocessors active on this processor */
#endif
}
/*
* Fill in the CP structure for a core dump.
* This includes any FPU coprocessor.
* Here, we dump all coprocessors, and other ("extra") custom state.
*
* This function is called by elf_core_dump() in fs/binfmt_elf.c
* (in which case 'regs' comes from calls to do_coredump, see signals.c).
*/
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
return dump_task_fpu(regs, current, r);
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
* Chris Zankel <chris@zankel.net>
* Scott Foehner <sfoehner@yahoo.com>,
* Kevin Chea
* Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/elf.h>
#define TEST_KERNEL // verify kernel operations FIXME: remove
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* Nothing to do.. */
}
int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
int ret = -EPERM;
lock_kernel();
#if 0
if ((int)request != 1)
printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
(int) request, (int) pid, (unsigned long) addr,
(unsigned long) data);
#endif
if (request == PTRACE_TRACEME) {
/* Are we already being traced? */
if (current->ptrace & PT_PTRACED)
goto out;
if ((ret = security_ptrace(current->parent, current)))
goto out;
/* Set the ptrace bit in the process flags. */
current->ptrace |= PT_PTRACED;
ret = 0;
goto out;
}
ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* you may not mess with init */
goto out_tsk;
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
goto out_tsk;
}
if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
goto out_tsk;
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
{
unsigned long tmp;
int copied;
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
ret = -EIO;
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (unsigned long *) data);
goto out_tsk;
}
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
{
struct pt_regs *regs;
unsigned long tmp;
regs = xtensa_pt_regs(child);
tmp = 0; /* Default return value. */
switch(addr) {
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
{
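/* The AR file rotates: regs->windowbase counts 4-register
 * groups, so translate the absolute AR index requested by the
 * debugger into an offset relative to the current window
 * before indexing regs->areg[]. */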
int ar = addr - REG_AR_BASE - regs->windowbase * 4;
ar &= (XCHAL_NUM_AREGS - 1);
if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
tmp = regs->areg[ar];
else
ret = -EIO;
break;
}
case REG_A_BASE ... REG_A_BASE + 15:
tmp = regs->areg[addr - REG_A_BASE];
break;
case REG_PC:
tmp = regs->pc;
break;
case REG_PS:
/* Note: PS.EXCM is not set while the user task is running;
 * it is set in regs only for exception-handling
 * convenience. */
tmp = (regs->ps & ~XCHAL_PS_EXCM_MASK);
break;
case REG_WB:
tmp = regs->windowbase;
break;
case REG_WS:
tmp = regs->windowstart;
break;
case REG_LBEG:
tmp = regs->lbeg;
break;
case REG_LEND:
tmp = regs->lend;
break;
case REG_LCOUNT:
tmp = regs->lcount;
break;
case REG_SAR:
tmp = regs->sar;
break;
case REG_DEPC:
tmp = regs->depc;
break;
case REG_EXCCAUSE:
tmp = regs->exccause;
break;
case REG_EXCVADDR:
tmp = regs->excvaddr;
break;
case SYSCALL_NR:
tmp = regs->syscall;
break;
default:
tmp = 0;
ret = -EIO;
goto out_tsk;
}
ret = put_user(tmp, (unsigned long *) data);
goto out_tsk;
}
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA:
if (access_process_vm(child, addr, &data, sizeof(data), 1)
== sizeof(data))
break;
ret = -EIO;
goto out_tsk;
case PTRACE_POKEUSR:
{
struct pt_regs *regs;
regs = xtensa_pt_regs(child);
switch (addr) {
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
{
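/* Same window rotation as in PTRACE_PEEKUSR above. */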
int ar = addr - REG_AR_BASE - regs->windowbase * 4;
if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
else
ret = -EIO;
break;
}
case REG_A_BASE ... REG_A_BASE + 15:
regs->areg[addr - REG_A_BASE] = data;
break;
case REG_PC:
regs->pc = data;
break;
case SYSCALL_NR:
regs->syscall = data;
break;
#ifdef TEST_KERNEL
case REG_WB:
regs->windowbase = data;
break;
case REG_WS:
regs->windowstart = data;
break;
#endif
default:
/* The rest are not allowed. */
ret = -EIO;
break;
}
break;
}
/* continue and stop at next (return from) syscall */
case PTRACE_SYSCALL:
case PTRACE_CONT: /* restart after signal. */
{
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
if (request == PTRACE_SYSCALL)
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
/* Make sure the single step bit is not set. */
child->ptrace &= ~PT_SINGLESTEP;
wake_up_process(child);
ret = 0;
break;
}
/*
 * Make the child exit. The best we can do is send it a SIGKILL;
 * perhaps it should instead be recorded in the status that the
 * child wants to exit.
 */
case PTRACE_KILL:
ret = 0;
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
break;
child->exit_code = SIGKILL;
child->ptrace &= ~PT_SINGLESTEP;
wake_up_process(child);
break;
case PTRACE_SINGLESTEP:
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->ptrace |= PT_SINGLESTEP;
child->exit_code = data;
wake_up_process(child);
ret = 0;
break;
case PTRACE_GETREGS:
{
/* 'data' points to user memory in which to write.
* Mainly due to the non-live register values, we
* reformat the register values into something more
* standard. For convenience, we use the handy
* elf_gregset_t format. */
xtensa_gregset_t format;
struct pt_regs *regs = xtensa_pt_regs(child);
do_copy_regs (&format, regs, child);
/* Now, copy to user space nice and easy... */
ret = 0;
if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
ret = -EFAULT;
break;
}
case PTRACE_SETREGS:
{
/* 'data' points to user memory that contains the new
* values in the elf_gregset_t format. */
xtensa_gregset_t format;
struct pt_regs *regs = xtensa_pt_regs(child);
if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
ret = -EFAULT;
break;
}
/* FIXME: Perhaps we want some sanity checks on
* these user-space values? See ARM version. Are
* debuggers a security concern? */
do_restore_regs (&format, regs, child);
ret = 0;
break;
}
case PTRACE_GETFPREGS:
{
/* 'data' points to user memory in which to write.
* For convenience, we use the handy
* elf_fpregset_t format. */
elf_fpregset_t fpregs;
struct pt_regs *regs = xtensa_pt_regs(child);
do_save_fpregs (&fpregs, regs, child);
/* Now, copy to user space nice and easy... */
ret = 0;
if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
ret = -EFAULT;
break;
}
case PTRACE_SETFPREGS:
{
/* 'data' points to user memory that contains the new
* values in the elf_fpregset_t format.
*/
elf_fpregset_t fpregs;
struct pt_regs *regs = xtensa_pt_regs(child);
ret = 0;
if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
ret = -EFAULT;
break;
}
if (do_restore_fpregs (&fpregs, regs, child))
ret = -EIO;
break;
}
case PTRACE_GETFPREGSIZE:
/* 'data' points to 'unsigned long' set to the size
* of elf_fpregset_t
*/
ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
break;
case PTRACE_DETACH: /* detach a process that was attached. */
ret = ptrace_detach(child, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
goto out_tsk;
}
out_tsk:
put_task_struct(child);
out:
unlock_kernel();
return ret;
}
void do_syscall_trace(void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
/*
* The 0x80 provides a way for the tracing parent to distinguish
* between a syscall stop and SIGTRAP delivery
*/
ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
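/* Illustrative tracer-side sketch (user space; not part of this patch).
 * A tracer that set PTRACE_O_TRACESYSGOOD can use the 0x80 bit added
 * above to tell syscall stops apart from ordinary SIGTRAP stops:
 *
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
 *		handle_syscall_stop();
 *	else if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
 *		handle_signal_stop();
 *
 * handle_syscall_stop() and handle_signal_stop() are hypothetical.
 */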
/*
* arch/xtensa/kernel/xtensa_ksyms.c
*
* Export Xtensa-specific functions for loadable modules.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Joe Taylor <joe@tensilica.com>
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <linux/in6.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/semaphore.h>
#ifdef CONFIG_BLK_DEV_FD
#include <asm/floppy.h>
#endif
#ifdef CONFIG_NET
#include <net/checksum.h>
#endif /* CONFIG_NET */
/*
* String functions
*/
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);
/*
* gcc internal math functions
*/
extern long long __ashrdi3(long long, int);
extern long long __ashldi3(long long, int);
extern long long __lshrdi3(long long, int);
extern int __divsi3(int, int);
extern int __modsi3(int, int);
extern long long __muldi3(long long, long long);
extern int __mulsi3(int, int);
extern unsigned int __udivsi3(unsigned int, unsigned int);
extern unsigned int __umodsi3(unsigned int, unsigned int);
extern unsigned long long __umoddi3(unsigned long long, unsigned long long);
extern unsigned long long __udivdi3(unsigned long long, unsigned long long);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__mulsi3);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
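/* These helpers are emitted by gcc for arithmetic the core ISA lacks;
 * e.g. a module dividing two 'unsigned long long' values links against
 * __udivdi3 above, so the exports keep such modules loadable. */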
/*
* Semaphore operations
*/
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
EXPORT_SYMBOL(__down_trylock);
EXPORT_SYMBOL(__up);
#ifdef CONFIG_NET
/*
* Networking support
*/
EXPORT_SYMBOL(csum_partial_copy_generic);
#endif /* CONFIG_NET */
/*
* Architecture-specific symbols
*/
EXPORT_SYMBOL(__xtensa_copy_user);
/*
* Kernel hacking ...
*/
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
// FIXME EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(get_wchan);
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);