Commit 433bcf67 authored by Linus Torvalds

Merge tag 'xtensa-20180820' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa updates from Max Filippov:

 - switch xtensa arch to the generic noncoherent direct mapping
   operations

 - add support for DMA_ATTR_NO_KERNEL_MAPPING attribute (see the sketch
   after the shortlog)

 - clean up users of platform/hardware.h in generic Xtensa code

 - fix assembly cache maintenance code for long cache lines

 - rework noMMU cache attributes initialization

 - add big-endian HiFi2 test_kc705_be CPU variant

* tag 'xtensa-20180820' of git://github.com/jcmvbkbc/linux-xtensa:
  xtensa: add test_kc705_be variant
  xtensa: clean up boot-elf/bootstrap.S
  xtensa: make bootparam parsing optional
  xtensa: drop variant IRQ support
  xtensa: drop unneeded platform/hardware.h headers
  xtensa: move PLATFORM_NR_IRQS to Kconfig
  xtensa: rework {CONFIG,PLATFORM}_DEFAULT_MEM_START
  xtensa: drop unused {CONFIG,PLATFORM}_DEFAULT_MEM_SIZE
  xtensa: rework noMMU cache attributes initialization
  xtensa: increase ranges in ___invalidate_{i,d}cache_all
  xtensa: limit offsets in __loop_cache_{all,page}
  xtensa: platform-specific handling of coherent memory
  xtensa: support DMA_ATTR_NO_KERNEL_MAPPING attribute
  xtensa: use generic dma_noncoherent_ops
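The DMA_ATTR_NO_KERNEL_MAPPING attribute referenced above lets a driver allocate DMA memory that the CPU never touches: the value returned by dma_alloc_attrs() is then an opaque cookie rather than a usable virtual address, and it must be handed back to dma_free_attrs() with the same attribute. A minimal driver-side sketch, assuming a valid struct device (the helper names are mine, not part of this series):

#include <linux/dma-mapping.h>

/* Allocate a buffer only the device will address. The return value is
 * an opaque cookie, not a CPU-usable pointer; only *dma_handle may be
 * programmed into the device. */
static void *alloc_device_only_buffer(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
{
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_NO_KERNEL_MAPPING);
}

/* The cookie must be released with the same attribute. */
static void free_device_only_buffer(struct device *dev, size_t size,
				    void *cookie, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cookie, dma_handle,
		       DMA_ATTR_NO_KERNEL_MAPPING);
}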
@@ -4,12 +4,15 @@ config ZONE_DMA
config XTENSA
def_bool y
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
select DMA_NONCOHERENT_OPS
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_SHOW
@@ -72,9 +75,6 @@ config TRACE_IRQFLAGS_SUPPORT
config MMU
def_bool n
config VARIANT_IRQ_SWITCH
def_bool n
config HAVE_XTENSA_GPIO32
def_bool n
@@ -244,6 +244,23 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
If in doubt, say Y.
config MEMMAP_CACHEATTR
hex "Cache attributes for the memory address space"
depends on !MMU
default 0x22222222
help
These cache attributes are set up for noMMU systems. Each hex digit
specifies cache attributes for the corresponding 512MB memory
region: bits 0..3 -- for addresses 0x00000000..0x1fffffff,
bits 4..7 -- for addresses 0x20000000..0x3fffffff, and so on.
Cache attribute values are specific to the MMU type, so e.g.
for region protection MMUs: 2 is cache bypass, 4 is WB cached,
1 is WT cached, f is illegal. For the full MMU: bit 0 makes it
executable, bit 1 makes it writable, and bits 2..3 mean
0: cache bypass, 1: WB cache, 2: WT cache, 3: special (c and e
are illegal, f is reserved).
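As a worked example, the kc705_nommu defconfig below sets CONFIG_MEMMAP_CACHEATTR=0xfff2442f: nibble 3 (0x60000000..0x7fffffff, where the KC705 RAM sits) gets 4 (WB cached) and the unpopulated high regions get f. A small C sketch of the nibble-to-region mapping (the helper name is mine):

#include <stdio.h>

/* Nibble i of the MEMMAP_CACHEATTR word holds the cache attribute
 * for the 512MB region starting at i * 0x20000000. */
static void dump_memmap_cacheattr(unsigned long cacheattr)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("0x%08x..0x%08x: 0x%lx\n",
		       i * 0x20000000u, i * 0x20000000u + 0x1fffffffu,
		       (cacheattr >> (4 * i)) & 0xfUL);
}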
config KSEG_PADDR
hex "Physical address of the KSEG mapping"
depends on INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX && MMU
@@ -413,6 +430,10 @@ config XTENSA_PLATFORM_XTFPGA
endchoice
config PLATFORM_NR_IRQS
int
default 3 if XTENSA_PLATFORM_XT2000
default 0
config XTENSA_CPU_CLOCK
int "CPU clock rate [MHz]"
@@ -450,6 +471,15 @@ config BUILTIN_DTB
string "DTB to build into the kernel image"
depends on OF
config PARSE_BOOTPARAM
bool "Parse bootparam block"
default y
help
Parse the parameter block passed to the kernel by the bootloader. This
may be disabled if the kernel is known to run without a bootloader.
If unsure, say Y.
config BLK_DEV_SIMDISK
tristate "Host file-based simulated block device support"
default n
@@ -506,25 +536,13 @@ config PLATFORM_WANT_DEFAULT_MEM
def_bool n
config DEFAULT_MEM_START
hex "Physical address of the default memory area start"
depends on PLATFORM_WANT_DEFAULT_MEM
default 0x00000000 if MMU
default 0x60000000 if !MMU
help
This is the base address of the default memory area.
The default memory area has a platform-specific meaning; it may be
used e.g. for early cache initialization.
If unsure, leave the default value here.
config DEFAULT_MEM_SIZE
hex "Maximal size of the default memory area"
depends on PLATFORM_WANT_DEFAULT_MEM
default 0x04000000
hex
prompt "PAGE_OFFSET/PHYS_OFFSET" if !MMU && PLATFORM_WANT_DEFAULT_MEM
default 0x60000000 if PLATFORM_WANT_DEFAULT_MEM
default 0x00000000
help
This is the size of the default memory area.
The default memory area has a platform-specific meaning; it may be
used e.g. for early cache initialization.
This is the base address used for both PAGE_OFFSET and PHYS_OFFSET
in noMMU configurations.
If unsure, leave the default value here.
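With both symbols set from the same Kconfig value, the usual linear-map conversions degenerate to the identity; schematically, for the XTFPGA default of 0x60000000 (a sketch of the generic pattern, not a quote of this patch):

#define PAGE_OFFSET 0x60000000UL	/* CONFIG_DEFAULT_MEM_START */
#define PHYS_OFFSET 0x60000000UL

#define __pa(vaddr) ((unsigned long)(vaddr) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(paddr) ((void *)((unsigned long)(paddr) - PHYS_OFFSET + PAGE_OFFSET))

Since PAGE_OFFSET == PHYS_OFFSET here, __pa()/__va() reduce to casts, which is why a single Kconfig symbol can serve both roles in noMMU configurations.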
......
@@ -15,10 +15,6 @@
*/
#include <asm/bootparam.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheasm.h>
#include <asm/initialize_mmu.h>
#include <asm/vectors.h>
#include <linux/linkage.h>
@@ -33,16 +29,18 @@ _ResetVector:
.begin no-absolute-literals
.literal_position
.align 4
RomInitAddr:
#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \
XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
.word CONFIG_KERNEL_LOAD_ADDRESS
.literal RomInitAddr, CONFIG_KERNEL_LOAD_ADDRESS
#else
.word KERNELOFFSET
.literal RomInitAddr, KERNELOFFSET
#endif
RomBootParam:
.word _bootparam
#ifndef CONFIG_PARSE_BOOTPARAM
.literal RomBootParam, 0
#else
.literal RomBootParam, _bootparam
.align 4
_bootparam:
.short BP_TAG_FIRST
.short 4
@@ -50,6 +48,7 @@ _bootparam:
.short BP_TAG_LAST
.short 0
.long 0
#endif
.align 4
_SetupMMU:
......
@@ -33,13 +33,13 @@ CONFIG_XTENSA_VARIANT_CUSTOM_NAME="de212"
# CONFIG_XTENSA_VARIANT_MMU is not set
CONFIG_XTENSA_UNALIGNED_USER=y
CONFIG_PREEMPT=y
CONFIG_MEMMAP_CACHEATTR=0xfff2442f
# CONFIG_PCI is not set
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000"
CONFIG_USE_OF=y
CONFIG_BUILTIN_DTB="kc705_nommu"
CONFIG_DEFAULT_MEM_SIZE=0x10000000
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y
......
@@ -3,6 +3,7 @@ generic-y += compat.h
generic-y += device.h
generic-y += div64.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h
......
@@ -31,16 +31,32 @@
*
*/
.macro __loop_cache_all ar at insn size line_width
movi \ar, 0
.macro __loop_cache_unroll ar at insn size line_width max_immed
.if (1 << (\line_width)) > (\max_immed)
.set _reps, 1
.elseif (2 << (\line_width)) > (\max_immed)
.set _reps, 2
.else
.set _reps, 4
.endif
__loopi \ar, \at, \size, (_reps << (\line_width))
.set _index, 0
.rep _reps
\insn \ar, _index << (\line_width)
.set _index, _index + 1
.endr
__endla \ar, \at, _reps << (\line_width)
.endm
__loopi \ar, \at, \size, (4 << (\line_width))
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
.macro __loop_cache_all ar at insn size line_width max_immed
movi \ar, 0
__loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
.endm
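The new max_immed argument bounds how far the loop may be unrolled: each unrolled cache op addresses offset n << line_width, and that offset has to fit the instruction's immediate field (240 or 1020 in the callers below). In C terms, the .if chain above selects (a sketch mirroring the assembler logic; the function name is mine):

/* Unroll-factor selection mirroring __loop_cache_unroll's .if chain:
 * each unrolled op uses offset n << line_width, which must fit the
 * instruction's immediate field. */
static int unroll_reps(int line_width, int max_immed)
{
	if ((1 << line_width) > max_immed)
		return 1;	/* offset 1 << line_width would not fit */
	if ((2 << line_width) > max_immed)
		return 2;	/* offsets 0 and 1 << line_width fit */
	return 4;		/* offsets up to 3 << line_width fit for
				 * the max_immed values used here */
}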
@@ -57,14 +73,9 @@
.endm
.macro __loop_cache_page ar at insn line_width
.macro __loop_cache_page ar at insn line_width max_immed
__loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
.endm
@@ -72,7 +83,8 @@
.macro ___unlock_dcache_all ar at
#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -81,7 +93,8 @@
.macro ___unlock_icache_all ar at
#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
XCHAL_ICACHE_LINEWIDTH 240
#endif
.endm
@@ -90,7 +103,8 @@
.macro ___flush_invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -99,7 +113,8 @@
.macro ___flush_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -108,8 +123,8 @@
.macro ___invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -118,8 +133,8 @@
.macro ___invalidate_icache_all ar at
#if XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
__loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm
@@ -166,7 +181,7 @@
.macro ___flush_invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -175,7 +190,7 @@
.macro ___flush_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -184,7 +199,7 @@
.macro ___invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -193,7 +208,7 @@
.macro ___invalidate_icache_page ar as
#if XCHAL_ICACHE_SIZE
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
extern const struct dma_map_ops xtensa_dma_map_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return &xtensa_dma_map_ops;
}
#endif /* _XTENSA_DMA_MAPPING_H */
@@ -177,36 +177,36 @@
#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU &&
XCHAL_HAVE_SPANNING_WAY */
#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS && \
(XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE)
/* Enable data and instruction cache in the DEFAULT_MEMORY region
* if the processor has DTLB and ITLB.
*/
.endm
.macro initialize_cacheattr
movi a5, PLATFORM_DEFAULT_MEM_START | XCHAL_SPANNING_WAY
#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
#if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
#error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
#endif
movi a5, XCHAL_SPANNING_WAY
movi a6, ~_PAGE_ATTRIB_MASK
movi a7, CA_WRITEBACK
movi a4, CONFIG_MEMMAP_CACHEATTR
movi a8, 0x20000000
movi a9, PLATFORM_DEFAULT_MEM_SIZE
j 2f
1:
sub a9, a9, a8
2:
#if XCHAL_DCACHE_SIZE
rdtlb1 a3, a5
xor a3, a3, a4
and a3, a3, a6
or a3, a3, a7
xor a3, a3, a4
wdtlb a3, a5
#endif
#if XCHAL_ICACHE_SIZE
ritlb1 a4, a5
and a4, a4, a6
or a4, a4, a7
witlb a4, a5
#endif
ritlb1 a3, a5
xor a3, a3, a4
and a3, a3, a6
xor a3, a3, a4
witlb a3, a5
add a5, a5, a8
bltu a8, a9, 1b
srli a4, a4, 4
bgeu a5, a8, 1b
isync
#endif
.endm
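The xor/and/xor sequences around rdtlb1/ritlb1 above are the standard bit-merge idiom: the TLB entry keeps all of its bits outside _PAGE_ATTRIB_MASK, while the bits inside the mask are replaced by the current cache-attribute nibble (walked into place by the srli). A C restatement (a sketch; the helper name is mine):

/* Merge idiom used above: keep old's bits outside attrib_mask,
 * take attr's bits inside it. */
static unsigned long merge_cacheattr(unsigned long old, unsigned long attr,
				     unsigned long attrib_mask)
{
	return ((old ^ attr) & ~attrib_mask) ^ attr;
}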
......
@@ -12,32 +12,17 @@
#define _XTENSA_IRQ_H
#include <linux/init.h>
#include <platform/hardware.h>
#include <variant/core.h>
#ifdef CONFIG_VARIANT_IRQ_SWITCH
#include <variant/irq.h>
#ifdef CONFIG_PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
#else
static inline void variant_irq_enable(unsigned int irq) { }
static inline void variant_irq_disable(unsigned int irq) { }
#endif
#ifndef VARIANT_NR_IRQS
# define VARIANT_NR_IRQS 0
#endif
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS + 1)
#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }
#else
void variant_init_irq(void) __init;
#endif
static __inline__ int irq_canonicalize(int irq)
{
return (irq);
......
@@ -63,12 +63,6 @@
#error XCHAL_KSEG_PADDR is not properly aligned to XCHAL_KSEG_ALIGNMENT
#endif
#else
#define XCHAL_KSEG_CACHED_VADDR __XTENSA_UL_CONST(0xd0000000)
#define XCHAL_KSEG_BYPASS_VADDR __XTENSA_UL_CONST(0xd8000000)
#define XCHAL_KSEG_SIZE __XTENSA_UL_CONST(0x08000000)
#endif
#ifndef CONFIG_KASAN
......
@@ -14,7 +14,6 @@
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
#include <platform/hardware.h>
#include <asm/kmem_layout.h>
/*
@@ -31,8 +30,8 @@
#define MAX_LOW_PFN (PHYS_PFN(XCHAL_KSEG_PADDR) + \
PHYS_PFN(XCHAL_KSEG_SIZE))
#else
#define PAGE_OFFSET PLATFORM_DEFAULT_MEM_START
#define PHYS_OFFSET PLATFORM_DEFAULT_MEM_START
#define PAGE_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
#define PHYS_OFFSET _AC(CONFIG_DEFAULT_MEM_START, UL)
#define MAX_LOW_PFN PHYS_PFN(0xfffffffful)
#endif
......
@@ -66,6 +66,7 @@
#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
#ifdef CONFIG_MMU
/*
* Virtual memory area. We keep a distance to other memory regions to be
* on the safe side. We also use this area for cache aliasing.
@@ -80,6 +81,13 @@
#define TLBTEMP_SIZE ICACHE_WAY_SIZE
#endif
#else
#define VMALLOC_START __XTENSA_UL_CONST(0)
#define VMALLOC_END __XTENSA_UL_CONST(0xffffffff)
#endif
/*
* For the Xtensa architecture, the PTE layout is as follows:
*
......
@@ -75,4 +75,31 @@ extern void platform_calibrate_ccount (void);
*/
void cpu_reset(void) __attribute__((noreturn));
/*
* Memory caching is platform-dependent in noMMU xtensa configurations.
* The following set of functions should be implemented in platform code
* in order to enable coherent DMA memory operations when CONFIG_MMU is not
* enabled. Default implementations do nothing and issue a warning.
*/
/*
 * Check whether p points to cached memory.
 */
bool platform_vaddr_cached(const void *p);
/*
 * Check whether p points to uncached memory.
 */
bool platform_vaddr_uncached(const void *p);
/*
 * Return a pointer to an uncached view of the cached address p.
 */
void *platform_vaddr_to_uncached(void *p);
/*
 * Return a pointer to a cached view of the uncached address p.
 */
void *platform_vaddr_to_cached(void *p);
#endif /* _XTENSA_PLATFORM_H */
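On a noMMU board these hooks would typically describe a fixed pair of address aliases, much like the KSEG-based MMU implementation added to pci-dma.c below. A hedged sketch for a hypothetical platform whose RAM appears cached at 0x60000000 and uncached at 0x70000000 (all constants invented for illustration):

#include <linux/types.h>

#define PLAT_CACHED_BASE  0x60000000UL	/* hypothetical cached alias */
#define PLAT_BYPASS_BASE  0x70000000UL	/* hypothetical bypass alias */
#define PLAT_MEM_SIZE     0x08000000UL

bool platform_vaddr_cached(const void *p)
{
	return (unsigned long)p - PLAT_CACHED_BASE < PLAT_MEM_SIZE;
}

bool platform_vaddr_uncached(const void *p)
{
	return (unsigned long)p - PLAT_BYPASS_BASE < PLAT_MEM_SIZE;
}

void *platform_vaddr_to_uncached(void *p)
{
	return p + (PLAT_BYPASS_BASE - PLAT_CACHED_BASE);
}

void *platform_vaddr_to_cached(void *p)
{
	return p - (PLAT_BYPASS_BASE - PLAT_CACHED_BASE);
}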
@@ -11,7 +11,6 @@
#define _XTENSA_PROCESSOR_H
#include <variant/core.h>
#include <platform/hardware.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
......
@@ -19,7 +19,6 @@
#define _XTENSA_VECTORS_H
#include <variant/core.h>
#include <platform/hardware.h>
#include <asm/kmem_layout.h>
#if XCHAL_HAVE_PTP_MMU
......
@@ -181,6 +181,8 @@ ENTRY(_startup)
isync
initialize_cacheattr
#ifdef CONFIG_HAVE_SMP
movi a2, CCON # MX External Register to Configure Cache
movi a3, 1
......
@@ -158,7 +158,6 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
ipi_init();
#endif
variant_init_irq();
}
#ifdef CONFIG_HOTPLUG_CPU
......
@@ -16,26 +16,25 @@
*/
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>
static void do_cache_op(dma_addr_t dma_handle, size_t size,
static void do_cache_op(phys_addr_t paddr, size_t size,
void (*fn)(unsigned long, unsigned long))
{
unsigned long off = dma_handle & (PAGE_SIZE - 1);
unsigned long pfn = PFN_DOWN(dma_handle);
unsigned long off = paddr & (PAGE_SIZE - 1);
unsigned long pfn = PFN_DOWN(paddr);
struct page *page = pfn_to_page(pfn);
if (!PageHighMem(page))
fn((unsigned long)bus_to_virt(dma_handle), size);
fn((unsigned long)phys_to_virt(paddr), size);
else
while (size > 0) {
size_t sz = min_t(size_t, size, PAGE_SIZE - off);
@@ -49,14 +48,13 @@ static void do_cache_op(dma_addr_t dma_handle, size_t size,
}
}
static void xtensa_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_FROM_DEVICE:
do_cache_op(dma_handle, size, __invalidate_dcache_range);
do_cache_op(paddr, size, __invalidate_dcache_range);
break;
case DMA_NONE:
@@ -68,15 +66,14 @@ static void xtensa_sync_single_for_cpu(struct device *dev,
}
}
static void xtensa_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
case DMA_TO_DEVICE:
if (XCHAL_DCACHE_IS_WRITEBACK)
do_cache_op(dma_handle, size, __flush_dcache_range);
do_cache_op(paddr, size, __flush_dcache_range);
break;
case DMA_NONE:
@@ -88,43 +85,66 @@ static void xtensa_sync_single_for_device(struct device *dev,
}
}
static void xtensa_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir)
#ifdef CONFIG_MMU
bool platform_vaddr_cached(const void *p)
{
struct scatterlist *s;
int i;
unsigned long addr = (unsigned long)p;
for_each_sg(sg, s, nents, i) {
xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
sg_dma_len(s), dir);
}
return addr >= XCHAL_KSEG_CACHED_VADDR &&
addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
}
static void xtensa_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir)
bool platform_vaddr_uncached(const void *p)
{
struct scatterlist *s;
int i;
unsigned long addr = (unsigned long)p;
for_each_sg(sg, s, nents, i) {
xtensa_sync_single_for_device(dev, sg_dma_address(s),
sg_dma_len(s), dir);
}
return addr >= XCHAL_KSEG_BYPASS_VADDR &&
addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
}
void *platform_vaddr_to_uncached(void *p)
{
return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
void *platform_vaddr_to_cached(void *p)
{
return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
#else
bool __attribute__((weak)) platform_vaddr_cached(const void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return true;
}
bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return false;
}
void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return p;
}
void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
{
WARN_ONCE(1, "Default %s implementation is used\n", __func__);
return p;
}
#endif
/*
* Note: We assume that the full memory space is always mapped to 'kseg'.
* Otherwise we have to use page attributes (not implemented).
*/
static void *xtensa_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag,
unsigned long attrs)
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t flag, unsigned long attrs)
{
unsigned long ret;
unsigned long uncached;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
@@ -147,6 +167,10 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
*handle = phys_to_dma(dev, page_to_phys(page));
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
return page;
}
#ifdef CONFIG_MMU
if (PageHighMem(page)) {
void *p;
@@ -161,27 +185,21 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
return p;
}
#endif
ret = (unsigned long)page_address(page);
BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
__invalidate_dcache_range(ret, size);
return (void *)uncached;
BUG_ON(!platform_vaddr_cached(page_address(page)));
__invalidate_dcache_range((unsigned long)page_address(page), size);
return platform_vaddr_to_uncached(page_address(page));
}
static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long addr = (unsigned long)vaddr;
struct page *page;
if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
page = virt_to_page(addr);
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
page = vaddr;
} else if (platform_vaddr_uncached(vaddr)) {
page = virt_to_page(platform_vaddr_to_cached(vaddr));
} else {
#ifdef CONFIG_MMU
dma_common_free_remap(vaddr, size, VM_MAP);
@@ -192,72 +210,3 @@ static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
if (!dma_release_from_contiguous(dev, page, count))
__free_pages(page, get_order(size));
}
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
dma_addr_t dma_handle = page_to_phys(page) + offset;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
xtensa_sync_single_for_device(dev, dma_handle, size, dir);
return dma_handle;
}
static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
s->length, dir, attrs);
}
return nents;
}
static void xtensa_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
xtensa_unmap_page(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}
}
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
const struct dma_map_ops xtensa_dma_map_ops = {
.alloc = xtensa_dma_alloc,
.free = xtensa_dma_free,
.map_page = xtensa_map_page,
.unmap_page = xtensa_unmap_page,
.map_sg = xtensa_map_sg,
.unmap_sg = xtensa_unmap_sg,
.sync_single_for_cpu = xtensa_sync_single_for_cpu,
.sync_single_for_device = xtensa_sync_single_for_device,
.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
.sync_sg_for_device = xtensa_sync_sg_for_device,
.mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);
@@ -47,8 +47,6 @@
#include <asm/smp.h>
#include <asm/sysmem.h>
#include <platform/hardware.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
.orig_x = 0,
@@ -81,6 +79,7 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
#endif
#ifdef CONFIG_PARSE_BOOTPARAM
/*
* Boot parameter parsing.
*
@@ -178,6 +177,13 @@ static int __init parse_bootparam(const bp_tag_t* tag)
return 0;
}
#else
static int __init parse_bootparam(const bp_tag_t *tag)
{
pr_info("Ignoring boot parameters at %p\n", tag);
return 0;
}
#endif
#ifdef CONFIG_OF
......
@@ -20,7 +20,7 @@
#include <asm/vectors.h>
#include <variant/core.h>
#include <platform/hardware.h>
OUTPUT_ARCH(xtensa)
ENTRY(_start)
......
/*
* include/asm-xtensa/platform-iss/hardware.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 Tensilica Inc.
*/
/*
* This file contains the default configuration of ISS.
*/
#ifndef _XTENSA_PLATFORM_ISS_HARDWARE_H
#define _XTENSA_PLATFORM_ISS_HARDWARE_H
/*
* Memory configuration.
*/
#define PLATFORM_DEFAULT_MEM_START 0x00000000
#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
/*
* Interrupt configuration.
*/
#endif /* _XTENSA_PLATFORM_ISS_HARDWARE_H */
@@ -17,17 +17,6 @@
#include <variant/core.h>
/*
* Memory configuration.
*/
#define PLATFORM_DEFAULT_MEM_START 0x00000000
#define PLATFORM_DEFAULT_MEM_SIZE 0x08000000
/*
* Number of platform IRQs
*/
#define PLATFORM_NR_IRQS 3
/*
* On-board components.
*/
......
@@ -17,15 +17,6 @@
#ifndef __XTENSA_XTAVNET_HARDWARE_H
#define __XTENSA_XTAVNET_HARDWARE_H
/* Memory configuration. */
#define PLATFORM_DEFAULT_MEM_START __XTENSA_UL(CONFIG_DEFAULT_MEM_START)
#define PLATFORM_DEFAULT_MEM_SIZE __XTENSA_UL(CONFIG_DEFAULT_MEM_SIZE)
/* Interrupt configuration. */
#define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
......
/*
* tie-asm.h -- compile-time HAL assembler definitions dependent on CORE & TIE
*
* NOTE: This header file is not meant to be included directly.
*/
/* This header file contains assembly-language definitions (assembly
macros, etc.) for this specific Xtensa processor's TIE extensions
and options. It is customized to this Xtensa processor configuration.
Copyright (c) 1999-2015 Cadence Design Systems Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_TIE_ASM_H
#define _XTENSA_CORE_TIE_ASM_H
/* Selection parameter values for save-area save/restore macros: */
/* Option vs. TIE: */
#define XTHAL_SAS_TIE 0x0001 /* custom extension or coprocessor */
#define XTHAL_SAS_OPT 0x0002 /* optional (and not a coprocessor) */
#define XTHAL_SAS_ANYOT 0x0003 /* both of the above */
/* Whether used automatically by compiler: */
#define XTHAL_SAS_NOCC 0x0004 /* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC 0x0008 /* used by compiler without special opts/code */
#define XTHAL_SAS_ANYCC 0x000C /* both of the above */
/* ABI handling across function calls: */
#define XTHAL_SAS_CALR 0x0010 /* caller-saved */
#define XTHAL_SAS_CALE 0x0020 /* callee-saved */
#define XTHAL_SAS_GLOB 0x0040 /* global across function calls (in thread) */
#define XTHAL_SAS_ANYABI 0x0070 /* all of the above three */
/* Misc */
#define XTHAL_SAS_ALL 0xFFFF /* include all default NCP contents */
#define XTHAL_SAS3(optie,ccuse,abi) ( ((optie) & XTHAL_SAS_ANYOT) \
| ((ccuse) & XTHAL_SAS_ANYCC) \
| ((abi) & XTHAL_SAS_ANYABI) )
/*
* Macro to store all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Required parameters:
* ptr Save area pointer address register (clobbered)
* (register must contain a 4 byte aligned address).
* at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
* registers are clobbered, the remaining are unused).
* Optional parameters:
* continue If macro invoked as part of a larger store sequence, set to 1
* if this is not the first in the sequence. Defaults to 0.
* ofs Offset from start of larger sequence (from value of first ptr
* in sequence) at which to store. Defaults to next available space
* (or 0 if <continue> is 0).
* select Select what category(ies) of registers to store, as a bitmask
* (see XTHAL_SAS_xxx constants). Defaults to all registers.
* alloc Select what category(ies) of registers to allocate; if any
* category is selected here that is not in <select>, space for
* the corresponding registers is skipped without doing any store.
*/
.macro xchal_ncp_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
// Optional global registers used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
xchal_sa_align \ptr, 0, 1020, 4, 4
rur.THREADPTR \at1 // threadptr option
s32i \at1, \ptr, .Lxchal_ofs_+0
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1020, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
// Optional caller-saved registers used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1016, 4, 4
rsr.ACCLO \at1 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr.ACCHI \at1 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.endif
// Optional caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1000, 4, 4
rsr.BR \at1 // boolean option
s32i \at1, \ptr, .Lxchal_ofs_+0
rsr.SCOMPARE1 \at1 // conditional store option
s32i \at1, \ptr, .Lxchal_ofs_+4
rsr.M0 \at1 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+8
rsr.M1 \at1 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+12
rsr.M2 \at1 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+16
rsr.M3 \at1 // MAC16 option
s32i \at1, \ptr, .Lxchal_ofs_+20
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1000, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.endif
.endm // xchal_ncp_store
/*
* Macro to load all non-coprocessor (extra) custom TIE and optional state
* (not including zero-overhead loop registers).
* Required parameters:
* ptr Save area pointer address register (clobbered)
* (register must contain a 4 byte aligned address).
* at1..at4 Four temporary address registers (first XCHAL_NCP_NUM_ATMPS
* registers are clobbered, the remaining are unused).
* Optional parameters:
* continue If macro invoked as part of a larger load sequence, set to 1
* if this is not the first in the sequence. Defaults to 0.
* ofs Offset from start of larger sequence (from value of first ptr
* in sequence) at which to load. Defaults to next available space
* (or 0 if <continue> is 0).
* select Select what category(ies) of registers to load, as a bitmask
* (see XTHAL_SAS_xxx constants). Defaults to all registers.
* alloc Select what category(ies) of registers to allocate; if any
* category is selected here that is not in <select>, space for
* the corresponding registers is skipped without doing any load.
*/
.macro xchal_ncp_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
// Optional global registers used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\select)
xchal_sa_align \ptr, 0, 1020, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.THREADPTR \at1 // threadptr option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1020, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 4
.endif
// Optional caller-saved registers used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1016, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr.ACCLO \at1 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr.ACCHI \at1 // MAC16 option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1016, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 8
.endif
// Optional caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 1000, 4, 4
l32i \at1, \ptr, .Lxchal_ofs_+0
wsr.BR \at1 // boolean option
l32i \at1, \ptr, .Lxchal_ofs_+4
wsr.SCOMPARE1 \at1 // conditional store option
l32i \at1, \ptr, .Lxchal_ofs_+8
wsr.M0 \at1 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+12
wsr.M1 \at1 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+16
wsr.M2 \at1 // MAC16 option
l32i \at1, \ptr, .Lxchal_ofs_+20
wsr.M3 \at1 // MAC16 option
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.elseif ((XTHAL_SAS_OPT | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 1000, 4, 4
.set .Lxchal_ofs_, .Lxchal_ofs_ + 24
.endif
.endm // xchal_ncp_load
#define XCHAL_NCP_NUM_ATMPS 1
/*
* Macro to store the state of TIE coprocessor AudioEngineLX.
* Required parameters:
* ptr Save area pointer address register (clobbered)
* (register must contain a 8 byte aligned address).
* at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
* registers are clobbered, the remaining are unused).
* Optional parameters are the same as for xchal_ncp_store.
*/
#define xchal_cp_AudioEngineLX_store xchal_cp1_store
.macro xchal_cp1_store ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
// Custom caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 0, 8, 8
rur.AE_OVF_SAR \at1 // ureg 240
s32i \at1, \ptr, .Lxchal_ofs_+0
rur.AE_BITHEAD \at1 // ureg 241
s32i \at1, \ptr, .Lxchal_ofs_+4
rur.AE_TS_FTS_BU_BP \at1 // ureg 242
s32i \at1, \ptr, .Lxchal_ofs_+8
rur.AE_SD_NO \at1 // ureg 243
s32i \at1, \ptr, .Lxchal_ofs_+12
rur.AE_CBEGIN0 \at1 // ureg 246
s32i \at1, \ptr, .Lxchal_ofs_+16
rur.AE_CEND0 \at1 // ureg 247
s32i \at1, \ptr, .Lxchal_ofs_+20
ae_sp24x2s.i aep0, \ptr, .Lxchal_ofs_+24
ae_sp24x2s.i aep1, \ptr, .Lxchal_ofs_+32
ae_sp24x2s.i aep2, \ptr, .Lxchal_ofs_+40
ae_sp24x2s.i aep3, \ptr, .Lxchal_ofs_+48
ae_sp24x2s.i aep4, \ptr, .Lxchal_ofs_+56
addi \ptr, \ptr, 64
ae_sp24x2s.i aep5, \ptr, .Lxchal_ofs_+0
ae_sp24x2s.i aep6, \ptr, .Lxchal_ofs_+8
ae_sp24x2s.i aep7, \ptr, .Lxchal_ofs_+16
ae_sq56s.i aeq0, \ptr, .Lxchal_ofs_+24
ae_sq56s.i aeq1, \ptr, .Lxchal_ofs_+32
ae_sq56s.i aeq2, \ptr, .Lxchal_ofs_+40
ae_sq56s.i aeq3, \ptr, .Lxchal_ofs_+48
.set .Lxchal_pofs_, .Lxchal_pofs_ + 64
.set .Lxchal_ofs_, .Lxchal_ofs_ + 56
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 0, 8, 8
.set .Lxchal_ofs_, .Lxchal_ofs_ + 120
.endif
.endm // xchal_cp1_store
/*
* Macro to load the state of TIE coprocessor AudioEngineLX.
* Required parameters:
* ptr Save area pointer address register (clobbered)
* (register must contain a 8 byte aligned address).
* at1..at4 Four temporary address registers (first XCHAL_CP1_NUM_ATMPS
* registers are clobbered, the remaining are unused).
* Optional parameters are the same as for xchal_ncp_load.
*/
#define xchal_cp_AudioEngineLX_load xchal_cp1_load
.macro xchal_cp1_load ptr at1 at2 at3 at4 continue=0 ofs=-1 select=XTHAL_SAS_ALL alloc=0
xchal_sa_start \continue, \ofs
// Custom caller-saved registers not used by default by the compiler:
.ifeq (XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\select)
xchal_sa_align \ptr, 0, 0, 8, 8
l32i \at1, \ptr, .Lxchal_ofs_+0
wur.AE_OVF_SAR \at1 // ureg 240
l32i \at1, \ptr, .Lxchal_ofs_+4
wur.AE_BITHEAD \at1 // ureg 241
l32i \at1, \ptr, .Lxchal_ofs_+8
wur.AE_TS_FTS_BU_BP \at1 // ureg 242
l32i \at1, \ptr, .Lxchal_ofs_+12
wur.AE_SD_NO \at1 // ureg 243
l32i \at1, \ptr, .Lxchal_ofs_+16
wur.AE_CBEGIN0 \at1 // ureg 246
l32i \at1, \ptr, .Lxchal_ofs_+20
wur.AE_CEND0 \at1 // ureg 247
ae_lp24x2.i aep0, \ptr, .Lxchal_ofs_+24
ae_lp24x2.i aep1, \ptr, .Lxchal_ofs_+32
ae_lp24x2.i aep2, \ptr, .Lxchal_ofs_+40
ae_lp24x2.i aep3, \ptr, .Lxchal_ofs_+48
ae_lp24x2.i aep4, \ptr, .Lxchal_ofs_+56
addi \ptr, \ptr, 64
ae_lp24x2.i aep5, \ptr, .Lxchal_ofs_+0
ae_lp24x2.i aep6, \ptr, .Lxchal_ofs_+8
ae_lp24x2.i aep7, \ptr, .Lxchal_ofs_+16
addi \ptr, \ptr, 24
ae_lq56.i aeq0, \ptr, .Lxchal_ofs_+0
ae_lq56.i aeq1, \ptr, .Lxchal_ofs_+8
ae_lq56.i aeq2, \ptr, .Lxchal_ofs_+16
ae_lq56.i aeq3, \ptr, .Lxchal_ofs_+24
.set .Lxchal_pofs_, .Lxchal_pofs_ + 88
.set .Lxchal_ofs_, .Lxchal_ofs_ + 32
.elseif ((XTHAL_SAS_TIE | XTHAL_SAS_NOCC | XTHAL_SAS_CALR) & ~(\alloc)) == 0
xchal_sa_align \ptr, 0, 0, 8, 8
.set .Lxchal_ofs_, .Lxchal_ofs_ + 120
.endif
.endm // xchal_cp1_load
#define XCHAL_CP1_NUM_ATMPS 1
#define XCHAL_SA_NUM_ATMPS 1
/* Empty macros for unconfigured coprocessors: */
.macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm
.macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm
#endif /*_XTENSA_CORE_TIE_ASM_H*/
/*
* tie.h -- compile-time HAL definitions dependent on CORE & TIE configuration
*
* NOTE: This header file is not meant to be included directly.
*/
/* This header file describes this specific Xtensa processor's TIE extensions
that extend basic Xtensa core functionality. It is customized to this
Xtensa processor configuration.
Copyright (c) 1999-2015 Cadence Design Systems Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#ifndef _XTENSA_CORE_TIE_H
#define _XTENSA_CORE_TIE_H
#define XCHAL_CP_NUM 2 /* number of coprocessors */
#define XCHAL_CP_MAX 8 /* max CP ID + 1 (0 if none) */
#define XCHAL_CP_MASK 0x82 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x80 /* bitmask of only port CPs */
/* Basic parameters of each coprocessor: */
#define XCHAL_CP1_NAME "AudioEngineLX"
#define XCHAL_CP1_IDENT AudioEngineLX
#define XCHAL_CP1_SA_SIZE 120 /* size of state save area */
#define XCHAL_CP1_SA_ALIGN 8 /* min alignment of save area */
#define XCHAL_CP_ID_AUDIOENGINELX 1 /* coprocessor ID (0..7) */
#define XCHAL_CP7_NAME "XTIOP"
#define XCHAL_CP7_IDENT XTIOP
#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_CP0_SA_SIZE 0
#define XCHAL_CP0_SA_ALIGN 1
#define XCHAL_CP2_SA_SIZE 0
#define XCHAL_CP2_SA_ALIGN 1
#define XCHAL_CP3_SA_SIZE 0
#define XCHAL_CP3_SA_ALIGN 1
#define XCHAL_CP4_SA_SIZE 0
#define XCHAL_CP4_SA_ALIGN 1
#define XCHAL_CP5_SA_SIZE 0
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 36
#define XCHAL_NCP_SA_ALIGN 4
/* Total save area for optional and custom state (NCP + CPn): */
#define XCHAL_TOTAL_SA_SIZE 160 /* with 16-byte align padding */
#define XCHAL_TOTAL_SA_ALIGN 8 /* actual minimum alignment */
/*
* Detailed contents of save areas.
* NOTE: caller must define the XCHAL_SA_REG macro (not defined here)
* before expanding the XCHAL_xxx_SA_LIST() macros.
*
* XCHAL_SA_REG(s,ccused,abikind,kind,opt,name,galign,align,asize,
* dbnum,base,regnum,bitsz,gapsz,reset,x...)
*
* s = passed from XCHAL_*_LIST(s), eg. to select how to expand
* ccused = set if used by compiler without special options or code
* abikind = 0 (caller-saved), 1 (callee-saved), or 2 (thread-global)
* kind = 0 (special reg), 1 (TIE user reg), or 2 (TIE regfile reg)
* opt = 0 (custom TIE extension or coprocessor), or 1 (optional reg)
* name = lowercase reg name (no quotes)
* galign = group byte alignment (power of 2) (galign >= align)
* align = register byte alignment (power of 2)
* asize = allocated size in bytes (asize*8 == bitsz + gapsz + padsz)
* (not including any pad bytes required to galign this or next reg)
* dbnum = unique target number f/debug (see <xtensa-libdb-macros.h>)
* base = reg shortname w/o index (or sr=special, ur=TIE user reg)
* regnum = reg index in regfile, or special/TIE-user reg number
* bitsz = number of significant bits (regfile width, or ur/sr mask bits)
* gapsz = intervening bits, if bitsz bits not stored contiguously
* (padsz = pad bits at end [TIE regfile] or at msbits [ur,sr] of asize)
* reset = register reset value (or 0 if undefined at reset)
* x = reserved for future use (0 until then)
*
* To filter out certain registers, e.g. to expand only the non-global
* registers used by the compiler, you can do something like this:
*
* #define XCHAL_SA_REG(s,ccused,p...) SELCC##ccused(p)
* #define SELCC0(p...)
* #define SELCC1(abikind,p...) SELAK##abikind(p)
* #define SELAK0(p...) REG(p)
* #define SELAK1(p...) REG(p)
* #define SELAK2(p...)
* #define REG(kind,tie,name,galn,aln,asz,csz,dbnum,base,rnum,bsz,rst,x...) \
* ...what you want to expand...
*/
#define XCHAL_NCP_SA_NUM 9
#define XCHAL_NCP_SA_LIST(s) \
XCHAL_SA_REG(s,1,2,1,1, threadptr, 4, 4, 4,0x03E7, ur,231, 32,0,0,0) \
XCHAL_SA_REG(s,1,0,0,1, acclo, 4, 4, 4,0x0210, sr,16 , 32,0,0,0) \
XCHAL_SA_REG(s,1,0,0,1, acchi, 4, 4, 4,0x0211, sr,17 , 8,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, br, 4, 4, 4,0x0204, sr,4 , 16,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, scompare1, 4, 4, 4,0x020C, sr,12 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m0, 4, 4, 4,0x0220, sr,32 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m1, 4, 4, 4,0x0221, sr,33 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m2, 4, 4, 4,0x0222, sr,34 , 32,0,0,0) \
XCHAL_SA_REG(s,0,0,0,1, m3, 4, 4, 4,0x0223, sr,35 , 32,0,0,0)
#define XCHAL_CP0_SA_NUM 0
#define XCHAL_CP0_SA_LIST(s) /* empty */
#define XCHAL_CP1_SA_NUM 18
#define XCHAL_CP1_SA_LIST(s) \
XCHAL_SA_REG(s,0,0,1,0, ae_ovf_sar, 8, 4, 4,0x03F0, ur,240, 7,0,0,0) \
XCHAL_SA_REG(s,0,0,1,0, ae_bithead, 4, 4, 4,0x03F1, ur,241, 32,0,0,0) \
XCHAL_SA_REG(s,0,0,1,0,ae_ts_fts_bu_bp, 4, 4, 4,0x03F2, ur,242, 16,0,0,0) \
XCHAL_SA_REG(s,0,0,1,0, ae_sd_no, 4, 4, 4,0x03F3, ur,243, 28,0,0,0) \
XCHAL_SA_REG(s,0,0,1,0, ae_cbegin0, 4, 4, 4,0x03F6, ur,246, 32,0,0,0) \
XCHAL_SA_REG(s,0,0,1,0, ae_cend0, 4, 4, 4,0x03F7, ur,247, 32,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep0, 8, 8, 8,0x0060, aep,0 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep1, 8, 8, 8,0x0061, aep,1 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep2, 8, 8, 8,0x0062, aep,2 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep3, 8, 8, 8,0x0063, aep,3 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep4, 8, 8, 8,0x0064, aep,4 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep5, 8, 8, 8,0x0065, aep,5 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep6, 8, 8, 8,0x0066, aep,6 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aep7, 8, 8, 8,0x0067, aep,7 , 48,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aeq0, 8, 8, 8,0x0068, aeq,0 , 56,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aeq1, 8, 8, 8,0x0069, aeq,1 , 56,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aeq2, 8, 8, 8,0x006A, aeq,2 , 56,0,0,0) \
XCHAL_SA_REG(s,0,0,2,0, aeq3, 8, 8, 8,0x006B, aeq,3 , 56,0,0,0)
#define XCHAL_CP2_SA_NUM 0
#define XCHAL_CP2_SA_LIST(s) /* empty */
#define XCHAL_CP3_SA_NUM 0
#define XCHAL_CP3_SA_LIST(s) /* empty */
#define XCHAL_CP4_SA_NUM 0
#define XCHAL_CP4_SA_LIST(s) /* empty */
#define XCHAL_CP5_SA_NUM 0
#define XCHAL_CP5_SA_LIST(s) /* empty */
#define XCHAL_CP6_SA_NUM 0
#define XCHAL_CP6_SA_LIST(s) /* empty */
#define XCHAL_CP7_SA_NUM 0
#define XCHAL_CP7_SA_LIST(s) /* empty */
/* Byte length of instruction from its first nibble (op0 field), per FLIX. */
#define XCHAL_OP0_FORMAT_LENGTHS 3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,8
/* Byte length of instruction from its first byte, per FLIX. */
#define XCHAL_BYTE0_FORMAT_LENGTHS \
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,\
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,\
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
#endif /*_XTENSA_CORE_TIE_H*/
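The two length tables above drive instruction decoding: op0 selects the FLIX format, and formats on this core are 3, 2, or 8 bytes. Because test_kc705_be is big-endian, op0 sits in the high nibble of the first instruction byte, which is why XCHAL_BYTE0_FORMAT_LENGTHS simply repeats each XCHAL_OP0_FORMAT_LENGTHS entry sixteen times. A decoding sketch (the function name is mine):

/* XCHAL_OP0_FORMAT_LENGTHS, indexed by the op0 field */
static const unsigned char op0_format_len[16] = {
	3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 8
};

/* big-endian core: op0 is the high nibble of the first byte */
static unsigned int xtensa_insn_len(unsigned char byte0)
{
	return op0_format_len[byte0 >> 4];
}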
@@ -98,14 +98,12 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
static void xtensa_mx_irq_enable(struct irq_data *d)
{
variant_irq_enable(d->hwirq);
xtensa_mx_irq_unmask(d);
}
static void xtensa_mx_irq_disable(struct irq_data *d)
{
xtensa_mx_irq_mask(d);
variant_irq_disable(d->hwirq);
}
static void xtensa_mx_irq_ack(struct irq_data *d)
......
@@ -55,14 +55,12 @@ static void xtensa_irq_unmask(struct irq_data *d)
static void xtensa_irq_enable(struct irq_data *d)
{
variant_irq_enable(d->hwirq);
xtensa_irq_unmask(d);
}
static void xtensa_irq_disable(struct irq_data *d)
{
xtensa_irq_mask(d);
variant_irq_disable(d->hwirq);
}
static void xtensa_irq_ack(struct irq_data *d)
......