提交 fb9ea5ea 编写于 作者: W weety

enable at91sam9260 mmu, update SDIO and EMAC drivers

上级 92d4c193
......@@ -27,6 +27,12 @@
#define mci_dbg(fmt, ...)
#endif
#define MMU_NOCACHE_ADDR(a) ((rt_uint32_t)a | (1UL<<31))
extern void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size);
extern void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size);
#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
......@@ -334,6 +340,7 @@ static void at91_mci_send_command(struct at91_mci *mci, struct rt_mmcsd_cmd *cmd
* Handle a read
*/
mmu_invalidate_dcache(data->buf, data->blksize*data->blks);
at91_mci_init_dma_read(mci);
ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
}
......@@ -359,13 +366,15 @@ static void at91_mci_send_command(struct at91_mci *mci, struct rt_mmcsd_cmd *cmd
return;
}
rt_memset(mci->buf, 0, 12);
rt_memcpy(mci->buf, data->buf, block_length * blocks);
rt_memcpy(mci->buf, data->buf, length);
mmu_clean_dcache(mci->buf, length);
at91_mci_write(AT91_PDC_TPR, (rt_uint32_t)(mci->buf));
at91_mci_write(AT91_PDC_TCR, (data->blksize & 0x3) ?
length : length / 4);
}
else
{
mmu_clean_dcache(data->buf, data->blksize*data->blks);
at91_mci_write(AT91_PDC_TPR, (rt_uint32_t)(data->buf));
at91_mci_write(AT91_PDC_TCR, (data->blksize & 0x3) ?
length : length / 4);
......
......@@ -341,7 +341,7 @@ void rt_hw_board_init()
rt_hw_uart_init();
/* initialize mmu */
//rt_hw_mmu_init();
rt_hw_mmu_init();
/* initialize timer0 */
rt_hw_timer_init();
......
......@@ -18,6 +18,26 @@
#include <at91sam926x.h>
#include "macb.h"
#define MMU_NOCACHE_ADDR(a) ((rt_uint32_t)a | (1UL<<31))
extern void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size);
extern void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size);
/* Cache macros - Packet buffers would be from pbuf pool which is cached */
#define EMAC_VIRT_NOCACHE(addr) (addr)
#define EMAC_CACHE_INVALIDATE(addr, size) \
mmu_invalidate_dcache((rt_uint32_t)addr, size)
#define EMAC_CACHE_WRITEBACK(addr, size) \
mmu_clean_dcache((rt_uint32_t)addr, size)
#define EMAC_CACHE_WRITEBACK_INVALIDATE(addr, size) \
mmu_clean_invalidated_dcache((rt_uint32_t)addr, size)
/* EMAC has BD's in cached memory - so need cache functions */
#define BD_CACHE_INVALIDATE(addr, size)
#define BD_CACHE_WRITEBACK(addr, size)
#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
#define CONFIG_RMII
#define MACB_RX_BUFFER_SIZE 4096*4
......@@ -378,6 +398,8 @@ static rt_err_t rt_macb_init(rt_device_t dev)
}
macb->rx_tail = macb->tx_head = macb->tx_tail = 0;
BD_CACHE_WRITEBACK_INVALIDATE(macb->rx_ring, MACB_RX_RING_SIZE * sizeof(struct macb_dma_desc));
BD_CACHE_WRITEBACK_INVALIDATE(macb->tx_ring, MACB_TX_RING_SIZE * sizeof(struct macb_dma_desc));
macb_writel(macb, RBQP, macb->rx_ring_dma);
macb_writel(macb, TBQP, macb->tx_ring_dma);
......@@ -470,28 +492,12 @@ static rt_err_t rt_macb_control(rt_device_t dev, rt_uint8_t cmd, void *args)
/* transmit packet. */
rt_err_t rt_macb_tx( rt_device_t dev, struct pbuf* p)
{
struct pbuf* q;
rt_uint8_t* bufptr, *buf = RT_NULL;
unsigned long ctrl;
struct rt_macb_eth *macb = dev->user_data;
unsigned int tx_head = macb->tx_head;
/* lock macb device */
rt_sem_take(&sem_lock, RT_WAITING_FOREVER);
buf = rt_malloc(p->tot_len);
if (!buf) {
rt_kprintf("%s:alloc buf failed\n", __func__);
return -RT_ENOMEM;
}
bufptr = buf;
for (q = p; q != NULL; q = q->next)
{
memcpy(bufptr, q->payload, q->len);
bufptr += q->len;
}
rt_sem_take(&sem_lock, RT_WAITING_FOREVER);
EMAC_CACHE_WRITEBACK(p->payload, p->tot_len);
ctrl = p->tot_len & TXBUF_FRMLEN_MASK;
ctrl |= TXBUF_FRAME_END;
if (tx_head == (MACB_TX_RING_SIZE - 1)) {
......@@ -499,17 +505,14 @@ rt_err_t rt_macb_tx( rt_device_t dev, struct pbuf* p)
macb->tx_head = 0;
} else
macb->tx_head++;
macb->tx_ring[tx_head].ctrl = ctrl;
macb->tx_ring[tx_head].addr = (rt_uint32_t)buf;
macb->tx_ring[tx_head].addr = (rt_uint32_t)p->payload;
BD_CACHE_WRITEBACK_INVALIDATE(macb->tx_ring[tx_head], sizeof(struct macb_dma_desc));
macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
/* unlock macb device */
rt_sem_release(&sem_lock);
rt_sem_release(&sem_lock);
/* wait ack */
rt_sem_take(&sem_ack, RT_WAITING_FOREVER);
rt_free(buf);
return RT_EOK;
}
......@@ -546,12 +549,7 @@ struct pbuf *rt_macb_rx(rt_device_t dev)
int wrapped = 0;
rt_uint32_t status;
struct pbuf* q;
rt_uint8_t *buf = RT_NULL;
/* lock macb device */
rt_sem_take(&sem_lock, RT_WAITING_FOREVER);
for (;;) {
if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
break;
......@@ -574,39 +572,18 @@ struct pbuf *rt_macb_rx(rt_device_t dev)
}
if (wrapped) {
unsigned int headlen, taillen;
buf = rt_malloc(len);
if (!buf)
{
rt_kprintf("%s:alloc memory failed\n", __func__);
pbuf_free(p);
p = RT_NULL;
break;
}
headlen = 128 * (MACB_RX_RING_SIZE
- macb->rx_tail);
taillen = len - headlen;
memcpy((void *)buf,
buffer, headlen);
memcpy((void *)((unsigned int)buf + headlen),
EMAC_CACHE_INVALIDATE(buffer, headlen);
EMAC_CACHE_INVALIDATE(macb->rx_buffer, taillen);
memcpy((void *)p->payload, buffer, headlen);
memcpy((void *)((unsigned int)p->payload + headlen),
macb->rx_buffer, taillen);
buffer = (void *)buf;
for (q = p; q != RT_NULL; q= q->next)
{
/* read data from device */
memcpy((void *)q->payload, buffer, q->len);
buffer = (void *)((unsigned int)buffer + q->len);
}
rt_free(buf);
buf = RT_NULL;
} else {
for (q = p; q != RT_NULL; q= q->next)
{
/* read data from device */
memcpy((void *)q->payload, buffer, q->len);
buffer = (void *)((unsigned int)buffer + q->len);
}
EMAC_CACHE_INVALIDATE(buffer, len);
memcpy((void *)p->payload, buffer, p->len);
}
if (++rx_tail >= MACB_RX_RING_SIZE)
......@@ -620,7 +597,7 @@ struct pbuf *rt_macb_rx(rt_device_t dev)
}
}
}
/* unlock macb device */
rt_sem_release(&sem_lock);
return p;
......@@ -647,11 +624,17 @@ void macb_initialize()
macb->rx_buffer = rt_malloc(MACB_RX_BUFFER_SIZE);
macb->rx_ring = rt_malloc(MACB_RX_RING_SIZE * sizeof(struct macb_dma_desc));
macb->tx_ring = rt_malloc(MACB_TX_RING_SIZE * sizeof(struct macb_dma_desc));
EMAC_CACHE_INVALIDATE(macb->rx_ring, MACB_RX_RING_SIZE * sizeof(struct macb_dma_desc));
EMAC_CACHE_INVALIDATE(macb->tx_ring, MACB_TX_RING_SIZE * sizeof(struct macb_dma_desc));
macb->rx_buffer_dma = (unsigned long)macb->rx_buffer;
macb->rx_ring_dma = (unsigned long)macb->rx_ring;
macb->tx_ring_dma = (unsigned long)macb->tx_ring;
macb->rx_ring = MMU_NOCACHE_ADDR(macb->rx_ring);
macb->tx_ring = MMU_NOCACHE_ADDR(macb->tx_ring);
macb->regs = AT91SAM9260_BASE_EMAC;
macb->phy_addr = 0x00;
......
......@@ -141,8 +141,6 @@
#define RT_USING_LWIP
#define RT_LWIP_DNS
#define LWIP_NETIF_LINK_CALLBACK 1
/* Trace LwIP protocol */
// #define RT_LWIP_DEBUG
......
......@@ -9,19 +9,17 @@
*
* Change Logs:
* Date Author Notes
* 2011-01-13 weety modified from mini2440
*/
#include <rtthread.h>
#include "at91sam926x.h"
#define _MMUTT_STARTADDRESS 0x33FF0000
#define CACHE_LINE_SIZE 32
#define DESC_SEC (0x2|(1<<4))
#define CB (3<<2) //cache_on, write_back
#define CNB (2<<2) //cache_on, write_through
#define NCB (1<<2) //cache_off,WR_BUF on
#define NCNB (0<<2) //cache_off,WR_BUF off
#define NCNB (0<<2) //cache_off,WR_BUF off
#define AP_RW (3<<10) //supervisor=RW, user=RW
#define AP_RO (2<<10) //supervisor=RW, user=RO
......@@ -34,15 +32,246 @@
#define DOMAIN0_ATTR (DOMAIN_CHK<<0)
#define DOMAIN1_ATTR (DOMAIN_FAULT<<2)
#define RW_CB (AP_RW|DOMAIN0|CB|DESC_SEC)
#define RW_CNB (AP_RW|DOMAIN0|CNB|DESC_SEC)
#define RW_NCNB (AP_RW|DOMAIN0|NCNB|DESC_SEC)
#define RW_FAULT (AP_RW|DOMAIN1|NCNB|DESC_SEC)
#define RW_CB (AP_RW|DOMAIN0|CB|DESC_SEC) /* Read/Write, cache, write back */
#define RW_CNB (AP_RW|DOMAIN0|CNB|DESC_SEC) /* Read/Write, cache, write through */
#define RW_NCNB (AP_RW|DOMAIN0|NCNB|DESC_SEC) /* Read/Write without cache and write buffer */
#define RW_FAULT (AP_RW|DOMAIN1|NCNB|DESC_SEC) /* Read/Write without cache and write buffer */
#ifdef __GNUC__
#ifdef __CC_ARM
/* Install the level-1 translation table base address (CP15 c2).
 * All TLBs are invalidated first, then the domain access control
 * register (CP15 c3) is set to 0x55555555 (all 16 domains = "client")
 * so access rights come from the page-table entry permission bits. */
void mmu_setttbase(rt_uint32_t i)
{
    register rt_uint32_t value;

    /* Invalidates all TLBs. Domain access is selected as
     * client by configuring domain access register,
     * in that case access controlled by permission value
     * set by page table entry
     */
    value = 0;
    __asm
    {
        mcr p15, 0, value, c8, c7, 0    /* invalidate instruction + data TLBs */
    }
    value = 0x55555555;
    __asm
    {
        mcr p15, 0, value, c3, c0, 0    /* domain access control: all domains client */
        mcr p15, 0, i, c2, c0, 0        /* translation table base register */
    }
}
/* Load the domain access control register (CP15 c3) with 'i'. */
void mmu_set_domain(rt_uint32_t i)
{
    __asm
    {
        mcr p15,0, i, c3, c0, 0
    }
}
/* Enable the MMU: read-modify-write CP15 control register (c1),
 * setting bit 0 (the MMU enable bit). */
void mmu_enable()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x01         /* set MMU enable bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Disable the MMU: clear bit 0 of the CP15 control register (c1). */
void mmu_disable()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x01         /* clear MMU enable bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Enable the instruction cache: set bit 12 (0x1000) of the
 * CP15 control register (c1). */
void mmu_enable_icache()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x1000       /* set I-cache enable bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Enable the data cache: set bit 2 (0x04) of the CP15 control
 * register (c1). Only effective once the MMU is enabled. */
void mmu_enable_dcache()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x04         /* set D-cache enable bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Disable the instruction cache: clear bit 12 (0x1000) of the
 * CP15 control register (c1). */
void mmu_disable_icache()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x1000       /* clear I-cache enable bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Disable the data cache: clear bit 2 (0x04) of the CP15 control
 * register (c1). Does not clean dirty lines; callers that need
 * write-back must clean the cache first. */
void mmu_disable_dcache()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x04         /* clear D-cache enable bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Enable alignment fault checking: set bit 1 (0x02) of the
 * CP15 control register (c1). Unaligned accesses then trap. */
void mmu_enable_alignfault()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        orr value, value, #0x02         /* set alignment check bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Disable alignment fault checking: clear bit 1 (0x02) of the
 * CP15 control register (c1). */
void mmu_disable_alignfault()
{
    register rt_uint32_t value;

    __asm
    {
        mrc p15, 0, value, c1, c0, 0
        bic value, value, #0x02         /* clear alignment check bit */
        mcr p15, 0, value, c1, c0, 0
    }
}
/* Clean and invalidate one data-cache line selected by set/way
 * 'index' (CP15 c7, c14, 2). Used to flush the whole D-cache by
 * iterating over every set/way combination. */
void mmu_clean_invalidated_cache_index(int index)
{
    __asm
    {
        mcr p15, 0, index, c7, c14, 2
    }
}
/* Clean and invalidate the data-cache lines covering [buffer,
 * buffer+size): dirty data is written back to memory, then the
 * lines are dropped (CP15 c7, c14, 1 — clean+invalidate by MVA).
 * The start address is aligned down to a cache-line boundary. */
void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    /* round down to the containing cache line */
    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while(ptr < buffer + size)
    {
        __asm
        {
            MCR p15, 0, ptr, c7, c14, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
/* Clean (write back) the data-cache lines covering [buffer,
 * buffer+size) without invalidating them (CP15 c7, c10, 1 — clean
 * by MVA). Used before a DMA device reads the buffer from memory. */
void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    /* round down to the containing cache line */
    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while (ptr < buffer + size)
    {
        __asm
        {
            MCR p15, 0, ptr, c7, c10, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
/* Invalidate (discard) the data-cache lines covering [buffer,
 * buffer+size) without cleaning (CP15 c7, c6, 1 — invalidate by
 * MVA). Used before reading a buffer a DMA device has written.
 * NOTE: any dirty data in the range is lost; unaligned buffer
 * ends share lines with adjacent data — callers must account
 * for CACHE_LINE_SIZE alignment. */
void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
    unsigned int ptr;

    /* round down to the containing cache line */
    ptr = buffer & ~(CACHE_LINE_SIZE - 1);

    while (ptr < buffer + size)
    {
        __asm
        {
            MCR p15, 0, ptr, c7, c6, 1
        }
        ptr += CACHE_LINE_SIZE;
    }
}
/* Invalidate all instruction and data TLB entries (CP15 c8, c7, 0). */
void mmu_invalidate_tlb()
{
    register rt_uint32_t value;

    value = 0;
    __asm
    {
        mcr p15, 0, value, c8, c7, 0
    }
}
/* Invalidate the entire instruction cache (CP15 c7, c5, 0). */
void mmu_invalidate_icache()
{
    register rt_uint32_t value;

    value = 0;
    __asm
    {
        mcr p15, 0, value, c7, c5, 0
    }
}
/* Invalidate the entire data cache without cleaning (CP15 c7, c6, 0).
 * NOTE: all dirty lines are discarded — only safe when memory is
 * known to be up to date (e.g. right after reset/init). */
void mmu_invalidate_dcache_all()
{
    register rt_uint32_t value;

    value = 0;
    __asm
    {
        mcr p15, 0, value, c7, c6, 0
    }
}
#elif defined(__GNUC__)
/* Install the level-1 translation table base address (CP15 c2).
 * The TLBs are invalidated first so no stale translations survive
 * the table switch, then the domain access control register
 * (CP15 c3) is set to 0x55555555 (all 16 domains = "client") so
 * access rights come from the page-table entry permission bits,
 * and finally the new table base is written. */
void mmu_setttbase(register rt_uint32_t i)
{
    register rt_uint32_t value;

    /* Invalidates all TLBs. Domain access is selected as
     * client by configuring domain access register,
     * in that case access controlled by permission value
     * set by page table entry
     */
    value = 0;
    asm ("mcr p15, 0, %0, c8, c7, 0"::"r"(value));

    value = 0x55555555;
    asm ("mcr p15, 0, %0, c3, c0, 0"::"r"(value));

    /* Translation table base register — written only once, AFTER the
     * TLB invalidate (the original had a leftover duplicate write
     * before the invalidate, which defeated the ordering). */
    asm ("mcr p15, 0, %0, c2, c0, 0"::"r"(i));
}
void mmu_set_domain(register rt_uint32_t i)
......@@ -159,167 +388,69 @@ void mmu_clean_invalidated_cache_index(int index)
asm ("mcr p15, 0, %0, c7, c14, 2": :"r" (index));
}
void mmu_invalidate_tlb()
void mmu_clean_invalidated_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
asm ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
}
void mmu_invalidate_icache()
{
asm ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
}
#endif
#ifdef __CC_ARM
void mmu_setttbase(rt_uint32_t i)
{
__asm
{
mcr p15, 0, i, c2, c0, 0
}
}
void mmu_set_domain(rt_uint32_t i)
{
__asm
{
mcr p15,0, i, c3, c0, 0
}
}
void mmu_enable()
{
register rt_uint32_t value;
__asm
{
mrc p15, 0, value, c1, c0, 0
orr value, value, #0x01
mcr p15, 0, value, c1, c0, 0
}
}
void mmu_disable()
{
register rt_uint32_t value;
__asm
{
mrc p15, 0, value, c1, c0, 0
bic value, value, #0x01
mcr p15, 0, value, c1, c0, 0
}
}
void mmu_enable_icache()
{
register rt_uint32_t value;
__asm
{
mrc p15, 0, value, c1, c0, 0
orr value, value, #0x1000
mcr p15, 0, value, c1, c0, 0
}
}
unsigned int ptr;
void mmu_enable_dcache()
{
register rt_uint32_t value;
ptr = buffer & ~(CACHE_LINE_SIZE - 1);
__asm
while(ptr < buffer + size)
{
mrc p15, 0, value, c1, c0, 0
orr value, value, #0x04
mcr p15, 0, value, c1, c0, 0
asm ("mcr p15, 0, %0, c7, c14, 1": :"r" (ptr));
ptr += CACHE_LINE_SIZE;
}
}
void mmu_disable_icache()
{
register rt_uint32_t value;
__asm
{
mrc p15, 0, value, c1, c0, 0
bic value, value, #0x1000
mcr p15, 0, value, c1, c0, 0
}
}
void mmu_disable_dcache()
void mmu_clean_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
register rt_uint32_t value;
unsigned int ptr;
__asm
{
mrc p15, 0, value, c1, c0, 0
bic value, value, #0x04
mcr p15, 0, value, c1, c0, 0
}
}
ptr = buffer & ~(CACHE_LINE_SIZE - 1);
void mmu_enable_alignfault()
{
register rt_uint32_t value;
__asm
{
mrc p15, 0, value, c1, c0, 0
orr value, value, #0x02
mcr p15, 0, value, c1, c0, 0
}
while (ptr < buffer + size)
{
asm ("mcr p15, 0, %0, c7, c10, 1": :"r" (ptr));
ptr += CACHE_LINE_SIZE;
}
}
void mmu_disable_alignfault()
void mmu_invalidate_dcache(rt_uint32_t buffer, rt_uint32_t size)
{
register rt_uint32_t value;
unsigned int ptr;
__asm
{
mrc p15, 0, value, c1, c0, 0
bic value, value, #0x02
mcr p15, 0, value, c1, c0, 0
}
}
ptr = buffer & ~(CACHE_LINE_SIZE - 1);
void mmu_clean_invalidated_cache_index(int index)
{
__asm
{
mcr p15, 0, index, c7, c14, 2
}
while (ptr < buffer + size)
{
asm ("mcr p15, 0, %0, c7, c6, 1": :"r" (ptr));
ptr += CACHE_LINE_SIZE;
}
}
void mmu_invalidate_tlb()
{
register rt_uint32_t value;
value = 0;
__asm
{
mcr p15, 0, value, c8, c7, 0
}
asm ("mcr p15, 0, %0, c8, c7, 0": :"r" (0));
}
void mmu_invalidate_icache()
{
register rt_uint32_t value;
value = 0;
asm ("mcr p15, 0, %0, c7, c5, 0": :"r" (0));
}
__asm
{
mcr p15, 0, value, c7, c5, 0
}
void mmu_invalidate_dcache_all()
{
asm ("mcr p15, 0, %0, c7, c6, 0": :"r" (0));
}
#endif
void mmu_setmtt(int vaddrStart,int vaddrEnd,int paddrStart,int attr)
/* level1 page table */
static volatile unsigned int _page_table[4*1024] __attribute__((aligned(16*1024)));
void mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd, rt_uint32_t paddrStart, rt_uint32_t attr)
{
volatile rt_uint32_t *pTT;
volatile int i,nSec;
pTT=(rt_uint32_t *)_MMUTT_STARTADDRESS+(vaddrStart>>20);
pTT=(rt_uint32_t *)_page_table+(vaddrStart>>20);
nSec=(vaddrEnd>>20)-(vaddrStart>>20);
for(i=0;i<=nSec;i++)
{
......@@ -330,66 +461,32 @@ void mmu_setmtt(int vaddrStart,int vaddrEnd,int paddrStart,int attr)
void rt_hw_mmu_init(void)
{
#if 0
int i,j;
//========================== IMPORTANT NOTE =========================
//The current stack and code area can't be re-mapped in this routine.
//If you want memory map mapped freely, your own sophiscated mmu
//initialization code is needed.
//===================================================================
/* disable I/D cache */
mmu_disable_dcache();
mmu_disable_icache();
mmu_disable();
mmu_invalidate_tlb();
//If write-back is used,the DCache should be cleared.
for(i=0;i<64;i++)
for(j=0;j<8;j++)
mmu_clean_invalidated_cache_index((i<<26)|(j<<5));
/* set page table */
mmu_setmtt(0x00000000, 0xFFFFFFFF, 0x00000000, RW_NCNB); /* None cached for 4G memory */
mmu_setmtt(0x20000000, 0x24000000-1, 0x20000000, RW_CB); /* 64M cached SDRAM memory */
mmu_setmtt(0x00000000, 0x100000, 0x20000000, RW_CB); /* isr vector table */
mmu_setmtt(0x90000000, 0x100000, 0x00000000, RW_CB); /* 4K cached internal memory */
mmu_setmtt(0xA0000000, 0xA4000000-1, 0x20000000, RW_NCNB); /* 64M none-cached SDRAM memory */
mmu_invalidate_icache();
/* set MMU table address */
mmu_setttbase((rt_uint32_t)_page_table);
//To complete mmu_Init() fast, Icache may be turned on here.
mmu_enable_icache();
/* enables MMU */
mmu_enable();
mmu_disable();
mmu_invalidate_tlb();
/* enable Instruction Cache */
mmu_enable_icache();
//mmu_setmtt(int vaddrStart,int vaddrEnd,int paddrStart,int attr);
mmu_setmtt(0x00000000,0x07f00000,0x00000000,RW_CNB); //bank0
mmu_setmtt(0x00000000,0x03f00000,(int)0x30000000,RW_CB); //bank0
mmu_setmtt(0x04000000,0x07f00000,0,RW_NCNB); //bank0
mmu_setmtt(0x08000000,0x0ff00000,0x08000000,RW_CNB); //bank1
mmu_setmtt(0x10000000,0x17f00000,0x10000000,RW_NCNB); //bank2
mmu_setmtt(0x18000000,0x1ff00000,0x18000000,RW_NCNB); //bank3
//mmu_setmtt(0x20000000,0x27f00000,0x20000000,RW_CB); //bank4
mmu_setmtt(0x20000000,0x27f00000,0x20000000,RW_NCNB); //bank4 for DM9000
mmu_setmtt(0x28000000,0x2ff00000,0x28000000,RW_NCNB); //bank5
//30f00000->30100000, 31000000->30200000
mmu_setmtt(0x30000000,0x30100000,0x30000000,RW_CB); //bank6-1
mmu_setmtt(0x30200000,0x33e00000,0x30200000,RW_CB); //bank6-2
mmu_setmtt(0x33f00000,0x34000000,0x33f00000,RW_NCNB); //bank6-3
mmu_setmtt(0x38000000,0x3ff00000,0x38000000,RW_NCNB); //bank7
mmu_setmtt(0x40000000,0x47f00000,0x40000000,RW_NCNB); //SFR
mmu_setmtt(0x48000000,0x5af00000,0x48000000,RW_NCNB); //SFR
mmu_setmtt(0x5b000000,0x5b000000,0x5b000000,RW_NCNB); //SFR
mmu_setmtt(0x5b100000,0xfff00000,0x5b100000,RW_FAULT);//not used
mmu_setmtt(0x60000000,0x67f00000,0x60000000,RW_NCNB); //SFR
mmu_setttbase(_MMUTT_STARTADDRESS);
/* DOMAIN1: no_access, DOMAIN0,2~15=client(AP is checked) */
mmu_set_domain(0x55555550|DOMAIN1_ATTR|DOMAIN0_ATTR);
mmu_enable_alignfault();
mmu_enable();
/* ICache enable */
mmu_enable_icache();
/* DCache should be turned on after mmu is turned on. */
mmu_enable_dcache();
#endif
/* enable Data Cache */
mmu_enable_dcache();
mmu_invalidate_icache();
mmu_invalidate_dcache_all();
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册