Commit 05f0c553 authored by Linus Torvalds

Merge tag 'nios2-v4.1-rc1' of git://git.rocketboards.org/linux-socfpga-next

Pull arch/nios2 updates from Ley Foon Tan:

 - update cache management code

 - rework the trap handler with newly defined trap numbers

 - fix a header check warning (add types.h for the __u32 type)

* tag 'nios2-v4.1-rc1' of git://git.rocketboards.org/linux-socfpga-next:
  nios2: rework cache
  nios2: Add types.h header required for __u32 type
  nios2: rework trap handler
  nios2: remove end address checking for initda
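
The practical effect of the trap-handler rework (see the entry.S and traps.c hunks below) is that user programs issuing trap 1, trap 2, or trap 3 now receive SIGUSR1, SIGUSR2, or SIGILL (with si_code ILL_ILLTRP) respectively, while the remaining reserved trap numbers also end up in the SIGILL path. A minimal user-space sketch of that behaviour, assuming a nios2 toolchain (the inline "trap" instruction is nios2-specific, and printf in a handler is used only for illustration):

/* Sketch only: exercises the reworked nios2 trap numbering from user space. */
#include <signal.h>
#include <stdio.h>

static void on_sig(int sig, siginfo_t *info, void *ctx)
{
	/* For trap 3 the kernel reports si_code == ILL_ILLTRP. */
	printf("signal %d, si_code %d, addr %p\n",
	       sig, info->si_code, info->si_addr);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = on_sig, .sa_flags = SA_SIGINFO };

	sigaction(SIGUSR1, &sa, NULL);	/* delivered for trap 1 */
	sigaction(SIGUSR2, &sa, NULL);	/* delivered for trap 2 */
	sigaction(SIGILL,  &sa, NULL);	/* delivered for trap 3 and reserved traps */

	__asm__ volatile("trap 1");	/* -> SIGUSR1 */
	__asm__ volatile("trap 2");	/* -> SIGUSR2 */
	__asm__ volatile("trap 3");	/* -> SIGILL / ILL_ILLTRP */
	return 0;
}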
@@ -46,7 +46,6 @@ generic-y += segment.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
-generic-y += shmparam.h
 generic-y += siginfo.h
 generic-y += signal.h
 generic-y += socket.h
/*
* Copyright Altera Corporation (C) <2015>. All rights reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _ASM_NIOS2_SHMPARAM_H
#define _ASM_NIOS2_SHMPARAM_H
#define SHMLBA CONFIG_NIOS2_DCACHE_SIZE
#endif /* _ASM_NIOS2_SHMPARAM_H */
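
Defining SHMLBA as the configured data-cache size means addresses used to map System V shared memory are kept on dcache-sized boundaries, so two mappings of the same segment cannot alias in the data cache. A hedged user-space sketch of the contract (the SHMLBA constant here comes from the C library's <sys/shm.h> and may differ from the kernel's CONFIG_NIOS2_DCACHE_SIZE value):

/* Sketch: SHMLBA is the alignment required for caller-chosen shmat() addresses;
 * pass SHM_RND to have an unaligned hint rounded down for you. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (id < 0) { perror("shmget"); return 1; }

	/* Let the kernel pick the address; a caller-supplied address would
	 * have to be a multiple of SHMLBA. */
	void *p = shmat(id, NULL, 0);
	if (p == (void *)-1) { perror("shmat"); return 1; }

	printf("SHMLBA = %ld, attached at %p, SHMLBA-aligned: %s\n",
	       (long)SHMLBA, p,
	       ((unsigned long)p % SHMLBA) == 0 ? "yes" : "no");

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}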
@@ -14,6 +14,8 @@
 #ifndef __ASSEMBLY__

+#include <linux/types.h>
+
 /*
  * Register numbers used by 'ptrace' system call interface.
  */
@@ -92,35 +92,35 @@ exception_table:

 trap_table:
 	.word	handle_system_call	/* 0  */
-	.word	instruction_trap	/* 1  */
-	.word	instruction_trap	/* 2  */
-	.word	instruction_trap	/* 3  */
-	.word	instruction_trap	/* 4  */
-	.word	instruction_trap	/* 5  */
-	.word	instruction_trap	/* 6  */
-	.word	instruction_trap	/* 7  */
-	.word	instruction_trap	/* 8  */
-	.word	instruction_trap	/* 9  */
-	.word	instruction_trap	/* 10 */
-	.word	instruction_trap	/* 11 */
-	.word	instruction_trap	/* 12 */
-	.word	instruction_trap	/* 13 */
-	.word	instruction_trap	/* 14 */
-	.word	instruction_trap	/* 15 */
-	.word	instruction_trap	/* 16 */
-	.word	instruction_trap	/* 17 */
-	.word	instruction_trap	/* 18 */
-	.word	instruction_trap	/* 19 */
-	.word	instruction_trap	/* 20 */
-	.word	instruction_trap	/* 21 */
-	.word	instruction_trap	/* 22 */
-	.word	instruction_trap	/* 23 */
-	.word	instruction_trap	/* 24 */
-	.word	instruction_trap	/* 25 */
-	.word	instruction_trap	/* 26 */
-	.word	instruction_trap	/* 27 */
-	.word	instruction_trap	/* 28 */
-	.word	instruction_trap	/* 29 */
+	.word	handle_trap_1		/* 1  */
+	.word	handle_trap_2		/* 2  */
+	.word	handle_trap_3		/* 3  */
+	.word	handle_trap_reserved	/* 4  */
+	.word	handle_trap_reserved	/* 5  */
+	.word	handle_trap_reserved	/* 6  */
+	.word	handle_trap_reserved	/* 7  */
+	.word	handle_trap_reserved	/* 8  */
+	.word	handle_trap_reserved	/* 9  */
+	.word	handle_trap_reserved	/* 10 */
+	.word	handle_trap_reserved	/* 11 */
+	.word	handle_trap_reserved	/* 12 */
+	.word	handle_trap_reserved	/* 13 */
+	.word	handle_trap_reserved	/* 14 */
+	.word	handle_trap_reserved	/* 15 */
+	.word	handle_trap_reserved	/* 16 */
+	.word	handle_trap_reserved	/* 17 */
+	.word	handle_trap_reserved	/* 18 */
+	.word	handle_trap_reserved	/* 19 */
+	.word	handle_trap_reserved	/* 20 */
+	.word	handle_trap_reserved	/* 21 */
+	.word	handle_trap_reserved	/* 22 */
+	.word	handle_trap_reserved	/* 23 */
+	.word	handle_trap_reserved	/* 24 */
+	.word	handle_trap_reserved	/* 25 */
+	.word	handle_trap_reserved	/* 26 */
+	.word	handle_trap_reserved	/* 27 */
+	.word	handle_trap_reserved	/* 28 */
+	.word	handle_trap_reserved	/* 29 */
 #ifdef CONFIG_KGDB
 	.word	handle_kgdb_breakpoint	/* 30 KGDB breakpoint */
 #else
@@ -455,6 +455,19 @@ handle_kgdb_breakpoint:
 	br	ret_from_exception
 #endif

+handle_trap_1:
+	call	handle_trap_1_c
+	br	ret_from_exception
+
+handle_trap_2:
+	call	handle_trap_2_c
+	br	ret_from_exception
+
+handle_trap_3:
+handle_trap_reserved:
+	call	handle_trap_3_c
+	br	ret_from_exception
+
 /*
  * Beware - when entering resume, prev (the current task) is
  * in r4, next (the new task) is in r5, don't change these
@@ -23,6 +23,17 @@

 static DEFINE_SPINLOCK(die_lock);

+static void _send_sig(int signo, int code, unsigned long addr)
+{
+	siginfo_t info;
+
+	info.si_signo = signo;
+	info.si_errno = 0;
+	info.si_code = code;
+	info.si_addr = (void __user *) addr;
+	force_sig_info(signo, &info, current);
+}
+
 void die(const char *str, struct pt_regs *regs, long err)
 {
 	console_verbose();
@@ -39,16 +50,10 @@ void die(const char *str, struct pt_regs *regs, long err)

 void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
 {
-	siginfo_t info;
-
 	if (!user_mode(regs))
 		die("Exception in kernel mode", regs, signo);

-	info.si_signo = signo;
-	info.si_errno = 0;
-	info.si_code = code;
-	info.si_addr = (void __user *) addr;
-	force_sig_info(signo, &info, current);
+	_send_sig(signo, code, addr);
 }

 /*
@@ -183,3 +188,18 @@ asmlinkage void unhandled_exception(struct pt_regs *regs, int cause)

 	pr_emerg("opcode: 0x%08lx\n", *(unsigned long *)(regs->ea));
 }
+
+asmlinkage void handle_trap_1_c(struct pt_regs *fp)
+{
+	_send_sig(SIGUSR1, 0, fp->ea);
+}
+
+asmlinkage void handle_trap_2_c(struct pt_regs *fp)
+{
+	_send_sig(SIGUSR2, 0, fp->ea);
+}
+
+asmlinkage void handle_trap_3_c(struct pt_regs *fp)
+{
+	_send_sig(SIGILL, ILL_ILLTRP, fp->ea);
+}
@@ -58,9 +58,6 @@ static void __invalidate_dcache(unsigned long start, unsigned long end)
 	end += (cpuinfo.dcache_line_size - 1);
 	end &= ~(cpuinfo.dcache_line_size - 1);

-	if (end > start + cpuinfo.dcache_size)
-		end = start + cpuinfo.dcache_size;
-
 	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
 		__asm__ __volatile__ ("   initda 0(%0)\n"
 					: /* Outputs */
@@ -131,12 +128,14 @@ void flush_cache_dup_mm(struct mm_struct *mm)

 void flush_icache_range(unsigned long start, unsigned long end)
 {
+	__flush_dcache(start, end);
 	__flush_icache(start, end);
 }

 void flush_dcache_range(unsigned long start, unsigned long end)
 {
 	__flush_dcache(start, end);
+	__flush_icache(start, end);
 }
 EXPORT_SYMBOL(flush_dcache_range);
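
The flush_icache_range() change above matters for any kernel code that writes instructions through the data cache and then executes them (module loading, kprobes, trampolines): the data must be written back before the instruction cache is invalidated, or the icache can refill with stale bytes. A hedged kernel-side sketch of the usual calling pattern (install_code and its buffers are hypothetical helpers, not part of this commit):

#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: copy freshly generated code into 'dst' and make it
 * safe to execute.  On nios2, flush_icache_range() now performs the dcache
 * writeback itself before invalidating the icache over the same range. */
static void install_code(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);			/* store goes through the dcache */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);	/* writeback + icache invalidate */
}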
@@ -159,6 +158,7 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 	unsigned long start = (unsigned long) page_address(page);
 	unsigned long end = start + PAGE_SIZE;

+	__flush_dcache(start, end);
 	__flush_icache(start, end);
 }

@@ -173,6 +173,18 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 	__flush_icache(start, end);
 }

+void __flush_dcache_page(struct address_space *mapping, struct page *page)
+{
+	/*
+	 * Writeback any data associated with the kernel mapping of this
+	 * page.  This ensures that data in the physical page is mutually
+	 * coherent with the kernels mapping.
+	 */
+	unsigned long start = (unsigned long)page_address(page);
+
+	__flush_dcache_all(start, start + PAGE_SIZE);
+}
+
 void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping;
@@ -190,11 +202,12 @@ void flush_dcache_page(struct page *page)
 	if (mapping && !mapping_mapped(mapping)) {
 		clear_bit(PG_dcache_clean, &page->flags);
 	} else {
-		unsigned long start = (unsigned long)page_address(page);
-
-		__flush_dcache_all(start, start + PAGE_SIZE);
-		if (mapping)
+		__flush_dcache_page(mapping, page);
+		if (mapping) {
+			unsigned long start = (unsigned long)page_address(page);
 			flush_aliases(mapping, page);
+			flush_icache_range(start, start + PAGE_SIZE);
+		}
 		set_bit(PG_dcache_clean, &page->flags);
 	}
 }

@@ -205,6 +218,7 @@ void update_mmu_cache(struct vm_area_struct *vma,
 {
 	unsigned long pfn = pte_pfn(*pte);
 	struct page *page;
+	struct address_space *mapping;

 	if (!pfn_valid(pfn))
 		return;
@@ -217,16 +231,15 @@ void update_mmu_cache(struct vm_area_struct *vma,
 	if (page == ZERO_PAGE(0))
 		return;

-	if (!PageReserved(page) &&
-	    !test_and_set_bit(PG_dcache_clean, &page->flags)) {
-		unsigned long start = page_to_virt(page);
-		struct address_space *mapping;
-
-		__flush_dcache(start, start + PAGE_SIZE);
-
-		mapping = page_mapping(page);
-		if (mapping)
-			flush_aliases(mapping, page);
+	mapping = page_mapping(page);
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		__flush_dcache_page(mapping, page);
+
+	if (mapping)
+	{
+		flush_aliases(mapping, page);
+		if (vma->vm_flags & VM_EXEC)
+			flush_icache_page(vma, page);
 	}
 }
@@ -234,15 +247,19 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 			struct page *to)
 {
 	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
+	__flush_icache(vaddr, vaddr + PAGE_SIZE);
 	copy_page(vto, vfrom);
 	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
+	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
 }

 void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
 {
 	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
+	__flush_icache(vaddr, vaddr + PAGE_SIZE);
 	clear_page(addr);
 	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
+	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
 }

 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
@@ -251,7 +268,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 {
 	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
 	memcpy(dst, src, len);
-	__flush_dcache((unsigned long)src, (unsigned long)src + len);
+	__flush_dcache_all((unsigned long)src, (unsigned long)src + len);
 	if (vma->vm_flags & VM_EXEC)
 		__flush_icache((unsigned long)src, (unsigned long)src + len);
 }

@@ -262,7 +279,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 {
 	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
 	memcpy(dst, src, len);
-	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
+	__flush_dcache_all((unsigned long)dst, (unsigned long)dst + len);
 	if (vma->vm_flags & VM_EXEC)
 		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
 }