Commit e96636cc authored by Yoshinori Sato, committed by Paul Mundt

sh: Various nommu fixes.

This fixes up some of the various outstanding nommu bugs on
SH.
Signed-off-by: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent e7f93a35
@@ -21,11 +21,17 @@ endif
 CONFIG_PAGE_OFFSET ?= 0x80000000
 CONFIG_MEMORY_START ?= 0x0c000000
 CONFIG_BOOT_LINK_OFFSET ?= 0x00800000
-IMAGE_OFFSET := $(shell printf "0x%8x" $$[$(CONFIG_PAGE_OFFSET)+$(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET)])
+IMAGE_OFFSET := $(shell printf "0x%8x" $$[$(CONFIG_PAGE_OFFSET) + \
+			$(CONFIG_MEMORY_START) + \
+			$(CONFIG_BOOT_LINK_OFFSET)])
+LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
 LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds
-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
+$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
 	$(call if_changed,ld)
 	@:
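For reference, the default values in this hunk sum to 0x8c800000. A quick standalone sketch of the same arithmetic (plain C; a real nommu build would substitute its own CONFIG_* values):

/*
 * Sketch only: reproduces the IMAGE_OFFSET arithmetic from the Makefile hunk
 * above with the default values shown there. Compile with any C compiler.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_offset      = 0x80000000UL; /* CONFIG_PAGE_OFFSET      */
	unsigned long memory_start     = 0x0c000000UL; /* CONFIG_MEMORY_START     */
	unsigned long boot_link_offset = 0x00800000UL; /* CONFIG_BOOT_LINK_OFFSET */

	/* Same sum the shell $$[...] expression feeds to printf "0x%8x". */
	printf("IMAGE_OFFSET = 0x%8lx\n",
	       page_offset + memory_start + boot_link_offset);
	return 0;
}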
@@ -248,8 +248,13 @@ void __init mem_init(void)
 	 * Setup wrappers for copy/clear_page(), these will get overridden
 	 * later in the boot process if a better method is available.
 	 */
+#ifdef CONFIG_MMU
 	copy_page = copy_page_slow;
 	clear_page = clear_page_slow;
+#else
+	copy_page = copy_page_nommu;
+	clear_page = clear_page_nommu;
+#endif
 	/* this will put all low memory onto the freelists */
 	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
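The copy_page/clear_page symbols assigned here are function pointers (declared in the page.h hunk further down), so mem_init() only installs a boot-time default. A minimal sketch of that indirection; everything beyond the names taken from the diff is illustrative:

/*
 * Minimal sketch (not kernel code): copy_page is a function pointer that
 * mem_init() points at a fallback, which later boot code may swap for a
 * faster routine. copy_page_slow() mirrors the name used in the diff.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void copy_page_slow(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}

static void (*copy_page)(void *to, void *from);

int main(void)
{
	static char src[PAGE_SIZE] = "wrapped page copy", dst[PAGE_SIZE];

	copy_page = copy_page_slow;	/* boot-time default, as in mem_init() */
	copy_page(dst, src);		/* callers never see which variant runs */
	printf("%s\n", dst);
	return 0;
}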
@@ -14,23 +14,24 @@
 #include <linux/string.h>
 #include <asm/page.h>
-static void copy_page_nommu(void *to, void *from)
+void copy_page_nommu(void *to, void *from)
 {
 	memcpy(to, from, PAGE_SIZE);
 }
-static void clear_page_nommu(void *to)
+void clear_page_nommu(void *to)
 {
 	memset(to, 0, PAGE_SIZE);
 }
-static int __init pg_nommu_init(void)
+__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
 {
-	copy_page = copy_page_nommu;
-	clear_page = clear_page_nommu;
+	memcpy(to, from, n);
 	return 0;
 }
-subsys_initcall(pg_nommu_init);
+__kernel_size_t __clear_user(void *to, __kernel_size_t n)
+{
+	memset(to, 0, n);
+	return 0;
+}
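Because a nommu kernel runs user and kernel code in a single flat address space, these helpers can never fault. A standalone sketch of the return-value convention they follow (bytes left uncopied, so 0 means success), with kernel_size_t standing in for __kernel_size_t:

/*
 * Standalone sketch, not kernel code: the return value is the number of
 * bytes that could not be transferred, so 0 means the whole buffer moved.
 * On nommu nothing can fault, hence the unconditional 0.
 */
#include <stdio.h>
#include <string.h>

typedef unsigned long kernel_size_t;

static kernel_size_t copy_user_nommu(void *to, const void *from, kernel_size_t n)
{
	memcpy(to, from, n);
	return 0;			/* nothing left uncopied */
}

int main(void)
{
	char src[] = "flat address space", dst[sizeof(src)];
	kernel_size_t left = copy_user_nommu(dst, src, sizeof(src));

	printf("copied \"%s\", %lu bytes left\n", dst, left);
	return 0;
}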
@@ -14,11 +14,19 @@
 #include <asm/cpu/addrspace.h>
 /* Memory segments (32bit Privileged mode addresses) */
+#ifdef CONFIG_MMU
 #define P0SEG 0x00000000
 #define P1SEG 0x80000000
 #define P2SEG 0xa0000000
 #define P3SEG 0xc0000000
 #define P4SEG 0xe0000000
+#else
+#define P0SEG 0x00000000
+#define P1SEG 0x00000000
+#define P2SEG 0x20000000
+#define P3SEG 0x40000000
+#define P4SEG 0x80000000
+#endif
 /* Returns the privileged segment base of a given address */
 #define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
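PXSEG() only keeps the top three address bits, so it works unchanged with either segment table. A small sketch using 0x8c800000 purely as an example address; the masked result, 0x80000000, matches P1SEG in the MMU layout and P4SEG in the nommu layout above:

/*
 * Sketch of the PXSEG() mask defined above, applied to an example address
 * (0x8c800000 is not taken from the patch).
 */
#include <stdio.h>

#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)

int main(void)
{
	unsigned long addr = 0x8c800000UL;

	printf("PXSEG(0x%08lx) = 0x%08lx\n", addr, PXSEG(addr));
	return 0;
}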
@@ -13,7 +13,7 @@
 #define __ASM_SH_FLAT_H
 #define flat_stack_align(sp) /* nothing needed */
-#define flat_argvp_envp_on_stack() 1
+#define flat_argvp_envp_on_stack() 0
 #define flat_old_ram_flag(flags) (flags)
 #define flat_reloc_valid(reloc, size) ((reloc) <= (size))
 #define flat_get_addr_from_rp(rp, relval, flags) get_unaligned(rp)
@@ -3,19 +3,8 @@
 #if !defined(CONFIG_MMU)
-struct mm_rblock_struct {
-	int size;
-	int refcount;
-	void *kblock;
-};
-struct mm_tblock_struct {
-	struct mm_rblock_struct *rblock;
-	struct mm_tblock_struct *next;
-};
 typedef struct {
-	struct mm_tblock_struct tblock;
 	struct vm_list_struct *vmlist;
 	unsigned long end_brk;
 } mm_context_t;
@@ -38,8 +38,13 @@
 extern void (*clear_page)(void *to);
 extern void (*copy_page)(void *to, void *from);
+#ifdef CONFIG_MMU
 extern void clear_page_slow(void *to);
 extern void copy_page_slow(void *to, void *from);
+#else
+extern void clear_page_nommu(void *to);
+extern void copy_page_nommu(void *to, void *from);
+#endif
 #if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \
 	defined(CONFIG_SH7705_CACHE_32KB))
@@ -168,6 +168,7 @@ do { \
 	__gu_err; \
 })
+#ifdef CONFIG_MMU
 #define __get_user_check(x,ptr,size) \
 ({ \
 	long __gu_err, __gu_val; \
@@ -257,6 +258,18 @@ __asm__("stc r7_bank, %1\n\t" \
 	: "r" (addr) \
 	: "t"); \
 })
+#else /* CONFIG_MMU */
+#define __get_user_check(x,ptr,size) \
+({ \
+	long __gu_err, __gu_val; \
+	if (__access_ok((unsigned long)(ptr), (size))) { \
+		__get_user_size(__gu_val, (ptr), (size), __gu_err); \
+		(x) = (__typeof__(*(ptr)))__gu_val; \
+	} else \
+		__gu_err = -EFAULT; \
+	__gu_err; \
+})
+#endif
 #define __get_user_asm(x, addr, err, insn) \
 ({ \
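The nommu __get_user_check() added above swaps the MMU fault-fixup path for an explicit range check followed by a plain load. A rough standalone sketch of that shape; access_range_ok() and get_user_checked() are made-up stand-ins for __access_ok() and the real macro:

/*
 * Rough sketch, not kernel code: validate the pointer range first, then do a
 * plain load, otherwise report -EFAULT. The range test here is hypothetical;
 * the real check lives in __access_ok().
 */
#include <stdio.h>

#define EFAULT 14

static int access_range_ok(unsigned long addr, unsigned long size)
{
	/* Hypothetical bounds test standing in for __access_ok(). */
	return addr != 0 && addr + size >= addr;
}

static long get_user_checked(int *x, const int *ptr)
{
	if (!access_range_ok((unsigned long)ptr, sizeof(*ptr)))
		return -EFAULT;
	*x = *ptr;	/* plain load: no fault fixup needed without an MMU */
	return 0;
}

int main(void)
{
	int value = 42, out = 0;
	long err = get_user_checked(&out, &value);

	printf("err=%ld out=%d\n", err, out);
	return 0;
}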