Commit 2f47f447, authored by Yoshihiro Shimoda, committed by Paul Mundt

sh: Support fixed 32-bit PMB mappings from bootloader.

This provides a method for supporting fixed PMB mappings inherited from
the bootloader, as an alternative to the dynamic PMB mapping currently
used by the kernel. In the future these methods will be combined.

The P1/P2 area is handled like a regular 29-bit physical address, and local
bus devices are assigned P3 area addresses.
Signed-off-by: Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Parent: a29b99ec
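For orientation (not part of the patch), a minimal user-space sketch of the layout described above, assuming system memory starts at physical 0x48000000 — an illustrative value, not one taken from the patch. RAM is reached through the P1/P2 windows exactly as in legacy 29-bit mode, with the bootloader's PMB entries supplying the upper physical bits, while local bus peripherals get page-table-backed P3 addresses from ioremap().

#include <stdio.h>

/* SH privileged segment bases (fixed by the architecture). */
#define P1SEG 0x80000000UL	/* cached, identity-style window       */
#define P2SEG 0xa0000000UL	/* uncached, identity-style window     */
#define P3SEG 0xc0000000UL	/* page-table mapped (vmalloc/ioremap) */

/* Illustrative physical address, not taken from the patch. */
#define RAM_PHYS 0x48000000UL	/* assumed system memory start */

int main(void)
{
	/* Legacy 29-bit formula: virtual = segment base | (phys & 0x1fffffff). */
	unsigned long cached   = P1SEG | (RAM_PHYS & 0x1fffffff);
	unsigned long uncached = P2SEG | (RAM_PHYS & 0x1fffffff);

	printf("RAM 0x%08lx -> P1 0x%08lx, P2 0x%08lx\n",
	       RAM_PHYS, cached, uncached);
	printf("local bus devices are mapped at P3 addresses >= 0x%08lx\n",
	       P3SEG);
	return 0;
}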
@@ -33,20 +33,24 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
 $(obj)/compressed/vmlinux: FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
-ifeq ($(CONFIG_32BIT),y)
-KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
-		     $$[$(CONFIG_PAGE_OFFSET) + \
-			$(CONFIG_ZERO_PAGE_OFFSET)]')
-else
+KERNEL_MEMORY	:= 0x00000000
+ifeq ($(CONFIG_PMB_FIXED),y)
+KERNEL_MEMORY	:= $(shell /bin/bash -c 'printf "0x%08x" \
+		     $$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
+endif
+ifeq ($(CONFIG_29BIT),y)
+KERNEL_MEMORY	:= $(shell /bin/bash -c 'printf "0x%08x" \
+		     $$[$(CONFIG_MEMORY_START)]')
+endif
+
 KERNEL_LOAD	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET) + \
-			$(CONFIG_MEMORY_START) + \
+			$(KERNEL_MEMORY) + \
 			$(CONFIG_ZERO_PAGE_OFFSET)]')
-endif
 
 KERNEL_ENTRY	:= $(shell /bin/bash -c 'printf "0x%08x" \
 		     $$[$(CONFIG_PAGE_OFFSET) + \
-			$(CONFIG_MEMORY_START) + \
+			$(KERNEL_MEMORY) + \
			$(CONFIG_ZERO_PAGE_OFFSET) + $(CONFIG_ENTRY_OFFSET)]')
 
 quiet_cmd_uimage = UIMAGE  $@
......
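The effect of the new KERNEL_MEMORY variable can be checked with a small stand-alone sketch; the CONFIG values below (MEMORY_START 0x48000000, PAGE_OFFSET 0x80000000, ZERO_PAGE_OFFSET 0x1000, ENTRY_OFFSET 0x1000) are assumptions for illustration, not values from the patch. The linker script hunk further down uses the same PAGE_OFFSET + (MEMORY_START & 0x1fffffff) + ZERO_PAGE_OFFSET expression for the start address.

#include <stdio.h>

/* Assumed configuration values, chosen only for illustration. */
#define CONFIG_MEMORY_START     0x48000000UL
#define CONFIG_PAGE_OFFSET      0x80000000UL
#define CONFIG_ZERO_PAGE_OFFSET 0x00001000UL
#define CONFIG_ENTRY_OFFSET     0x00001000UL

int main(void)
{
	/* CONFIG_PMB_FIXED=y: keep only the low 29 bits of the physical
	 * memory start, exactly as the Makefile now does. */
	unsigned long kernel_memory = CONFIG_MEMORY_START & 0x1fffffff;

	unsigned long kernel_load  = CONFIG_PAGE_OFFSET + kernel_memory +
				     CONFIG_ZERO_PAGE_OFFSET;
	unsigned long kernel_entry = kernel_load + CONFIG_ENTRY_OFFSET;

	printf("KERNEL_MEMORY = 0x%08lx\n", kernel_memory); /* 0x08000000 */
	printf("KERNEL_LOAD   = 0x%08lx\n", kernel_load);   /* 0x88001000 */
	printf("KERNEL_ENTRY  = 0x%08lx\n", kernel_entry);  /* 0x88002000 */
	return 0;
}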
@@ -31,7 +31,7 @@
 /* Returns the physical address of a PnSEG (n=1,2) address */
 #define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
 
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
 /*
  * Map an address to a certain privileged segment
  */
@@ -43,7 +43,7 @@
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
 #define P4SEGADDR(a)	\
 	((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT */
+#endif /* 29BIT || PMB_FIXED */
 #endif /* P1SEG */
 
 /* Check if an address can be reached in 29 bits */
......
@@ -238,7 +238,7 @@ extern void onchip_unmap(unsigned long vaddr);
 static inline void __iomem *
 __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
 	unsigned long last_addr = offset + size - 1;
 #endif
 	void __iomem *ret;
@@ -247,7 +247,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 	if (ret)
 		return ret;
 
-#ifdef CONFIG_SUPERH32
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses are done through P2.
......
@@ -129,7 +129,12 @@ typedef struct page *pgtable_t;
  * is not visible (it is part of the PMB mapping) and so needs to be
  * added or subtracted as required.
  */
-#ifdef CONFIG_32BIT
+#if defined(CONFIG_PMB_FIXED)
+/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
+#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
+#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
+#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
+#elif defined(CONFIG_32BIT)
 #define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
 #define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
 #else
......
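A stand-alone sketch of the new PMB_OFFSET arithmetic, using assumed values PAGE_OFFSET=0x80000000 and __MEMORY_START=0x48000000 (illustrative, not from the patch). PXSEG() keeps the top three address bits, so PMB_OFFSET works out to 0x40000000 and the macros round-trip 0x88000000 <-> 0x48000000; equivalently, phys = virt - PAGE_OFFSET + PXSEG(__MEMORY_START).

#include <stdio.h>

/* Assumed values for illustration; real ones come from the kernel config. */
#define PAGE_OFFSET    0x80000000UL
#define __MEMORY_START 0x48000000UL

/* PXSEG() keeps the segment (top three) bits of an address. */
#define PXSEG(a)    (((unsigned long)(a)) & 0xe0000000)

#define PMB_OFFSET  (PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x)     ((unsigned long)(x) - PMB_OFFSET)
#define __va(x)     ((void *)((unsigned long)(x) + PMB_OFFSET))

int main(void)
{
	unsigned long virt = PAGE_OFFSET + (__MEMORY_START & 0x1fffffff);

	printf("PMB_OFFSET = 0x%08lx\n", PMB_OFFSET);           /* 0x40000000 */
	printf("__pa(0x%08lx) = 0x%08lx\n", virt, __pa(virt));  /* 0x48000000 */
	printf("__va(0x%08lx) = %p\n", __MEMORY_START, __va(__MEMORY_START));
	return 0;
}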
@@ -15,7 +15,10 @@ OUTPUT_ARCH(sh)
 ENTRY(_start)
 SECTIONS
 {
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB_FIXED
+	. = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
+	    CONFIG_ZERO_PAGE_OFFSET;
+#elif defined(CONFIG_32BIT)
 	. = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
 #else
 	. = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
......
@@ -57,7 +57,7 @@ config 32BIT
 	bool
 	default y if CPU_SH5
 
-config PMB
+config PMB_ENABLE
 	bool "Support 32-bit physical addressing through PMB"
 	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
 	select 32BIT
@@ -67,6 +67,33 @@ config PMB
 	  32-bits through the SH-4A PMB. If this is not set, legacy
 	  29-bit physical addressing will be used.
 
+choice
+	prompt "PMB handling type"
+	depends on PMB_ENABLE
+	default PMB_FIXED
+
+config PMB
+	bool "PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If you say Y here, physical addressing will be extended to
+	  32-bits through the SH-4A PMB. If this is not set, legacy
+	  29-bit physical addressing will be used.
+
+config PMB_FIXED
+	bool "fixed PMB"
+	depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \
+					   CPU_SUBTYPE_SH7785)
+	select 32BIT
+	help
+	  If this option is enabled, fixed PMB mappings are inherited
+	  from the boot loader, and the kernel does not attempt dynamic
+	  management. This is the closest to legacy 29-bit physical mode,
+	  and allows systems to support up to 512MiB of system memory.
+
+endchoice
+
 config X2TLB
 	bool "Enable extended TLB mode"
 	depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
......
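The 512MiB figure in the PMB_FIXED help text follows from the segment size: the fixed-PMB scheme keeps the legacy identity-style layout inside one privileged segment, and each of P1/P2 spans 0x20000000 bytes. A quick check (stand-alone, not part of the patch):

#include <stdio.h>

int main(void)
{
	/* P1 (or P2) covers 0x80000000-0x9fffffff: 0x20000000 bytes. */
	unsigned long seg_bytes = 0x20000000UL;

	printf("%lu MiB\n", seg_bytes >> 20);	/* prints 512 */
	return 0;
}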
@@ -35,6 +35,7 @@ endif
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
+obj-$(CONFIG_PMB_FIXED)	+= pmb-fixed.o
 obj-$(CONFIG_NUMA)		+= numa.o
 
 EXTRA_CFLAGS += -Werror
@@ -59,11 +59,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr))
 		return (void __iomem *)phys_addr;
 
+#if !defined(CONFIG_PMB_FIXED)
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
 	if (phys_addr < virt_to_phys(high_memory))
 		return NULL;
+#endif
 
 	/*
 	 * Mappings have to be page-aligned
@@ -81,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	orig_addr = addr = (unsigned long)area->addr;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * First try to remap through the PMB once a valid VMA has been
 	 * established. Smaller allocations (or the rest of the size
@@ -122,7 +124,7 @@ void __iounmap(void __iomem *addr)
 	if (seg < P3SEG || vaddr >= P3_ADDR_MAX || is_pci_memaddr(vaddr))
 		return;
 
-#ifdef CONFIG_32BIT
+#ifdef CONFIG_PMB
 	/*
 	 * Purge any PMB entries that may have been established for this
 	 * mapping, then proceed with conventional VMA teardown.
......
/*
 * arch/sh/mm/fixed_pmb.c
 *
 * Copyright (C) 2009 Renesas Solutions Corp.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

static int __uses_jump_to_uncached fixed_pmb_init(void)
{
	int i;
	unsigned long addr, data;

	jump_to_uncached();

	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		addr = PMB_DATA + (i << PMB_E_SHIFT);
		data = ctrl_inl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		ctrl_outl(data, addr);
	}

	back_to_cached();

	return 0;
}
arch_initcall(fixed_pmb_init);
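A user-space sketch of the cacheability fix-up that fixed_pmb_init() applies to each valid data-array entry, shown here for a write-back configured kernel (CONFIG_CACHE_WRITEBACK). The PMB_* bit values below are assumptions for illustration only; a real build takes them from <asm/mmu.h>. The real routine walks all PMB_ENTRY_MAX slots through uncached accesses, which is why the loop above is bracketed by jump_to_uncached()/back_to_cached().

#include <stdio.h>

/* Assumed PMB data-array bit values, for illustration only. */
#define PMB_V	0x00000100	/* entry valid        */
#define PMB_C	0x00000008	/* cacheable          */
#define PMB_WT	0x00000001	/* write-through mode */

/* Mimic the per-entry fix-up for a write-back kernel configuration. */
static unsigned long fixup_writeback(unsigned long data)
{
	if (!(data & PMB_V))
		return data;		/* invalid entries are left alone */
	if (data & PMB_C)
		data &= ~PMB_WT;	/* cacheable: force write-back    */
	return data;
}

int main(void)
{
	/* Assumed value left behind by the bootloader: valid, cacheable,
	 * write-through. */
	unsigned long data = PMB_V | PMB_C | PMB_WT;

	printf("before: 0x%08lx  after: 0x%08lx\n",
	       data, fixup_writeback(data));	/* 0x00000109 -> 0x00000108 */
	return 0;
}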