openeuler / raspberrypi-kernel

Commit 28cdac66
Authored on Jan 06, 2011 by Russell King

    Merge branch 'pgt' (early part) into devel

Parents: 4073723a 36bb94ba

Showing 15 changed files with 315 additions and 332 deletions (+315 -332)
Changed files:

  arch/arm/include/asm/page.h     +4    -2
  arch/arm/include/asm/pgalloc.h  +22   -28
  arch/arm/include/asm/pgtable.h  +155  -160
  arch/arm/kernel/smp.c           +0    -36
  arch/arm/kernel/traps.c         +6    -6
  arch/arm/mm/Makefile            +2    -2
  arch/arm/mm/fault-armv.c        +1    -1
  arch/arm/mm/fault.c             +1    -1
  arch/arm/mm/idmap.c             +67   -0
  arch/arm/mm/mm.h                +1    -1
  arch/arm/mm/mmu.c               +13   -49
  arch/arm/mm/pgd.c               +18   -19
  arch/arm/mm/proc-macros.S       +15   -15
  arch/arm/mm/proc-v7.S           +8    -10
  arch/arm/mm/proc-xscale.S       +2    -2
arch/arm/include/asm/page.h

@@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
 
+typedef unsigned long pteval_t;
+
 #undef STRICT_MM_TYPECHECKS
 #ifdef STRICT_MM_TYPECHECKS
 /*
  * These are used to make use of C type-checking..
  */
-typedef struct { unsigned long pte; } pte_t;
+typedef struct { pteval_t pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd[2]; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
@@ -175,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 /*
  * .. while these make it easier on the compiler
  */
-typedef unsigned long pte_t;
+typedef pteval_t pte_t;
 typedef unsigned long pmd_t;
 typedef unsigned long pgd_t[2];
 typedef unsigned long pgprot_t;
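The pteval_t typedef introduced above gives the checked and unchecked variants of pte_t one shared underlying type. A standalone C sketch (not kernel code) of what the STRICT_MM_TYPECHECKS wrapper buys, with the macros reduced to their essence:

/*
 * The struct wrapper makes pte_t and pteval_t distinct types, so mixing
 * them up becomes a compile error instead of a silent conversion.
 */
typedef unsigned long pteval_t;
typedef struct { pteval_t pte; } pte_t;	/* checked variant */

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

int main(void)
{
	pte_t pte = __pte(0x1000 | 0x3);
	pteval_t raw = pte_val(pte);	/* explicit unwrap required */
	/* pteval_t bad = pte;  -- would not compile: incompatible types */
	return raw == 0x1003 ? 0 : 1;
}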
arch/arm/include/asm/pgalloc.h

@@ -30,14 +30,16 @@
 #define pmd_free(mm, pmd)		do { } while (0)
 #define pgd_populate(mm,pmd,pte)	BUG()
 
-extern pgd_t *get_pgd_slow(struct mm_struct *mm);
-extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
-
-#define pgd_alloc(mm)			get_pgd_slow(mm)
-#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
 #define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
 
+static inline void clean_pte_table(pte_t *pte)
+{
+	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
+}
+
 /*
  * Allocate one PTE table.
  *
@@ -45,14 +47,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
  * into one table thus:
  *
  *  +------------+
- *  |  h/w pt 0  |
- *  +------------+
- *  |  h/w pt 1  |
- *  +------------+
  *  | Linux pt 0 |
  *  +------------+
  *  | Linux pt 1 |
  *  +------------+
+ *  |  h/w pt 0  |
+ *  +------------+
+ *  |  h/w pt 1  |
+ *  +------------+
  */
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -60,10 +62,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte;
 
 	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
-	if (pte) {
-		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
-		pte += PTRS_PER_PTE;
-	}
+	if (pte)
+		clean_pte_table(pte);
 
 	return pte;
 }
@@ -79,10 +79,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 	pte = alloc_pages(PGALLOC_GFP, 0);
 #endif
 	if (pte) {
-		if (!PageHighMem(pte)) {
-			void *page = page_address(pte);
-			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
-		}
+		if (!PageHighMem(pte))
+			clean_pte_table(page_address(pte));
 		pgtable_page_ctor(pte);
 	}
@@ -94,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
  */
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	if (pte) {
-		pte -= PTRS_PER_PTE;
+	if (pte)
 		free_page((unsigned long)pte);
-	}
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -106,8 +102,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 	__free_page(pte);
 }
 
-static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+	unsigned long prot)
 {
+	unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
 	pmdp[0] = __pmd(pmdval);
 	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
 	flush_pmd_entry(pmdp);
@@ -122,20 +120,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long pte_ptr = (unsigned long)ptep;
-
 	/*
-	 * The pmd must be loaded with the physical
-	 * address of the PTE table
+	 * The pmd must be loaded with the physical address of the PTE table
 	 */
-	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
-	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
+	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
 }
 
 static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
 {
-	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
+	__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
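The new __pmd_populate() takes the physical base of the pte page plus a separate protection value, and PTE_HWTABLE_OFF skips past the two Linux tables that now sit first in the page. A self-contained model of the offset arithmetic (constants inlined from the headers above; uint32_t stands in for the 4-byte ARM pte_t, since this sketch is meant to run on any host):

#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PTE		512
#define PTE_HWTABLE_PTRS	PTRS_PER_PTE
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(uint32_t))	/* 2048 */

int main(void)
{
	uint32_t page = 0x80000000u;			/* phys addr of the pte page */
	uint32_t hw0  = page + PTE_HWTABLE_OFF;		/* what pmdp[0] points at */
	uint32_t hw1  = hw0 + 256 * sizeof(uint32_t);	/* pmdp[1]: second 1KB h/w table */

	/* Linux pt 0/1 occupy +0..+2048; h/w pt 0 at +2048, h/w pt 1 at +3072 */
	printf("h/w pt 0 at +%u, h/w pt 1 at +%u\n",
	       (unsigned)(hw0 - page), (unsigned)(hw1 - page));
	return 0;
}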
arch/arm/include/asm/pgtable.h

@@ -10,6 +10,7 @@
 #ifndef _ASMARM_PGTABLE_H
 #define _ASMARM_PGTABLE_H
 
+#include <linux/const.h>
 #include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
@@ -54,7 +55,7 @@
  * Therefore, we tweak the implementation slightly - we tell Linux that we
  * have 2048 entries in the first level, each of which is 8 bytes (iow, two
  * hardware pointers to the second level.)  The second level contains two
- * hardware PTE tables arranged contiguously, followed by Linux versions
+ * hardware PTE tables arranged contiguously, preceded by Linux versions
  * which contain the state information Linux needs.  We, therefore, end up
  * with 512 entries in the "PTE" level.
 *
@@ -62,15 +63,15 @@
 *
 *    pgd             pte
 * |        |
-* +--------+ +0
-* |        |-----> +------------+ +0
+* +--------+
+* |        |       +------------+ +0
+* +- - - - +       | Linux pt 0 |
+* |        |       +------------+ +1024
+* +--------+ +0    | Linux pt 1 |
+* |        |-----> +------------+ +2048
 * +- - - - + +4    |  h/w pt 0  |
-* |        |-----> +------------+ +1024
+* |        |-----> +------------+ +3072
 * +--------+ +8    |  h/w pt 1  |
-* |        |       +------------+ +2048
-* +- - - - +       | Linux pt 0 |
-* |        |       +------------+ +3072
-* +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
@@ -102,6 +103,10 @@
 #define PTRS_PER_PMD		1
 #define PTRS_PER_PGD		2048
 
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+
 /*
  * PMD_SHIFT determines the size of the area a second-level page table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
@@ -112,13 +117,13 @@
 #define LIBRARY_TEXT_START	0x0c000000
 
 #ifndef __ASSEMBLY__
-extern void __pte_error(const char *file, int line, unsigned long val);
-extern void __pmd_error(const char *file, int line, unsigned long val);
-extern void __pgd_error(const char *file, int line, unsigned long val);
+extern void __pte_error(const char *file, int line, pte_t);
+extern void __pmd_error(const char *file, int line, pmd_t);
+extern void __pgd_error(const char *file, int line, pgd_t);
 
-#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
-#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
-#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
+#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
+#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
 #endif /* !__ASSEMBLY__ */
 
 #define PMD_SIZE		(1UL << PMD_SHIFT)
@@ -133,8 +138,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  */
 #define FIRST_USER_ADDRESS	PAGE_SIZE
 
-#define FIRST_USER_PGD_NR	1
-#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
 
 /*
  * section address mask and size definitions.
@@ -161,30 +165,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * The PTE table pointer refers to the hardware entries; the "Linux"
  * entries are stored 1024 bytes below.
  */
-#define L_PTE_PRESENT		(1 << 0)
-#define L_PTE_YOUNG		(1 << 1)
-#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
-#define L_PTE_DIRTY		(1 << 6)
-#define L_PTE_WRITE		(1 << 7)
-#define L_PTE_USER		(1 << 8)
-#define L_PTE_EXEC		(1 << 9)
-#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */
+#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
+#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
+#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
+#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
+#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
+#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
+#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
+#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */
 
 /*
  * These are the memory types, defined to be compatible with
  * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
 */
-#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
-#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
-#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
-#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
-#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
-#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
-#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */
-#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
-#define L_PTE_MT_DEV_WC		(0x09 << 2)	/* 1001 */
-#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
-#define L_PTE_MT_MASK		(0x0f << 2)
+#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
+#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
+#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
+#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
+#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
+#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
+#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
+#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
+#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
+#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
+#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)
 
 #ifndef __ASSEMBLY__
@@ -201,23 +205,44 @@ extern pgprot_t pgprot_kernel;
 #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))
 
-#define PAGE_NONE		pgprot_user
-#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
-#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
-#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
-#define PAGE_KERNEL		pgprot_kernel
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)
-
-#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
-#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
-#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
-#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
-#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
-#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
+#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
+#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
+#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
+#define PAGE_KERNEL_EXEC	pgprot_kernel
+
+#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
+#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
+#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
+#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
+
+#define __pgprot_modify(prot,mask,bits)		\
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
+#define pgprot_noncached(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
+#define pgprot_writecombine(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				     unsigned long size, pgprot_t vma_prot);
+#else
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
+#endif
 
 #endif /* __ASSEMBLY__ */
@@ -255,26 +280,84 @@ extern pgprot_t pgprot_kernel;
 extern struct page *empty_zero_page;
 #define ZERO_PAGE(vaddr)	(empty_zero_page)
 
-#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
-#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
-
-#define pte_none(pte)		(!pte_val(pte))
-#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
-#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
-
-#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
-#define pte_unmap(pte)		__pte_unmap(pte)
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_none(pgd)		(0)
+#define pgd_bad(pgd)		(0)
+#define pgd_present(pgd)	(1)
+#define pgd_clear(pgdp)		do { } while (0)
+#define set_pgd(pgd,pgdp)	do { } while (0)
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(dir, addr)	((pmd_t *)(dir))
+
+#define pmd_none(pmd)		(!pmd_val(pmd))
+#define pmd_present(pmd)	(pmd_val(pmd))
+#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
+
+#define copy_pmd(pmdpd,pmdps)		\
+	do {				\
+		pmdpd[0] = pmdps[0];	\
+		pmdpd[1] = pmdps[1];	\
+		flush_pmd_entry(pmdpd);	\
+	} while (0)
+
+#define pmd_clear(pmdp)			\
+	do {				\
+		pmdp[0] = __pmd(0);	\
+		pmdp[1] = __pmd(0);	\
+		clean_pmd_entry(pmdp);	\
+	} while (0)
+
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+{
+	return __va(pmd_val(pmd) & PAGE_MASK);
+}
+
+#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+
+/* we don't need complex calculations here as the pmd is folded into the pgd */
+#define pmd_addr_end(addr,end)	(end)
 
 #ifndef CONFIG_HIGHPTE
-#define __pte_map(dir)		pmd_page_vaddr(*(dir))
+#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
 #define __pte_unmap(pte)	do { } while (0)
 #else
-#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
-#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
+#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
+#define __pte_unmap(pte)	kunmap_atomic(pte)
 #endif
+
+#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))
+
+#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
+#define pte_unmap(pte)			__pte_unmap(pte)
+
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn,prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
+#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
+
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
 #if __LINUX_ARM_ARCH__ < 6
 static inline void __sync_icache_dcache(pte_t pteval)
@@ -295,15 +378,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 }
 
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
+#define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
+#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
 #define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
+#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
 #define pte_special(pte)	(0)
 
 #define pte_present_user(pte) \
@@ -313,8 +393,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
 
-PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
-PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
+PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
+PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
 PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
 PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
 PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
@@ -322,101 +402,13 @@ PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
-#define __pgprot_modify(prot,mask,bits)		\
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
-
-/*
- * Mark the prot value as uncacheable and unbufferable.
- */
-#define pgprot_noncached(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
-#define pgprot_writecombine(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define pgprot_dmacoherent(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-				     unsigned long size, pgprot_t vma_prot);
-#else
-#define pgprot_dmacoherent(prot) \
-	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
-#endif
-
-#define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_present(pmd)	(pmd_val(pmd))
-#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps)		\
-	do {				\
-		pmdpd[0] = pmdps[0];	\
-		pmdpd[1] = pmdps[1];	\
-		flush_pmd_entry(pmdpd);	\
-	} while (0)
-
-#define pmd_clear(pmdp)			\
-	do {				\
-		pmdp[0] = __pmd(0);	\
-		pmdp[1] = __pmd(0);	\
-		clean_pmd_entry(pmdp);	\
-	} while (0)
-
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
-{
-	unsigned long ptr;
-
-	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
-	ptr += PTRS_PER_PTE * sizeof(void *);
-
-	return __va(ptr);
-}
-
-#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
-
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end)	(end)
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)
-
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)		(0)
-#define pgd_bad(pgd)		(0)
-#define pgd_present(pgd)	(1)
-#define pgd_clear(pgdp)		do { } while (0)
-#define set_pgd(pgd,pgdp)	do { } while (0)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr)	((pmd_t *)(dir))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
 
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
  * Encode and decode a swap entry.  Swap entries are stored in the Linux
  * page tables as follows:
@@ -481,6 +473,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define pgtable_cache_init() do { } while (0)
 
+void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
+void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
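The flag renames above invert polarity: L_PTE_WRITE and L_PTE_EXEC become L_PTE_RDONLY and L_PTE_XN, so "writable" and "executable" are now the absence of a bit, as the new pte_write() and pte_exec() show. A minimal standalone model of that inversion (bit positions copied from the definitions above):

#include <assert.h>

#define L_PTE_RDONLY	(1ul << 7)
#define L_PTE_XN	(1ul << 9)

#define pte_write(v)	(!((v) & L_PTE_RDONLY))	/* writable = RDONLY clear */
#define pte_exec(v)	(!((v) & L_PTE_XN))	/* executable = XN clear */

int main(void)
{
	unsigned long rw_code = 0;			/* no bits set: r/w + exec */
	unsigned long ro_data = L_PTE_RDONLY | L_PTE_XN;

	assert(pte_write(rw_code) && pte_exec(rw_code));
	assert(!pte_write(ro_data) && !pte_exec(ro_data));
	return 0;
}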
arch/arm/kernel/smp.c

@@ -55,42 +55,6 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 };
 
-static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
-	unsigned long end)
-{
-	unsigned long addr, prot;
-	pmd_t *pmd;
-
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-		prot |= PMD_BIT4;
-
-	for (addr = start & PGDIR_MASK; addr < end;) {
-		pmd = pmd_offset(pgd + pgd_index(addr), addr);
-		pmd[0] = __pmd(addr | prot);
-		addr += SECTION_SIZE;
-		pmd[1] = __pmd(addr | prot);
-		addr += SECTION_SIZE;
-		flush_pmd_entry(pmd);
-		outer_clean_range(__pa(pmd), __pa(pmd + 1));
-	}
-}
-
-static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
-	unsigned long end)
-{
-	unsigned long addr;
-	pmd_t *pmd;
-
-	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
-		pmd = pmd_offset(pgd + pgd_index(addr), addr);
-		pmd[0] = __pmd(0);
-		pmd[1] = __pmd(0);
-		clean_pmd_entry(pmd);
-		outer_clean_range(__pa(pmd), __pa(pmd + 1));
-	}
-}
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
arch/arm/kernel/traps.c

@@ -710,19 +710,19 @@ void __readwrite_bug(const char *fn)
 }
 EXPORT_SYMBOL(__readwrite_bug);
 
-void __pte_error(const char *file, int line, unsigned long val)
+void __pte_error(const char *file, int line, pte_t pte)
 {
-	printk("%s:%d: bad pte %08lx.\n", file, line, val);
+	printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte));
 }
 
-void __pmd_error(const char *file, int line, unsigned long val)
+void __pmd_error(const char *file, int line, pmd_t pmd)
 {
-	printk("%s:%d: bad pmd %08lx.\n", file, line, val);
+	printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd));
 }
 
-void __pgd_error(const char *file, int line, unsigned long val)
+void __pgd_error(const char *file, int line, pgd_t pgd)
 {
-	printk("%s:%d: bad pgd %08lx.\n", file, line, val);
+	printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd));
 }
 
 asmlinkage void __div0(void)
arch/arm/mm/Makefile

@@ -5,8 +5,8 @@
 obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
-obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
-				   pgd.o mmu.o vmregion.o
+obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
+				   mmap.o pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
arch/arm/mm/fault-armv.c

@@ -26,7 +26,7 @@
 #include "mm.h"
 
-static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 #if __LINUX_ARM_ARCH__ < 6
 /*
arch/arm/mm/fault.c

@@ -108,7 +108,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08lx", pte_val(*pte));
-		printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
+		printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS]));
 		pte_unmap(pte);
 	} while(0);
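After the reorder, the hardware entry lives PTE_HWTABLE_PTRS slots above the Linux one, so show_pte() indexes forward rather than backward. A tiny standalone model of the paired-table indexing (hypothetical values; 512 Linux slots followed by the hardware copies, as in the pgtable.h diagram above):

#include <assert.h>
#include <stdint.h>

#define PTE_HWTABLE_PTRS 512		/* Linux entries preceding the h/w copy */

int main(void)
{
	uint32_t table[512 + 512] = { 0 };	/* [0..511] Linux, [512..1023] h/w */
	uint32_t *pte = &table[7];		/* some Linux entry */

	pte[PTE_HWTABLE_PTRS] = 0xdeadbeef;	/* its hardware twin */
	assert(&pte[PTE_HWTABLE_PTRS] == &table[7 + 512]);
	return 0;
}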
arch/arm/mm/idmap.c  (new file, mode 100644)

#include <linux/kernel.h>

#include <asm/cputype.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}

void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long prot, next;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_add_pmd(pgd, addr, next, prot);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_SMP
static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pgd, addr);
	pmd_clear(pmd);
}

void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		idmap_del_pmd(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
#endif

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off
 */
void setup_mm_for_reboot(char mode)
{
	/*
	 * We need to access to user-mode page tables here. For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);

	local_flush_tlb_all();
}
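identity_mapping_add() walks the pgd with pgd_addr_end(), which clamps each step to the next first-level boundary so a partial trailing block is still visited exactly once. A userspace sketch of that walk (simplified: the real kernel macro also guards against address-space wraparound; a 2MB PGDIR span, i.e. two 1MB sections, is assumed here):

#include <stdio.h>

#define PGDIR_SIZE (2ul << 20)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
	return boundary < end ? boundary : end;	/* clamp to the caller's end */
}

int main(void)
{
	unsigned long addr = 0x00100000, end = 0x00500000, next;

	do {
		next = pgd_addr_end(addr, end);
		printf("map [%#lx, %#lx)\n", addr, next);	/* one pgd step */
	} while (addr = next, addr != end);
	return 0;
}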
arch/arm/mm/mm.h

@@ -16,7 +16,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 }
 
 struct mem_type {
-	unsigned int prot_pte;
+	pteval_t prot_pte;
 	unsigned int prot_l1;
 	unsigned int prot_sect;
 	unsigned int domain;
arch/arm/mm/mmu.c

@@ -63,7 +63,7 @@ struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
 	unsigned int	pmd;
-	unsigned int	pte;
+	pteval_t	pte;
 };
 
 static struct cachepolicy cache_policies[] __initdata = {
@@ -191,7 +191,7 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
@@ -236,19 +236,18 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_LOW_VECTORS] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_HIGH_VECTORS] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
@@ -259,21 +258,20 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_NONCACHED] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DTCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -480,7 +478,7 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);
+				 L_PTE_DIRTY | kern_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -536,7 +534,7 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
 		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
-		__pmd_populate(pmd, __pa(pte) | prot);
+		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
@@ -554,7 +552,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 }
 
 static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
-				      unsigned long end, unsigned long phys,
+				      unsigned long end, phys_addr_t phys,
 				      const struct mem_type *type)
 {
 	pmd_t *pmd = pmd_offset(pgd, addr);
@@ -589,7 +587,8 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
 static void __init create_36bit_mapping(struct map_desc *md,
 					const struct mem_type *type)
 {
-	unsigned long phys, addr, length, end;
+	unsigned long addr, length, end;
+	phys_addr_t phys;
 	pgd_t *pgd;
 
 	addr = md->virtual;
@@ -1044,38 +1043,3 @@ void __init paging_init(struct machine_desc *mdesc)
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
 }
-
-/*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
- */
-void setup_mm_for_reboot(char mode)
-{
-	unsigned long base_pmdval;
-	pgd_t *pgd;
-	int i;
-
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	pgd = current->active_mm->pgd;
-
-	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-		base_pmdval |= PMD_BIT4;
-
-	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
-		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
-		pmd_t *pmd;
-
-		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
-		pmd[0] = __pmd(pmdval);
-		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
-		flush_pmd_entry(pmd);
-	}
-
-	local_flush_tlb_all();
-}
arch/arm/mm/pgd.c

@@ -17,12 +17,10 @@
 #include "mm.h"
 
-#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
-
 /*
  * need to get a 16k page for level 1
  */
-pgd_t *get_pgd_slow(struct mm_struct *mm)
+pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *new_pgd, *init_pgd;
 	pmd_t *new_pmd, *init_pmd;
@@ -32,14 +30,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	if (!new_pgd)
 		goto no_pgd;
 
-	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 
 	/*
 	 * Copy over the kernel and IO PGD entries
 	 */
 	init_pgd = pgd_offset_k(0);
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 
 	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
@@ -73,28 +71,29 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	return NULL;
 }
 
-void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 {
+	pgd_t *pgd;
 	pmd_t *pmd;
 	pgtable_t pte;
 
-	if (!pgd)
+	if (!pgd_base)
 		return;
 
-	/* pgd is always present and good */
-	pmd = pmd_off(pgd, 0);
-	if (pmd_none(*pmd))
-		goto free;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		goto free;
-	}
+	pgd = pgd_base + pgd_index(0);
+	if (pgd_none_or_clear_bad(pgd))
+		goto no_pgd;
+
+	pmd = pmd_offset(pgd, 0);
+	if (pmd_none_or_clear_bad(pmd))
+		goto no_pmd;
 
 	pte = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
 	pte_free(mm, pte);
+no_pmd:
+	pgd_clear(pgd);
 	pmd_free(mm, pmd);
-free:
-	free_pages((unsigned long) pgd, 2);
+no_pgd:
+	free_pages((unsigned long) pgd_base, 2);
 }
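The reworked pgd_free() replaces the old "free:" fall-through with the kernel's usual forward-goto unwind, releasing only what was actually set up. A generic, self-contained illustration of that control-flow shape (hypothetical resources, not the pgd code itself):

#include <stdlib.h>

struct res { void *a, *b; };

static int setup(struct res *r)
{
	r->a = malloc(16);
	if (!r->a)
		goto no_a;		/* nothing to undo yet */
	r->b = malloc(16);
	if (!r->b)
		goto no_b;		/* undo only the first allocation */
	return 0;

no_b:
	free(r->a);
no_a:
	return -1;
}

int main(void)
{
	struct res r;
	if (setup(&r) == 0) {
		free(r.b);
		free(r.a);
	}
	return 0;
}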
arch/arm/mm/proc-macros.S

@@ -91,7 +91,7 @@
 #if L_PTE_SHARED != PTE_EXT_SHARED
 #error PTE shared bit mismatch
 #endif
-#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
+#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
     L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
 #error Invalid Linux PTE bit settings
 #endif
@@ -135,7 +135,7 @@
 	.endm
 
 	.macro	armv6_set_pte_ext pfx
-	str	r1, [r0], #-2048		@ linux version
+	str	r1, [r0], #2048			@ linux version
 
 	bic	r3, r1, #0x000003fc
 	bic	r3, r3, #PTE_TYPE_MASK
@@ -146,9 +146,9 @@
 	and	r2, r1, #L_PTE_MT_MASK
 	ldr	r2, [ip, r2]
 
-	tst	r1, #L_PTE_WRITE
-	tstne	r1, #L_PTE_DIRTY
-	orreq	r3, r3, #PTE_EXT_APX
+	eor	r1, r1, #L_PTE_DIRTY
+	tst	r1, #L_PTE_DIRTY|L_PTE_RDONLY
+	orrne	r3, r3, #PTE_EXT_APX
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
@@ -158,8 +158,8 @@
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
 #endif
 
-	tst	r1, #L_PTE_EXEC
-	orreq	r3, r3, #PTE_EXT_XN
+	tst	r1, #L_PTE_XN
+	orrne	r3, r3, #PTE_EXT_XN
 
 	orr	r3, r3, r2
@@ -187,9 +187,9 @@
  *  1111  0xff	r/w	r/w
  */
 	.macro	armv3_set_pte_ext wc_disable=1
-	str	r1, [r0], #-2048		@ linux version
+	str	r1, [r0], #2048			@ linux version
 
-	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY
 
 	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
 	bic	r2, r2, #PTE_TYPE_MASK
@@ -198,7 +198,7 @@
 	tst	r3, #L_PTE_USER			@ user?
 	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW
 
-	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
+	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
 	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW
 
 	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
@@ -210,7 +210,7 @@
 	bicne	r2, r2, #PTE_BUFFERABLE
 #endif
 	.endif
-	str	r2, [r0]			@ hardware version
+	str	r2, [r0]			@ hardware version
 	.endm
@@ -230,9 +230,9 @@
  *  1111  11	r/w	r/w
  */
 	.macro	xscale_set_pte_ext_prologue
-	str	r1, [r0], #-2048		@ linux version
+	str	r1, [r0]			@ linux version
 
-	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY
 
 	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
 	orr	r2, r2, #PTE_TYPE_EXT		@ extended page
@@ -240,7 +240,7 @@
 	tst	r3, #L_PTE_USER			@ user?
 	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w
 
-	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
+	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
 	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
 						@ combined with user -> user r/w
 	.endm
@@ -249,7 +249,7 @@
 	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
 	movne	r2, #0				@ no -> fault
 
-	str	r2, [r0]			@ hardware version
+	str	r2, [r0, #2048]!		@ hardware version
 	mov	ip, #0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
 	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
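The armv6 macro's new eor/tst sequence computes the hardware APX (read-only) bit as "the Linux pte is read-only or not yet dirty", so a clean but writable page still faults on its first write and gets marked dirty. A C model of those three instructions (bit values taken from the headers above; this models the logic, it is not the assembler itself):

#include <assert.h>

#define L_PTE_DIRTY	(1u << 6)
#define L_PTE_RDONLY	(1u << 7)
#define PTE_EXT_APX	(1u << 9)

/*
 *   eor  r1, r1, #L_PTE_DIRTY           ; invert the DIRTY bit
 *   tst  r1, #L_PTE_DIRTY|L_PTE_RDONLY
 *   orrne r3, r3, #PTE_EXT_APX          ; set APX if clean or read-only
 */
static unsigned hw_apx(unsigned linux_pte)
{
	unsigned r1 = linux_pte ^ L_PTE_DIRTY;
	return (r1 & (L_PTE_DIRTY | L_PTE_RDONLY)) ? PTE_EXT_APX : 0;
}

int main(void)
{
	assert(hw_apx(L_PTE_DIRTY) == 0);		/* dirty + writable: r/w */
	assert(hw_apx(0) == PTE_EXT_APX);		/* clean: read-only */
	assert(hw_apx(L_PTE_RDONLY | L_PTE_DIRTY) == PTE_EXT_APX); /* rdonly */
	return 0;
}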
arch/arm/mm/proc-v7.S

@@ -124,15 +124,13 @@ ENDPROC(cpu_v7_switch_mm)
 *	Set a level 2 translation table entry.
 *
 *	- ptep  - pointer to level 2 translation table entry
-*		  (hardware version is stored at -1024 bytes)
+*		  (hardware version is stored at +2048 bytes)
 *	- pte   - PTE value to store
 *	- ext	- value for extended PTE bits
 */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
- ARM(	str	r1, [r0], #-2048	)	@ linux version
- THUMB(	str	r1, [r0]	)	@ linux version
- THUMB(	sub	r0, r0, #2048	)
+	str	r1, [r0]			@ linux version
 
 	bic	r3, r1, #0x000003f0
 	bic	r3, r3, #PTE_TYPE_MASK
@@ -142,9 +140,9 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
 
-	tst	r1, #L_PTE_WRITE
-	tstne	r1, #L_PTE_DIRTY
-	orreq	r3, r3, #PTE_EXT_APX
+	eor	r1, r1, #L_PTE_DIRTY
+	tst	r1, #L_PTE_RDONLY | L_PTE_DIRTY
+	orrne	r3, r3, #PTE_EXT_APX
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
@@ -154,14 +152,14 @@ ENTRY(cpu_v7_set_pte_ext)
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
 #endif
 
-	tst	r1, #L_PTE_EXEC
-	orreq	r3, r3, #PTE_EXT_XN
+	tst	r1, #L_PTE_XN
+	orrne	r3, r3, #PTE_EXT_XN
 
 	tst	r1, #L_PTE_YOUNG
 	tstne	r1, #L_PTE_PRESENT
 	moveq	r3, #0
 
-	str	r3, [r0]
+	str	r3, [r0, #2048]!
 	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
 #endif
 	mov	pc, lr
arch/arm/mm/proc-xscale.S

@@ -500,8 +500,8 @@ ENTRY(cpu_xscale_set_pte_ext)
 	@
 	@ Erratum 40: must set memory to write-through for user read-only pages
 	@
-	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
-	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER
+	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
+	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY
 
 	moveq	r1, #L_PTE_MT_WRITETHROUGH
 	and	r1, r1, #L_PTE_MT_MASK