Commit 64b703ef authored by Vineet Gupta

ARC: MMUv4 preps/1 - Fold PTE K/U access flags

The current ARC VM code has 13 flags in the Page Table Entry: some software
(accessed/dirty/non-linear-maps) and the rest hardware specific. With an 8K MMU
page, we need 19 bits for addressing the page frame, so the remaining 13 bits
are just about enough to accommodate the current flags.

In MMUv4 there are 2 additional flags, SZ (normal or super page) and WT
(cache access mode write-thru) - and additionally the PFN is 20 bits (vs. 19
before for 8K). Thus these can't be held in the current PTE w/o making each
entry 64-bit wide.

It seems there is some scope for compressing the current PTE flags (and
freeing up a few bits). Currently the PTE contains fully orthogonal, distinct
access permissions for kernel and user mode (Kr, Kw, Kx; Ur, Uw, Ux),
which can be folded into one set (R, W, X). The translation of 3 PTE
bits into 6 TLB bits (when programming the MMU) can be done based on the
following prerequisites/assumptions:

1. For kernel-mode-only translations (vmalloc: 0x7000_0000 to
   0x7FFF_FFFF), the PTE additionally has the PAGE_GLOBAL flag set (and
   user-space entries can never be global). Thus such a PTE can translate
   to Kr, Kw, Kx (as appropriate) and zero for the user-mode counterparts.

2. For non-global entries, the PTE flags can be used to create mirrored
   K and U TLB bits. This is true after commit a950549c
   "ARC: copy_(to|from)_user() to honor usermode-access permissions",
   which ensured that user-space translations _MUST_ have the same access
   permissions for both U/K mode accesses, so that copy_{to,from}_user()
   plays fair with fault-based CoW break and such...

There is no such thing as a free lunch - the cost is slightly inflated
TLB-miss handlers.
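
For illustration, a minimal user-space C sketch of the 3-bit to 6-bit expansion
described above. The bit positions mirror the MMUv2 PD1 layout used in this patch;
the fold_rwx_for_tlb() helper and the main() harness are hypothetical, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Folded (user-level) permission bits, MMUv1/v2 PD1 positions */
#define _PAGE_EXECUTE	(1 << 3)
#define _PAGE_WRITE	(1 << 4)
#define _PAGE_READ	(1 << 5)
#define _PAGE_GLOBAL	(1 << 9)

#define PTE_BITS_RWX	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

/*
 * Expand the single R/W/X set kept in the Linux PTE into the six
 * orthogonal Kr/Kw/Kx/Ur/Uw/Ux bits the TLB entry carries:
 *  - global (kernel-only, vmalloc) pages get only the kernel copies
 *  - non-global (user) pages get mirrored kernel and user copies
 */
static uint32_t fold_rwx_for_tlb(uint32_t pte)
{
	uint32_t rwx = pte & PTE_BITS_RWX;

	if (pte & _PAGE_GLOBAL)
		return rwx << 3;	/* r w x => Kr Kw Kx 0 0 0 */

	return rwx | (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
}

int main(void)
{
	uint32_t user_pte    = _PAGE_READ | _PAGE_WRITE;
	uint32_t vmalloc_pte = _PAGE_READ | _PAGE_WRITE | _PAGE_GLOBAL;

	printf("user:    %#x\n", (unsigned)fold_rwx_for_tlb(user_pte));
	printf("vmalloc: %#x\n", (unsigned)fold_rwx_for_tlb(vmalloc_pte));
	return 0;
}

With these example positions, a user R+W PTE expands to 0x1b0 (Kr Kw Ur Uw),
while the same permissions on a global (vmalloc) PTE expand to 0x180 (Kr Kw only).
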
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Parent: 4b06ff35
@@ -57,27 +57,21 @@
 #define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
 #define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<3)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<4)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<5)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<6)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<7)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<8)	/* Page has kernel perm (H) */
+#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
 #define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
 #define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
 #define _PAGE_FILE          (1<<10)	/* page cache/ swap (S) */
 #define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */
-#else
+#else	/* MMU v3 onwards */
 /* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<1)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<2)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<3)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<4)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<5)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<6)	/* Page has kernel perm (H) */
+#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
 #define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */

 /* PD0 */
@@ -92,8 +86,8 @@
 #define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */
 #endif

-/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+/* vmalloc permissions */
+#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			_PAGE_GLOBAL | _PAGE_PRESENT)

 #ifdef CONFIG_ARC_CACHE_PAGES
@@ -109,10 +103,6 @@
 */
 #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

-#define _PAGE_READ	(_PAGE_U_READ | _PAGE_K_READ)
-#define _PAGE_WRITE	(_PAGE_U_WRITE | _PAGE_K_WRITE)
-#define _PAGE_EXECUTE	(_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
-
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
@@ -126,8 +116,8 @@
 #define PAGE_SHARED	PAGE_U_W_R

-/* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of
- * kernel vaddr space - visible in all addr spaces, but kernel mode only
+/* While kernel runs out of unstranslated space, vmalloc/modules use a chunk of
+ * user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
 #define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
@@ -137,9 +127,8 @@
 /* Masks for actual TLB "PD"s */
 #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT)
-#define PTE_BITS_IN_PD1		(PAGE_MASK | _PAGE_CACHEABLE | \
-				 _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
-				 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)

 /**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
......
@@ -341,7 +341,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
	unsigned long flags;
-	unsigned int idx, asid_or_sasid;
+	unsigned int idx, asid_or_sasid, rwx;
	unsigned long pd0_flags;

	/*
@@ -393,8 +393,23 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);

+	/*
+	 * ARC MMU provides fully orthogonal access bits for K/U mode,
+	 * however Linux only saves 1 set to save PTE real-estate
+	 * Here we convert 3 PTE bits into 6 MMU bits:
+	 *   -Kernel only entries have Kr Kw Kx 0 0 0
+	 *   -User entries have mirrored K and U bits
+	 */
+	rwx = pte_val(*ptep) & PTE_BITS_RWX;
+
+	if (pte_val(*ptep) & _PAGE_GLOBAL)
+		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
+	else
+		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
+
	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
-	write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+	write_aux_reg(ARC_REG_TLBPD1,
+		      rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1));

	/* First verify if entry for this vaddr+ASID already exists */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
......
@@ -218,7 +218,14 @@ ex_saved_reg1:
 ; IN: r0 = PTE, r1 = ptr to PTE

 .macro CONV_PTE_TO_TLB
-	and r3, r0, PTE_BITS_IN_PD1	; Extract permission flags+PFN from PTE
+	and    r3, r0, PTE_BITS_RWX	;          r  w  x
+	lsl    r2, r3, 3		; r w x 0 0 0
+	and.f  0,  r0, _PAGE_GLOBAL
+	or.z   r2, r2, r3		; r w x r w x
+
+	and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE
+	or  r3, r3, r2
+
 	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1

 	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb
@@ -272,8 +279,8 @@ ARC_ENTRY EV_TLBMissI
 	;----------------------------------------------------------------
 	; VERIFY_PTE: Check if PTE permissions approp for executing code
 	cmp_s   r2, VMALLOC_START
-	mov.lo  r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE)
-	mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE)
+	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
+	or.hs   r2, r2, _PAGE_GLOBAL

 	and     r3, r0, r2	; Mask out NON Flag bits from PTE
 	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
@@ -312,26 +319,21 @@ ARC_ENTRY EV_TLBMissD
 	;----------------------------------------------------------------
 	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)

-	mov_s   r2, 0
+	cmp_s   r2, VMALLOC_START
+	mov_s   r2, _PAGE_PRESENT	; common bit for K/U PTE
+	or.hs   r2, r2, _PAGE_GLOBAL	; kernel PTE only
+
+	; Linux PTE [RWX] bits are semantically overloaded:
+	; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
+	; -Otherwise they are user-mode permissions, and those are exactly
+	;  same for kernel mode as well (e.g. copy_(to|from)_user)
+
 	lr      r3, [ecr]
 	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
-	or.nz   r2, r2, _PAGE_U_READ	; chk for Read flag in PTE
+	or.nz   r2, r2, _PAGE_READ	; chk for Read flag in PTE
 	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
-	or.nz   r2, r2, _PAGE_U_WRITE	; chk for Write flag in PTE
-	; Above laddering takes care of XCHG access
-	;   which is both Read and Write
-
-	; If kernel mode access, ; make _PAGE_xx flags as _PAGE_K_xx
-	; For copy_(to|from)_user, despite exception taken in kernel mode,
-	; this code is not hit, because EFA would still be the user mode
-	; address (EFA < 0x6000_0000).
-	; This code is for legit kernel mode faults, vmalloc specifically
-	; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
-
-	lr      r3, [efa]
-	cmp     r3, VMALLOC_START - 1	; If kernel mode access
-	asl.hi  r2, r2, 3		; make _PAGE_xx flags as _PAGE_K_xx
-	or      r2, r2, _PAGE_PRESENT	; Common flag for K/U mode
+	or.nz   r2, r2, _PAGE_WRITE	; chk for Write flag in PTE
+	; Above laddering takes care of XCHG access (both R and W)

 	; By now, r2 setup with all the Flags we need to check in PTE
 	and     r3, r0, r2	; Mask out NON Flag bits from PTE
......
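
To make the laddering above easier to follow, here is a C rendering of the permission
check the reworked EV_TLBMissD path performs. It is only a sketch: the dtlb_access_ok()
name, the bool parameters and the standalone form are hypothetical (in the kernel this
runs as the assembly above, with the access type taken from the ECR register).

#include <stdbool.h>
#include <stdint.h>

/* Folded PTE flags, MMUv1/v2 positions as in this patch */
#define _PAGE_WRITE	(1 << 4)
#define _PAGE_READ	(1 << 5)
#define _PAGE_GLOBAL	(1 << 9)
#define _PAGE_PRESENT	(1 << 11)

#define VMALLOC_START	0x70000000UL	/* start of the kernel-only vmalloc window */

/*
 * Build the set of flags the faulting access requires, then check that
 * the PTE carries every one of them.  Vmalloc addresses additionally
 * require a GLOBAL PTE; user PTEs carry one R/W/X set that is valid for
 * both user and kernel mode accesses (copy_{to,from}_user).
 */
static bool dtlb_access_ok(uint32_t pte, unsigned long fault_addr,
			   bool is_load, bool is_store)
{
	uint32_t need = _PAGE_PRESENT;		/* common bit for K/U PTE */

	if (fault_addr >= VMALLOC_START)
		need |= _PAGE_GLOBAL;		/* kernel (vmalloc) PTE only */

	if (is_load)
		need |= _PAGE_READ;
	if (is_store)
		need |= _PAGE_WRITE;
	/* an atomic exchange (XCHG) sets both is_load and is_store */

	return (pte & need) == need;
}
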