Commit 146e4b3c authored by Martin Schwidefsky

[S390] 1K/2K page table pages.

This patch implements 1K/2K page table pages for s390.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent 0c1f1dcd
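The core of the patch: with 256-entry page tables, a pte table occupies only 2K on 64 bit (1K on 31 bit), so TABLES_PER_PAGE tables are packed into each 4K page. Partially used pages are found through the per-mm pgtable_list, and the low bits of page->flags serve as an in-use bitmap (FRAG_MASK). Below is a minimal user-space sketch of that fragment bookkeeping, not kernel code; it assumes the 64-bit constants from the patch (2 tables per page, 8-byte entries):

```c
/*
 * Sketch of the fragment bitmap this patch introduces: one 4K page
 * holds up to TABLES_PER_PAGE page tables; one bit per fragment in
 * a flags word marks it in use, and freeing toggles the bit off.
 */
#include <stdio.h>

#define TABLES_PER_PAGE 2	/* 64-bit case from the patch */
#define FRAG_MASK ((1UL << TABLES_PER_PAGE) - 1)
#define PTES_PER_TABLE 256	/* 256 entries of 8 bytes = 2K */

static unsigned long page_flags;	/* stands in for page->flags bits */
static unsigned long page_frame[2 * PTES_PER_TABLE];	/* one 4K page */

/* Find the first free 2K fragment, mark it used, return it. */
static unsigned long *frag_alloc(void)
{
	unsigned long bits = 1UL;
	unsigned long *table = page_frame;

	if ((page_flags & FRAG_MASK) == FRAG_MASK)
		return NULL;		/* page completely used */
	while (page_flags & bits) {	/* same scan as page_table_alloc */
		table += PTES_PER_TABLE;
		bits <<= 1;
	}
	page_flags |= bits;
	return table;
}

/* Clear the fragment's bit, as page_table_free does with XOR. */
static void frag_free(unsigned long *table)
{
	unsigned long bits = 1UL << ((table - page_frame) / PTES_PER_TABLE);

	page_flags ^= bits;
}

int main(void)
{
	unsigned long *t0 = frag_alloc();
	unsigned long *t1 = frag_alloc();

	printf("t0=%p t1=%p flags=%lx\n", (void *) t0, (void *) t1, page_flags);
	frag_free(t0);
	printf("after free: flags=%lx\n", page_flags);
	return 0;
}
```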
@@ -184,7 +184,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
ptep_invalidate(address, pte);
ptep_invalidate(&init_mm, address, pte);
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
@@ -26,8 +26,14 @@
#ifndef CONFIG_64BIT
#define ALLOC_ORDER 1
#define TABLES_PER_PAGE 4
#define FRAG_MASK 15UL
#define SECOND_HALVES 10UL
#else
#define ALLOC_ORDER 2
#define TABLES_PER_PAGE 2
#define FRAG_MASK 3UL
#define SECOND_HALVES 2UL
#endif
unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -45,13 +51,20 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
}
page->index = page_to_phys(shadow);
}
spin_lock(&mm->page_table_lock);
list_add(&page->lru, &mm->context.crst_list);
spin_unlock(&mm->page_table_lock);
return (unsigned long *) page_to_phys(page);
}
void crst_table_free(unsigned long *table)
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
unsigned long *shadow = get_shadow_table(table);
struct page *page = virt_to_page(table);
spin_lock(&mm->page_table_lock);
list_del(&page->lru);
spin_unlock(&mm->page_table_lock);
if (shadow)
free_pages((unsigned long) shadow, ALLOC_ORDER);
free_pages((unsigned long) table, ALLOC_ORDER);
@@ -60,37 +73,84 @@ void crst_table_free(unsigned long *table)
/*
* page table entry allocation/free routines.
*/
unsigned long *page_table_alloc(int noexec)
unsigned long *page_table_alloc(struct mm_struct *mm)
{
struct page *page = alloc_page(GFP_KERNEL);
struct page *page;
unsigned long *table;
unsigned long bits;
if (!page)
return NULL;
page->index = 0;
if (noexec) {
struct page *shadow = alloc_page(GFP_KERNEL);
if (!shadow) {
__free_page(page);
bits = mm->context.noexec ? 3UL : 1UL;
spin_lock(&mm->page_table_lock);
page = NULL;
if (!list_empty(&mm->context.pgtable_list)) {
page = list_first_entry(&mm->context.pgtable_list,
struct page, lru);
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
page = NULL;
}
if (!page) {
spin_unlock(&mm->page_table_lock);
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
if (!page)
return NULL;
}
table = (unsigned long *) page_to_phys(shadow);
pgtable_page_ctor(page);
page->flags &= ~FRAG_MASK;
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
page->index = (addr_t) table;
spin_lock(&mm->page_table_lock);
list_add(&page->lru, &mm->context.pgtable_list);
}
pgtable_page_ctor(page);
table = (unsigned long *) page_to_phys(page);
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
while (page->flags & bits) {
table += 256;
bits <<= 1;
}
page->flags |= bits;
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
list_move_tail(&page->lru, &mm->context.pgtable_list);
spin_unlock(&mm->page_table_lock);
return table;
}
void page_table_free(unsigned long *table)
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
unsigned long *shadow = get_shadow_pte(table);
struct page *page;
unsigned long bits;
pgtable_page_dtor(virt_to_page(table));
if (shadow)
free_page((unsigned long) shadow);
free_page((unsigned long) table);
bits = mm->context.noexec ? 3UL : 1UL;
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock(&mm->page_table_lock);
page->flags ^= bits;
if (page->flags & FRAG_MASK) {
/* Page now has some free pgtable fragments. */
list_move(&page->lru, &mm->context.pgtable_list);
page = NULL;
} else
/* All fragments of the 4K page have been freed. */
list_del(&page->lru);
spin_unlock(&mm->page_table_lock);
if (page) {
pgtable_page_dtor(page);
__free_page(page);
}
}
void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
{
struct page *page;
spin_lock(&mm->page_table_lock);
/* Free shadow region and segment tables. */
list_for_each_entry(page, &mm->context.crst_list, lru)
if (page->index) {
free_pages((unsigned long) page->index, ALLOC_ORDER);
page->index = 0;
}
/* "Free" second halves of page tables. */
list_for_each_entry(page, &mm->context.pgtable_list, lru)
page->flags &= ~SECOND_HALVES;
spin_unlock(&mm->page_table_lock);
mm->context.noexec = 0;
update_mm(mm, tsk);
}
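As a worked example of the bit arithmetic in page_table_free() above: the table's byte offset within its 4K page, divided by the 2K fragment size (256 entries of 8 bytes), selects which flag bit(s) to toggle off. A standalone check, assuming a 64-bit unsigned long and a made-up table address:

```c
/*
 * User-space check of the page_table_free() index math; the physical
 * address below is hypothetical, chosen to land in the second 2K
 * fragment of its page.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pa = 0x12340800UL;	/* offset 0x800 in its page */
	unsigned long bits = 1UL;		/* 3UL if noexec shadows exist */

	bits <<= (pa & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	/* 0x800 / 256 / 8 == 1, so bits == 2: the second fragment */
	assert(bits == 2UL);
	printf("clearing fragment bits %lx\n", bits);
	return 0;
}
```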
@@ -84,13 +84,18 @@ static inline pmd_t *vmem_pmd_alloc(void)
return pmd;
}
static inline pte_t *vmem_pte_alloc(void)
static pte_t __init_refok *vmem_pte_alloc(void)
{
pte_t *pte = vmem_alloc_pages(0);
pte_t *pte;
if (slab_is_available())
pte = (pte_t *) page_table_alloc(&init_mm);
else
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
@@ -360,6 +365,9 @@ void __init vmem_map_init(void)
{
int i;
INIT_LIST_HEAD(&init_mm.context.crst_list);
INIT_LIST_HEAD(&init_mm.context.pgtable_list);
init_mm.context.noexec = 0;
NODE_DATA(0)->node_mem_map = VMEM_MAP;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
@@ -115,6 +115,7 @@ typedef s390_regs elf_gregset_t;
#include <linux/sched.h> /* for task_struct */
#include <asm/system.h> /* for save_access_regs */
#include <asm/mmu_context.h>
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -214,4 +215,16 @@ do { \
} while (0)
#endif /* __s390x__ */
/*
* An executable for which elf_read_implies_exec() returns TRUE will
* have the READ_IMPLIES_EXEC personality flag set automatically.
*/
#define elf_read_implies_exec(ex, executable_stack) \
({ \
if (current->mm->context.noexec && \
executable_stack != EXSTACK_DISABLE_X) \
disable_noexec(current->mm, current); \
current->mm->context.noexec == 0; \
})
#endif
#ifndef __MMU_H
#define __MMU_H
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
typedef struct {
struct list_head crst_list;
struct list_head pgtable_list;
unsigned long asce_bits;
int noexec;
} mm_context_t;
#endif
@@ -10,15 +10,17 @@
#define __S390_MMU_CONTEXT_H
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm-generic/mm_hooks.h>
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
mm->context |= _ASCE_TYPE_REGION3;
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
mm->context.noexec = s390_noexec;
return 0;
}
@@ -32,11 +34,13 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
if (switch_amode) {
/* Load primary space page table origin. */
pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
asm volatile(LCTL_OPCODE" 1,1,%0\n"
: : "m" (S390_lowcore.user_exec_asce) );
} else
@@ -74,43 +74,17 @@ static inline void copy_page(void *to, void *from)
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x) ((x).pte)
#define pgprot_val(x) ((x).pgprot)
#ifndef __s390x__
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct {
unsigned long pgd0;
unsigned long pgd1;
unsigned long pgd2;
unsigned long pgd3;
} pgd_t;
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd0)
#else /* __s390x__ */
typedef struct {
unsigned long pmd0;
unsigned long pmd1;
} pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;
#define pmd_val(x) ((x).pmd0)
#define pmd_val1(x) ((x).pmd1)
#define pgprot_val(x) ((x).pgprot)
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pud_val(x) ((x).pud)
#define pgd_val(x) ((x).pgd)
#endif /* __s390x__ */
typedef struct page *pgtable_t;
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
@@ -167,7 +141,7 @@ static inline int pfn_valid(unsigned long pfn)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
@@ -20,10 +20,11 @@
#define check_pgt_cache() do {} while (0)
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(unsigned long *);
void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(int);
void page_table_free(unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
@@ -80,12 +81,12 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
unsigned long *crst = crst_table_alloc(mm, s390_noexec);
if (crst)
crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
return (pmd_t *) crst;
unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
if (table)
crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free((unsigned long *)pmd)
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
@@ -98,63 +99,55 @@ static inline void pud_populate_kernel(struct mm_struct *mm,
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
pud_t *shadow_pud = get_shadow_table(pud);
pmd_t *shadow_pmd = get_shadow_table(pmd);
if (shadow_pud && shadow_pmd)
pud_populate_kernel(mm, shadow_pud, shadow_pmd);
pud_populate_kernel(mm, pud, pmd);
if (mm->context.noexec) {
pud = get_shadow_table(pud);
pmd = get_shadow_table(pmd);
pud_populate_kernel(mm, pud, pmd);
}
}
#endif /* __s390x__ */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
unsigned long *crst = crst_table_alloc(mm, s390_noexec);
unsigned long *crst;
INIT_LIST_HEAD(&mm->context.crst_list);
INIT_LIST_HEAD(&mm->context.pgtable_list);
crst = crst_table_alloc(mm, s390_noexec);
if (crst)
crst_table_init(crst, pgd_entry_type(mm));
return (pgd_t *) crst;
}
#define pgd_free(mm, pgd) crst_table_free((unsigned long *) pgd)
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
#else /* __s390x__ */
pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
static inline void pmd_populate(struct mm_struct *mm,
pmd_t *pmd, pgtable_t pte)
{
pte_t *pte = (pte_t *)page_to_phys(page);
pmd_t *shadow_pmd = get_shadow_table(pmd);
pte_t *shadow_pte = get_shadow_pte(pte);
pmd_populate_kernel(mm, pmd, pte);
if (shadow_pmd && shadow_pte)
pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
if (mm->context.noexec) {
pmd = get_shadow_table(pmd);
pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
}
}
#define pmd_pgtable(pmd) pmd_page(pmd)
#define pmd_pgtable(pmd) \
(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
/*
* page table entry allocation/free routines.
*/
#define pte_alloc_one_kernel(mm, vmaddr) \
((pte_t *) page_table_alloc(s390_noexec))
#define pte_alloc_one(mm, vmaddr) \
virt_to_page(page_table_alloc(s390_noexec))
#define pte_free_kernel(mm, pte) \
page_table_free((unsigned long *) pte)
#define pte_free(mm, pte) \
page_table_free((unsigned long *) page_to_phys((struct page *) pte))
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
#endif /* _S390_PGALLOC_H */
@@ -57,11 +57,11 @@ extern char empty_zero_page[PAGE_SIZE];
* PGDIR_SHIFT determines what a third-level page table entry can map
*/
#ifndef __s390x__
# define PMD_SHIFT 22
# define PUD_SHIFT 22
# define PGDIR_SHIFT 22
# define PMD_SHIFT 20
# define PUD_SHIFT 20
# define PGDIR_SHIFT 20
#else /* __s390x__ */
# define PMD_SHIFT 21
# define PMD_SHIFT 20
# define PUD_SHIFT 31
# define PGDIR_SHIFT 31
#endif /* __s390x__ */
@@ -79,17 +79,14 @@ extern char empty_zero_page[PAGE_SIZE];
* for S390 segment-table entries are combined to one PGD
* that leads to 1024 pte per pgd
*/
#define PTRS_PER_PTE 256
#ifndef __s390x__
# define PTRS_PER_PTE 1024
# define PTRS_PER_PMD 1
# define PTRS_PER_PUD 1
# define PTRS_PER_PGD 512
#define PTRS_PER_PMD 1
#else /* __s390x__ */
# define PTRS_PER_PTE 512
# define PTRS_PER_PMD 1024
# define PTRS_PER_PUD 1
# define PTRS_PER_PGD 2048
#define PTRS_PER_PMD 2048
#endif /* __s390x__ */
#define PTRS_PER_PUD 1
#define PTRS_PER_PGD 2048
#define FIRST_USER_ADDRESS 0
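The new shift values follow directly from the smaller tables: 256 pte entries, each mapping a 4K page, cover 2^8 * 2^12 = 2^20 bytes, exactly one 1M hardware segment, which is why PMD_SHIFT drops to 20 on both 31 and 64 bit. A quick illustrative check of that arithmetic:

```c
/* Sanity check of the new page table geometry introduced above. */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 256	/* new value from this patch */

int main(void)
{
	unsigned long segment = (unsigned long) PTRS_PER_PTE << PAGE_SHIFT;

	/* 256 * 4096 = 1048576 = 1M, matching PMD_SHIFT == 20 */
	printf("one pte table maps %lu bytes (2^20 = %u)\n",
	       segment, 1U << 20);
	return 0;
}
```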
@@ -376,24 +373,6 @@ extern char empty_zero_page[PAGE_SIZE];
# define PxD_SHADOW_SHIFT 2
#endif /* __s390x__ */
static inline struct page *get_shadow_page(struct page *page)
{
if (s390_noexec && page->index)
return virt_to_page((void *)(addr_t) page->index);
return NULL;
}
static inline void *get_shadow_pte(void *table)
{
unsigned long addr, offset;
struct page *page;
addr = (unsigned long) table;
offset = addr & (PAGE_SIZE - 1);
page = virt_to_page((void *)(addr ^ offset));
return (void *)(addr_t)(page->index ? (page->index | offset) : 0UL);
}
static inline void *get_shadow_table(void *table)
{
unsigned long addr, offset;
@@ -411,17 +390,16 @@ static inline void *get_shadow_table(void *table)
* hook is made available.
*/
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *pteptr, pte_t pteval)
pte_t *ptep, pte_t entry)
{
pte_t *shadow_pte = get_shadow_pte(pteptr);
*pteptr = pteval;
if (shadow_pte) {
if (!(pte_val(pteval) & _PAGE_INVALID) &&
(pte_val(pteval) & _PAGE_SWX))
pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO;
*ptep = entry;
if (mm->context.noexec) {
if (!(pte_val(entry) & _PAGE_INVALID) &&
(pte_val(entry) & _PAGE_SWX))
pte_val(entry) |= _PAGE_RO;
else
pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
pte_val(entry) = _PAGE_TYPE_EMPTY;
ptep[PTRS_PER_PTE] = entry;
}
}
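Note how the shadow table is now addressed: a noexec mm allocates the shadow fragment in the second half of the same 4K page (bits = 3UL in page_table_alloc), so the shadow entry always sits at the fixed offset ptep[PTRS_PER_PTE], and the old get_shadow_pte()/page->index lookup disappears. A toy user-space illustration of that layout, with plain unsigned longs standing in for pte_t and placeholder flag values:

```c
/*
 * Illustration only: a 4K page holding a 256-entry pte table in its
 * first half and the noexec shadow copy in its second half, so the
 * shadow of any entry is ptep[PTRS_PER_PTE].
 */
#include <stdio.h>

#define PTRS_PER_PTE 256

int main(void)
{
	unsigned long page[2 * PTRS_PER_PTE] = { 0 };
	unsigned long *ptep = &page[5];		/* some pte slot */

	ptep[0] = 0x12345000UL;			/* primary entry */
	ptep[PTRS_PER_PTE] = ptep[0] | 1UL;	/* shadow copy; the extra
						   bit is a stand-in for
						   _PAGE_RO */
	printf("pte=%#lx shadow=%#lx\n", ptep[0], ptep[PTRS_PER_PTE]);
	return 0;
}
```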
@@ -536,14 +514,6 @@ static inline int pte_young(pte_t pte)
#define pgd_clear(pgd) do { } while (0)
#define pud_clear(pud) do { } while (0)
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
pmd_val(pmdp[0]) = _SEGMENT_ENTRY_EMPTY;
pmd_val(pmdp[1]) = _SEGMENT_ENTRY_EMPTY;
pmd_val(pmdp[2]) = _SEGMENT_ENTRY_EMPTY;
pmd_val(pmdp[3]) = _SEGMENT_ENTRY_EMPTY;
}
#else /* __s390x__ */
#define pgd_clear(pgd) do { } while (0)
@@ -562,30 +532,27 @@ static inline void pud_clear(pud_t * pud)
pud_clear_kernel(shadow);
}
#endif /* __s390x__ */
static inline void pmd_clear_kernel(pmd_t * pmdp)
{
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
pmd_val1(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}
#endif /* __s390x__ */
static inline void pmd_clear(pmd_t * pmdp)
static inline void pmd_clear(pmd_t *pmd)
{
pmd_t *shadow_pmd = get_shadow_table(pmdp);
pmd_t *shadow = get_shadow_table(pmd);
pmd_clear_kernel(pmdp);
if (shadow_pmd)
pmd_clear_kernel(shadow_pmd);
pmd_clear_kernel(pmd);
if (shadow)
pmd_clear_kernel(shadow);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t *shadow_pte = get_shadow_pte(ptep);
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
if (shadow_pte)
pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY;
if (mm->context.noexec)
pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
}
/*
@@ -666,7 +633,7 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
/* S390 has 1mb segments, we are emulating 4MB segments */
/* pto must point to the start of the segment table */
pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
/* ipte in zarch mode can do the math */
@@ -680,12 +647,12 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
static inline void ptep_invalidate(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
__ptep_ipte(address, ptep);
ptep = get_shadow_pte(ptep);
if (ptep)
__ptep_ipte(address, ptep);
if (mm->context.noexec)
__ptep_ipte(address, ptep + PTRS_PER_PTE);
}
/*
@@ -707,7 +674,7 @@ static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
pte_t __pte = *(__ptep); \
if (atomic_read(&(__mm)->mm_users) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__address, __ptep); \
ptep_invalidate(__mm, __address, __ptep); \
else \
pte_clear((__mm), (__address), (__ptep)); \
__pte; \
@@ -718,7 +685,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
ptep_invalidate(address, ptep);
ptep_invalidate(vma->vm_mm, address, ptep);
return pte;
}
@@ -739,7 +706,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
if (full)
pte_clear(mm, addr, ptep);
else
ptep_invalidate(addr, ptep);
ptep_invalidate(mm, addr, ptep);
return pte;
}
@@ -750,7 +717,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
if (pte_write(__pte)) { \
if (atomic_read(&(__mm)->mm_users) > 1 || \
(__mm) != current->active_mm) \
ptep_invalidate(__addr, __ptep); \
ptep_invalidate(__mm, __addr, __ptep); \
set_pte_at(__mm, __addr, __ptep, pte_wrprotect(__pte)); \
} \
})
@@ -760,7 +727,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
({ \
int __changed = !pte_same(*(__ptep), __entry); \
if (__changed) { \
ptep_invalidate(__addr, __ptep); \
ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
} \
__changed; \
@@ -95,14 +95,14 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
*/
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t page)
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
{
if (!tlb->fullmm) {
tlb->array[tlb->nr_ptes++] = page;
tlb->array[tlb->nr_ptes++] = pte;
if (tlb->nr_ptes >= tlb->nr_pmds)
tlb_flush_mmu(tlb, 0, 0);
} else
pte_free(tlb->mm, page);
pte_free(tlb->mm, pte);
}
/*
@@ -61,11 +61,12 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE) {
pgd_t *shadow = get_shadow_table(mm->pgd);
if (shadow)
__tlb_flush_idte((unsigned long) shadow | mm->context);
__tlb_flush_idte((unsigned long) mm->pgd | mm->context);
if (mm->context.noexec)
__tlb_flush_idte((unsigned long)
get_shadow_table(mm->pgd) |
mm->context.asce_bits);
__tlb_flush_idte((unsigned long) mm->pgd |
mm->context.asce_bits);
return;
}
preempt_disable();