Commit 1c7ec8a4 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/book3s64/4k: Switch 4k pagesize config to use pagetable fragment

The 4K config uses one full page at level 4 of the page table. Add support for
single fragment allocation in the pagetable fragment code and use that for the
4K config. This makes both 4K and 64K configs use the same code path. Later we
will switch pmd to use the page table fragment code as well. This is done only
for 64-bit platforms, which are the ones that use page table fragment support.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent 70234676
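The new macro values follow from simple arithmetic, which a standalone sketch can verify. The constants below are assumptions taken from the 4K hash config (H_PTE_INDEX_SIZE is 9, pages are 4K); the point is that the fragment size works out to exactly one page, so H_PTE_FRAG_NR evaluates to 1:

#include <stdio.h>

/* Assumed 4K-config values: 4K pages (PAGE_SHIFT = 12) and a level-4
 * table indexed by 9 bits (H_PTE_INDEX_SIZE = 9). */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define H_PTE_INDEX_SIZE	9

/* 8 bytes per each pte entry, hence the "+ 3" */
#define H_PTE_FRAG_SIZE_SHIFT	(H_PTE_INDEX_SIZE + 3)
#define H_PTE_FRAG_NR		(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)

int main(void)
{
	/* 512 PTEs * 8 bytes = 4096 bytes: the table fills the page */
	printf("fragment size      = %lu bytes\n",
	       1UL << H_PTE_FRAG_SIZE_SHIFT);
	/* 4096 >> 12 == 1: one fragment per page, the single-fragment
	 * case the fragment allocator is taught to handle below */
	printf("fragments per page = %lu\n", H_PTE_FRAG_NR);
	return 0;
}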
@@ -38,8 +38,10 @@
 #define H_PAGE_4K_PFN	0x0
 #define H_PAGE_THP_HUGE 0x0
 #define H_PAGE_COMBO	0x0
-#define H_PTE_FRAG_NR	0
-#define H_PTE_FRAG_SIZE_SHIFT	0
+
+/* 8 bytes per each pte entry */
+#define H_PTE_FRAG_SIZE_SHIFT	(H_PTE_INDEX_SIZE + 3)
+#define H_PTE_FRAG_NR	(PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)
 
 /* memory key bits, only 8 keys supported */
 #define H_PTE_PKEY_BIT0	0
@@ -134,10 +134,10 @@ typedef struct {
 #ifdef CONFIG_PPC_SUBPAGE_PROT
 	struct subpage_prot_table spt;
 #endif /* CONFIG_PPC_SUBPAGE_PROT */
-#ifdef CONFIG_PPC_64K_PAGES
-	/* for 4K PTE fragment support */
+	/*
+	 * pagetable fragment support
+	 */
 	void *pte_frag;
-#endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 	struct list_head iommu_group_mem_list;
 #endif
@@ -173,31 +173,6 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 	return (pgtable_t)pmd_page_vaddr(pmd);
 }
 
-#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					  unsigned long address)
-{
-	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
-				      unsigned long address)
-{
-	struct page *page;
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
-	if (!pte)
-		return NULL;
-	page = virt_to_page(pte);
-	if (!pgtable_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	return pte;
-}
-#else /* if CONFIG_PPC_64K_PAGES */
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -209,7 +184,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
 	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
 }
-#endif
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
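With the CONFIG_PPC_4K_PAGES branch gone, both page-size configs reach the fragment allocator through the same pair of inlines. A condensed view of the surviving paths (the pte_alloc_one_kernel body is truncated in the hunk above; passing 1 there for kernel tables is an assumption based on pte_fragment_alloc's kernel argument):

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	/* kernel page table: kernel = 1 (assumed, body elided above) */
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	/* user page table: kernel = 0, as shown in the hunk above */
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}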
@@ -159,9 +159,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	mm->context.id = index;
 
-#ifdef CONFIG_PPC_64K_PAGES
 	mm->context.pte_frag = NULL;
-#endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 	mm_iommu_init(mm);
 #endif
@@ -192,7 +190,6 @@ static void destroy_contexts(mm_context_t *ctx)
 	spin_unlock(&mmu_context_lock);
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
 static void destroy_pagetable_page(struct mm_struct *mm)
 {
 	int count;
@@ -213,13 +210,6 @@ static void destroy_pagetable_page(struct mm_struct *mm)
 	}
 }
 
-#else
-static inline void destroy_pagetable_page(struct mm_struct *mm)
-{
-	return;
-}
-#endif
-
 void destroy_context(struct mm_struct *mm)
 {
 #ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -225,7 +225,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
-#ifdef CONFIG_PPC_64K_PAGES
+
 static pte_t *get_pte_from_cache(struct mm_struct *mm)
 {
 	void *pte_frag, *ret;
@@ -264,7 +264,14 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 		return NULL;
 	}
 	ret = page_address(page);
+	/*
+	 * if we support only one fragment just return the
+	 * allocated page.
+	 */
+	if (PTE_FRAG_NR == 1)
+		return ret;
+
 	spin_lock(&mm->page_table_lock);
 	/*
 	 * If we find pgtable_page set, we return
@@ -291,8 +298,6 @@ pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel
 	return __alloc_for_ptecache(mm, kernel);
 }
 
-#endif /* CONFIG_PPC_64K_PAGES */
-
 void pte_fragment_free(unsigned long *table, int kernel)
 {
 	struct page *page = virt_to_page(table);
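The PTE_FRAG_NR == 1 shortcut is what lets the 4K config reuse this allocator unchanged: when a fragment is a whole page, there is nothing left to cache. A minimal user-space sketch of the idea, not the kernel code (a pthread mutex stands in for mm->page_table_lock, and the kernel's re-check for a fragment published by a racing thread is omitted):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#define PAGE_SIZE	4096UL
#define FRAG_SHIFT	12			/* 4K config: fragment == page */
#define FRAG_SIZE	(1UL << FRAG_SHIFT)
#define FRAG_NR		(PAGE_SIZE >> FRAG_SHIFT)	/* == 1 here */

struct mm_like {
	pthread_mutex_t lock;		/* stands in for mm->page_table_lock */
	void *pte_frag;			/* next free fragment, or NULL */
};

/* Hand out FRAG_SIZE chunks, carving them from a zeroed page. */
static void *frag_alloc(struct mm_like *mm)
{
	void *page, *ret, *next;

	/* fast path: take the cached fragment if one is left */
	pthread_mutex_lock(&mm->lock);
	ret = mm->pte_frag;
	if (ret) {
		next = (char *)ret + FRAG_SIZE;
		/* retire the cache once the page is fully consumed */
		mm->pte_frag =
			((uintptr_t)next & (PAGE_SIZE - 1)) ? next : NULL;
		pthread_mutex_unlock(&mm->lock);
		return ret;
	}
	pthread_mutex_unlock(&mm->lock);

	page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!page)
		return NULL;
	memset(page, 0, PAGE_SIZE);

	/* single-fragment config: the whole page is the fragment, so
	 * skip the cache entirely, as __alloc_for_ptecache() now does */
	if (FRAG_NR == 1)
		return page;

	/* multi-fragment config: publish the remainder for later calls */
	pthread_mutex_lock(&mm->lock);
	mm->pte_frag = (char *)page + FRAG_SIZE;
	pthread_mutex_unlock(&mm->lock);
	return page;
}

Lowering FRAG_SHIFT in this toy (say, to 10) makes FRAG_NR greater than 1 and exercises the cache path, which is the shape of the 64K config, where a 64K page is carved into 4K page-table fragments.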