diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index c67966e10a95012ab9ca60eed4d027eb0113bac9..0d2866b8f4257da039e735ff69776e3f6748c04e 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -43,34 +43,31 @@ void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 #endif	/* PAGETABLE_LEVELS > 3 */
 #endif	/* PAGETABLE_LEVELS > 2 */
 
-#ifdef CONFIG_X86_64
 static inline void pgd_list_add(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);
-	unsigned long flags;
 
-	spin_lock_irqsave(&pgd_lock, flags);
 	list_add(&page->lru, &pgd_list);
-	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);
-	unsigned long flags;
 
-	spin_lock_irqsave(&pgd_lock, flags);
 	list_del(&page->lru);
-	spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
+#ifdef CONFIG_X86_64
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	unsigned boundary;
 	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	unsigned long flags;
 	if (!pgd)
 		return NULL;
+	spin_lock_irqsave(&pgd_lock, flags);
 	pgd_list_add(pgd);
+	spin_unlock_irqrestore(&pgd_lock, flags);
 	/*
 	 * Copy kernel pointers in from init.
 	 * Could keep a freelist or slab cache of those because the kernel
@@ -86,8 +83,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
+	unsigned long flags;
 	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
+	spin_lock_irqsave(&pgd_lock, flags);
 	pgd_list_del(pgd);
+	spin_unlock_irqrestore(&pgd_lock, flags);
 	free_page((unsigned long)pgd);
 }
 #else
@@ -101,20 +101,6 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
  * vmalloc faults work because attached pagetables are never freed.
  * -- wli
  */
-static inline void pgd_list_add(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_add(&page->lru, &pgd_list);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-
-	list_del(&page->lru);
-}
-
 #define UNSHARED_PTRS_PER_PGD \
 	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
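The net effect of the hunks above: pgd_list_add() and pgd_list_del() lose
their private spin_lock_irqsave()/spin_unlock_irqrestore() pairs and move
out from under CONFIG_X86_64, the duplicate 32-bit copies in the #else
branch are deleted, and the callers pgd_alloc() and pgd_free() now take
pgd_lock around the helper calls themselves. The sketch below illustrates
that caller-holds-the-lock pattern in plain C; it is a minimal userspace
illustration, not the kernel code: pthread_mutex_t stands in for the
pgd_lock spinlock (the IRQ-disabling semantics of the _irqsave variant
have no userspace analogue), a hand-rolled list_head stands in for the
struct page lru list, and fake_pgd_alloc()/fake_pgd_free() are
hypothetical names.

/*
 * Userspace sketch of the locking pattern this patch adopts: the list
 * helpers do no locking of their own, and each caller takes the lock
 * around them. NOT kernel code; all names below are stand-ins.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head {
	struct list_head *next, *prev;
};

static struct list_head pgd_list = { &pgd_list, &pgd_list };
static pthread_mutex_t pgd_lock = PTHREAD_MUTEX_INITIALIZER;

/* Lock-free helpers: the caller is responsible for holding pgd_lock. */
static void pgd_list_add(struct list_head *entry)
{
	entry->next = pgd_list.next;
	entry->prev = &pgd_list;
	pgd_list.next->prev = entry;
	pgd_list.next = entry;
}

static void pgd_list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* Each caller hoists the locking around the helper, as pgd_alloc does. */
static struct list_head *fake_pgd_alloc(void)
{
	struct list_head *pgd = malloc(sizeof(*pgd));

	if (!pgd)
		return NULL;
	pthread_mutex_lock(&pgd_lock);
	pgd_list_add(pgd);
	pthread_mutex_unlock(&pgd_lock);
	return pgd;
}

static void fake_pgd_free(struct list_head *pgd)
{
	pthread_mutex_lock(&pgd_lock);
	pgd_list_del(pgd);
	pthread_mutex_unlock(&pgd_lock);
	free(pgd);
}

int main(void)
{
	struct list_head *pgd = fake_pgd_alloc();

	if (!pgd)
		return 1;
	printf("list non-empty after alloc: %d\n", pgd_list.next != &pgd_list);
	fake_pgd_free(pgd);
	printf("list empty after free:      %d\n", pgd_list.next == &pgd_list);
	return 0;
}

One design consequence visible in the diff itself: with the locking in
the callers, a single set of lock-free helpers can be shared by the
64-bit and 32-bit paths (hence the deletion in the #else branch), and a
caller can cover several list operations with one lock/unlock pair.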