Commit 416716ed authored by Linus Torvalds

Merge tag 'powerpc-4.1-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux

Pull powerpc fixes from Michael Ellerman:

 - THP/hugetlb fixes from Aneesh.

 - MCE fix from Daniel.

 - TOC fix from Anton.

* tag 'powerpc-4.1-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux:
  powerpc: Align TOC to 256 bytes
  powerpc/mce: fix off by one errors in mce event handling
  powerpc/mm: Return NULL for not present hugetlb page
  powerpc/thp: Serialize pmd clear against a linux page table walk.
@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		    uint64_t nip, uint64_t addr)
 {
 	uint64_t srr1;
-	int index = __this_cpu_inc_return(mce_nest_count);
+	int index = __this_cpu_inc_return(mce_nest_count) - 1;
 	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
 	/*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
 
-	index = __this_cpu_inc_return(mce_queue_count);
+	index = __this_cpu_inc_return(mce_queue_count) - 1;
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
 		__this_cpu_dec(mce_queue_count);
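
The off-by-one comes from __this_cpu_inc_return() returning the already-incremented counter, so using that value directly as an array index skips slot 0 and lets the MAX_MC_EVT-th event land one past the array. A minimal userspace sketch of the same pattern, with a hypothetical inc_return() standing in for the per-cpu helper (not kernel code):

#include <assert.h>

#define MAX_MC_EVT 10

static int mce_queue_count;
static int mce_queue[MAX_MC_EVT];

/* Hypothetical stand-in for __this_cpu_inc_return(): increment, return the new value. */
static int inc_return(int *v)
{
	return ++(*v);
}

int main(void)
{
	/* Buggy form: the first event would land in slot 1, never slot 0,
	 * and the MAX_MC_EVT-th event would index one past the array. */
	/* int index = inc_return(&mce_queue_count); */

	/* Fixed form, as in the patch: indices run 0 .. MAX_MC_EVT - 1. */
	int index = inc_return(&mce_queue_count) - 1;

	assert(index >= 0 && index < MAX_MC_EVT);
	mce_queue[index] = 1;
	return 0;
}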
@@ -213,6 +213,7 @@ SECTIONS
 		*(.opd)
 	}
 
+	. = ALIGN(256);
 	.got : AT(ADDR(.got) - LOAD_OFFSET) {
 		__toc_start = .;
 #ifndef CONFIG_RELOCATABLE
@@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-	pte_t *ptep;
-	struct page *page;
+	pte_t *ptep, pte;
 	unsigned shift;
 	unsigned long mask, flags;
+	struct page *page = ERR_PTR(-EINVAL);
+
+	local_irq_save(flags);
+	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+	if (!ptep)
+		goto no_page;
+	pte = READ_ONCE(*ptep);
 	/*
+	 * Verify it is a huge page else bail.
 	 * Transparent hugepages are handled by generic code. We can skip them
 	 * here.
 	 */
-	local_irq_save(flags);
-	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+	if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+		goto no_page;
 
-	/* Verify it is a huge page else bail. */
-	if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
-		local_irq_restore(flags);
-		return ERR_PTR(-EINVAL);
+	if (!pte_present(pte)) {
+		page = NULL;
+		goto no_page;
 	}
 	mask = (1UL << shift) - 1;
-	page = pte_page(*ptep);
+	page = pte_page(pte);
 	if (page)
 		page += (address & mask) / PAGE_SIZE;
 
+no_page:
 	local_irq_restore(flags);
 	return page;
 }
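
The rewritten follow_huge_addr() snapshots the PTE once with READ_ONCE() and performs every check on that snapshot, and a huge page that is not present now returns NULL ("no page") instead of ERR_PTR(-EINVAL). A rough userspace sketch of the snapshot-then-test idea; the READ_ONCE() definition and the one-bit "present" encoding below are simplifications, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's READ_ONCE(): force a single load. */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

/* Toy PTE shared with a concurrent writer: bit 0 plays the "present" bit. */
static uint64_t shared_pte = 0x200000ULL | 1;

static int pte_present(uint64_t pte)
{
	return pte & 1;
}

int main(void)
{
	/*
	 * Snapshot once, then test and use only the snapshot. Re-reading
	 * *ptep for each check would let a parallel clear change the value
	 * between the present check and the later pte_page() use.
	 */
	uint64_t pte = READ_ONCE(shared_pte);

	if (!pte_present(pte)) {
		puts("not present -> return NULL, not ERR_PTR(-EINVAL)");
		return 0;
	}
	printf("present, page bits 0x%llx\n", (unsigned long long)(pte & ~1ULL));
	return 0;
}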
@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 	 * hash fault look at them.
 	 */
 	memset(pgtable, 0, PTE_FRAG_SIZE);
+	/*
+	 * Serialize against find_linux_pte_or_hugepte which does lock-less
+	 * lookup in page tables with local interrupts disabled. For huge pages
+	 * it casts pmd_t to pte_t. Since format of pte_t is different from
+	 * pmd_t we want to prevent transit from pmd pointing to page table
+	 * to pmd pointing to huge page (and back) while interrupts are disabled.
+	 * We clear pmd to possibly replace it with page table pointer in
+	 * different code paths. So make sure we wait for the parallel
+	 * find_linux_pte_or_hugepage to finish.
+	 */
+	kick_all_cpus_sync();
 	return old_pmd;
 }
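
The comment above describes the protocol being relied on: lockless walkers run under local_irq_save(), and kick_all_cpus_sync() does not return until every CPU has taken an IPI, which cannot happen while such a walk is in progress, so afterwards no walker can still be working on the old pmd value. A rough userspace analogy using threads, where per-CPU flags and a polling wait stand in for the interrupts-off window and the IPI (an illustration of the ordering only, not the kernel mechanism):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NR_READERS 4

/* Nonzero while that "CPU" is inside its interrupts-off page table walk. */
static atomic_int in_walk[NR_READERS];
/* Stand-in for the pmd entry the writer clears. */
static atomic_uintptr_t pmd = 0x1000;

static void *reader(void *arg)
{
	int cpu = (int)(intptr_t)arg;

	for (int i = 0; i < 100000; i++) {
		atomic_store(&in_walk[cpu], 1);		/* ~ local_irq_save() */
		uintptr_t snap = atomic_load(&pmd);	/* lockless walk */
		(void)snap;				/* ... use the snapshot ... */
		atomic_store(&in_walk[cpu], 0);		/* ~ local_irq_restore() */
	}
	return NULL;
}

/* Stand-in for kick_all_cpus_sync(): don't return until every reader has
 * left any walk it may have started before we cleared the entry. */
static void wait_for_walkers(void)
{
	for (int cpu = 0; cpu < NR_READERS; cpu++)
		while (atomic_load(&in_walk[cpu]))
			;	/* spin until this reader is out */
}

int main(void)
{
	pthread_t t[NR_READERS];

	for (intptr_t i = 0; i < NR_READERS; i++)
		pthread_create(&t[i], NULL, reader, (void *)i);

	atomic_store(&pmd, 0);	/* ~ pmdp_get_and_clear(): old entry is gone */
	wait_for_walkers();	/* after this, no walker still sees the old pmd */
	/* now it is safe to repoint the entry at a regular page table */

	for (int i = 0; i < NR_READERS; i++)
		pthread_join(t[i], NULL);
	puts("done");
	return 0;
}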