Commit 4e19fd93 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "11 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  .mailmap: add Christophe Ricard
  Make CONFIG_FHANDLE default y
  mm/page_isolation.c: fix the function comments
  oom, oom_reaper: do not enqueue task if it is on the oom_reaper_list head
  mm/page_isolation: fix tracepoint to mirror check function behavior
  mm/rmap: batched invalidations should use existing api
  x86/mm: TLB_REMOTE_SEND_IPI should count pages
  mm: fix invalid node in alloc_migrate_target()
  include/linux/huge_mm.h: return NULL instead of false for pmd_trans_huge_lock()
  mm, kasan: fix compilation for CONFIG_SLAB
  MAINTAINERS: orangefs mailing list is subscribers-only
@@ -33,6 +33,7 @@ Björn Steinbrink <B.Steinbrink@gmx.de>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
+Christophe Ricard <christophe.ricard@gmail.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
 David Brownell <david-b@pacbell.net>
......
@@ -8253,7 +8253,7 @@ F:	Documentation/filesystems/overlayfs.txt
 ORANGEFS FILESYSTEM
 M:	Mike Marshall <hubcap@omnibond.com>
-L:	pvfs2-developers@beowulf-underground.org
+L:	pvfs2-developers@beowulf-underground.org (subscribers-only)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
 S:	Supported
 F:	fs/orangefs/
......
@@ -319,12 +319,6 @@ static inline void reset_lazy_tlbstate(void)
 
 #endif	/* SMP */
 
-/* Not inlined due to inc_irq_stat not being defined yet */
-#define flush_tlb_local() {		\
-	inc_irq_stat(irq_tlb_count);	\
-	local_flush_tlb();		\
-}
-
 #ifndef CONFIG_PARAVIRT
 #define flush_tlb_others(mask, mm, start, end)	\
 	native_flush_tlb_others(mask, mm, start, end)
......
@@ -104,10 +104,8 @@ static void flush_tlb_func(void *info)
 
 	inc_irq_stat(irq_tlb_count);
 
-	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
+	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;
-	if (!f->flush_end)
-		f->flush_end = f->flush_start + PAGE_SIZE;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
@@ -135,12 +133,20 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 				 unsigned long end)
 {
 	struct flush_tlb_info info;
+
+	if (end == 0)
+		end = start + PAGE_SIZE;
 	info.flush_mm = mm;
 	info.flush_start = start;
 	info.flush_end = end;
 
 	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
-	trace_tlb_flush(TLB_REMOTE_SEND_IPI, end - start);
+	if (end == TLB_FLUSH_ALL)
+		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
+	else
+		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
+				(end - start) >> PAGE_SHIFT);
+
 	if (is_uv_system()) {
 		unsigned int cpu;
......
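The change above makes the TLB_REMOTE_SEND_IPI tracepoint report a page count (or TLB_FLUSH_ALL) instead of a raw byte delta. A standalone illustration of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); traced_value() is a made-up helper for this sketch, not kernel code:

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumption: 4 KiB pages */
#define TLB_FLUSH_ALL	(~0UL)

/* Mirrors what the fixed tracepoint now logs for a flush of [start, end). */
static unsigned long traced_value(unsigned long start, unsigned long end)
{
	if (end == TLB_FLUSH_ALL)
		return TLB_FLUSH_ALL;	/* a full flush is reported as-is */
	return (end - start) >> PAGE_SHIFT;
}

int main(void)
{
	/* Flushing 8 pages: the old code traced 32768 (bytes), the new one 8. */
	printf("%lu\n", traced_value(0x10000UL, 0x10000UL + 8 * 4096UL));
	return 0;
}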
@@ -127,7 +127,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma);
 	else
-		return false;
+		return NULL;
 }
 
 static inline int hpage_nr_pages(struct page *page)
 {
......
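For context on why false was wrong: pmd_trans_huge_lock() is declared to return spinlock_t *, and callers test the result as a pointer and unlock it when non-NULL. A kernel-style sketch of the typical caller pattern (assumed, not taken from this commit; the function name is illustrative):

static int example_pmd_walk(struct vm_area_struct *vma, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (!ptl)
		return 0;	/* not huge/devmap: fall back to the pte level */

	/* *pmd is stable while ptl is held; operate on the huge mapping. */
	spin_unlock(ptl);
	return 1;
}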
@@ -29,7 +29,7 @@ TRACE_EVENT(test_pages_isolated,
 	TP_printk("start_pfn=0x%lx end_pfn=0x%lx fin_pfn=0x%lx ret=%s",
 		__entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
-		__entry->end_pfn == __entry->fin_pfn ? "success" : "fail")
+		__entry->end_pfn <= __entry->fin_pfn ? "success" : "fail")
 );
 
 #endif /* _TRACE_PAGE_ISOLATION_H */
......
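The <= comes from the caller the tracepoint is meant to mirror: __test_page_isolated_in_pageblock() returns the last tested pfn (see the comment fix later in this series), and the range counts as isolated whenever that pfn reached or passed end_pfn. A simplified, assumed sketch of that caller:

/* Simplified sketch of test_pages_isolated(): success exactly when
 * end_pfn <= pfn, which is what the tracepoint now prints. */
static int test_pages_isolated_sketch(unsigned long start_pfn,
				      unsigned long end_pfn,
				      bool skip_hwpoisoned_pages)
{
	unsigned long pfn;

	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}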
@@ -272,8 +272,9 @@ config CROSS_MEMORY_ATTACH
 	  See the man page for more details.
 
 config FHANDLE
-	bool "open by fhandle syscalls"
+	bool "open by fhandle syscalls" if EXPERT
 	select EXPORTFS
+	default y
 	help
 	  If you say Y here, a user level program will be able to map
 	  file names to handle and then later use the handle for
......
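CONFIG_FHANDLE gates the name_to_handle_at()/open_by_handle_at() syscalls, which common userspace (systemd and udev among others) expects to be present, hence the new default y. A minimal userspace sketch of what the option enables (not part of the patch; open_by_handle_at() additionally requires CAP_DAC_READ_SEARCH):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct file_handle *fh;
	int mount_id, mount_fd, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	/* Translate a path into a persistent, kernel-opaque handle. */
	if (name_to_handle_at(AT_FDCWD, "/etc/hostname", fh, &mount_id, 0)) {
		perror("name_to_handle_at");	/* ENOSYS if CONFIG_FHANDLE=n */
		return 1;
	}

	/* Later, possibly from another process, reopen the file by handle. */
	mount_fd = open("/", O_RDONLY);
	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
	if (fd < 0)
		perror("open_by_handle_at");

	free(fh);
	return 0;
}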
@@ -498,7 +498,7 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_FREE;
-		set_track(&free_info->track);
+		set_track(&free_info->track, GFP_NOWAIT);
 	}
 #endif
......
@@ -547,7 +547,11 @@ static int oom_reaper(void *unused)
 
 static void wake_oom_reaper(struct task_struct *tsk)
 {
-	if (!oom_reaper_th || tsk->oom_reaper_list)
+	if (!oom_reaper_th)
+		return;
+
+	/* tsk is already queued? */
+	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
 		return;
 
 	get_task_struct(tsk);
......
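The extra head check matters because of how the queue is built: it is a LIFO singly linked list threaded through task_struct::oom_reaper_list, so when a task is the only queued entry its ->oom_reaper_list is NULL and the old test would happily enqueue it a second time, corrupting the list. A simplified sketch of the enqueue that follows the check (based on the oom_reaper code in this kernel):

	get_task_struct(tsk);

	/* LIFO push under oom_reaper_lock. */
	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;	/* NULL when the list was empty */
	oom_reaper_list = tsk;			/* tsk becomes the new head */
	spin_unlock(&oom_reaper_lock);
	wake_up(&oom_reaper_wait);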
@@ -215,7 +215,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range are isolated.
+ * Returns the last tested pfn.
  */
 static unsigned long
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 	 * now as a simple work-around, we use the next node for destination.
 	 */
 	if (PageHuge(page)) {
-		nodemask_t src = nodemask_of_node(page_to_nid(page));
-		nodemask_t dst;
-		nodes_complement(dst, src);
+		int node = next_online_node(page_to_nid(page));
+		if (node == MAX_NUMNODES)
+			node = first_online_node;
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					next_node(page_to_nid(page), dst));
+					node);
 	}
 
 	if (PageHighMem(page))
......
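The replacement walks the online node map directly: next_online_node() returns MAX_NUMNODES once it runs past the highest-numbered online node, at which point the destination wraps to first_online_node. With nodes 0 and 1 online, for example, a hugepage on node 1 gets node 0 as its target. A small kernel-style sketch of that round-robin choice (the helper name is made up):

static int pick_migration_node(struct page *page)
{
	int node = next_online_node(page_to_nid(page));

	if (node == MAX_NUMNODES)	/* ran off the end of node_online_map */
		node = first_online_node;
	return node;
}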
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
-	}
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
......
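Passing a NULL mm with TLB_FLUSH_ALL only works together with the flush_tlb_func() change earlier in this same merge: the new "f->flush_mm &&" guard means a NULL mm never makes the receiving CPU bail out early. A heavily simplified, assumed sketch of the receive path (the real function also handles lazy TLB state and partial ranges):

static void flush_tlb_func_sketch(void *info)
{
	struct flush_tlb_info *f = info;

	/* A NULL flush_mm (the batched-unmap case) never returns early here. */
	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	/* flush_end == TLB_FLUSH_ALL requests a full local flush. */
	local_flush_tlb();
}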