Commit 41b6167e authored by Michal Hocko, committed by Linus Torvalds

mm: get rid of __GFP_OTHER_NODE

The flag was introduced by commit 78afd561 ("mm: add
__GFP_OTHER_NODE flag") to allow proper accounting of remote node
allocations done by kernel daemons on behalf of a process - e.g.
khugepaged.

After "mm: fix remote numa hits statistics" we do not need and actually
use the flag so we can safely remove it because all allocations which
are satisfied from their "home" node are accounted properly.

[mhocko@suse.com: fix build]
Link: http://lkml.kernel.org/r/20170106122225.GK5556@dhcp22.suse.cz
Link: http://lkml.kernel.org/r/20170102153057.9451-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 2df26639
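The accounting change that makes the flag redundant shows up in the zone_statistics() hunk further down: after the parent commit ("mm: fix remote numa hits statistics"), NUMA_HIT vs. NUMA_MISS is decided purely by comparing the allocating zone's node with the preferred zone's node, and NUMA_LOCAL vs. NUMA_OTHER by comparing with the node the caller runs on; no gfp flag is consulted. The following standalone sketch (plain C with hypothetical names, a model of the decision rather than the kernel code itself) illustrates why khugepaged no longer needs special-casing:

#include <stdio.h>

/*
 * Toy model of the post-"mm: fix remote numa hits statistics"
 * zone_statistics() logic: the counters depend only on node
 * comparisons, never on a gfp flag, which is what leaves
 * __GFP_OTHER_NODE unused.
 */
static void account_alloc(int preferred_node, int alloc_node, int cpu_node)
{
	if (alloc_node == preferred_node)
		printf("NUMA_HIT on node %d\n", alloc_node);
	else
		printf("NUMA_MISS on node %d, NUMA_FOREIGN on node %d\n",
		       alloc_node, preferred_node);

	if (alloc_node == cpu_node)
		printf("NUMA_LOCAL on node %d\n", alloc_node);
	else
		printf("NUMA_OTHER on node %d\n", alloc_node);
}

int main(void)
{
	/*
	 * khugepaged running on node 0, collapsing a huge page that
	 * belongs on node 1: __GFP_THISNODE makes node 1 the preferred
	 * node, so the allocation counts as a hit there (and as
	 * NUMA_OTHER locally) with no extra flag required.
	 */
	account_alloc(1, 1, 0);
	return 0;
}

Under this scheme a remote-node allocation pinned with __GFP_THISNODE is already accounted as a hit on the target node, so the per-caller hint becomes dead weight.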
@@ -38,9 +38,8 @@ struct vm_area_struct;
 #define ___GFP_ACCOUNT 0x100000u
 #define ___GFP_NOTRACK 0x200000u
 #define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_OTHER_NODE 0x800000u
-#define ___GFP_WRITE 0x1000000u
-#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_WRITE 0x800000u
+#define ___GFP_KSWAPD_RECLAIM 0x1000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -172,11 +171,6 @@ struct vm_area_struct;
  * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
  * distinguishing in the source between false positives and allocations that
  * cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- * should not be accounted for as a remote allocation in vmstat. A
- * typical user would be khugepaged collapsing a huge page on a remote
- * node.
  */
 #define __GFP_COLD ((__force gfp_t)___GFP_COLD)
 #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
 #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
 #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /*
...
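The shift dropping from 26 to 25 is plain bit arithmetic: with the 0x800000u slot reclaimed, the remaining flags pack down so the highest internal flag, ___GFP_KSWAPD_RECLAIM, now sits at 0x1000000u (bit 24), and 25 bits cover everything. A quick standalone check (plain C, not kernel code; the lowercase names are stand-ins for the macros above):

#include <assert.h>

int main(void)
{
	/* Highest remaining internal flag after the patch: bit 24. */
	unsigned int gfp_kswapd_reclaim = 0x1000000u;
	unsigned int gfp_bits_shift = 25; /* was 26 before the patch */
	unsigned int gfp_bits_mask = (1u << gfp_bits_shift) - 1;

	/* The mask must still cover every defined flag bit. */
	assert((gfp_kswapd_reclaim & gfp_bits_mask) == gfp_kswapd_reclaim);
	return 0;
}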
@@ -47,8 +47,7 @@
 {(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
 {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
 {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
-{(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
-{(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \
+{(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\
 
 #define show_gfp_flags(flags) \
 (flags) ? __print_flags(flags, "|", \
...
@@ -919,8 +919,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
 	}
 
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
-					       __GFP_OTHER_NODE, vma,
-					       vmf->address, page_to_nid(page));
+		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
+					       vmf->address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
...
@@ -943,7 +943,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
 	/* Only allocate from the target node */
-	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
+	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
 	/*
 	 * Before allocating the hugepage, release the mmap_sem read lock.
@@ -1309,8 +1309,7 @@ static void collapse_shmem(struct mm_struct *mm,
 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
 	/* Only allocate from the target node */
-	gfp = alloc_hugepage_khugepaged_gfpmask() |
-		__GFP_OTHER_NODE | __GFP_THISNODE;
+	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
 	new_page = khugepaged_alloc_page(hpage, gfp, node);
 	if (!new_page) {
...
@@ -2584,8 +2584,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
  *
  * Must be called with interrupts disabled.
  */
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
-				   gfp_t flags)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
 #ifdef CONFIG_NUMA
 	enum zone_stat_item local_stat = NUMA_LOCAL;
@@ -2667,7 +2666,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	}
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-	zone_statistics(preferred_zone, zone, gfp_flags);
+	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);
 
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
...
@@ -655,7 +655,6 @@ static const struct {
 	{ "__GFP_RECLAIM", "R" },
 	{ "__GFP_DIRECT_RECLAIM", "DR" },
 	{ "__GFP_KSWAPD_RECLAIM", "KR" },
-	{ "__GFP_OTHER_NODE", "ON" },
 };
 
 static size_t max_gfp_len;
...