Commit 9af744d7 authored by Michal Hocko, committed by Linus Torvalds

lib/show_mem.c: teach show_mem to work with the given nodemask

show_mem() makes it possible to filter out node-specific data which is
irrelevant to the allocation request via SHOW_MEM_FILTER_NODES.  The
filtering is done in skip_free_areas_node, which skips all nodes that
are not in the mems_allowed of the current process.  This works as
expected most of the time because the nodemask shouldn't be outside of
the allocating task, but there are some exceptions.  E.g.  memory
hotplug might want to request allocations from outside of the allowed
nodes (see new_node_page).

Get rid of this hardcoded behavior and push the allocation mask down
the show_mem path, using it instead of cpuset_current_mems_allowed.  A
NULL nodemask is interpreted as cpuset_current_mems_allowed.
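
For illustration only, the intended NULL-nodemask fallback can be
modelled with a small userspace sketch.  This is not kernel code:
node_skip(), ALLOWED_NODES and the plain-bitmask stand-in for
nodemask_t are made up for the example; it only demonstrates that an
absent nodemask degrades to the current task's allowed nodes, while an
explicit mask (e.g. from memory hotplug) is honoured.

	/*
	 * Userspace model of the NULL-nodemask fallback described above.
	 * ALLOWED_NODES is a made-up stand-in for cpuset_current_mems_allowed.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define SHOW_MEM_FILTER_NODES	0x0001u
	#define ALLOWED_NODES		0x3ul	/* pretend nodes 0 and 1 are allowed */

	static bool node_skip(unsigned int flags, int nid, const unsigned long *nodemask)
	{
		unsigned long mask;

		if (!(flags & SHOW_MEM_FILTER_NODES))
			return false;	/* no filtering requested: show every node */

		/* a NULL nodemask falls back to the task's allowed nodes */
		mask = nodemask ? *nodemask : ALLOWED_NODES;
		return !(mask & (1ul << nid));
	}

	int main(void)
	{
		unsigned long hotplug_mask = 0x4ul;	/* explicit request for node 2 only */

		printf("%d\n", node_skip(SHOW_MEM_FILTER_NODES, 2, NULL));		/* 1: skipped */
		printf("%d\n", node_skip(SHOW_MEM_FILTER_NODES, 2, &hotplug_mask));	/* 0: shown */
		return 0;
	}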

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20170117091543.25850-5-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 6d23f8a5
@@ -916,7 +916,7 @@ cmds(struct pt_regs *excp)
 memzcan();
 break;
 case 'i':
-show_mem(0);
+show_mem(0, NULL);
 break;
 default:
 termch = cmd;
@@ -82,7 +82,7 @@ static void prom_sync_me(void)
 "nop\n\t" : : "r" (&trapbase));
 prom_printf("PROM SYNC COMMAND...\n");
-show_free_areas(0);
+show_free_areas(0, NULL);
 if (!is_idle_task(current)) {
 local_irq_enable();
 sys_sync();
@@ -914,7 +914,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
 if (!skb) {
-show_free_areas(0);
+show_free_areas(0, NULL);
 continue;
 }
@@ -317,7 +317,7 @@ static struct sysrq_key_op sysrq_ftrace_dump_op = {
 static void sysrq_handle_showmem(int key)
 {
-show_mem(0);
+show_mem(0, NULL);
 }
 static struct sysrq_key_op sysrq_showmem_op = {
 .handler = sysrq_handle_showmem,
@@ -572,7 +572,7 @@ static void fn_scroll_back(struct vc_data *vc)
 static void fn_show_mem(struct vc_data *vc)
 {
-show_mem(0);
+show_mem(0, NULL);
 }
 static void fn_show_state(struct vc_data *vc)
@@ -1152,8 +1152,7 @@ extern void pagefault_out_of_memory(void);
  */
 #define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-extern void show_free_areas(unsigned int flags);
-extern bool skip_free_areas_node(unsigned int flags, int nid);
+extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
 int shmem_zero_setup(struct vm_area_struct *);
 #ifdef CONFIG_SHMEM
@@ -1934,7 +1933,7 @@ extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
 extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags);
+extern void show_mem(unsigned int flags, nodemask_t *nodemask);
 extern long si_mem_available(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -9,13 +9,13 @@
 #include <linux/quicklist.h>
 #include <linux/cma.h>
-void show_mem(unsigned int filter)
+void show_mem(unsigned int filter, nodemask_t *nodemask)
 {
 pg_data_t *pgdat;
 unsigned long total = 0, reserved = 0, highmem = 0;
 printk("Mem-Info:\n");
-show_free_areas(filter);
+show_free_areas(filter, nodemask);
 for_each_online_pgdat(pgdat) {
 unsigned long flags;
@@ -1191,7 +1191,7 @@ static int do_mmap_private(struct vm_area_struct *vma,
 enomem:
 pr_err("Allocation of length %lu from process %d (%s) failed\n",
 len, current->pid, current->comm);
-show_free_areas(0);
+show_free_areas(0, NULL);
 return -ENOMEM;
 }
@@ -1412,13 +1412,13 @@ unsigned long do_mmap(struct file *file,
 kmem_cache_free(vm_region_jar, region);
 pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
 len, current->pid);
-show_free_areas(0);
+show_free_areas(0, NULL);
 return -ENOMEM;
 error_getting_region:
 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
 len, current->pid);
-show_free_areas(0);
+show_free_areas(0, NULL);
 return -ENOMEM;
 }
@@ -417,7 +417,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
 if (oc->memcg)
 mem_cgroup_print_oom_info(oc->memcg, p);
 else
-show_mem(SHOW_MEM_FILTER_NODES);
+show_mem(SHOW_MEM_FILTER_NODES, nm);
 if (sysctl_oom_dump_tasks)
 dump_tasks(oc->memcg, oc->nodemask);
 }
@@ -3005,7 +3005,7 @@ static inline bool should_suppress_show_mem(void)
 return ret;
 }
-static void warn_alloc_show_mem(gfp_t gfp_mask)
+static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
 {
 unsigned int filter = SHOW_MEM_FILTER_NODES;
 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
@@ -3025,7 +3025,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask)
 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 filter &= ~SHOW_MEM_FILTER_NODES;
-show_mem(filter);
+show_mem(filter, nodemask);
 }
 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
@@ -3052,7 +3052,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
 cpuset_print_current_mems_allowed();
 dump_stack();
-warn_alloc_show_mem(gfp_mask);
+warn_alloc_show_mem(gfp_mask, nm);
 }
 static inline struct page *
@@ -4274,20 +4274,20 @@ void si_meminfo_node(struct sysinfo *val, int nid)
  * Determine whether the node should be displayed or not, depending on whether
  * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
  */
-bool skip_free_areas_node(unsigned int flags, int nid)
+static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
 {
-bool ret = false;
-unsigned int cpuset_mems_cookie;
 if (!(flags & SHOW_MEM_FILTER_NODES))
-goto out;
+return false;
-do {
-cpuset_mems_cookie = read_mems_allowed_begin();
-ret = !node_isset(nid, cpuset_current_mems_allowed);
-} while (read_mems_allowed_retry(cpuset_mems_cookie));
-out:
-return ret;
+/*
+ * no node mask - aka implicit memory numa policy. Do not bother with
+ * the synchronization - read_mems_allowed_begin - because we do not
+ * have to be precise here.
+ */
+if (!nodemask)
+nodemask = &cpuset_current_mems_allowed;
+return !node_isset(nid, *nodemask);
 }
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -4328,7 +4328,7 @@ static void show_migration_types(unsigned char type)
  * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
  * cpuset.
  */
-void show_free_areas(unsigned int filter)
+void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 {
 unsigned long free_pcp = 0;
 int cpu;
@@ -4336,7 +4336,7 @@ void show_free_areas(unsigned int filter)
 pg_data_t *pgdat;
 for_each_populated_zone(zone) {
-if (skip_free_areas_node(filter, zone_to_nid(zone)))
+if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
 continue;
 for_each_online_cpu(cpu)
@@ -4370,7 +4370,7 @@ void show_free_areas(unsigned int filter)
 global_page_state(NR_FREE_CMA_PAGES));
 for_each_online_pgdat(pgdat) {
-if (skip_free_areas_node(filter, pgdat->node_id))
+if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
 continue;
 printk("Node %d"
@@ -4422,7 +4422,7 @@ void show_free_areas(unsigned int filter)
 for_each_populated_zone(zone) {
 int i;
-if (skip_free_areas_node(filter, zone_to_nid(zone)))
+if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
 continue;
 free_pcp = 0;
@@ -4487,7 +4487,7 @@ void show_free_areas(unsigned int filter)
 unsigned long nr[MAX_ORDER], flags, total = 0;
 unsigned char types[MAX_ORDER];
-if (skip_free_areas_node(filter, zone_to_nid(zone)))
+if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
 continue;
 show_node(zone);
 printk(KERN_CONT "%s: ", zone->name);