    shrinker: add node awareness · 0ce3d744
    Committed by Dave Chinner
    Pass the node of the current zone being reclaimed to shrink_slab(),
    allowing the shrinker control nodemask to be set appropriately for node
    aware shrinkers.
    Signed-off-by: Dave Chinner <dchinner@redhat.com>
    Signed-off-by: Glauber Costa <glommer@openvz.org>
    Acked-by: Mel Gorman <mgorman@suse.de>
    Cc: "Theodore Ts'o" <tytso@mit.edu>
    Cc: Adrian Hunter <adrian.hunter@intel.com>
    Cc: Al Viro <viro@zeniv.linux.org.uk>
    Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
    Cc: Arve Hjønnevåg <arve@android.com>
    Cc: Carlos Maiolino <cmaiolino@redhat.com>
    Cc: Christoph Hellwig <hch@lst.de>
    Cc: Chuck Lever <chuck.lever@oracle.com>
    Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
    Cc: David Rientjes <rientjes@google.com>
    Cc: Gleb Natapov <gleb@redhat.com>
    Cc: Greg Thelen <gthelen@google.com>
    Cc: J. Bruce Fields <bfields@redhat.com>
    Cc: Jan Kara <jack@suse.cz>
    Cc: Jerome Glisse <jglisse@redhat.com>
    Cc: John Stultz <john.stultz@linaro.org>
    Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
    Cc: Kent Overstreet <koverstreet@google.com>
    Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Cc: Marcelo Tosatti <mtosatti@redhat.com>
    Cc: Mel Gorman <mgorman@suse.de>
    Cc: Steven Whitehouse <swhiteho@redhat.com>
    Cc: Thomas Hellstrom <thellstrom@vmware.com>
    Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
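
A minimal caller-side sketch of the change this commit message describes, assuming the reclaim path has a zone it is currently scanning and already-computed nr_scanned/lru_pages values (those names are illustrative context, not code shown on this page). The caller clears the shrink_control nodemask, sets the node that owns the zone being reclaimed, and then calls shrink_slab():

	/* Sketch only: node-aware shrink_slab() call from a reclaim path. */
	struct shrink_control shrink = {
		.gfp_mask = GFP_KERNEL,		/* or the reclaim context's gfp_mask */
	};

	nodes_clear(shrink.nodes_to_scan);
	node_set(zone_to_nid(zone), shrink.nodes_to_scan);
	shrink_slab(&shrink, nr_scanned, lru_pages);

Callers that are not tied to a single node, such as drop_slab() in the file below, instead set all nodes in the mask with nodes_setall().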
drop_caches.c 1.5 KB
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		iput(toput_inode);
		toput_inode = inode;
		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(toput_inode);
}

static void drop_slab(void)
{
	int nr_objects;
	struct shrink_control shrink = {
		.gfp_mask = GFP_KERNEL,
	};

	nodes_setall(shrink.nodes_to_scan);
	do {
		nr_objects = shrink_slab(&shrink, 1000, 1000);
	} while (nr_objects > 10);
}

int drop_caches_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
		if (sysctl_drop_caches & 1)
			iterate_supers(drop_pagecache_sb, NULL);
		if (sysctl_drop_caches & 2)
			drop_slab();
	}
	return 0;
}
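
For context, a hedged userspace usage sketch (not part of this commit): the handler above is reached by writing to /proc/sys/vm/drop_caches, where bit 0 (value 1) drops clean pagecache via drop_pagecache_sb() and bit 1 (value 2) drops slab objects via drop_slab():

/* Sketch only: write "3" (1 | 2) to drop both pagecache and slab caches. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

	if (!f)
		return 1;
	fputs("3\n", f);
	return fclose(f) ? 1 : 0;
}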