/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
}

static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
}

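/*
 * Report the number of reclaimable objects (clean nat entries plus
 * extent cache nodes) across all mounted f2fs instances.
 */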
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/*
		 * Take umount_mutex to hold off f2fs_put_super(); if
		 * umount already owns it, skip this sb.
		 */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

/*
 * Scan the mounted f2fs instances and reclaim up to sc->nr_to_scan
 * objects, visiting each sb at most once per pass.
 */
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
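	/*
	 * Tag this pass with a non-zero run number; 0 never marks an
	 * already-visited sb.
	 */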
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/*
		 * Take umount_mutex to hold off f2fs_put_super(); if
		 * umount already owns it, skip this sb.
		 */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* mark this sb as visited in this pass */
		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
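		/* move this sb to the tail so later passes start elsewhere */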
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

/* Add this sb to the global list that the shrinker walks. */
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

/* Remove this sb from the global shrinker list. */
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}