/* page_cgroup.c */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>

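/* Reset a page_cgroup entry and bind it to the page at @pfn. */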
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
}

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

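/* No table yet; page_cgroup_init() allocates one per node at boot. */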
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

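/*
 * Flat memory model: index the node's page_cgroup array by the pfn's
 * offset from the node's first pfn.
 *
 * Typical caller pattern (a sketch; the lock helpers come from
 * <linux/page_cgroup.h>):
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *	lock_page_cgroup(pc);	(bit spinlock in pc->flags)
 *	... read or update pc->mem_cgroup ...
 *	unlock_page_cgroup(pc);
 */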
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}

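/*
 * Allocate the node's table from bootmem: one page_cgroup per page
 * spanned by the node.  Runs before the slab allocator is available.
 */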
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}

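/* Boot-time setup for !SPARSEMEM: build a page_cgroup table per node. */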
void __init page_cgroup_init(void)
{
	int nid, fail;

	if (mem_cgroup_subsys.disabled)
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you"
	" don't want\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup was failed.\n");
	printk(KERN_CRIT "please try cgroup_disable=memory boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

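/*
 * SPARSEMEM: each mem_section carries its own table.  The stored pointer
 * is pre-biased by the section's start pfn (see init_section_page_cgroup()),
 * so indexing with the absolute pfn lands on the right entry.
 */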
struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	return section->page_cgroup + pfn;
}

/* __alloc_bootmem...() is only used while !slab_is_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * The table is already allocated, but the memmap address
		 * may have changed, so the entries must be reinitialized.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* if the memmap address is unchanged, nothing to do */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	/* store the base biased by pfn so lookup can index by absolute pfn */
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
#ifdef CONFIG_MEMORY_HOTPLUG
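/*
 * Free a section's table with the allocator that created it; bootmem
 * tables live on reserved pages and are left in place.
 */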
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		if (!PageReserved(page)) { /* bootmem pages are reserved; leave them */
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}

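/*
 * Allocate tables for every section in the range going online; on any
 * failure, roll the whole range back.
 */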
int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}

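/* Tear down the tables covering a range that has gone offline. */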
int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

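/*
 * Memory hotplug notifier: tables are set up at MEM_GOING_ONLINE, before
 * any page in the range can be used, and freed at MEM_OFFLINE.
 */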
static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;
	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;

	return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

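/* Boot-time setup for SPARSEMEM: walk all present sections. */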
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_subsys.disabled)
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try cgroup_disable=memory option if you don't"
	" want\n");
}

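/* Nothing to do: with SPARSEMEM the tables hang off mem_section, not the node. */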
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}

#endif