/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>

#include <asm/tlbflush.h>

extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);
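
/*
 * Account one new section to @zone: initialise the zone if it was empty,
 * initialise the new pages' struct page entries and add the zone/section
 * pair to the zone table.
 */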
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
	return 0;
}

extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);
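
/*
 * Add one sparsemem section's worth of pages: allocate and hook up the
 * section's mem_map via sparse_add_one_section(), account the pages to
 * @zone, and register the new section with the memory sysfs layer.
 */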
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		/* We want to keep adding the rest of the
		 * sections if the first ones already exist
		 */
		if (err && (err != -EEXIST))
			break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
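
/*
 * For illustration: a minimal sketch of the arch-side caller that the
 * comment above __add_pages() refers to.  The zone choice (ZONE_NORMAL)
 * is only an assumption; each architecture decides the zone for itself
 * (e.g. highmem on 32-bit) before calling __add_pages().
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *
 *		return __add_pages(zone, start >> PAGE_SHIFT,
 *				   size >> PAGE_SHIFT);
 *	}
 */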

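/*
 * Grow @zone's span so that it covers [start_pfn, end_pfn).  Called with
 * the pgdat resize lock held; takes the zone span write lock itself.
 */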
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

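/*
 * Likewise for the node: grow pgdat->node_spanned_pages so the node spans
 * [start_pfn, end_pfn).  Also called with the pgdat resize lock held.
 */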
static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

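/*
 * Bring a range of previously added pages online: grow the zone and node
 * spans, hand every page backing "System RAM" in the range to the page
 * allocator via online_page(), update the present-page counters, and
 * rebuild the zonelists if the zone was empty before.
 */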
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct resource res;
	u64 section_end;
	unsigned long start_pfn;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, it is not in any zonelist and the
	 * page allocator ignores it, so the zonelists must be rebuilt after
	 * the pages have been onlined.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	res.start = (u64)pfn << PAGE_SHIFT;
	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM; /* we just need system ram */
	section_end = res.end;

	while (find_next_system_ram(&res) >= 0) {
		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		nr_pages = (unsigned long)
                           ((res.end + 1 - res.start) >> PAGE_SHIFT);

		if (PageReserved(pfn_to_page(start_pfn))) {
			/* the pages in this region are not online yet */
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pfn_to_page(start_pfn + i);
				online_page(page);
				onlined_pages++;
			}
		}

		res.start = res.end + 1;
		res.end = section_end;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	return 0;
}

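/*
 * Allocate and minimally initialise a pgdat for a node that is being
 * hot-added but was not present at boot.  The node's zones start out
 * empty; pages are accounted later, when they are actually onlined.
 */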
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages. */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/* add this memory to iomem resource */
static void register_memory_resource(u64 start, u64 size)
{
	struct resource *res;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %llx - %llx cannot be added\n",
		(unsigned long long)res->start, (unsigned long long)res->end);
		kfree(res);
	}
}



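/*
 * Hot-add the physical range [start, start + size) to node @nid: create
 * the node's pgdat if it was offline, let the architecture map the memory
 * and create its sections, mark the node online, register it with sysfs
 * and claim the range as "System RAM" in the iomem resource tree.
 */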
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	int ret;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* We online the node here; from this point there is no rolling back. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs files for the new node cannot be created,
		 * CPUs on the node cannot be hot-added either.  There is
		 * no way to roll back at this point, so catch the failure
		 * with BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	/* register this memory as a resource */
	register_memory_resource(start, size);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
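
/*
 * For illustration: add_memory() is the entry point a platform driver
 * (an ACPI memory-hotplug driver, for example) would use when firmware
 * reports a new memory device.  A hypothetical caller, with @nid, @start
 * and @size assumed to come from firmware, might simply do:
 *
 *	err = add_memory(nid, start, size);
 *	if (err)
 *		return err;
 *
 * The new sections are onlined separately via online_pages(), triggered
 * through the memory block "state" attribute in sysfs.
 */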