/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>

#include <asm/tlbflush.h>

extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);
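
/*
 * Set up one newly added, section-aligned chunk for @zone: initialize the
 * zone if it was empty so far, init the new struct pages, and record the
 * zone/section mapping in the zone table.
 */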
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!populated_zone(zone)) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
	return 0;
}

extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);
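
/*
 * Add one SPARSEMEM section (PAGES_PER_SECTION pages) starting at
 * @phys_start_pfn: allocate its mem_map, hook it into @zone, and create
 * the corresponding sysfs memory block.
 */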
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* During mem_map initialization, align the hot-added range to sections. */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning is printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
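
/*
 * Example (sketch, not part of this file): an architecture's
 * arch_add_memory() is expected to pick the target zone, set up whatever
 * kernel mappings it needs, and then hand the pfn range to __add_pages().
 * This is roughly along the lines of the x86_64 implementation; the zone
 * choice and the mapping call are per-arch policy and only stand-ins here.
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		init_memory_mapping(start, start + size);
 *		return __add_pages(zone, start_pfn, nr_pages);
 *	}
 */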
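
/*
 * The two span-growing helpers below are called with the pgdat resize lock
 * held (see online_pages()); they only extend the spans, never shrink them.
 */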

static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct resource res;
	u64 section_end;
	unsigned long start_pfn;
	struct zone *zone;
	int need_zonelists_rebuild = 0;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not yet populated, it is not in the zonelists and
	 * the page allocator ignores it, so the zonelists must be rebuilt
	 * after the pages are onlined.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	res.start = (u64)pfn << PAGE_SHIFT;
	res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM; /* we just need system ram */
	section_end = res.end;

	while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
		start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
		nr_pages = (unsigned long)
				((res.end + 1 - res.start) >> PAGE_SHIFT);

		if (PageReserved(pfn_to_page(start_pfn))) {
			/* the pages in this region are not online yet */
			for (i = 0; i < nr_pages; i++) {
				struct page *page = pfn_to_page(start_pfn + i);
				online_page(page);
				onlined_pages++;
			}
		}

		res.start = res.end + 1;
		res.end = section_end;
	}
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	return 0;
}
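
/*
 * Usage note (sketch): online_pages() is not called by drivers directly; it
 * is reached from the memory sysfs code (drivers/base/memory.c) when a
 * memory block is switched online, e.g.:
 *
 *	# echo online > /sys/devices/system/memory/memory<N>/state
 */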

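/*
 * Allocate and minimally initialize a pgdat for a node that is being
 * brought up by memory hot-add; all of its zones start out empty.
 */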
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* Init the node's zones as empty zones; there are no present pages yet. */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res) < 0) {
		printk(KERN_ERR "System RAM resource %llx - %llx cannot be added\n",
			(unsigned long long)res->start,
			(unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

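/*
 * Top-level memory hot-add entry point: reserve the range as "System RAM"
 * in the iomem tree, create the node's pgdat (and start kswapd) if the
 * node was offline, let the architecture add the pages, and finally
 * register the new node with sysfs.
 */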
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
		ret = kswapd_run(nid);
		if (ret)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* We online the node here; from this point on there is no rolling back. */
	node_set_online(nid);

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file for the new node cannot be created,
		 * CPUs on that node cannot be hot-added.  There is no way
		 * to roll back at this point, so catch the failure with
		 * BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
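
/*
 * Example caller (sketch, not part of this file): the ACPI memory hotplug
 * driver turns a hot-added memory device into an add_memory() call,
 * roughly:
 *
 *	result = add_memory(node, info->start_addr, info->length);
 *	if (result)
 *		return result;
 *
 * where 'node' comes from the device's ACPI proximity domain and 'info'
 * describes the new range (see drivers/acpi/acpi_memhotplug.c).
 */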