memblock.c
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/range.h>

/*
 * Check [*addrp, *addrp + *sizep) for overlaps with already reserved areas
 * and clip the range around them.  Returns true if the range was adjusted
 * and should be re-checked.
 */
static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	struct memblock_region *r;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	bool changed = false;

again:
	last = addr + size;
	for_each_memblock(reserved, r) {
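		/* Reserved region starts inside [addr, last): trim the end down to r->base */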
		if (last > r->base && addr < r->base) {
			size = r->base - addr;
			changed = true;
			goto again;
		}
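		/* Reserved region ends inside [addr, last): move the start up past it */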
		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
			addr = round_up(r->base + r->size, align);
			size = last - addr;
			changed = true;
			goto again;
		}
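		/* [addr, last) lies entirely inside this reserved region */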
		if (last <= (r->base + r->size) && addr >= r->base) {
			(*sizep)++;
			return false;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

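/*
 * Scan one memory region [ei_start, ei_last) for a free (not reserved) area
 * at or above @start, aligned to @align.  The usable size is returned in
 * *sizep; returns the start address, or MEMBLOCK_ERROR if nothing fits.
 */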
static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start,
			 u64 *sizep, u64 align)
{
	u64 addr, last;

	addr = round_up(ei_start, align);
	if (addr < start)
		addr = round_up(start, align);
	if (addr >= ei_last)
		goto out;
	*sizep = ei_last - addr;
	while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last)
		;
	last = addr + *sizep;
	if (last > ei_last)
		goto out;

	return addr;

out:
	return MEMBLOCK_ERROR;
}

/*
 * Find the next free range at or above @start; its size is returned in *sizep.
 */
u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
{
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		u64 ei_start = r->base;
		u64 ei_last = ei_start + r->size;
		u64 addr;

		addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start,
					 sizep, align);

		if (addr != MEMBLOCK_ERROR)
			return addr;
	}

	return MEMBLOCK_ERROR;
}

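/*
 * Grab a temporary, zeroed array of @count struct range entries from
 * memblock, below memblock.current_limit.
 */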
static __init struct range *find_range_array(int count)
{
	u64 end, size, mem;
	struct range *range;

	size = sizeof(struct range) * count;
	end = memblock.current_limit;

	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
	if (mem == MEMBLOCK_ERROR)
		panic("can not find more space for range array");

	/*
	 * This range is temporary, so don't reserve it: it will not be
	 * overlapped, because we will not allocate a new buffer before
	 * we discard this one.
	 */
	range = __va(mem);
	memset(range, 0, size);

	return range;
}

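/*
 * Knock every memblock.reserved region (in page-frame units) out of the
 * range array, leaving only free memory in it.
 */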
static void __init memblock_x86_subtract_reserved(struct range *range, int az)
{
	u64 final_start, final_end;
	struct memblock_region *r;

	/* Take out the region array itself first */
	memblock_free_reserved_regions();

	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);

	for_each_memblock(reserved, r) {
		memblock_dbg("  [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		subtract_range(range, az, final_start, final_end);
	}

	/* Put region array back ? */
	memblock_reserve_reserved_regions();
}

struct count_data {
	int nr;
};

static int __init count_work_fn(unsigned long start_pfn,
				unsigned long end_pfn, void *datax)
{
	struct count_data *data = datax;

	data->nr++;

	return 0;
}

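/* Count the active early_node_map ranges on @nodeid */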
static int __init count_early_node_map(int nodeid)
{
	struct count_data data;

	data.nr = 0;
	work_with_active_regions(nodeid, count_work_fn, &data);

	return data.nr;
}

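/*
 * Build the array of free memory ranges (active regions on @nodeid minus
 * memblock.reserved) and return how many entries it has; *rangep points
 * at the temporary array from find_range_array().
 */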
int __init get_free_all_memory_range(struct range **rangep, int nodeid)
{
	int count;
	struct range *range;
	int nr_range;

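	/*
	 * Worst-case sizing: each region subtracted below can split an
	 * existing range in two.
	 */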
	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;

	range = find_range_array(count);
	nr_range = 0;

	/*
	 * Use early_node_map[] and memblock.reserved.region to get range array
	 * at first
	 */
	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
#ifdef CONFIG_X86_32
	subtract_range(range, count, max_low_pfn, -1ULL);
#endif
	memblock_x86_subtract_reserved(range, count);
	nr_range = clean_sort_range(range, count);

	*rangep = range;
	return nr_range;
}

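/*
 * Return how many bytes of memblock memory fall inside [addr, limit).
 * With @get_free set, memblock.reserved regions are subtracted first, so
 * only free memory is counted.
 */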
static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
{
	int i, count;
	struct range *range;
	int nr_range;
	u64 final_start, final_end;
	u64 free_size;
	struct memblock_region *r;

	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;

	range = find_range_array(count);
	nr_range = 0;

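	/* Work in page-frame units from here on */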
	addr = PFN_UP(addr);
	limit = PFN_DOWN(limit);

	for_each_memblock(memory, r) {
		final_start = PFN_UP(r->base);
		final_end = PFN_DOWN(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		nr_range = add_range(range, count, nr_range, final_start, final_end);
	}
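	/* Clip the collected ranges to [addr, limit) */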
	subtract_range(range, count, 0, addr);
	subtract_range(range, count, limit, -1ULL);

	/* Subtract memblock.reserved.region in range ? */
	if (!get_free)
		goto sort_and_count_them;
	for_each_memblock(reserved, r) {
		final_start = PFN_DOWN(r->base);
		final_end = PFN_UP(r->base + r->size);
		if (final_start >= final_end)
			continue;
		if (final_start >= limit || final_end <= addr)
			continue;

		subtract_range(range, count, final_start, final_end);
	}

sort_and_count_them:
	nr_range = clean_sort_range(range, count);

	free_size = 0;
	for (i = 0; i < nr_range; i++)
		free_size += range[i].end - range[i].start;

	return free_size << PAGE_SHIFT;
}

u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, true);
}

u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
{
	return __memblock_x86_memory_in_range(addr, limit, false);
}

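/* Reserve [start, end) in memblock, with a debug line tagged by @name */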
void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg("    memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);

	memblock_reserve(start, end - start);
}

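/* Give [start, end) back to memblock as free memory */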
void __init memblock_x86_free_range(u64 start, u64 end)
{
	if (start == end)
		return;

	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
		return;

	memblock_dbg("       memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);

	memblock_free(start, end - start);
}

/*
 * This must be called after memblock_x86_register_active_regions(), so that
 * early_node_map[] is already filled.
 */
u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
{
	u64 addr;
	addr = find_memory_core_early(nid, size, align, start, end);
	if (addr != MEMBLOCK_ERROR)
		return addr;

	/* Fallback: start/end should already be within the node's range */
	return memblock_find_in_range(start, end, size, align);
}

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
 */
static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
				  unsigned long start_pfn,
				  unsigned long last_pfn,
				  unsigned long *ei_startpfn,
				  unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the memblock.memory map and register active regions within a node */
void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
					   &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init memblock_x86_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	struct memblock_region *r;

	for_each_memblock(memory, r)
		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
					   &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;

	return end - start - ((u64)ram << PAGE_SHIFT);
}