/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally, it
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */
#include "misc.h"
#include "error.h"

#include <asm/msr.h>
#include <asm/archrandom.h>
#include <asm/e820.h>

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

#define I8254_PORT_CONTROL	0x43
#define I8254_PORT_COUNTER0	0x40
#define I8254_CMD_READBACK	0xC0
#define I8254_SELECT_COUNTER0	0x02
#define I8254_STATUS_NOTREADY	0x40
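/*
 * Read the current count of i8254 PIT counter 0 via the read-back command,
 * retrying while the status byte reports the count as not ready. Used as a
 * fallback entropy source when neither RDRAND nor RDTSC is available.
 */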
static inline u16 i8254(void)
{
	u16 status, timer;

	do {
		outb(I8254_PORT_CONTROL,
		     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
		status = inb(I8254_PORT_COUNTER0);
		timer  = inb(I8254_PORT_COUNTER0);
		timer |= inb(I8254_PORT_COUNTER0) << 8;
	} while (status & I8254_STATUS_NOTREADY);

	return timer;
}

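/*
 * Fold an arbitrary memory area into 'hash', one unsigned long at a time,
 * rotating the hash between XORs so successive words land on different
 * bit positions. Any tail smaller than sizeof(hash) is ignored.
 */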
static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_random_boot(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

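/*
 * Gather entropy for the random offset: start from the boot-specific hash,
 * mix in RDRAND and/or RDTSC when available, fall back to the i8254 timer
 * otherwise, then diffuse the bits with a widening multiply by a constant.
 */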
static unsigned long get_random_long(void)
{
#ifdef CONFIG_X86_64
	const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
#else
	const unsigned long mix_const = 0x3f39e593UL;
#endif
	unsigned long raw, random = get_random_boot();
	bool use_i8254 = true;

	debug_putstr("KASLR using");

	if (has_cpuflag(X86_FEATURE_RDRAND)) {
		debug_putstr(" RDRAND");
		if (rdrand_long(&raw)) {
			random ^= raw;
			use_i8254 = false;
		}
	}

	if (has_cpuflag(X86_FEATURE_TSC)) {
		debug_putstr(" RDTSC");
		raw = rdtsc();

		random ^= raw;
		use_i8254 = false;
	}

	if (use_i8254) {
		debug_putstr(" i8254");
		random ^= i8254();
	}

	/* Circular multiply for better bit diffusion */
	asm("mul %3"
	    : "=a" (random), "=d" (raw)
	    : "a" (random), "rm" (mix_const));
	random += raw;

	debug_putstr("...\n");

	return random;
}

struct mem_vector {
	unsigned long start;
	unsigned long size;
};

enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MAX,
};

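/* Memory ranges that the randomized kernel placement must not overlap. */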
static struct mem_vector mem_avoid[MEM_AVOID_MAX];

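/* Return true if 'item' lies entirely within 'region'. */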
static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
{
	/* Item at least partially before region. */
	if (item->start < region->start)
		return false;
	/* Item at least partially after region. */
	if (item->start + item->size > region->start + region->size)
		return false;
	return true;
}

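/* Return true if the two regions overlap anywhere. */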
static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}

/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to boot failures that are really hard to debug.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is less obvious is how to avoid the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as the maximum of the two. The diagram shows a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |     |                 |                             |             |
 * |     |                 |                             |             |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 * output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code (i.e. the
 * ZO_INIT_SIZE-sized range counted back from output+init_size).
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided because the
 * decompression code runs there.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}

/* Does this memory vector overlap a known avoided area? */
static bool mem_avoid_overlap(struct mem_vector *img)
{
	int i;
	struct setup_data *ptr;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]))
			return true;
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid))
			return true;

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return false;
}

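/*
 * Candidate load addresses ("slots") are CONFIG_PHYSICAL_ALIGN aligned and
 * must start below KERNEL_IMAGE_SIZE, which bounds how many can be stored.
 */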
static unsigned long slots[KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN];

struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

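/*
 * Record a contiguous region as a slot area: its start address plus the
 * number of aligned positions at which an image of 'image_size' bytes
 * still fits entirely inside the region.
 */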
static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}

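/* Record a single aligned candidate address in the slots list. */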
static void slots_append(unsigned long addr)
{
	/* Overflowing the slots list should be impossible. */
	if (slot_max >= KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN)
		return;

	slots[slot_max++] = addr;
}

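/* Pick one of the recorded candidate addresses at random. */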
static unsigned long slots_fetch_random(void)
{
	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	return slots[get_random_long() % slot_max];
}

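/*
 * Walk one e820 RAM entry: clip it to [minimum, KERNEL_IMAGE_SIZE), align
 * the start, and record every aligned position at which the image fits
 * without overlapping any avoided region.
 */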
static void process_e820_entry(struct e820entry *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, img;

	/* Skip non-RAM entries. */
	if (entry->type != E820_RAM)
		return;

	/* Ignore entries entirely above our maximum. */
	if (entry->addr >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->addr + entry->size < minimum)
		return;

	region.start = entry->addr;
	region.size = entry->size;

	/* Potentially raise address to minimum location. */
	if (region.start < minimum)
		region.start = minimum;

	/* Potentially raise address to meet alignment requirements. */
	region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

	/* Did we raise the address above the bounds of this e820 region? */
	if (region.start > entry->addr + entry->size)
		return;

	/* Reduce size by any delta from the original address. */
	region.size -= region.start - entry->addr;

	/* Reduce maximum size to fit end of image within maximum limit. */
	if (region.start + region.size > KERNEL_IMAGE_SIZE)
		region.size = KERNEL_IMAGE_SIZE - region.start;

	/* Walk each aligned slot and check for avoided areas. */
	for (img.start = region.start, img.size = image_size ;
	     mem_contains(&region, &img) ;
	     img.start += CONFIG_PHYSICAL_ALIGN) {
		if (mem_avoid_overlap(&img))
			continue;
		slots_append(img.start);
	}
}

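/*
 * Gather candidate slots from all usable e820 entries, then pick one at
 * random. Returns 0 if no suitable position was found.
 */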
static unsigned long find_random_addr(unsigned long minimum,
				      unsigned long size)
{
	int i;
	unsigned long addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		process_e820_entry(&boot_params->e820_map[i], minimum, size);
	}

	return slots_fetch_random();
}

/*
 * Since this function deals with addresses numerically rather than
 * dereferencing them, it takes the input and output pointers as
 * 'unsigned long'.
 */
unsigned char *choose_random_location(unsigned long input,
				      unsigned long input_size,
				      unsigned long output,
				      unsigned long output_size)
{
	unsigned long choice = output;
	unsigned long random_addr;

#ifdef CONFIG_HIBERNATION
	if (!cmdline_find_option_bool("kaslr")) {
		warn("KASLR disabled: 'kaslr' not on cmdline (hibernation selected).");
		goto out;
	}
#else
	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		goto out;
	}
#endif

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, output);

	/* Walk e820 and find a random address. */
	random_addr = find_random_addr(output, output_size);
	if (!random_addr) {
		warn("KASLR disabled: could not find suitable E820 region!");
		goto out;
	}

	/* Always enforce the minimum. */
	if (random_addr < choice)
		goto out;

	choice = random_addr;

	add_identity_map(choice, output_size);

	/* This actually loads the identity pagetable on x86_64. */
	finalize_identity_maps();
out:
	return (unsigned char *)choice;
}