/*P:100
 * This is the Launcher code, a simple program which lays out the "physical"
 * memory for the new Guest by mapping the kernel image and the virtual
 * devices, then opens /dev/lguest to tell the kernel about the Guest and
 * control it.
:*/
#define _LARGEFILE64_SOURCE
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <err.h>
#include <stdint.h>
#include <stdlib.h>
#include <elf.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/eventfd.h>
#include <fcntl.h>
#include <stdbool.h>
#include <errno.h>
#include <ctype.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <time.h>
#include <netinet/in.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
#include <termios.h>
#include <getopt.h>
#include <assert.h>
#include <sched.h>
#include <limits.h>
#include <stddef.h>
#include <signal.h>
#include <pwd.h>
#include <grp.h>
#include <sys/user.h>
#include <linux/pci_regs.h>

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT		27
#endif

/*L:110
 * We can ignore the 43 include files we need for this program, but I do want
 * to draw attention to the use of kernel-style types.
 *
 * As Linus said, "C is a Spartan language, and so should your naming be."  I
 * like these abbreviations, so we define them here.  Note that u64 is always
 * unsigned long long, which works on all Linux systems: this means that we can
 * use %llu in printf for any u64.
 */
typedef unsigned long long u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
/*:*/

#define VIRTIO_CONFIG_NO_LEGACY
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_BLK_NO_LEGACY

/* Use in-kernel ones, which define VIRTIO_F_VERSION_1 */
#include "../../include/uapi/linux/virtio_config.h"
#include "../../include/uapi/linux/virtio_net.h"
#include "../../include/uapi/linux/virtio_blk.h"
#include "../../include/uapi/linux/virtio_console.h"
#include "../../include/uapi/linux/virtio_rng.h"
#include <linux/virtio_ring.h>
#include "../../include/uapi/linux/virtio_pci.h"
#include <asm/bootparam.h>
#include "../../include/linux/lguest_launcher.h"

#define BRIDGE_PFX "bridge:"
#ifndef SIOCBRADDIF
#define SIOCBRADDIF	0x89a2		/* add interface to bridge      */
#endif
/* We can have up to 256 pages for devices. */
#define DEVICE_PAGES 256
/* This will occupy 3 pages: it must be a power of 2. */
#define VIRTQUEUE_NUM 256
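
/*
 * A rough sketch of the arithmetic (assuming 4096-byte pages): 256
 * descriptors take 256*16 = 4096 bytes, the available ring needs about
 * 2*256 + 6 more, and the used ring (which starts on the next page
 * boundary) needs about 8*256 + 6, so the whole vring fits in 3 pages.
 */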

/*L:120
 * verbose is both a global flag and a macro.  The C preprocessor allows
 * this, and although I wouldn't recommend it, it works quite nicely here.
 */
static bool verbose;
#define verbose(args...) \
	do { if (verbose) printf(args); } while(0)
/*:*/

/* The pointer to the start of guest memory. */
static void *guest_base;
/* The maximum guest physical address allowed, and maximum possible. */
static unsigned long guest_limit, guest_max, guest_mmio;
/* The /dev/lguest file descriptor. */
static int lguest_fd;

/* a per-cpu variable indicating whose vcpu is currently running */
static unsigned int __thread cpu_id;

/* 5 bit device number in the PCI_CONFIG_ADDR => 32 only */
#define MAX_PCI_DEVICES 32

/* This is our list of devices. */
struct device_list {
	/* Counter to assign interrupt numbers. */
	unsigned int next_irq;

	/* Counter to print out convenient device numbers. */
	unsigned int device_num;

	/* PCI devices. */
	struct device *pci[MAX_PCI_DEVICES];
};

/* The list of Guest devices, based on command line arguments. */
static struct device_list devices;

struct virtio_pci_cfg_cap {
	struct virtio_pci_cap cap;
	u32 pci_cfg_data; /* Data for BAR access. */
};

struct virtio_pci_mmio {
	struct virtio_pci_common_cfg cfg;
	u16 notify;
	u8 isr;
	u8 padding;
	/* Device-specific configuration follows this. */
};

/* This is the layout (little-endian) of the PCI config space. */
struct pci_config {
	u16 vendor_id, device_id;
	u16 command, status;
	u8 revid, prog_if, subclass, class;
	u8 cacheline_size, lat_timer, header_type, bist;
	u32 bar[6];
	u32 cardbus_cis_ptr;
	u16 subsystem_vendor_id, subsystem_device_id;
	u32 expansion_rom_addr;
	u8 capabilities, reserved1[3];
	u32 reserved2;
	u8 irq_line, irq_pin, min_grant, max_latency;

	/* Now, this is the linked capability list. */
	struct virtio_pci_cap common;
	struct virtio_pci_notify_cap notify;
	struct virtio_pci_cap isr;
	struct virtio_pci_cap device;
	struct virtio_pci_cfg_cap cfg_access;
};

/* The device structure describes a single device. */
struct device {
	/* The name of this device, for --verbose. */
	const char *name;

	/* Any queues attached to this device */
	struct virtqueue *vq;

	/* Is it operational */
	bool running;

	/* PCI configuration */
	union {
		struct pci_config config;
		u32 config_words[sizeof(struct pci_config) / sizeof(u32)];
	};

	/* Features we offer, and those accepted. */
	u64 features, features_accepted;

	/* Device-specific config hangs off the end of this. */
	struct virtio_pci_mmio *mmio;

	/* PCI MMIO resources (all in BAR0) */
	size_t mmio_size;
	u32 mmio_addr;

	/* Device-specific data. */
	void *priv;
};

/* The virtqueue structure describes a queue attached to a device. */
struct virtqueue {
	struct virtqueue *next;

	/* Which device owns me. */
	struct device *dev;

	/* The actual ring of buffers. */
	struct vring vring;

	/* The information about this virtqueue (we only use queue_size onwards) */
	struct virtio_pci_common_cfg pci_config;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* How many are used since we sent last irq? */
	unsigned int pending_used;

	/* Eventfd where Guest notifications arrive. */
	int eventfd;

	/* Function for the thread which is servicing this virtqueue. */
	void (*service)(struct virtqueue *vq);
	pid_t thread;
};

/* Remember the arguments to the program so we can "reboot" */
static char **main_args;

/* The original tty settings to restore on exit. */
static struct termios orig_term;

/*
 * We have to be careful with barriers: our devices are all run in separate
 * threads and so we need to make sure that changes visible to the Guest happen
 * in precise order.
 */
#define wmb() __asm__ __volatile__("" : : : "memory")
#define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
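
/*
 * Roughly speaking, x86 never reorders stores with other stores, so wmb()
 * only has to stop the compiler; the locked add in rmb() and mb() is just a
 * cheap way of getting a full fence when reads are involved.
 */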

/* Wrapper for the last available index.  Makes it easier to change. */
#define lg_last_avail(vq)	((vq)->last_avail_idx)

/*
 * The virtio configuration space is defined to be little-endian.  x86 is
 * little-endian too, but it's nice to be explicit so we have these helpers.
 */
#define cpu_to_le16(v16) (v16)
#define cpu_to_le32(v32) (v32)
#define cpu_to_le64(v64) (v64)
#define le16_to_cpu(v16) (v16)
#define le32_to_cpu(v32) (v32)
#define le64_to_cpu(v64) (v64)

/* Is this iovec empty? */
static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++)
		if (iov[i].iov_len)
			return false;
	return true;
}

/* Take len bytes from the front of this iovec. */
static void iov_consume(struct iovec iov[], unsigned num_iov,
			void *dest, unsigned len)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++) {
		unsigned int used;

		used = iov[i].iov_len < len ? iov[i].iov_len : len;
		if (dest) {
			memcpy(dest, iov[i].iov_base, used);
			dest += used;
		}
		iov[i].iov_base += used;
		iov[i].iov_len -= used;
		len -= used;
	}
	if (len != 0)
		errx(1, "iovec too short!");
}
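
/*
 * For example, if writev() only pushes out 5 bytes of a two-element iovec
 * (3 bytes then 4 bytes), iov_consume(iov, 2, NULL, 5) empties the first
 * element and leaves the second holding its last 2 bytes, ready to retry.
 */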

/*L:100
 * The Launcher code itself takes us out into userspace, that scary place where
 * pointers run wild and free!  Unfortunately, like most userspace programs,
 * it's quite boring (which is why everyone likes to hack on the kernel!).
 * Perhaps if you make up an Lguest Drinking Game at this point, it will get
 * you through this section.  Or, maybe not.
 *
 * The Launcher sets up a big chunk of memory to be the Guest's "physical"
 * memory and stores it in "guest_base".  In other words, Guest physical ==
 * Launcher virtual with an offset.
 *
 * This can be tough to get your head around, but usually it just means that we
 * use these trivial conversion functions when the Guest gives us its
 * "physical" addresses:
 */
static void *from_guest_phys(unsigned long addr)
{
	return guest_base + addr;
}

static unsigned long to_guest_phys(const void *addr)
{
	return (addr - guest_base);
}
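
/*
 * A made-up example: if guest_base happened to be mapped at 0x7f0000000000,
 * Guest "physical" address 0x100000 would live at Launcher address
 * 0x7f0000100000, and to_guest_phys() simply subtracts the offset again.
 */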

/*L:130
 * Loading the Kernel.
 *
 * We start with couple of simple helper routines.  open_or_die() avoids
 * error-checking code cluttering the callers:
 */
static int open_or_die(const char *name, int flags)
{
	int fd = open(name, flags);
	if (fd < 0)
		err(1, "Failed to open %s", name);
	return fd;
}

/* map_zeroed_pages() takes a number of pages. */
static void *map_zeroed_pages(unsigned int num)
{
	int fd = open_or_die("/dev/zero", O_RDONLY);
	void *addr;

	/*
	 * We use a private mapping (ie. if we write to the page, it will be
	 * copied). We allocate an extra two pages PROT_NONE to act as guard
	 * pages against read/write attempts that exceed allocated space.
	 */
	addr = mmap(NULL, getpagesize() * (num+2),
		    PROT_NONE, MAP_PRIVATE, fd, 0);

	if (addr == MAP_FAILED)
		err(1, "Mmapping %u pages of /dev/zero", num);

	if (mprotect(addr + getpagesize(), getpagesize() * num,
		     PROT_READ|PROT_WRITE) == -1)
		err(1, "mprotect rw %u pages failed", num);

	/*
	 * One neat mmap feature is that you can close the fd, and it
	 * stays mapped.
	 */
	close(fd);

	/* Return address after PROT_NONE page */
	return addr + getpagesize();
}

/* Get some bytes which won't be mapped into the guest. */
static unsigned long get_mmio_region(size_t size)
{
	unsigned long addr = guest_mmio;
	size_t i;

	if (!size)
		return addr;

	/* Size has to be a power of 2 (and multiple of 16) */
	for (i = 1; i < size; i <<= 1);

	guest_mmio += i;

	return addr;
}
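
/*
 * For example, asking for a 48-byte region rounds the reservation up to the
 * next power of 2: the caller gets the current guest_mmio and guest_mmio
 * advances by 64.
 */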

/*
 * This routine is used to load the kernel or initrd.  It tries mmap, but if
 * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries),
 * it falls back to reading the memory in.
 */
static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
{
	ssize_t r;

	/*
	 * We map writable even though some segments are marked read-only.
	 * The kernel really wants to be writable: it patches its own
	 * instructions.
	 *
	 * MAP_PRIVATE means that the page won't be copied until a write is
	 * done to it.  This allows us to share untouched memory between
	 * Guests.
	 */
	if (mmap(addr, len, PROT_READ|PROT_WRITE,
		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
		return;

	/* pread does a seek and a read in one shot: saves a few lines. */
	r = pread(fd, addr, len, offset);
	if (r != len)
		err(1, "Reading offset %lu len %lu gave %zi", offset, len, r);
}

/*
 * This routine takes an open vmlinux image, which is in ELF, and maps it into
 * the Guest memory.  ELF = Executable and Linkable Format, which is the format used
 * by all modern binaries on Linux including the kernel.
 *
 * The ELF headers give *two* addresses: a physical address, and a virtual
 * address.  We use the physical address; the Guest will map itself to the
 * virtual address.
 *
 * We return the starting address.
 */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
{
	Elf32_Phdr phdr[ehdr->e_phnum];
	unsigned int i;

	/*
	 * Sanity checks on the main ELF header: an x86 executable with a
	 * reasonable number of correctly-sized program headers.
	 */
	if (ehdr->e_type != ET_EXEC
	    || ehdr->e_machine != EM_386
	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
		errx(1, "Malformed elf header");

	/*
	 * An ELF executable contains an ELF header and a number of "program"
	 * headers which indicate which parts ("segments") of the program to
	 * load where.
	 */

	/* We read in all the program headers at once: */
	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
		err(1, "Seeking to program headers");
	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
		err(1, "Reading program headers");

	/*
	 * Try all the headers: there are usually only three.  A read-only one,
	 * a read-write one, and a "note" section which we don't load.
	 */
	for (i = 0; i < ehdr->e_phnum; i++) {
		/* If this isn't a loadable segment, we ignore it */
		if (phdr[i].p_type != PT_LOAD)
			continue;

		verbose("Section %i: size %i addr %p\n",
			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);

		/* We map this section of the file at its physical address. */
		map_at(elf_fd, from_guest_phys(phdr[i].p_paddr),
		       phdr[i].p_offset, phdr[i].p_filesz);
	}

	/* The entry point is given in the ELF header. */
	return ehdr->e_entry;
}

/*L:150
 * A bzImage, unlike an ELF file, is not meant to be loaded.  You're supposed
 * to jump into it and it will unpack itself.  We used to have to perform some
 * hairy magic because the unpacking code scared me.
 *
 * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote
 * a small patch to jump over the tricky bits in the Guest, so now we just read
 * the funky header so we know where in the file to load, and away we go!
 */
static unsigned long load_bzimage(int fd)
{
	struct boot_params boot;
	int r;
	/* Modern bzImages get loaded at 1M. */
	void *p = from_guest_phys(0x100000);

	/*
	 * Go back to the start of the file and read the header.  It should be
	 * a Linux boot header (see Documentation/x86/boot.txt)
	 */
	lseek(fd, 0, SEEK_SET);
	read(fd, &boot, sizeof(boot));

	/* Inside the setup_hdr, we expect the magic "HdrS" */
	if (memcmp(&boot.hdr.header, "HdrS", 4) != 0)
		errx(1, "This doesn't look like a bzImage to me");

	/* Skip over the extra sectors of the header. */
	lseek(fd, (boot.hdr.setup_sects+1) * 512, SEEK_SET);

	/* Now read everything into memory, in nice big chunks. */
	while ((r = read(fd, p, 65536)) > 0)
		p += r;

	/* Finally, code32_start tells us where to enter the kernel. */
	return boot.hdr.code32_start;
}

/*L:140
 * Loading the kernel is easy when it's a "vmlinux", but most kernels
 * come wrapped up in the self-decompressing "bzImage" format.  With a little
 * work, we can load those, too.
 */
static unsigned long load_kernel(int fd)
{
	Elf32_Ehdr hdr;

	/* Read in the first few bytes. */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		err(1, "Reading kernel");

	/* If it's an ELF file, it starts with "\177ELF" */
	if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0)
		return map_elf(fd, &hdr);

	/* Otherwise we assume it's a bzImage, and try to load it. */
	return load_bzimage(fd);
}

/*
 * This is a trivial little helper to align pages.  Andi Kleen hated it because
 * it calls getpagesize() twice: "it's dumb code."
 *
 * Kernel guys get really het up about optimization, even when it's not
 * necessary.  I leave this code as a reaction against that.
 */
static inline unsigned long page_align(unsigned long addr)
{
	/* Add upwards and truncate downwards. */
	return ((addr + getpagesize()-1) & ~(getpagesize()-1));
}
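
/*
 * For example, with 4096-byte pages page_align(4097) is 8192 and
 * page_align(4096) stays 4096: add upwards, then mask downwards.
 */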

/*L:180
 * An "initial ram disk" is a disk image loaded into memory along with the
 * kernel which the kernel can use to boot from without needing any drivers.
 * Most distributions now use this as standard: the initrd contains the code to
 * load the appropriate driver modules for the current machine.
 *
 * Importantly, James Morris works for RedHat, and Fedora uses initrds for its
 * kernels.  He sent me this (and tells me when I break it).
 */
static unsigned long load_initrd(const char *name, unsigned long mem)
{
	int ifd;
	struct stat st;
	unsigned long len;

	ifd = open_or_die(name, O_RDONLY);
	/* fstat() is needed to get the file size. */
	if (fstat(ifd, &st) < 0)
		err(1, "fstat() on initrd '%s'", name);

	/*
	 * We map the initrd at the top of memory, but mmap wants it to be
	 * page-aligned, so we round the size up for that.
	 */
	len = page_align(st.st_size);
	map_at(ifd, from_guest_phys(mem - len), 0, st.st_size);
	/*
	 * Once a file is mapped, you can close the file descriptor.  It's a
	 * little odd, but quite useful.
	 */
	close(ifd);
	verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len);

	/* We return the initrd size. */
	return len;
}
/*:*/

/*
 * Simple routine to roll all the commandline arguments together with spaces
 * between them.
 */
static void concat(char *dst, char *args[])
{
	unsigned int i, len = 0;

	for (i = 0; args[i]; i++) {
		if (i) {
			strcat(dst+len, " ");
			len++;
		}
		strcpy(dst+len, args[i]);
		len += strlen(args[i]);
	}
	/* In case it's empty. */
	dst[len] = '\0';
}

/*L:185
 * This is where we actually tell the kernel to initialize the Guest.  We
 * saw the arguments it expects when we looked at initialize() in lguest_user.c:
 * the base of Guest "physical" memory, the top physical page to allow and the
 * entry point for the Guest.
 */
static void tell_kernel(unsigned long start)
{
	unsigned long args[] = { LHREQ_INITIALIZE,
				 (unsigned long)guest_base,
				 guest_limit / getpagesize(), start,
				 (guest_mmio+getpagesize()-1) / getpagesize() };
	verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n",
		guest_base, guest_base + guest_limit,
		guest_limit, guest_mmio);
	lguest_fd = open_or_die("/dev/lguest", O_RDWR);
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Writing to /dev/lguest");
}
/*:*/

/*L:200
 * Device Handling.
 *
 * When the Guest gives us a buffer, it sends an array of addresses and sizes.
 * We need to make sure it's not trying to reach into the Launcher itself, so
 * we have a convenient routine which checks it and exits with an error message
 * if something funny is going on:
 */
static void *_check_pointer(unsigned long addr, unsigned int size,
			    unsigned int line)
{
	/*
	 * Check if the requested address and size exceed the allocated memory,
	 * or addr + size wraps around.
	 */
	if ((addr + size) > guest_limit || (addr + size) < addr)
		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
	/*
	 * We return a pointer for the caller's convenience, now we know it's
	 * safe to use.
	 */
	return from_guest_phys(addr);
}
/* A macro which transparently hands the line number to the real function. */
#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)

/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off end of descriptors. */
	next = desc[i].next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	wmb();

	if (next >= max)
		errx(1, "Desc next is %u", next);

	return next;
}
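
/*
 * An invented example: a three-buffer chain might run desc[7] -> desc[2] ->
 * desc[9], where desc[7] and desc[2] have VRING_DESC_F_NEXT set and desc[9]
 * doesn't, so next_desc() returns 2, then 9, then vq->vring.num.
 */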

/*
 * This actually sends the interrupt for this virtqueue, if we've used a
 * buffer.
 */
static void trigger_irq(struct virtqueue *vq)
{
	unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line };

	/* Don't inform them if nothing used. */
	if (!vq->pending_used)
		return;
	vq->pending_used = 0;

	/* If they don't want an interrupt, don't send one... */
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
		return;
	}

	/*
	 * 4.1.4.5.1:
	 *
	 *  If MSI-X capability is disabled, the device MUST set the Queue
	 *  Interrupt bit in ISR status before sending a virtqueue notification
	 *  to the driver.
	 */
	vq->dev->mmio->isr = 0x1;

	/* Send the Guest an interrupt tell them we used something up. */
	if (write(lguest_fd, buf, sizeof(buf)) != 0)
		err(1, "Triggering irq %i", vq->dev->config.irq_line);
}

/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/* 
	 * Make sure we read the descriptor number *after* we read the ring
	 * update; don't let the cpu or compiler change the order.
	 */
	rmb();

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * We have to read the descriptor after we read the descriptor number,
	 * but there's a data dependency there so the CPU shouldn't reorder
	 * that: no rmb() required.
	 */

	do {
		/*
		 * If this is an indirect entry, then this buffer contains a
		 * descriptor table which we handle as if it's any normal
		 * descriptor chain.
		 */
		if (desc[i].flags & VRING_DESC_F_INDIRECT) {
			if (desc[i].len % sizeof(struct vring_desc))
				errx(1, "Invalid size for indirect buffer table");

			max = desc[i].len / sizeof(struct vring_desc);
			desc = check_pointer(desc[i].addr, desc[i].len);
			i = 0;
		}

		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}

/*
 * After we've used one of their buffers, we tell the Guest about it.  Sometime
 * later we'll want to send them an interrupt using trigger_irq(); note that
 * wait_for_vq_desc() does that for us if it has to wait.
 */
static void add_used(struct virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/*
	 * The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring.
	 */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	used->id = head;
	used->len = len;
	/* Make sure buffer is written before we update index. */
	wmb();
	vq->vring.used->idx++;
	vq->pending_used++;
}

/* And here's the combo meal deal.  Supersize me! */
static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
{
	add_used(vq, head, len);
	trigger_irq(vq);
}

/*
 * The Console
 *
 * We associate some data with the console for our exit hack.
 */
struct console_abort {
	/* How many times have they hit ^C? */
	int count;
	/* When did they start? */
	struct timeval start;
};

/* This is the routine which handles console input (ie. stdin). */
static void console_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num;
	struct console_abort *abort = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* Make sure there's a descriptor available. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		errx(1, "Output buffers in console in queue?");

	/* Read into it.  This is where we usually wait. */
	len = readv(STDIN_FILENO, iov, in_num);
	if (len <= 0) {
		/* Ran out of input? */
		warnx("Failed to get console input, ignoring console.");
		/*
		 * For simplicity, dying threads kill the whole Launcher.  So
		 * just nap here.
		 */
		for (;;)
			pause();
	}

	/* Tell the Guest we used a buffer. */
	add_used_and_trigger(vq, head, len);

	/*
	 * Three ^C within one second?  Exit.
	 *
	 * This is such a hack, but works surprisingly well.  Each ^C has to
	 * be in a buffer by itself, so they can't be too fast.  But we check
	 * that we get three within about a second, so they can't be too
	 * slow.
	 */
	if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
		abort->count = 0;
		return;
	}

	abort->count++;
	if (abort->count == 1)
		gettimeofday(&abort->start, NULL);
	else if (abort->count == 3) {
		struct timeval now;
		gettimeofday(&now, NULL);
		/* Kill all Launcher processes with SIGINT, like normal ^C */
		if (now.tv_sec <= abort->start.tv_sec+1)
			kill(0, SIGINT);
		abort->count = 0;
	}
}

/* This is the routine which handles console output (ie. stdout). */
static void console_output(struct virtqueue *vq)
{
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here, for the Guest to give us something. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in console output queue?");

	/* writev can return a partial write, so we loop here. */
	while (!iov_empty(iov, out)) {
		int len = writev(STDOUT_FILENO, iov, out);
		if (len <= 0) {
			warn("Write to stdout gave %i (%d)", len, errno);
			break;
		}
		iov_consume(iov, out, NULL, len);
	}

	/*
	 * We're finished with that buffer: if we're going to sleep,
	 * wait_for_vq_desc() will prod the Guest with an interrupt.
	 */
	add_used(vq, head, 0);
}

/*
 * The Network
 *
 * Handling output for network is also simple: we get all the output buffers
 * and write them to /dev/net/tun.
 */
struct net_info {
	int tunfd;
};

static void net_output(struct virtqueue *vq)
{
	struct net_info *net_info = vq->dev->priv;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here for the Guest to give us a packet. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in net output queue?");
	/*
	 * Send the whole thing through to /dev/net/tun.  It expects the exact
	 * same format: what a coincidence!
	 */
	if (writev(net_info->tunfd, iov, out) < 0)
		warnx("Write to tun failed (%d)?", errno);

	/*
	 * Done with that one; wait_for_vq_desc() will send the interrupt if
	 * all packets are processed.
	 */
	add_used(vq, head, 0);
}

/*
 * Handling network input is a bit trickier, because I've tried to optimize it.
 *
 * First we have a helper routine which tells us if reading from this file descriptor
 * (ie. the /dev/net/tun device) will block:
 */
static bool will_block(int fd)
{
	fd_set fdset;
	struct timeval zero = { 0, 0 };
	FD_ZERO(&fdset);
	FD_SET(fd, &fdset);
	return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
}

/*
 * This handles packets coming in from the tun device to our Guest.  Like all
 * service routines, it gets called again as soon as it returns, so you don't
 * see a while(1) loop here.
 */
static void net_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];
	struct net_info *net_info = vq->dev->priv;

	/*
	 * Get a descriptor to write an incoming packet into.  This will also
	 * send an interrupt if they're out of descriptors.
	 */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (out)
		errx(1, "Output buffers in net input queue?");

	/*
	 * If it looks like we'll block reading from the tun device, send them
	 * an interrupt.
	 */
	if (vq->pending_used && will_block(net_info->tunfd))
		trigger_irq(vq);

	/*
	 * Read in the packet.  This is where we normally wait (when there's no
	 * incoming network traffic).
	 */
	len = readv(net_info->tunfd, iov, in);
	if (len <= 0)
		warn("Failed to read from tun (%d).", errno);

	/*
	 * Mark that packet buffer as used, but don't interrupt here.  We want
	 * to wait until we've done as much work as we can.
	 */
	add_used(vq, head, len);
}
/*:*/

/* This is the helper to create threads: run the service routine in a loop. */
static int do_thread(void *_vq)
{
	struct virtqueue *vq = _vq;

	for (;;)
		vq->service(vq);
	return 0;
}

/*
 * When a child dies, we kill our entire process group with SIGTERM.  This
 * also has the side effect that the shell restores the console for us!
 */
static void kill_launcher(int signal)
{
	kill(0, SIGTERM);
}

static void reset_vq_pci_config(struct virtqueue *vq)
{
	vq->pci_config.queue_size = VIRTQUEUE_NUM;
	vq->pci_config.queue_enable = 0;
}

static void reset_device(struct device *dev)
{
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/* Clear any features they've acked. */
	dev->features_accepted = 0;

	/* We're going to be explicitly killing threads, so ignore them. */
	signal(SIGCHLD, SIG_IGN);

	/*
	 * 4.1.4.3.1:
	 *
	 *   The device MUST present a 0 in queue_enable on reset. 
	 *
	 * This means we set it here, and reset the saved ones in every vq.
	 */
	dev->mmio->cfg.queue_enable = 0;

	/* Get rid of the virtqueue threads */
	for (vq = dev->vq; vq; vq = vq->next) {
		vq->last_avail_idx = 0;
		reset_vq_pci_config(vq);
		if (vq->thread != (pid_t)-1) {
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			vq->thread = (pid_t)-1;
		}
	}
	dev->running = false;

	/* Now we care if threads die. */
	signal(SIGCHLD, (void *)kill_launcher);
}

static void cleanup_devices(void)
{
	unsigned int i;

	for (i = 1; i < MAX_PCI_DEVICES; i++) {
		struct device *d = devices.pci[i];
		if (!d)
			continue;
		reset_device(d);
	}

	/* If we saved off the original terminal settings, restore them now. */
	if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
		tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
}

/*L:217
 * We do PCI.  This is mainly done to let us test the kernel virtio PCI
 * code.
 */

/* Linux expects a PCI host bridge: ours is a dummy, and first on the bus. */
static struct device pci_host_bridge;

static void init_pci_host_bridge(void)
{
	pci_host_bridge.name = "PCI Host Bridge";
	pci_host_bridge.config.class = 0x06; /* bridge */
	pci_host_bridge.config.subclass = 0; /* host bridge */
	devices.pci[0] = &pci_host_bridge;
}

/* The IO ports used to read the PCI config space. */
#define PCI_CONFIG_ADDR 0xCF8
#define PCI_CONFIG_DATA 0xCFC

/*
 * Not really portable, but does help readability: this is what the Guest
 * writes to the PCI_CONFIG_ADDR IO port.
 */
union pci_config_addr {
	struct {
		unsigned mbz: 2;
		unsigned offset: 6;
		unsigned funcnum: 3;
		unsigned devnum: 5;
		unsigned busnum: 8;
		unsigned reserved: 7;
		unsigned enabled : 1;
	} bits;
	u32 val;
};
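
/*
 * A worked example (with made-up numbers): writing 0x80001010 to
 * PCI_CONFIG_ADDR decodes as enabled=1, bus 0, device 2, function 0,
 * offset 4, ie. config space bytes 16-19 of device 2, which is its BAR0.
 */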

/*
 * We cache what they wrote to the address port, so we know what they're
 * talking about when they access the data port.
 */
static union pci_config_addr pci_config_addr;

static struct device *find_pci_device(unsigned int index)
{
	return devices.pci[index];
}

/* PCI can do 1, 2 and 4 byte reads; we handle that here. */
static void ioread(u16 off, u32 v, u32 mask, u32 *val)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	*val = (v >> (off * 8)) & mask;
}

/* PCI can do 1, 2 and 4 byte writes; we handle that here. */
static void iowrite(u16 off, u32 v, u32 mask, u32 *dst)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	*dst &= ~(mask << (off * 8));
	*dst |= (v & mask) << (off * 8);
}

/*
 * Where PCI_CONFIG_DATA accesses depends on the previous write to
 * PCI_CONFIG_ADDR.
 */
static struct device *dev_and_reg(u32 *reg)
{
	if (!pci_config_addr.bits.enabled)
		return NULL;

	if (pci_config_addr.bits.funcnum != 0)
		return NULL;

	if (pci_config_addr.bits.busnum != 0)
		return NULL;

	if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config))
		return NULL;

	*reg = pci_config_addr.bits.offset;
	return find_pci_device(pci_config_addr.bits.devnum);
}

/*
 * We can get invalid combinations of values while they're writing, so we
 * only fault if they try to write with some invalid bar/offset/length.
 */
static bool valid_bar_access(struct device *d,
			     struct virtio_pci_cfg_cap *cfg_access)
{
	/* We only have 1 bar (BAR0) */
	if (cfg_access->cap.bar != 0)
		return false;

	/* Check it's within BAR0. */
	if (cfg_access->cap.offset >= d->mmio_size
	    || cfg_access->cap.offset + cfg_access->cap.length > d->mmio_size)
		return false;

	/* Check length is 1, 2 or 4. */
	if (cfg_access->cap.length != 1
	    && cfg_access->cap.length != 2
	    && cfg_access->cap.length != 4)
		return false;

	/*
	 * 4.1.4.7.2:
	 *
	 *  The driver MUST NOT write a cap.offset which is not a multiple of
	 *  cap.length (ie. all accesses MUST be aligned).
	 */
	if (cfg_access->cap.offset % cfg_access->cap.length != 0)
		return false;

	/* Everything checks out: this is a valid access into BAR0. */
	return true;
}

/* Is this accessing the PCI config address port? */
static bool is_pci_addr_port(u16 port)
{
	return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4;
}

static bool pci_addr_iowrite(u16 port, u32 mask, u32 val)
{
	iowrite(port - PCI_CONFIG_ADDR, val, mask,
		&pci_config_addr.val);
	verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n",
		pci_config_addr.bits.enabled ? "" : " DISABLED",
		val, mask,
		pci_config_addr.bits.busnum,
		pci_config_addr.bits.devnum,
		pci_config_addr.bits.funcnum,
		pci_config_addr.bits.offset);
	return true;
}

static void pci_addr_ioread(u16 port, u32 mask, u32 *val)
{
	ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val);
}

/* Is this accessing the PCI config data port? */
static bool is_pci_data_port(u16 port)
{
	return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4;
}

static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask);

static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
{
	u32 reg, portoff;
	struct device *d = dev_and_reg(&reg);

	/* Complain if they don't belong to a device. */
	if (!d)
		return false;

	/* They can do 1 byte writes, etc. */
	portoff = port - PCI_CONFIG_DATA;

	/*
	 * PCI uses a weird way to determine the BAR size: the OS
	 * writes all 1's, and sees which ones stick.
	 */
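	/*
	 * For instance (a sketch, assuming a 256-byte BAR0): the Guest writes
	 * 0xFFFFFFFF, the loop below clears bits 0-7, and reading BAR0 back
	 * gives 0xFFFFFF00, from which the Guest infers a 256-byte region.
	 */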
	if (&d->config_words[reg] == &d->config.bar[0]) {
		int i;

		iowrite(portoff, val, mask, &d->config.bar[0]);
		for (i = 0; (1 << i) < d->mmio_size; i++)
			d->config.bar[0] &= ~(1 << i);
		return true;
	} else if ((&d->config_words[reg] > &d->config.bar[0]
		    && &d->config_words[reg] <= &d->config.bar[6])
		   || &d->config_words[reg] == &d->config.expansion_rom_addr) {
		/* Allow writing to any other BAR, or expansion ROM */
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
		/* We let them override latency timer and cacheline size */
	} else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
		/* Only let them change the first two fields. */
		if (mask == 0xFFFFFFFF)
			mask = 0xFFFF;
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	} else if (&d->config_words[reg] == (void *)&d->config.command
		   && mask == 0xFFFF) {
		/* Ignore command writes. */
		return true;
R
		   == (void *)&d->config.cfg_access.cap.bar
		   || &d->config_words[reg]
		   == &d->config.cfg_access.cap.length
		   || &d->config_words[reg]
		   == &d->config.cfg_access.cap.offset) {

		/*
		 * The VIRTIO_PCI_CAP_PCI_CFG capability
		 * provides a backdoor to access the MMIO
		 * regions without mapping them.  Weird, but
		 * useful.
		 */
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	} else if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
		u32 write_mask;

		/*
		 * 4.1.4.7.1:
		 *
		 *  Upon detecting driver write access to pci_cfg_data, the
		 *  device MUST execute a write access at offset cap.offset at
		 *  BAR selected by cap.bar using the first cap.length bytes
		 *  from pci_cfg_data.
		 */

R
		if (!valid_bar_access(d, &d->config.cfg_access))
			return false;

		iowrite(portoff, val, mask, &d->config.cfg_access.pci_cfg_data);

		/*
		 * Now emulate a write.  The mask we use is set by
		 * len, *not* this write!
		 */
		write_mask = (1ULL<<(8*d->config.cfg_access.cap.length)) - 1;
		verbose("Window writing %#x/%#x to bar %u, offset %u len %u\n",
			d->config.cfg_access.pci_cfg_data, write_mask,
			d->config.cfg_access.cap.bar,
			d->config.cfg_access.cap.offset,
			d->config.cfg_access.cap.length);

		emulate_mmio_write(d, d->config.cfg_access.cap.offset,
				   d->config.cfg_access.pci_cfg_data,
				   write_mask);
		return true;
	}

	/*
	 * 4.1.4.1:
	 *
	 *  The driver MUST NOT write into any field of the capability
	 *  structure, with the exception of those with cap_type
	 *  VIRTIO_PCI_CAP_PCI_CFG...
	 */
	return false;
}

static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask);

static void pci_data_ioread(u16 port, u32 mask, u32 *val)
{
	u32 reg;
	struct device *d = dev_and_reg(&reg);

	if (!d)
		return;

	/* Read through the PCI MMIO access window is special */
	if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
		u32 read_mask;

		/*
		 * 4.1.4.7.1:
		 *
		 *  Upon detecting driver read access to pci_cfg_data, the
		 *  device MUST execute a read access of length cap.length at
		 *  offset cap.offset at BAR selected by cap.bar and store the
		 *  first cap.length bytes in pci_cfg_data.
		 */
		/* Must be bar 0 */
		if (!valid_bar_access(d, &d->config.cfg_access))
			errx(1, "Invalid cfg_access to bar%u, offset %u len %u",
			     d->config.cfg_access.cap.bar,
			     d->config.cfg_access.cap.offset,
			     d->config.cfg_access.cap.length);

		/*
		 * Read into the window.  The mask we use is set by
		 * len, *not* this read!
		 */
		read_mask = (1ULL<<(8*d->config.cfg_access.cap.length))-1;
		d->config.cfg_access.pci_cfg_data
			= emulate_mmio_read(d,
					    d->config.cfg_access.cap.offset,
					    read_mask);
		verbose("Window read %#x/%#x from bar %u, offset %u len %u\n",
			d->config.cfg_access.pci_cfg_data, read_mask,
			d->config.cfg_access.cap.bar,
			d->config.cfg_access.cap.offset,
			d->config.cfg_access.cap.length);
	}
	ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val);
}

/*L:216
 * This is where we emulate a handful of Guest instructions.  It's ugly
 * and we used to do it in the kernel but it grew over time.
 */

/*
 * We use the ptrace syscall's pt_regs struct to talk about registers
 * to lguest: these macros convert the names to the offsets.
 */
#define getreg(name) getreg_off(offsetof(struct user_regs_struct, name))
#define setreg(name, val) \
	setreg_off(offsetof(struct user_regs_struct, name), (val))
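
/*
 * So, for example, getreg(eip) expands to
 * getreg_off(offsetof(struct user_regs_struct, eip)); the pwrite/pread pair
 * below turns that offset into an LHREQ_GETREG request to /dev/lguest.
 */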

static u32 getreg_off(size_t offset)
{
	u32 r;
	unsigned long args[] = { LHREQ_GETREG, offset };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Getting register %u", offset);
	if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r))
		err(1, "Reading register %u", offset);

	return r;
}

static void setreg_off(size_t offset, u32 val)
{
	unsigned long args[] = { LHREQ_SETREG, offset, val };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Setting register %u", offset);
}

/* Get register by instruction encoding */
static u32 getreg_num(unsigned regnum, u32 mask)
{
	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4))
		return getreg_num(regnum & 0x3, 0xFFFF) >> 8;

	switch (regnum) {
	case 0: return getreg(eax) & mask;
	case 1: return getreg(ecx) & mask;
	case 2: return getreg(edx) & mask;
	case 3: return getreg(ebx) & mask;
	case 4: return getreg(esp) & mask;
	case 5: return getreg(ebp) & mask;
	case 6: return getreg(esi) & mask;
	case 7: return getreg(edi) & mask;
	}
	abort();
}

/* Set register by instruction encoding */
static void setreg_num(unsigned regnum, u32 val, u32 mask)
{
	/* Don't try to set bits out of range */
	assert(~(val & ~mask));

	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4)) {
		/* Construct the 16 bits we want. */
		val = (val << 8) | getreg_num(regnum & 0x3, 0xFF);
		setreg_num(regnum & 0x3, val, 0xFFFF);
		return;
	}

	switch (regnum) {
	case 0: setreg(eax, val | (getreg(eax) & ~mask)); return;
	case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return;
	case 2: setreg(edx, val | (getreg(edx) & ~mask)); return;
	case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return;
	case 4: setreg(esp, val | (getreg(esp) & ~mask)); return;
	case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return;
	case 6: setreg(esi, val | (getreg(esi) & ~mask)); return;
	case 7: setreg(edi, val | (getreg(edi) & ~mask)); return;
	}
	abort();
}

/* Get bytes of displacement appended to instruction, from r/m encoding */
static u32 insn_displacement_len(u8 mod_reg_rm)
{
	/* Switch on the mod bits */
	switch (mod_reg_rm >> 6) {
	case 0:
		/* If mod == 0, and r/m == 101, 16-bit displacement follows */
		if ((mod_reg_rm & 0x7) == 0x5)
			return 2;
		/* Normally, mod == 0 means no literal displacement */
		return 0;
	case 1:
		/* One byte displacement */
		return 1;
	case 2:
		/* Four byte displacement */
		return 4;
	case 3:
		/* Register mode */
		return 0;
	}
	abort();
}

static void emulate_insn(const u8 insn[])
{
	unsigned long args[] = { LHREQ_TRAP, 13 };
	unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access;
	unsigned int eax, port, mask;
	/*
	 * Default is to return all-ones on IO port reads, which traditionally
	 * means "there's nothing there".
	 */
	u32 val = 0xFFFFFFFF;

	/*
	 * This must be the Guest kernel trying to do something, not userspace!
	 * The bottom two bits of the CS segment register are the privilege
	 * level.
	 */
	if ((getreg(xcs) & 3) != 0x1)
		goto no_emulate;

	/* Decoding x86 instructions is icky. */

	/*
	 * Around 2.6.33, the kernel started using an emulation for the
	 * cmpxchg8b instruction in early boot on many configurations.  This
	 * code isn't paravirtualized, and it tries to disable interrupts.
	 * Ignore it, which will Mostly Work.
	 */
	if (insn[insnlen] == 0xfa) {
		/* "cli", or Clear Interrupt Enable instruction.  Skip it. */
		insnlen = 1;
		goto skip_insn;
	}

	/*
	 * 0x66 is an "operand prefix".  It means a 16, not 32 bit in/out.
	 */
	if (insn[insnlen] == 0x66) {
		small_operand = 1;
		/* The instruction is 1 byte so far, read the next byte. */
		insnlen = 1;
	}

	/* If the lower bit isn't set, it's a single byte access */
	byte_access = !(insn[insnlen] & 1);

	/*
	 * Now we can ignore the lower bit and decode the 4 opcodes
	 * we need to emulate.
	 */
	switch (insn[insnlen] & 0xFE) {
	case 0xE4: /* in     <next byte>,%al */
		port = insn[insnlen+1];
		insnlen += 2;
		in = 1;
		break;
	case 0xEC: /* in     (%dx),%al */
		port = getreg(edx) & 0xFFFF;
		insnlen += 1;
		in = 1;
		break;
	case 0xE6: /* out    %al,<next byte> */
		port = insn[insnlen+1];
		insnlen += 2;
		break;
	case 0xEE: /* out    %al,(%dx) */
		port = getreg(edx) & 0xFFFF;
		insnlen += 1;
		break;
	default:
		/* OK, we don't know what this is, can't emulate. */
		goto no_emulate;
	}

	/* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */
	if (byte_access)
		mask = 0xFF;
	else if (small_operand)
		mask = 0xFFFF;
	else
		mask = 0xFFFFFFFF;

	/*
	 * If it was an "IN" instruction, they expect the result to be read
	 * into %eax, so we change %eax.
	 */
	eax = getreg(eax);

	if (in) {
		/* This is the PS/2 keyboard status; 1 means ready for output */
		if (port == 0x64)
			val = 1;
		else if (is_pci_addr_port(port))
			pci_addr_ioread(port, mask, &val);
		else if (is_pci_data_port(port))
			pci_data_ioread(port, mask, &val);

		/* Clear the bits we're about to read */
		eax &= ~mask;
		/* Copy bits in from val. */
		eax |= val & mask;
		/* Now update the register. */
		setreg(eax, eax);
	} else {
		if (is_pci_addr_port(port)) {
			if (!pci_addr_iowrite(port, mask, eax))
				goto bad_io;
		} else if (is_pci_data_port(port)) {
			if (!pci_data_iowrite(port, mask, eax))
				goto bad_io;
		}
		/* There are many other ports, eg. CMOS clock, serial
		 * and parallel ports, so we ignore them all. */
	}

	verbose("IO %s of %x to %u: %#08x\n",
		in ? "IN" : "OUT", mask, port, eax);
skip_insn:
	/* Finally, we've "done" the instruction, so move past it. */
	setreg(eip, getreg(eip) + insnlen);
	return;

bad_io:
	warnx("Attempt to %s port %u (%#x mask)",
	      in ? "read from" : "write to", port, mask);

no_emulate:
	/* Inject trap into Guest. */
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip));
}

static struct device *find_mmio_region(unsigned long paddr, u32 *off)
{
	unsigned int i;

	for (i = 1; i < MAX_PCI_DEVICES; i++) {
		struct device *d = devices.pci[i];

		if (!d)
			continue;
		if (paddr < d->mmio_addr)
			continue;
		if (paddr >= d->mmio_addr + d->mmio_size)
			continue;
		*off = paddr - d->mmio_addr;
		return d;
	}
	return NULL;
}

/* FIXME: Use vq array. */
static struct virtqueue *vq_by_num(struct device *d, u32 num)
{
	struct virtqueue *vq = d->vq;

	while (num-- && vq)
		vq = vq->next;

	return vq;
}

static void save_vq_config(const struct virtio_pci_common_cfg *cfg,
			   struct virtqueue *vq)
{
	vq->pci_config = *cfg;
}

static void restore_vq_config(struct virtio_pci_common_cfg *cfg,
			      struct virtqueue *vq)
{
	/* Only restore the per-vq part */
	size_t off = offsetof(struct virtio_pci_common_cfg, queue_size);

	memcpy((void *)cfg + off, (void *)&vq->pci_config + off,
	       sizeof(*cfg) - off);
}

/*
 * When they enable the virtqueue, we check that their setup is valid.
 */
static void enable_virtqueue(struct device *d, struct virtqueue *vq)
{
	/*
	 * Create a stack for the thread.  Since the stack grows downwards, we
	 * point the stack pointer to the end of this region.
	 */
	char *stack = malloc(32768);

	/* Because lguest is 32 bit, all the descriptor high bits must be 0 */
	if (vq->pci_config.queue_desc_hi
	    || vq->pci_config.queue_avail_hi
	    || vq->pci_config.queue_used_hi)
		errx(1, "%s: invalid 64-bit queue address", d->name);

	/* Initialize the virtqueue and check they're all in range. */
	vq->vring.num = vq->pci_config.queue_size;
	vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo,
				       sizeof(*vq->vring.desc) * vq->vring.num);
	vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo,
					sizeof(*vq->vring.avail)
					+ (sizeof(vq->vring.avail->ring[0])
					   * vq->vring.num));
	vq->vring.used = check_pointer(vq->pci_config.queue_used_lo,
				       sizeof(*vq->vring.used)
				       + (sizeof(vq->vring.used->ring[0])
					  * vq->vring.num));


	/* Create a zero-initialized eventfd. */
	vq->eventfd = eventfd(0, 0);
	if (vq->eventfd < 0)
		err(1, "Creating eventfd");

	/*
	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
	 * we get a signal if it dies.
	 */
	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
	if (vq->thread == (pid_t)-1)
		err(1, "Creating clone");
}

static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
{
	struct virtqueue *vq;

	switch (off) {
	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
		/*
		 * 4.1.4.3.1:
		 *
		 * The device MUST present the feature bits it is offering in
		 * device_feature, starting at bit device_feature_select ∗ 32
		 * for any device_feature_select written by the driver
		 */
		if (val == 0)
			d->mmio->cfg.device_feature = d->features;
		else if (val == 1)
			d->mmio->cfg.device_feature = (d->features >> 32);
		else
			d->mmio->cfg.device_feature = 0;
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
		if (val > 1)
			errx(1, "%s: Unexpected driver select %u",
			     d->name, val);
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
		if (d->mmio->cfg.guest_feature_select == 0) {
			d->features_accepted &= ~((u64)0xFFFFFFFF);
			d->features_accepted |= val;
		} else {
			assert(d->mmio->cfg.guest_feature_select == 1);
			d->features_accepted &= 0xFFFFFFFF;
			d->features_accepted |= ((u64)val) << 32;
		}
		if (d->features_accepted & ~d->features)
			errx(1, "%s: over-accepted features %#llx of %#llx",
			     d->name, d->features_accepted, d->features);
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.device_status):
		verbose("%s: device status -> %#x\n", d->name, val);
		/*
		 * 4.1.4.3.1:
		 * 
		 *  The device MUST reset when 0 is written to device_status,
		 *  and present a 0 in device_status once that is done.
		 */
		if (val == 0)
			reset_device(d);
		goto write_through8;
	case offsetof(struct virtio_pci_mmio, cfg.queue_select):
		vq = vq_by_num(d, val);
		/*
		 * 4.1.4.3.1:
		 *
		 *  The device MUST present a 0 in queue_size if the virtqueue
		 *  corresponding to the current queue_select is unavailable.
		 */
		if (!vq) {
			d->mmio->cfg.queue_size = 0;
			goto write_through16;
		}
		/* Save registers for old vq, if it was a valid vq */
		if (d->mmio->cfg.queue_size)
			save_vq_config(&d->mmio->cfg,
				       vq_by_num(d, d->mmio->cfg.queue_select));
		/* Restore the registers for the queue they asked for */
		restore_vq_config(&d->mmio->cfg, vq);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_size):
		/*
		 * 4.1.4.3.2:
		 *
		 *  The driver MUST NOT write a value which is not a power of 2
		 *  to queue_size.
		 */
		if (val & (val-1))
			errx(1, "%s: invalid queue size %u\n", d->name, val);
		if (d->mmio->cfg.queue_enable)
			errx(1, "%s: changing queue size on live device",
			     d->name);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
		errx(1, "%s: attempt to set MSIX vector to %u",
		     d->name, val);
	case offsetof(struct virtio_pci_mmio, cfg.queue_enable):
		/*
		 * 4.1.4.3.2:
		 *
		 *  The driver MUST NOT write a 0 to queue_enable.
		 */
		if (val != 1)
			errx(1, "%s: setting queue_enable to %u", d->name, val);
		d->mmio->cfg.queue_enable = val;
		save_vq_config(&d->mmio->cfg,
			       vq_by_num(d, d->mmio->cfg.queue_select));
		/*
		 * 4.1.4.3.2:
		 *
		 *  The driver MUST configure the other virtqueue fields before
		 *  enabling the virtqueue with queue_enable.
		 */
		enable_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select));
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
		errx(1, "%s: attempt to write to queue_notify_off", d->name);
	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi):
	case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi):
		/*
		 * 4.1.4.3.2:
		 *
		 *  The driver MUST configure the other virtqueue fields before
		 *  enabling the virtqueue with queue_enable.
		 */
		if (d->mmio->cfg.queue_enable)
			errx(1, "%s: changing queue on live device",
			     d->name);
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, notify):
		vq = vq_by_num(d, val);
		if (!vq)
			errx(1, "Invalid vq notification on %u", val);
		/* Notify the process handling this vq by adding 1 to eventfd */
		write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, isr):
		errx(1, "%s: Unexpected write to isr", d->name);
	/* Weird corner case: write to emerg_wr of console */
	case sizeof(struct virtio_pci_mmio)
		+ offsetof(struct virtio_console_config, emerg_wr):
		if (strcmp(d->name, "console") == 0) {
			char c = val;
			write(STDOUT_FILENO, &c, 1);
			goto write_through32;
		}
		/* Fall through... */
	default:
		/*
		 * 4.1.4.3.2:
		 *
		 *   The driver MUST NOT write to device_feature, num_queues,
		 *   config_generation or queue_notify_off.
		 */
		errx(1, "%s: Unexpected write to offset %u", d->name, off);
	}


	/*
	 * 4.1.3.1:
	 *
	 *  The driver MUST access each field using the “natural” access
	 *  method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for
	 *  16-bit fields and 8-bit accesses for 8-bit fields.
	 */
write_through32:
	if (mask != 0xFFFFFFFF) {
		errx(1, "%s: non-32-bit write to offset %u (%#x)",
		     d->name, off, getreg(eip));
		return;
	}
	memcpy((char *)d->mmio + off, &val, 4);
	return;

write_through16:
	if (mask != 0xFFFF)
		errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)",
		     d->name, mask, off, getreg(eip));
	memcpy((char *)d->mmio + off, &val, 2);
	return;

write_through8:
	if (mask != 0xFF)
		errx(1, "%s: non-8-bit write to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy((char *)d->mmio + off, &val, 1);
	return;
}
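/*
 * For illustration only (this is Guest-side logic, not something the
 * Launcher does): a driver negotiating the 64-bit feature set through the
 * two 32-bit windows handled above would do roughly this, where "cfg"
 * points at the common config region:
 *
 *	cfg->device_feature_select = 0;
 *	features = cfg->device_feature;
 *	cfg->device_feature_select = 1;
 *	features |= (u64)cfg->device_feature << 32;
 *	cfg->guest_feature_select = 0;
 *	cfg->guest_feature = accepted & 0xFFFFFFFF;
 *	cfg->guest_feature_select = 1;
 *	cfg->guest_feature = accepted >> 32;
 */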

static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
{
	u8 isr;
	u32 val = 0;

	switch (off) {
	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
	case offsetof(struct virtio_pci_mmio, cfg.device_feature):
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
		goto read_through32;
	case offsetof(struct virtio_pci_mmio, cfg.msix_config):
		errx(1, "%s: read of msix_config", d->name);
	case offsetof(struct virtio_pci_mmio, cfg.num_queues):
		goto read_through16;
	case offsetof(struct virtio_pci_mmio, cfg.device_status):
	case offsetof(struct virtio_pci_mmio, cfg.config_generation):
		/*
		 * 4.1.4.3.1:
		 *
		 *  The device MUST present a changed config_generation after
		 *  the driver has read a device-specific configuration value
		 *  which has changed since any part of the device-specific
		 *  configuration was last read.
		 *
		 * This is simple: none of our devices change config, so this
		 * is always 0.
		 */
		goto read_through8;
	case offsetof(struct virtio_pci_mmio, notify):
		goto read_through16;
	case offsetof(struct virtio_pci_mmio, isr):
		if (mask != 0xFF)
			errx(1, "%s: non-8-bit read from offset %u (%#x)",
			     d->name, off, getreg(eip));
		isr = d->mmio->isr;
		/*
		 * 4.1.4.5.1:
		 *
		 *  The device MUST reset ISR status to 0 on driver read. 
		 */
		d->mmio->isr = 0;
		return isr;
	case offsetof(struct virtio_pci_mmio, padding):
		errx(1, "%s: read from padding (%#x)",
		     d->name, getreg(eip));
	default:
		/* Read from device config space, beware unaligned overflow */
		if (off > d->mmio_size - 4)
			errx(1, "%s: read past end (%#x)",
			     d->name, getreg(eip));
		if (mask == 0xFFFFFFFF)
			goto read_through32;
		else if (mask == 0xFFFF)
			goto read_through16;
		else
			goto read_through8;
	}

	/*
	 * 4.1.3.1:
	 *
	 *  The driver MUST access each field using the “natural” access
	 *  method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for
	 *  16-bit fields and 8-bit accesses for 8-bit fields.
	 */
read_through32:
	if (mask != 0xFFFFFFFF)
		errx(1, "%s: non-32-bit read from offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&val, (char *)d->mmio + off, 4);
	return val;

read_through16:
	if (mask != 0xFFFF)
		errx(1, "%s: non-16-bit read from offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&val, (char *)d->mmio + off, 2);
	return val;

read_through8:
	if (mask != 0xFF)
		errx(1, "%s: non-8-bit read from offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&val, (char *)d->mmio + off, 1);
	return val;
}

static void emulate_mmio(unsigned long paddr, const u8 *insn)
{
	u32 val, off, mask = 0xFFFFFFFF, insnlen = 0;
	struct device *d = find_mmio_region(paddr, &off);
	unsigned long args[] = { LHREQ_TRAP, 14 };

	if (!d) {
		warnx("MMIO touching %#08lx (not a device)", paddr);
		goto reinject;
	}

	/* Prefix makes it a 16 bit op */
	if (insn[0] == 0x66) {
		mask = 0xFFFF;
		insnlen++;
	}

	/* iowrite */
	if (insn[insnlen] == 0x89) {
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[insnlen] == 0x8b) { /* ioread */
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[0] == 0x88) { /* 8-bit iowrite */
		mask = 0xff;
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = getreg_num((insn[1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else if (insn[0] == 0x8a) { /* 8-bit ioread */
		mask = 0xff;
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[1] >> 3) & 0x7, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else {
		warnx("Unknown MMIO instruction touching %#08lx:"
		     " %02x %02x %02x %02x at %u",
		     paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip));
	reinject:
		/* Inject trap into Guest. */
		if (write(lguest_fd, args, sizeof(args)) < 0)
			err(1, "Reinjecting trap 14 for fault at %#x",
			    getreg(eip));
		return;
	}

	/* Finally, we've "done" the instruction, so move past it. */
	setreg(eip, getreg(eip) + insnlen);
}
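/*
 * A worked decode, for illustration: the bytes 0x66 0x89 0x08 are
 * "mov %cx,(%eax)".  The 0x66 prefix narrows the mask to 0xFFFF, 0x89
 * selects the write path, and bits 3-5 of the mod r/m byte (0x08) name
 * register 1 (%ecx), so the low 16 bits of %ecx are written to the MMIO
 * offset.  The addressing mode itself doesn't matter: the Host already
 * told us which physical address faulted.
 */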

/*L:190
 * Device Setup
 *
 * All devices need a descriptor so the Guest knows it exists, and a "struct
 * device" so the Launcher can keep track of it.  We have common helper
 * routines to allocate and manage them.
 */
static void add_pci_virtqueue(struct device *dev,
			      void (*service)(struct virtqueue *))
{
	struct virtqueue **i, *vq = malloc(sizeof(*vq));

	/* Initialize the virtqueue */
	vq->next = NULL;
	vq->last_avail_idx = 0;
	vq->dev = dev;

	/*
	 * This is the routine the service thread will run, and its Process ID
	 * once it's running.
	 */
	vq->service = service;
	vq->thread = (pid_t)-1;

	/* Initialize the configuration. */
	reset_vq_pci_config(vq);
	vq->pci_config.queue_notify_off = 0;

	/* Add one to the number of queues */
	vq->dev->mmio->cfg.num_queues++;

	/*
	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
	 * second.
	 */
	for (i = &dev->vq; *i; i = &(*i)->next);
	*i = vq;
}

/* The Guest accesses the feature bits via the PCI common config MMIO region */
static void add_pci_feature(struct device *dev, unsigned bit)
{
	dev->features |= (1ULL << bit);
}

/* For devices with no config. */
static void no_device_config(struct device *dev)
{
	dev->mmio_addr = get_mmio_region(dev->mmio_size);

	dev->config.bar[0] = dev->mmio_addr;
	/* Bottom 4 bits must be zero */
	assert(!(dev->config.bar[0] & 0xF));
}

/* This puts the device config into BAR0 */
static void set_device_config(struct device *dev, const void *conf, size_t len)
{
	/* Set up BAR 0 */
	dev->mmio_size += len;
	dev->mmio = realloc(dev->mmio, dev->mmio_size);
	memcpy(dev->mmio + 1, conf, len);

	/*
	 * 4.1.4.6:
	 *
	 *  The device MUST present at least one VIRTIO_PCI_CAP_DEVICE_CFG
	 *  capability for any device type which has a device-specific
	 *  configuration.
	 */
	/* Hook up device cfg */
	dev->config.cfg_access.cap.cap_next
		= offsetof(struct pci_config, device);

	/*
	 * 4.1.4.6.1:
	 *
	 *  The offset for the device-specific configuration MUST be 4-byte
	 *  aligned.
	 */
	assert(dev->config.cfg_access.cap.cap_next % 4 == 0);

	/* Fix up device cfg field length. */
	dev->config.device.length = len;

	/* The rest is the same as the no-config case */
	no_device_config(dev);
}
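/*
 * A sketch of the resulting BAR0 layout, under the assumptions above: the
 * struct virtio_pci_mmio (common cfg, notify, isr and padding) sits at
 * offset 0, and the device-specific config we just appended follows it, at
 * offset sizeof(struct virtio_pci_mmio).  That trailing region is what the
 * VIRTIO_PCI_CAP_DEVICE_CFG capability's offset and length describe.
 */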

static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
		     size_t bar_offset, size_t bar_bytes, u8 next)
{
	cap->cap_vndr = PCI_CAP_ID_VNDR;
	cap->cap_next = next;
	cap->cap_len = caplen;
	cap->cfg_type = type;
	cap->bar = 0;
	memset(cap->padding, 0, sizeof(cap->padding));
	cap->offset = bar_offset;
	cap->length = bar_bytes;
}

/*
 * This sets up the pci_config structure, as defined in the virtio 1.0
 * standard (and PCI standard).
 */
static void init_pci_config(struct pci_config *pci, u16 type,
			    u8 class, u8 subclass)
{
	size_t bar_offset, bar_len;

	/*
	 * 4.1.4.4.1:
	 *
	 *  The device MUST either present notify_off_multiplier as an even
	 *  power of 2, or present notify_off_multiplier as 0.
	 */
	memset(pci, 0, sizeof(*pci));

	/* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
	pci->vendor_id = 0x1AF4;
	/* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
	pci->device_id = 0x1040 + type;

	/*
	 * PCI has specific codes for different types of devices.
	 * Linux doesn't care, but it's a good clue for people looking
	 * at the device.
	 */
	pci->class = class;
	pci->subclass = subclass;

	/*
	 * 4.1.2.1:
	 *
	 *  Non-transitional devices SHOULD have a PCI Revision ID of 1 or
	 *  higher
	 */
	pci->revid = 1;

	/*
	 * 4.1.2.1:
	 *
	 *  Non-transitional devices SHOULD have a PCI Subsystem Device ID of
	 *  0x40 or higher.
	 */
	pci->subsystem_device_id = 0x40;

	/* We use our dummy interrupt controller, and irq_line is the irq */
	pci->irq_line = devices.next_irq++;
	pci->irq_pin = 0;

	/* Support for extended capabilities. */
	pci->status = (1 << 4);

	/* Link them in. */
	/*
	 * 4.1.4.3.1:
	 *
	 *  The device MUST present at least one common configuration
	 *  capability.
	 */
	pci->capabilities = offsetof(struct pci_config, common);

	/* 4.1.4.3.1 ... offset MUST be 4-byte aligned. */
	assert(pci->capabilities % 4 == 0);

	bar_offset = offsetof(struct virtio_pci_mmio, cfg);
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
	init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, notify));

	/*
	 * 4.1.4.4.1:
	 *
	 *  The device MUST present at least one notification capability.
	 */
	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);

	/*
	 * 4.1.4.4.1:
	 *
	 *  The cap.offset MUST be 2-byte aligned.
	 */
	assert(pci->common.cap_next % 2 == 0);

	/* FIXME: Use a non-zero notify_off, for per-queue notification? */
	/*
	 * 4.1.4.4.1:
	 *
	 *  The value cap.length presented by the device MUST be at least 2 and
	 *  MUST be large enough to support queue notification offsets for all
	 *  supported queues in all possible configurations.
	 */
	assert(bar_len >= 2);

	init_cap(&pci->notify.cap, sizeof(pci->notify),
		 VIRTIO_PCI_CAP_NOTIFY_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, isr));

	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);
	/*
	 * 4.1.4.5.1:
	 *
	 *  The device MUST present at least one VIRTIO_PCI_CAP_ISR_CFG
	 *  capability.
	 */
	init_cap(&pci->isr, sizeof(pci->isr),
		 VIRTIO_PCI_CAP_ISR_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, cfg_access));

	/*
	 * 4.1.4.7.1:
	 *
	 * The device MUST present at least one VIRTIO_PCI_CAP_PCI_CFG
	 * capability.
	 */
	/* This doesn't have any presence in the BAR */
	init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
		 VIRTIO_PCI_CAP_PCI_CFG,
		 0, 0, 0);

	bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
	assert(bar_offset == sizeof(struct virtio_pci_mmio));

	/*
	 * This gets sewn in and length set in set_device_config().
	 * Some devices don't have a device configuration interface, so
	 * we never expose this if we don't call set_device_config().
	 */
	init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
		 bar_offset, 0, 0);
}
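/*
 * To summarize, the capability chain we just built reads: common cfg ->
 * notify cfg -> ISR cfg -> PCI cfg access, with the device cfg capability
 * linked onto the end later by set_device_config() if the device has any
 * device-specific configuration.
 */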

/*
 * This routine does all the creation and setup of a new device, but we don't
 * actually place the MMIO region until we know the size (if any) of the
 * device-specific config.  And we don't actually start the service threads
 * until later.
 *
 * See what I mean about userspace being boring?
 */
static struct device *new_pci_device(const char *name, u16 type,
				     u8 class, u8 subclass)
{
	struct device *dev = malloc(sizeof(*dev));

	/* Now we populate the fields one at a time. */
	dev->name = name;
	dev->vq = NULL;
	dev->running = false;
	dev->mmio_size = sizeof(struct virtio_pci_mmio);
	dev->mmio = calloc(1, dev->mmio_size);
	dev->features = (u64)1 << VIRTIO_F_VERSION_1;
	dev->features_accepted = 0;

	if (devices.device_num + 1 >= MAX_PCI_DEVICES)
		errx(1, "Can only handle 31 PCI devices");

	init_pci_config(&dev->config, type, class, subclass);
	assert(!devices.pci[devices.device_num+1]);
	devices.pci[++devices.device_num] = dev;

	return dev;
}

/*
 * Our first setup routine is the console.  It's a fairly simple device, but
 * UNIX tty handling makes it uglier than it could be.
 */
static void setup_console(void)
{
	struct device *dev;
	struct virtio_console_config conf;

	/* If we can save the initial standard input settings... */
	if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
		struct termios term = orig_term;
		/*
		 * Then we turn off echo, line buffering and ^C etc: We want a
		 * raw input stream to the Guest.
		 */
		term.c_lflag &= ~(ISIG|ICANON|ECHO);
		tcsetattr(STDIN_FILENO, TCSANOW, &term);
	}

	dev = new_pci_device("console", VIRTIO_ID_CONSOLE, 0x07, 0x00);

	/* We store the console state in dev->priv, and initialize it. */
	dev->priv = malloc(sizeof(struct console_abort));
	((struct console_abort *)dev->priv)->count = 0;

	/*
	 * The console needs two virtqueues: the input then the output.  When
	 * they put something in the input queue, we make sure we're listening
	 * to stdin.  When they put something in the output queue, we write it
	 * to stdout.
	 */
	add_pci_virtqueue(dev, console_input);
	add_pci_virtqueue(dev, console_output);

	/* We need a configuration area for the emerg_wr early writes. */
	add_pci_feature(dev, VIRTIO_CONSOLE_F_EMERG_WRITE);
	set_device_config(dev, &conf, sizeof(conf));

	verbose("device %u: console\n", devices.device_num);
}
/*:*/

/*M:010
 * Inter-guest networking is an interesting area.  Simplest is to have a
 * --sharenet=<name> option which opens or creates a named pipe.  This can be
 * used to send packets to another guest in a 1:1 manner.
 *
 * More sophisticated is to use one of the tools developed for projects like
 * UML to do networking.
 *
 * Faster is to do virtio bonding in kernel.  Doing this 1:1 would be
 * completely generic ("here's my vring, attach to your vring") and would work
 * for any traffic.  Of course, namespace and permissions issues need to be
 * dealt with.  A more sophisticated "multi-channel" virtio_net.c could hide
 * multiple inter-guest channels behind one interface, although it would
 * require some manner of hotplugging new virtio channels.
 *
 * Finally, we could use a virtio network switch in the kernel, ie. vhost.
:*/

static u32 str2ip(const char *ipaddr)
{
	unsigned int b[4];

	if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
		errx(1, "Failed to parse IP address '%s'", ipaddr);
	return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

static void str2mac(const char *macaddr, unsigned char mac[6])
{
	unsigned int m[6];
	if (sscanf(macaddr, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6)
		errx(1, "Failed to parse mac address '%s'", macaddr);
	mac[0] = m[0];
	mac[1] = m[1];
	mac[2] = m[2];
	mac[3] = m[3];
	mac[4] = m[4];
	mac[5] = m[5];
}

/*
 * This code is "adapted" from libbridge: it attaches the Host end of the
 * network device to the bridge device specified by the command line.
 *
 * This is yet another James Morris contribution (I'm an IP-level guy, so I
 * dislike bridging), and I just try not to break it.
 */
static void add_to_bridge(int fd, const char *if_name, const char *br_name)
{
	int ifidx;
	struct ifreq ifr;

	if (!*br_name)
		errx(1, "must specify bridge name");

	ifidx = if_nametoindex(if_name);
	if (!ifidx)
		errx(1, "interface %s does not exist!", if_name);

	strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
	ifr.ifr_name[IFNAMSIZ-1] = '\0';
	ifr.ifr_ifindex = ifidx;
	if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
		err(1, "can't add %s to bridge %s", if_name, br_name);
}

/*
 * This sets up the Host end of the network device with an IP address and
 * brings it up so packets will flow.
 */
static void configure_device(int fd, const char *tapif, u32 ipaddr)
{
	struct ifreq ifr;
	struct sockaddr_in sin;

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, tapif);

	/* Don't read these incantations.  Just cut & paste them like I did! */
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(ipaddr);
	memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
		err(1, "Setting %s interface address", tapif);
	ifr.ifr_flags = IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
		err(1, "Bringing interface %s up", tapif);
}

static int get_tun_device(char tapif[IFNAMSIZ])
{
	struct ifreq ifr;
	int vnet_hdr_sz;
	int netfd;

	/* Start with this zeroed.  Messy but sure. */
	memset(&ifr, 0, sizeof(ifr));

	/*
	 * We open the /dev/net/tun device and tell it we want a tap device.  A
	 * tap device is like a tun device, only somehow different.  To tell
	 * the truth, I completely blundered my way through this code, but it
	 * works now!
	 */
	netfd = open_or_die("/dev/net/tun", O_RDWR);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring /dev/net/tun");

	if (ioctl(netfd, TUNSETOFFLOAD,
		  TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0)
		err(1, "Could not set features for tun device");

	/*
	 * We don't need checksums calculated for packets coming in this
	 * device: trust us!
	 */
	ioctl(netfd, TUNSETNOCSUM, 1);

	/*
	 * In virtio before 1.0 (aka legacy virtio), we added a 16-bit
	 * field at the end of the network header iff
	 * VIRTIO_NET_F_MRG_RXBUF was negotiated.  For virtio 1.0,
	 * that became the norm, but we need to tell the tun device
	 * about our expanded header (which is called
	 * virtio_net_hdr_mrg_rxbuf in the legacy system).
	 */
	vnet_hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0)
		err(1, "Setting tun header size to %u", vnet_hdr_sz);

	memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
	return netfd;
}

/*L:195
 * Our network is a Host<->Guest network.  This can either use bridging or
 * routing, but the principle is the same: it uses the "tun" device to inject
 * packets into the Host as if they came in from a normal network card.  We
 * just shunt packets between the Guest and the tun device.
 */
static void setup_tun_net(char *arg)
{
	struct device *dev;
	struct net_info *net_info = malloc(sizeof(*net_info));
	int ipfd;
	u32 ip = INADDR_ANY;
	bool bridging = false;
	char tapif[IFNAMSIZ], *p;
	struct virtio_net_config conf;

	net_info->tunfd = get_tun_device(tapif);

	/* First we create a new network device. */
	dev = new_pci_device("net", VIRTIO_ID_NET, 0x02, 0x00);
	dev->priv = net_info;

	/* Network devices need a recv and a send queue, just like console. */
	add_pci_virtqueue(dev, net_input);
	add_pci_virtqueue(dev, net_output);

	/*
	 * We need a socket to perform the magic network ioctls to bring up the
	 * tap interface, connect to the bridge etc.  Any socket will do!
	 */
	ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	if (ipfd < 0)
		err(1, "opening IP socket");

	/* If the command line was --tunnet=bridge:<name> do bridging. */
	if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
		arg += strlen(BRIDGE_PFX);
		bridging = true;
	}

	/* A mac address may follow the bridge name or IP address */
	p = strchr(arg, ':');
	if (p) {
		str2mac(p+1, conf.mac);
		add_pci_feature(dev, VIRTIO_NET_F_MAC);
		*p = '\0';
	}

	/* arg is now either an IP address or a bridge name */
	if (bridging)
		add_to_bridge(ipfd, tapif, arg);
	else
		ip = str2ip(arg);

	/* Set up the tun device. */
	configure_device(ipfd, tapif, ip);

	/* Expect Guest to handle everything except UFO */
	add_pci_feature(dev, VIRTIO_NET_F_CSUM);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_ECN);
	add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO4);
	add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO6);
	add_pci_feature(dev, VIRTIO_NET_F_HOST_ECN);
	/* We handle indirect ring entries */
	add_pci_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
	set_device_config(dev, &conf, sizeof(conf));

	/* We don't need the socket any more; setup is done. */
	close(ipfd);

	if (bridging)
		verbose("device %u: tun %s attached to bridge: %s\n",
			devices.device_num, tapif, arg);
	else
		verbose("device %u: tun %s: %s\n",
			devices.device_num, tapif, arg);
}
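/*
 * Hypothetical example invocations, matching the parsing above:
 *
 *	--tunnet=192.168.19.1                   sets up a routed tap device
 *	--tunnet=bridge:br0:0e:a7:de:ad:be:ef   attaches the tap to bridge
 *	                                        br0 and advertises that MAC
 */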
/*:*/

/* This hangs off device->priv. */
struct vblk_info {
	/* The size of the file. */
	off64_t len;

	/* The file descriptor for the file. */
	int fd;
};

/*L:210
 * The Disk
 *
 * The disk only has one virtqueue, so it only has one thread.  It is really
 * simple: the Guest asks for a block number and we read or write that position
 * in the file.
 *
 * Before we serviced each virtqueue in a separate thread, that was unacceptably
 * slow: the Guest waits until the read is finished before running anything
 * else, even if it could have been doing useful work.
 *
 * We could have used async I/O, except it's reputed to suck so hard that
 * characters actually go missing from your code when you try to use it.
 */
static void blk_request(struct virtqueue *vq)
{
	struct vblk_info *vblk = vq->dev->priv;
	unsigned int head, out_num, in_num, wlen;
	int ret, i;
	u8 *in;
	struct virtio_blk_outhdr out;
	struct iovec iov[vq->vring.num];
	off64_t off;

	/*
	 * Get the next request, where we normally wait.  It triggers the
	 * interrupt to acknowledge previously serviced requests (if any).
	 */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);

	/* Copy the output header from the front of the iov (adjusts iov) */
	iov_consume(iov, out_num, &out, sizeof(out));

	/* Find and trim end of iov input array, for our status byte. */
	in = NULL;
	for (i = out_num + in_num - 1; i >= out_num; i--) {
		if (iov[i].iov_len > 0) {
			in = iov[i].iov_base + iov[i].iov_len - 1;
			iov[i].iov_len--;
			break;
		}
	}
	if (!in)
		errx(1, "Bad virtblk cmd with no room for status");

	/*
	 * For historical reasons, block operations are expressed in 512 byte
	 * "sectors".
	 */
	off = out.sector * 512;

	if (out.type & VIRTIO_BLK_T_OUT) {
		/*
		 * Write
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to write past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out.sector);

		ret = writev(vblk->fd, iov, out_num);
		verbose("WRITE to sector %llu: %i\n", out.sector, ret);

		/*
		 * Grr... Now we know how long the descriptor they sent was, we
		 * make sure they didn't try to write over the end of the block
		 * file (possibly extending it).
		 */
		if (ret > 0 && off + ret > vblk->len) {
			/* Trim it back to the correct length */
			ftruncate64(vblk->fd, vblk->len);
			/* Die, bad Guest, die. */
			errx(1, "Write past end %llu+%u", off, ret);
		}

		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else if (out.type & VIRTIO_BLK_T_FLUSH) {
		/* Flush */
		ret = fdatasync(vblk->fd);
		verbose("FLUSH fdatasync: %i\n", ret);
		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else {
		/*
		 * Read
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to read past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out.sector);

		ret = readv(vblk->fd, iov + out_num, in_num);
		if (ret >= 0) {
			wlen = sizeof(*in) + ret;
			*in = VIRTIO_BLK_S_OK;
		} else {
			wlen = sizeof(*in);
			*in = VIRTIO_BLK_S_IOERR;
		}
	}

	/* Finished that request. */
	add_used(vq, head, wlen);
}
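/*
 * For reference, each request we pull off the virtqueue above looks like
 * this: a struct virtio_blk_outhdr giving the type and sector, then the
 * data buffers, then a single writable status byte which we fill in with
 * VIRTIO_BLK_S_OK or VIRTIO_BLK_S_IOERR.
 */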

/*L:198 This actually sets up a virtual block device. */
static void setup_block_file(const char *filename)
{
	struct device *dev;
	struct vblk_info *vblk;
	struct virtio_blk_config conf;

	/* Create the device. */
	dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80);

	/* The device has one virtqueue, where the Guest places requests. */
	add_pci_virtqueue(dev, blk_request);

	/* Allocate the room for our own bookkeeping */
	vblk = dev->priv = malloc(sizeof(*vblk));

	/* First we open the file and store the length. */
	vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
	vblk->len = lseek64(vblk->fd, 0, SEEK_END);

	/* Tell Guest how many sectors this device has. */
	conf.capacity = cpu_to_le64(vblk->len / 512);

	/*
	 * Tell Guest not to put in too many descriptors at once: two are used
	 * for the in and out elements.
	 */
	add_pci_feature(dev, VIRTIO_BLK_F_SEG_MAX);
	conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2);

	set_device_config(dev, &conf, sizeof(struct virtio_blk_config));

	verbose("device %u: virtblock %llu sectors\n",
		devices.device_num, le64_to_cpu(conf.capacity));
}

/*L:211
 * Our random number generator device reads from /dev/urandom into the Guest's
 * input buffers.  The usual case is that the Guest doesn't want random numbers
 * and so has no buffers although /dev/urandom is still readable, whereas
 * console is the reverse.
 *
 * The same logic applies, however.
 */
struct rng_info {
	int rfd;
};

static void rng_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num, totlen = 0;
	struct rng_info *rng_info = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* First we need a buffer from the Guest's virtqueue. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		errx(1, "Output buffers in rng?");

	/*
	 * Just like the console write, we loop to cover the whole iovec.
	 * In this case, short reads actually happen quite a bit.
	 */
	while (!iov_empty(iov, in_num)) {
		len = readv(rng_info->rfd, iov, in_num);
		if (len <= 0)
			err(1, "Read from /dev/urandom gave %i", len);
		iov_consume(iov, in_num, NULL, len);
		totlen += len;
	}

	/* Tell the Guest about the new input. */
	add_used(vq, head, totlen);
}

/*L:199
 * This creates a "hardware" random number device for the Guest.
 */
static void setup_rng(void)
{
	struct device *dev;
	struct rng_info *rng_info = malloc(sizeof(*rng_info));

	/* Our device's private info simply contains the /dev/urandom fd. */
	rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY);

	/* Create the new device. */
	dev = new_pci_device("rng", VIRTIO_ID_RNG, 0xff, 0);
	dev->priv = rng_info;

	/* The device has one virtqueue, where the Guest places inbufs. */
	add_pci_virtqueue(dev, rng_input);

	/* We don't have any configuration space */
	no_device_config(dev);

	verbose("device %u: rng\n", devices.device_num);
}
/* That's the end of device setup. */

/*L:230 Reboot is pretty easy: clean up and exec() the Launcher afresh. */
static void __attribute__((noreturn)) restart_guest(void)
{
	unsigned int i;

	/*
	 * Since we don't track all open fds, we simply close everything beyond
	 * stderr.
	 */
	for (i = 3; i < FD_SETSIZE; i++)
		close(i);

	/* Reset all the devices (kills all threads). */
	cleanup_devices();

	execv(main_args[0], main_args);
	err(1, "Could not exec %s", main_args[0]);
}

/*L:220
 * Finally we reach the core of the Launcher which runs the Guest, serves
 * its input and output, and finally, lays it to rest.
 */
static void __attribute__((noreturn)) run_guest(void)
{
	for (;;) {
		struct lguest_pending notify;
		int readval;

		/* We read from the /dev/lguest device to run the Guest. */
		readval = pread(lguest_fd, &notify, sizeof(notify), cpu_id);
		if (readval == sizeof(notify)) {
			if (notify.trap == 13) {
				verbose("Emulating instruction at %#x\n",
					getreg(eip));
				emulate_insn(notify.insn);
			} else if (notify.trap == 14) {
				verbose("Emulating MMIO at %#x\n",
					getreg(eip));
				emulate_mmio(notify.addr, notify.insn);
			} else
				errx(1, "Unknown trap %i addr %#08x\n",
				     notify.trap, notify.addr);
		/* ENOENT means the Guest died.  Reading tells us why. */
		} else if (errno == ENOENT) {
			char reason[1024] = { 0 };
			pread(lguest_fd, reason, sizeof(reason)-1, cpu_id);
			errx(1, "%s", reason);
		/* ERESTART means that we need to reboot the guest */
		} else if (errno == ERESTART) {
			restart_guest();
		/* Anything else means a bug or incompatible change. */
		} else
			err(1, "Running guest failed");
	}
}
/*L:240
 * This is the end of the Launcher.  The good news: we are over halfway
 * through!  The bad news: the most fiendish part of the code still lies ahead
 * of us.
 *
 * Are you ready?  Take a deep breath and join me in the core of the Host, in
 * "make Host".
:*/

static struct option opts[] = {
	{ "verbose", 0, NULL, 'v' },
	{ "tunnet", 1, NULL, 't' },
	{ "block", 1, NULL, 'b' },
	{ "rng", 0, NULL, 'r' },
	{ "initrd", 1, NULL, 'i' },
	{ "username", 1, NULL, 'u' },
	{ "chroot", 1, NULL, 'c' },
	{ NULL },
};
static void usage(void)
{
	errx(1, "Usage: lguest [--verbose] "
	     "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n"
	     "|--block=<filename>|--initrd=<filename>]...\n"
	     "<mem-in-mb> vmlinux [args...]");
}

/*L:105 The main routine is where the real work begins: */
int main(int argc, char *argv[])
{
	/* Memory, code startpoint and size of the (optional) initrd. */
	unsigned long mem = 0, start, initrd_size = 0;
	/* Two temporaries. */
	int i, c;
	/* The boot information for the Guest. */
	struct boot_params *boot;
	/* If they specify an initrd file to load. */
	const char *initrd_name = NULL;

	/* Password structure for initgroups/setres[gu]id */
	struct passwd *user_details = NULL;

	/* Directory to chroot to */
	char *chroot_path = NULL;

	/* Save the args: we "reboot" by execing ourselves again. */
	main_args = argv;

	/*
	 * First we initialize the device list.  We remember the next interrupt
	 * number to use for devices (1: remember that 0 is used by the timer).
	 */
	devices.next_irq = 1;

	/* We're CPU 0.  In fact, that's the only CPU possible right now. */
	cpu_id = 0;

	/*
	 * We need to know how much memory so we can set up the device
	 * descriptor and memory pages for the devices as we parse the command
	 * line.  So we quickly look through the arguments to find the amount
	 * of memory now.
	 */
	for (i = 1; i < argc; i++) {
		if (argv[i][0] != '-') {
			mem = atoi(argv[i]) * 1024 * 1024;
			/*
			 * We start by mapping anonymous pages over all of the
			 * guest-physical memory range.  This fills it with 0,
			 * and ensures that the Guest won't be killed when it
			 * tries to access it.
			 */
			guest_base = map_zeroed_pages(mem / getpagesize()
						      + DEVICE_PAGES);
			guest_limit = mem;
			guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize();
			break;
		}
	}

	/* We always have a console device, and it's always device 1. */
	setup_console();

	/* The options are fairly straight-forward */
	while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
		switch (c) {
		case 'v':
			verbose = true;
			break;
		case 't':
			setup_tun_net(optarg);
			break;
		case 'b':
			setup_block_file(optarg);
			break;
		case 'r':
			setup_rng();
			break;
		case 'i':
			initrd_name = optarg;
			break;
		case 'u':
			user_details = getpwnam(optarg);
			if (!user_details)
				err(1, "getpwnam failed, incorrect username?");
			break;
		case 'c':
			chroot_path = optarg;
			break;
		default:
			warnx("Unknown argument %s", argv[optind]);
			usage();
		}
	}
	/*
	 * After the other arguments we expect memory and kernel image name,
	 * followed by command line arguments for the kernel.
	 */
	if (optind + 2 > argc)
		usage();

	verbose("Guest base is at %p\n", guest_base);

	/* Initialize the (fake) PCI host bridge device. */
	init_pci_host_bridge();

	/* Now we load the kernel */
	start = load_kernel(open_or_die(argv[optind+1], O_RDONLY));

	/* Boot information is stashed at physical address 0 */
	boot = from_guest_phys(0);

	/* Map the initrd image if requested (at top of physical memory) */
	if (initrd_name) {
		initrd_size = load_initrd(initrd_name, mem);
		/*
		 * These are the locations in the Linux boot header where the
		 * start and size of the initrd are expected to be found.
		 */
		boot->hdr.ramdisk_image = mem - initrd_size;
		boot->hdr.ramdisk_size = initrd_size;
		/* The bootloader type 0xFF means "unknown"; that's OK. */
		boot->hdr.type_of_loader = 0xFF;
	}

	/*
	 * The Linux boot header contains an "E820" memory map: ours is a
	 * simple, single region.
	 */
	boot->e820_entries = 1;
	boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM });
	/*
	 * The boot header contains a command line pointer: we put the command
	 * line after the boot header.
	 */
	boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1);
	/* We use a simple helper to copy the arguments separated by spaces. */
	concat((char *)(boot + 1), argv+optind+2);

	/* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */
	boot->hdr.kernel_alignment = 0x1000000;

	/* Boot protocol version: 2.07 supports the fields for lguest. */
	boot->hdr.version = 0x207;

	/* The hardware_subarch value of "1" tells the Guest it's an lguest. */
	boot->hdr.hardware_subarch = 1;

	/* Tell the entry path not to try to reload segment registers. */
	boot->hdr.loadflags |= KEEP_SEGMENTS;

	/* We tell the kernel to initialize the Guest. */
	tell_kernel(start);

	/* Ensure that we terminate if a device-servicing child dies. */
	signal(SIGCHLD, kill_launcher);

	/* If we exit via err(), this kills all the threads, restores tty. */
	atexit(cleanup_devices);

	/* If requested, chroot to a directory */
	if (chroot_path) {
		if (chroot(chroot_path) != 0)
			err(1, "chroot(\"%s\") failed", chroot_path);

		if (chdir("/") != 0)
			err(1, "chdir(\"/\") failed");

		verbose("chroot done\n");
	}

	/* If requested, drop privileges */
	if (user_details) {
		uid_t u;
		gid_t g;

		u = user_details->pw_uid;
		g = user_details->pw_gid;

		if (initgroups(user_details->pw_name, g) != 0)
			err(1, "initgroups failed");

		if (setresgid(g, g, g) != 0)
			err(1, "setresgid failed");

		if (setresuid(u, u, u) != 0)
			err(1, "setresuid failed");

		verbose("Dropping privileges completed\n");
	}

	/* Finally, run the Guest.  This doesn't return. */
	run_guest();
}
/*:*/

/*M:999
 * Mastery is done: you now know everything I do.
 *
 * But surely you have seen code, features and bugs in your wanderings which
 * you now yearn to attack?  That is the real game, and I look forward to you
 * patching and forking lguest into the Your-Name-Here-visor.
 *
 * Farewell, and good coding!
 * Rusty Russell.
 */