/*P:100
 * This is the Launcher code, a simple program which lays out the "physical"
 * memory for the new Guest by mapping the kernel image and the virtual
 * devices, then opens /dev/lguest to tell the kernel about the Guest and
 * control it.
:*/
#define _LARGEFILE64_SOURCE
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <err.h>
#include <stdint.h>
#include <stdlib.h>
#include <elf.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/eventfd.h>
#include <fcntl.h>
#include <stdbool.h>
#include <errno.h>
#include <ctype.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <time.h>
#include <netinet/in.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
#include <termios.h>
#include <getopt.h>
#include <assert.h>
#include <sched.h>
#include <limits.h>
#include <stddef.h>
#include <signal.h>
#include <pwd.h>
#include <grp.h>
#include <sys/user.h>
#include <linux/pci_regs.h>

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT		27
#endif

/*L:110
 * We can ignore the 43 include files we need for this program, but I do want
 * to draw attention to the use of kernel-style types.
 *
 * As Linus said, "C is a Spartan language, and so should your naming be."  I
 * like these abbreviations, so we define them here.  Note that u64 is always
 * unsigned long long, which works on all Linux systems: this means that we can
 * use %llu in printf for any u64.
 */
typedef unsigned long long u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
/*:*/

#define VIRTIO_PCI_NO_LEGACY

/* Use the in-kernel ones, which define VIRTIO_F_VERSION_1 */
#include "../../include/uapi/linux/virtio_config.h"
#include <linux/virtio_net.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_console.h>
#include <linux/virtio_rng.h>
#include <linux/virtio_ring.h>
#include "../../include/uapi/linux/virtio_pci.h"
#include <asm/bootparam.h>
#include "../../include/linux/lguest_launcher.h"

#define BRIDGE_PFX "bridge:"
#ifndef SIOCBRADDIF
#define SIOCBRADDIF	0x89a2		/* add interface to bridge      */
#endif
/* We can have up to 256 pages for devices. */
#define DEVICE_PAGES 256
/* This will occupy 3 pages: it must be a power of 2. */
#define VIRTQUEUE_NUM 256

/*L:120
 * verbose is both a global flag and a macro.  The C preprocessor allows
 * this, and although I wouldn't recommend it, it works quite nicely here.
 */
static bool verbose;
#define verbose(args...) \
	do { if (verbose) printf(args); } while(0)
/*:*/

/* The pointer to the start of guest memory. */
static void *guest_base;
/* The maximum guest physical address allowed, and maximum possible. */
static unsigned long guest_limit, guest_max, guest_mmio;
/* The /dev/lguest file descriptor. */
static int lguest_fd;

/* a per-cpu variable indicating which vcpu is currently running */
static unsigned int __thread cpu_id;

/* 5 bit device number in the PCI_CONFIG_ADDR => 32 only */
#define MAX_PCI_DEVICES 32

/* This is our list of devices. */
struct device_list {
	/* Counter to assign interrupt numbers. */
	unsigned int next_irq;

	/* Counter to print out convenient device numbers. */
	unsigned int device_num;

	/* The descriptor page for the devices. */
	u8 *descpage;

	/* A singly-linked list of devices. */
	struct device *dev;
	/* And a pointer to the last device for easy append. */
	struct device *lastdev;

	/* PCI devices. */
	struct device *pci[MAX_PCI_DEVICES];
};

/* The list of Guest devices, based on command line arguments. */
static struct device_list devices;

struct virtio_pci_cfg_cap {
	struct virtio_pci_cap cap;
	u32 window; /* Data for BAR access. */
};

struct virtio_pci_mmio {
	struct virtio_pci_common_cfg cfg;
	u16 notify;
	u8 isr;
	u8 padding;
	/* Device-specific configuration follows this. */
};

/* This is the layout (little-endian) of the PCI config space. */
struct pci_config {
	u16 vendor_id, device_id;
	u16 command, status;
	u8 revid, prog_if, subclass, class;
	u8 cacheline_size, lat_timer, header_type, bist;
	u32 bar[6];
	u32 cardbus_cis_ptr;
	u16 subsystem_vendor_id, subsystem_device_id;
	u32 expansion_rom_addr;
	u8 capabilities, reserved1[3];
	u32 reserved2;
	u8 irq_line, irq_pin, min_grant, max_latency;

	/* Now, this is the linked capability list. */
	struct virtio_pci_cap common;
	struct virtio_pci_notify_cap notify;
	struct virtio_pci_cap isr;
	struct virtio_pci_cap device;
	/* FIXME: Implement this! */
	struct virtio_pci_cfg_cap cfg_access;
};

/* The device structure describes a single device. */
struct device {
	/* The linked-list pointer. */
	struct device *next;

	/* The device's descriptor, as mapped into the Guest. */
	struct lguest_device_desc *desc;

	/* We can't trust desc values once Guest has booted: we use these. */
	unsigned int feature_len;
	unsigned int num_vq;

	/* The name of this device, for --verbose. */
	const char *name;

	/* Any queues attached to this device */
	struct virtqueue *vq;

	/* Is it operational */
	bool running;

	/* PCI configuration */
	union {
		struct pci_config config;
		u32 config_words[sizeof(struct pci_config) / sizeof(u32)];
	};

	/* Features we offer, and those accepted. */
	u64 features, features_accepted;

	/* Device-specific config hangs off the end of this. */
	struct virtio_pci_mmio *mmio;

	/* PCI MMIO resources (all in BAR0) */
	size_t mmio_size;
	u32 mmio_addr;

	/* Device-specific data. */
	void *priv;
};

/* The virtqueue structure describes a queue attached to a device. */
struct virtqueue {
	struct virtqueue *next;

	/* Which device owns me. */
	struct device *dev;

	/* The configuration for this queue. */
	struct lguest_vqconfig config;

	/* The actual ring of buffers. */
	struct vring vring;

	/* The information about this virtqueue (we only use the fields from queue_size onwards) */
	struct virtio_pci_common_cfg pci_config;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* How many are used since we sent last irq? */
	unsigned int pending_used;

	/* Eventfd where Guest notifications arrive. */
	int eventfd;

	/* Function for the thread which is servicing this virtqueue. */
	void (*service)(struct virtqueue *vq);
	pid_t thread;
};

/* Remember the arguments to the program so we can "reboot" */
static char **main_args;

/* The original tty settings to restore on exit. */
static struct termios orig_term;

/*
 * We have to be careful with barriers: our devices are all run in separate
 * threads and so we need to make sure that changes visible to the Guest happen
 * in precise order.
 */
#define wmb() __asm__ __volatile__("" : : : "memory")
#define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")

/* Wrapper for the last available index.  Makes it easier to change. */
#define lg_last_avail(vq)	((vq)->last_avail_idx)

/*
 * The virtio configuration space is defined to be little-endian.  x86 is
 * little-endian too, but it's nice to be explicit so we have these helpers.
 */
#define cpu_to_le16(v16) (v16)
#define cpu_to_le32(v32) (v32)
#define cpu_to_le64(v64) (v64)
#define le16_to_cpu(v16) (v16)
#define le32_to_cpu(v32) (v32)
#define le64_to_cpu(v64) (v64)

/* Is this iovec empty? */
static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++)
		if (iov[i].iov_len)
			return false;
	return true;
}

/* Take len bytes from the front of this iovec. */
static void iov_consume(struct iovec iov[], unsigned num_iov,
			void *dest, unsigned len)
{
	unsigned int i;

	for (i = 0; i < num_iov; i++) {
		unsigned int used;

		used = iov[i].iov_len < len ? iov[i].iov_len : len;
		if (dest) {
			memcpy(dest, iov[i].iov_base, used);
			dest += used;
		}
		iov[i].iov_base += used;
		iov[i].iov_len -= used;
		len -= used;
	}
	if (len != 0)
		errx(1, "iovec too short!");
}
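
/*
 * A typical caller pattern, seen in console_output() further down, is
 * iov_consume(iov, out, NULL, len) to skip over bytes a partial writev()
 * already handled; passing a non-NULL dest copies the consumed bytes out
 * as well.
 */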

/* The device virtqueue descriptors are followed by feature bitmasks. */
static u8 *get_feature_bits(struct device *dev)
{
	return (u8 *)(dev->desc + 1)
		+ dev->num_vq * sizeof(struct lguest_vqconfig);
}

/*L:100
 * The Launcher code itself takes us out into userspace, that scary place where
 * pointers run wild and free!  Unfortunately, like most userspace programs,
 * it's quite boring (which is why everyone likes to hack on the kernel!).
 * Perhaps if you make up an Lguest Drinking Game at this point, it will get
 * you through this section.  Or, maybe not.
 *
 * The Launcher sets up a big chunk of memory to be the Guest's "physical"
 * memory and stores it in "guest_base".  In other words, Guest physical ==
 * Launcher virtual with an offset.
 *
 * This can be tough to get your head around, but usually it just means that we
 * use these trivial conversion functions when the Guest gives us its
 * "physical" addresses:
 */
static void *from_guest_phys(unsigned long addr)
{
	return guest_base + addr;
}

static unsigned long to_guest_phys(const void *addr)
{
	return (addr - guest_base);
}

/*L:130
 * Loading the Kernel.
 *
 * We start with a couple of simple helper routines.  open_or_die() avoids
 * error-checking code cluttering the callers:
 */
static int open_or_die(const char *name, int flags)
{
	int fd = open(name, flags);
	if (fd < 0)
		err(1, "Failed to open %s", name);
	return fd;
}

/* map_zeroed_pages() takes a number of pages. */
static void *map_zeroed_pages(unsigned int num)
{
	int fd = open_or_die("/dev/zero", O_RDONLY);
	void *addr;

	/*
	 * We use a private mapping (ie. if we write to the page, it will be
	 * copied). We allocate an extra two pages PROT_NONE to act as guard
	 * pages against read/write attempts that exceed allocated space.
	 */
	addr = mmap(NULL, getpagesize() * (num+2),
		    PROT_NONE, MAP_PRIVATE, fd, 0);

	if (addr == MAP_FAILED)
		err(1, "Mmapping %u pages of /dev/zero", num);

	if (mprotect(addr + getpagesize(), getpagesize() * num,
		     PROT_READ|PROT_WRITE) == -1)
		err(1, "mprotect rw %u pages failed", num);

	/*
	 * One neat mmap feature is that you can close the fd, and it
	 * stays mapped.
	 */
	close(fd);

	/* Return address after PROT_NONE page */
	return addr + getpagesize();
}
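
/*
 * So the layout we hand back is, roughly:
 *
 *	guard page (PROT_NONE) | num usable pages | guard page (PROT_NONE)
 *
 * and the returned pointer is the first usable page, so a stray access just
 * before or just after the allocation faults instead of silently corrupting
 * a neighbour.
 */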

/* Get some more pages for a device. */
static void *get_pages(unsigned int num)
{
	void *addr = from_guest_phys(guest_limit);

	guest_limit += num * getpagesize();
	if (guest_limit > guest_max)
		errx(1, "Not enough memory for devices");
	return addr;
}

/* Get some bytes which won't be mapped into the guest. */
static unsigned long get_mmio_region(size_t size)
{
	unsigned long addr = guest_mmio;
	size_t i;

	if (!size)
		return addr;

	/* Size has to be a power of 2 (and multiple of 16) */
	for (i = 1; i < size; i <<= 1);

	guest_mmio += i;

	return addr;
}
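
/*
 * For instance, a device asking for 0x30 bytes of MMIO here is handed the
 * current guest_mmio and the counter advances by 0x40: sizes are always
 * rounded up to the next power of 2, as PCI requires of BAR sizes.
 */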

/*
 * This routine is used to load the kernel or initrd.  It tries mmap, but if
 * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries),
 * it falls back to reading the memory in.
 */
static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
{
	ssize_t r;

	/*
	 * We map writable even though some segments are marked read-only.
	 * The kernel really wants to be writable: it patches its own
	 * instructions.
	 *
	 * MAP_PRIVATE means that the page won't be copied until a write is
	 * done to it.  This allows us to share untouched memory between
	 * Guests.
	 */
	if (mmap(addr, len, PROT_READ|PROT_WRITE,
		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
		return;

	/* pread does a seek and a read in one shot: saves a few lines. */
	r = pread(fd, addr, len, offset);
	if (r != len)
		err(1, "Reading offset %lu len %lu gave %zi", offset, len, r);
}

/*
 * This routine takes an open vmlinux image, which is in ELF, and maps it into
 * the Guest memory.  ELF = Executable and Linkable Format, the format used
 * by all modern binaries on Linux including the kernel.
 *
 * The ELF headers give *two* addresses: a physical address, and a virtual
 * address.  We use the physical address; the Guest will map itself to the
 * virtual address.
 *
 * We return the starting address.
 */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
{
	Elf32_Phdr phdr[ehdr->e_phnum];
	unsigned int i;

	/*
	 * Sanity checks on the main ELF header: an x86 executable with a
	 * reasonable number of correctly-sized program headers.
	 */
	if (ehdr->e_type != ET_EXEC
	    || ehdr->e_machine != EM_386
	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
		errx(1, "Malformed elf header");

	/*
	 * An ELF executable contains an ELF header and a number of "program"
	 * headers which indicate which parts ("segments") of the program to
	 * load where.
	 */

	/* We read in all the program headers at once: */
	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
		err(1, "Seeking to program headers");
	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
		err(1, "Reading program headers");

	/*
	 * Try all the headers: there are usually only three.  A read-only one,
	 * a read-write one, and a "note" section which we don't load.
	 */
	for (i = 0; i < ehdr->e_phnum; i++) {
		/* If this isn't a loadable segment, we ignore it */
		if (phdr[i].p_type != PT_LOAD)
			continue;

		verbose("Section %i: size %i addr %p\n",
			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);

		/* We map this section of the file at its physical address. */
		map_at(elf_fd, from_guest_phys(phdr[i].p_paddr),
		       phdr[i].p_offset, phdr[i].p_filesz);
	}

	/* The entry point is given in the ELF header. */
	return ehdr->e_entry;
}

/*L:150
 * A bzImage, unlike an ELF file, is not meant to be loaded.  You're supposed
 * to jump into it and it will unpack itself.  We used to have to perform some
 * hairy magic because the unpacking code scared me.
 *
 * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote
 * a small patch to jump over the tricky bits in the Guest, so now we just read
 * the funky header so we know where in the file to load, and away we go!
 */
static unsigned long load_bzimage(int fd)
{
	struct boot_params boot;
	int r;
	/* Modern bzImages get loaded at 1M. */
	void *p = from_guest_phys(0x100000);

	/*
	 * Go back to the start of the file and read the header.  It should be
	 * a Linux boot header (see Documentation/x86/boot.txt)
	 */
	lseek(fd, 0, SEEK_SET);
	read(fd, &boot, sizeof(boot));

	/* Inside the setup_hdr, we expect the magic "HdrS" */
	if (memcmp(&boot.hdr.header, "HdrS", 4) != 0)
		errx(1, "This doesn't look like a bzImage to me");

	/* Skip over the extra sectors of the header. */
	lseek(fd, (boot.hdr.setup_sects+1) * 512, SEEK_SET);

	/* Now read everything into memory, in nice big chunks. */
	while ((r = read(fd, p, 65536)) > 0)
		p += r;

	/* Finally, code32_start tells us where to enter the kernel. */
	return boot.hdr.code32_start;
}

/*L:140
 * Loading the kernel is easy when it's a "vmlinux", but most kernels
 * come wrapped up in the self-decompressing "bzImage" format.  With a little
 * work, we can load those, too.
 */
static unsigned long load_kernel(int fd)
{
	Elf32_Ehdr hdr;

	/* Read in the first few bytes. */
	if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
		err(1, "Reading kernel");

	/* If it's an ELF file, it starts with "\177ELF" */
	if (memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0)
		return map_elf(fd, &hdr);

	/* Otherwise we assume it's a bzImage, and try to load it. */
	return load_bzimage(fd);
}

/*
 * This is a trivial little helper to align pages.  Andi Kleen hated it because
 * it calls getpagesize() twice: "it's dumb code."
 *
 * Kernel guys get really het up about optimization, even when it's not
 * necessary.  I leave this code as a reaction against that.
 */
static inline unsigned long page_align(unsigned long addr)
{
	/* Add upwards and truncate downwards. */
	return ((addr + getpagesize()-1) & ~(getpagesize()-1));
}
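
/*
 * With the usual 4096-byte pages, page_align(4096) stays 4096 and
 * page_align(4097) becomes 8192: adding getpagesize()-1 rounds up, and the
 * mask truncates back down to a page boundary.
 */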

/*L:180
 * An "initial ram disk" is a disk image loaded into memory along with the
 * kernel which the kernel can use to boot from without needing any drivers.
 * Most distributions now use this as standard: the initrd contains the code to
 * load the appropriate driver modules for the current machine.
 *
 * Importantly, James Morris works for RedHat, and Fedora uses initrds for its
 * kernels.  He sent me this (and tells me when I break it).
 */
static unsigned long load_initrd(const char *name, unsigned long mem)
{
	int ifd;
	struct stat st;
	unsigned long len;

	ifd = open_or_die(name, O_RDONLY);
	/* fstat() is needed to get the file size. */
	if (fstat(ifd, &st) < 0)
		err(1, "fstat() on initrd '%s'", name);

	/*
	 * We map the initrd at the top of memory, but mmap wants it to be
	 * page-aligned, so we round the size up for that.
	 */
	len = page_align(st.st_size);
	map_at(ifd, from_guest_phys(mem - len), 0, st.st_size);
	/*
	 * Once a file is mapped, you can close the file descriptor.  It's a
	 * little odd, but quite useful.
	 */
	close(ifd);
	verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len);

	/* We return the initrd size. */
	return len;
}
/*:*/

/*
 * Simple routine to roll all the commandline arguments together with spaces
 * between them.
 */
static void concat(char *dst, char *args[])
{
	unsigned int i, len = 0;

	for (i = 0; args[i]; i++) {
		if (i) {
			strcat(dst+len, " ");
			len++;
		}
		strcpy(dst+len, args[i]);
		len += strlen(args[i]);
	}
	/* In case it's empty. */
	dst[len] = '\0';
}

/*L:185
 * This is where we actually tell the kernel to initialize the Guest.  We
 * saw the arguments it expects when we looked at initialize() in lguest_user.c:
 * the base of Guest "physical" memory, the top physical page to allow and the
 * entry point for the Guest.
 */
static void tell_kernel(unsigned long start)
{
	unsigned long args[] = { LHREQ_INITIALIZE,
				 (unsigned long)guest_base,
				 guest_limit / getpagesize(), start,
				 (guest_mmio+getpagesize()-1) / getpagesize() };
	verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n",
		guest_base, guest_base + guest_limit,
		guest_limit, guest_mmio);
	lguest_fd = open_or_die("/dev/lguest", O_RDWR);
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Writing to /dev/lguest");
}
/*:*/

/*L:200
 * Device Handling.
 *
 * When the Guest gives us a buffer, it sends an array of addresses and sizes.
 * We need to make sure it's not trying to reach into the Launcher itself, so
 * we have a convenient routine which checks it and exits with an error message
 * if something funny is going on:
 */
static void *_check_pointer(unsigned long addr, unsigned int size,
			    unsigned int line)
{
	/*
	 * Check if the requested address and size exceed the allocated memory,
	 * or addr + size wraps around.
	 */
	if ((addr + size) > guest_limit || (addr + size) < addr)
		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
	/*
	 * We return a pointer for the caller's convenience, now we know it's
	 * safe to use.
	 */
	return from_guest_phys(addr);
}
/* A macro which transparently hands the line number to the real function. */
#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)

/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off end of descriptors. */
	next = desc[i].next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	wmb();

	if (next >= max)
		errx(1, "Desc next is %u", next);

	return next;
}

/*
 * This actually sends the interrupt for this virtqueue, if we've used a
 * buffer.
 */
static void trigger_irq(struct virtqueue *vq)
{
	unsigned long buf[] = { LHREQ_IRQ, vq->config.irq };

	/* Don't inform them if nothing used. */
	if (!vq->pending_used)
		return;
	vq->pending_used = 0;

	/* If they don't want an interrupt, don't send one... */
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
		return;
	}

	/* For a PCI device, set isr to 1 (queue interrupt pending) */
	if (vq->dev->mmio)
		vq->dev->mmio->isr = 0x1;

	/* Send the Guest an interrupt to tell them we used something up. */
	if (write(lguest_fd, buf, sizeof(buf)) != 0)
		err(1, "Triggering irq %i", vq->config.irq);
}

/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/* 
	 * Make sure we read the descriptor number *after* we read the ring
	 * update; don't let the cpu or compiler change the order.
	 */
	rmb();

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * We have to read the descriptor after we read the descriptor number,
	 * but there's a data dependency there so the CPU shouldn't reorder
	 * that: no rmb() required.
	 */

	/*
	 * If this is an indirect entry, then this buffer contains a descriptor
	 * table which we handle as if it's any normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");

		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}
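
/*
 * Every service routine below follows the same shape: call
 * wait_for_vq_desc() to get a buffer, do the I/O on the iovec, then hand
 * the buffer back with add_used() (or add_used_and_trigger() if an
 * interrupt is wanted straight away).
 */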

/*
 * After we've used one of their buffers, we tell the Guest about it.  Sometime
 * later we'll want to send them an interrupt using trigger_irq(); note that
 * wait_for_vq_desc() does that for us if it has to wait.
 */
static void add_used(struct virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/*
	 * The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring.
	 */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	used->id = head;
	used->len = len;
	/* Make sure buffer is written before we update index. */
	wmb();
	vq->vring.used->idx++;
	vq->pending_used++;
}

/* And here's the combo meal deal.  Supersize me! */
static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
{
	add_used(vq, head, len);
	trigger_irq(vq);
}

/*
 * The Console
 *
 * We associate some data with the console for our exit hack.
 */
struct console_abort {
	/* How many times have they hit ^C? */
	int count;
	/* When did they start? */
	struct timeval start;
};

/* This is the routine which handles console input (ie. stdin). */
static void console_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num;
	struct console_abort *abort = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* Make sure there's a descriptor available. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		errx(1, "Output buffers in console in queue?");

	/* Read into it.  This is where we usually wait. */
	len = readv(STDIN_FILENO, iov, in_num);
	if (len <= 0) {
		/* Ran out of input? */
		warnx("Failed to get console input, ignoring console.");
		/*
		 * For simplicity, dying threads kill the whole Launcher.  So
		 * just nap here.
		 */
		for (;;)
			pause();
	}

	/* Tell the Guest we used a buffer. */
	add_used_and_trigger(vq, head, len);

	/*
	 * Three ^C within one second?  Exit.
	 *
	 * This is such a hack, but works surprisingly well.  Each ^C has to
	 * be in a buffer by itself, so they can't be too fast.  But we check
	 * that we get three within about a second, so they can't be too
	 * slow.
	 */
	if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
		abort->count = 0;
		return;
	}

	abort->count++;
	if (abort->count == 1)
		gettimeofday(&abort->start, NULL);
	else if (abort->count == 3) {
		struct timeval now;
		gettimeofday(&now, NULL);
		/* Kill all Launcher processes with SIGINT, like normal ^C */
		if (now.tv_sec <= abort->start.tv_sec+1)
			kill(0, SIGINT);
		abort->count = 0;
	}
}

/* This is the routine which handles console output (ie. stdout). */
static void console_output(struct virtqueue *vq)
{
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here, for the Guest to give us something. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in console output queue?");

	/* writev can return a partial write, so we loop here. */
	while (!iov_empty(iov, out)) {
		int len = writev(STDOUT_FILENO, iov, out);
		if (len <= 0) {
			warn("Write to stdout gave %i (%d)", len, errno);
			break;
		}
		iov_consume(iov, out, NULL, len);
	}

	/*
	 * We're finished with that buffer: if we're going to sleep,
	 * wait_for_vq_desc() will prod the Guest with an interrupt.
	 */
	add_used(vq, head, 0);
}

/*
 * The Network
 *
 * Handling output for network is also simple: we get all the output buffers
 * and write them to /dev/net/tun.
 */
struct net_info {
	int tunfd;
};

static void net_output(struct virtqueue *vq)
{
	struct net_info *net_info = vq->dev->priv;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

	/* We usually wait in here for the Guest to give us a packet. */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in net output queue?");
	/*
	 * Send the whole thing through to /dev/net/tun.  It expects the exact
	 * same format: what a coincidence!
	 */
	if (writev(net_info->tunfd, iov, out) < 0)
		warnx("Write to tun failed (%d)?", errno);

	/*
	 * Done with that one; wait_for_vq_desc() will send the interrupt if
	 * all packets are processed.
	 */
	add_used(vq, head, 0);
}

/*
 * Handling network input is a bit trickier, because I've tried to optimize it.
 *
 * First we have a helper routine which tells us if reading from this file descriptor
 * (ie. the /dev/net/tun device) will block:
 */
static bool will_block(int fd)
{
	fd_set fdset;
	struct timeval zero = { 0, 0 };
	FD_ZERO(&fdset);
	FD_SET(fd, &fdset);
	return select(fd+1, &fdset, NULL, NULL, &zero) != 1;
}

/*
 * This handles packets coming in from the tun device to our Guest.  Like all
 * service routines, it gets called again as soon as it returns, so you don't
 * see a while(1) loop here.
 */
static void net_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];
	struct net_info *net_info = vq->dev->priv;

	/*
	 * Get a descriptor to write an incoming packet into.  This will also
	 * send an interrupt if they're out of descriptors.
	 */
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (out)
		errx(1, "Output buffers in net input queue?");

	/*
	 * If it looks like we'll block reading from the tun device, send them
	 * an interrupt.
	 */
	if (vq->pending_used && will_block(net_info->tunfd))
		trigger_irq(vq);

	/*
	 * Read in the packet.  This is where we normally wait (when there's no
	 * incoming network traffic).
	 */
	len = readv(net_info->tunfd, iov, in);
	if (len <= 0)
		warn("Failed to read from tun (%d).", errno);

	/*
	 * Mark that packet buffer as used, but don't interrupt here.  We want
	 * to wait until we've done as much work as we can.
	 */
	add_used(vq, head, len);
}
/*:*/

/* This is the helper to create threads: run the service routine in a loop. */
static int do_thread(void *_vq)
{
	struct virtqueue *vq = _vq;

	for (;;)
		vq->service(vq);
	return 0;
}

/*
 * When a child dies, we kill our entire process group with SIGTERM.  This
 * also has the side effect that the shell restores the console for us!
 */
static void kill_launcher(int signal)
{
	kill(0, SIGTERM);
}

static void reset_device(struct device *dev)
{
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/* Clear any features they've acked. */
	memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len);

	/* We're going to be explicitly killing threads, so ignore them. */
	signal(SIGCHLD, SIG_IGN);

	/* Zero out the virtqueues, get rid of their threads */
	for (vq = dev->vq; vq; vq = vq->next) {
		if (vq->thread != (pid_t)-1) {
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			vq->thread = (pid_t)-1;
		}
		memset(vq->vring.desc, 0,
		       vring_size(vq->config.num, LGUEST_VRING_ALIGN));
		lg_last_avail(vq) = 0;
	}
	dev->running = false;

	/* Now we care if threads die. */
	signal(SIGCHLD, (void *)kill_launcher);
}

/*L:216
 * This actually creates the thread which services the virtqueue for a device.
 */
static void create_thread(struct virtqueue *vq)
{
	/*
	 * Create stack for thread.  Since the stack grows downwards, we point
	 * the stack pointer to the end of this region.
	 */
	char *stack = malloc(32768);
	unsigned long args[] = { LHREQ_EVENTFD,
				 vq->config.pfn*getpagesize(), 0 };

	/* Create a zero-initialized eventfd. */
	vq->eventfd = eventfd(0, 0);
	if (vq->eventfd < 0)
		err(1, "Creating eventfd");
	args[2] = vq->eventfd;

	/*
	 * Attach an eventfd to this virtqueue: it will go off when the Guest
	 * does an LHCALL_NOTIFY for this vq.
	 */
	if (write(lguest_fd, &args, sizeof(args)) != 0)
		err(1, "Attaching eventfd");

	/*
	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
	 * we get a signal if it dies.
	 */
	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
	if (vq->thread == (pid_t)-1)
		err(1, "Creating clone");

	/* We close our local copy now the child has it. */
	close(vq->eventfd);
}

static void start_device(struct device *dev)
{
	unsigned int i;
	struct virtqueue *vq;

	verbose("Device %s OK: offered", dev->name);
	for (i = 0; i < dev->feature_len; i++)
		verbose(" %02x", get_feature_bits(dev)[i]);
	verbose(", accepted");
	for (i = 0; i < dev->feature_len; i++)
		verbose(" %02x", get_feature_bits(dev)
			[dev->feature_len+i]);

	for (vq = dev->vq; vq; vq = vq->next) {
		if (vq->service)
			create_thread(vq);
	}
	dev->running = true;
}

static void cleanup_devices(void)
{
	struct device *dev;

	for (dev = devices.dev; dev; dev = dev->next)
		reset_device(dev);

	/* If we saved off the original terminal settings, restore them now. */
	if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
		tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
}

/* When the Guest tells us they updated the status field, we handle it. */
static void update_device_status(struct device *dev)
{
	/* A zero status is a reset, otherwise it's a set of flags. */
	if (dev->desc->status == 0)
		reset_device(dev);
	else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) {
		warnx("Device %s configuration FAILED", dev->name);
		if (dev->running)
			reset_device(dev);
	} else {
		if (dev->running)
			err(1, "Device %s features finalized twice", dev->name);
		start_device(dev);
	}
}

/*L:215
 * This is the generic routine we call when the Guest uses LHCALL_NOTIFY.  In
 * particular, it's used to notify us of device status changes during boot.
 */
static void handle_output(unsigned long addr)
{
	struct device *i;

	/* Check each device. */
	for (i = devices.dev; i; i = i->next) {
		struct virtqueue *vq;

		/*
		 * Notifications to device descriptors mean they updated the
		 * device status.
		 */
		if (from_guest_phys(addr) == i->desc) {
			update_device_status(i);
			return;
		}

		/* Devices should not be used before features are finalized. */
		for (vq = i->vq; vq; vq = vq->next) {
			if (addr != vq->config.pfn*getpagesize())
				continue;
			errx(1, "Notification on %s before setup!", i->name);
		}
	}

	/*
	 * Early console write is done using notify on a nul-terminated string
	 * in Guest memory.  It's also great for hacking debugging messages
	 * into a Guest.
	 */
	if (addr >= guest_limit)
		errx(1, "Bad NOTIFY %#lx", addr);

	write(STDOUT_FILENO, from_guest_phys(addr),
	      strnlen(from_guest_phys(addr), guest_limit - addr));
}

/*L:217
 * We do PCI.  This is mainly done to let us test the kernel virtio PCI
 * code.
 */

/* The IO ports used to read the PCI config space. */
#define PCI_CONFIG_ADDR 0xCF8
#define PCI_CONFIG_DATA 0xCFC

/*
 * Not really portable, but does help readability: this is what the Guest
 * writes to the PCI_CONFIG_ADDR IO port.
 */
union pci_config_addr {
	struct {
		unsigned mbz: 2;
		unsigned offset: 6;
		unsigned funcnum: 3;
		unsigned devnum: 5;
		unsigned busnum: 8;
		unsigned reserved: 7;
		unsigned enabled : 1;
	} bits;
	u32 val;
};
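
/*
 * As a worked example (nothing below depends on these particular numbers):
 * to read BAR0 of device 3 on bus 0, the Guest writes
 *
 *	  0x80000000		enabled
 *	| (0 << 16)		bus 0
 *	| (3 << 11)		device 3
 *	| (0 << 8)		function 0
 *	| 0x10			config space offset 0x10 (dword-aligned)
 *	= 0x80001810
 *
 * to PCI_CONFIG_ADDR, then reads 4 bytes from PCI_CONFIG_DATA; bits.offset
 * ends up holding dword index 4.
 */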

/*
 * We cache what they wrote to the address port, so we know what they're
 * talking about when they access the data port.
 */
static union pci_config_addr pci_config_addr;

static struct device *find_pci_device(unsigned int index)
{
	return devices.pci[index];
}

/* PCI can do 1, 2 and 4 byte reads; we handle that here. */
static void ioread(u16 off, u32 v, u32 mask, u32 *val)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	*val = (v >> (off * 8)) & mask;
}

/* PCI can do 1, 2 and 4 byte writes; we handle that here. */
static void iowrite(u16 off, u32 v, u32 mask, u32 *dst)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	*dst &= ~(mask << (off * 8));
	*dst |= (v & mask) << (off * 8);
}

/*
 * Where a PCI_CONFIG_DATA access goes depends on the previous write to
 * PCI_CONFIG_ADDR.
 */
static struct device *dev_and_reg(u32 *reg)
{
	if (!pci_config_addr.bits.enabled)
		return NULL;

	if (pci_config_addr.bits.funcnum != 0)
		return NULL;

	if (pci_config_addr.bits.busnum != 0)
		return NULL;

	if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config))
		return NULL;

	*reg = pci_config_addr.bits.offset;
	return find_pci_device(pci_config_addr.bits.devnum);
}

/* Is this accessing the PCI config address port? */
static bool is_pci_addr_port(u16 port)
{
	return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4;
}

static bool pci_addr_iowrite(u16 port, u32 mask, u32 val)
{
	iowrite(port - PCI_CONFIG_ADDR, val, mask,
		&pci_config_addr.val);
	verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n",
		pci_config_addr.bits.enabled ? "" : " DISABLED",
		val, mask,
		pci_config_addr.bits.busnum,
		pci_config_addr.bits.devnum,
		pci_config_addr.bits.funcnum,
		pci_config_addr.bits.offset);
	return true;
}

static void pci_addr_ioread(u16 port, u32 mask, u32 *val)
{
	ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val);
}

/* Is this accessing the PCI config data port? */
static bool is_pci_data_port(u16 port)
{
	return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4;
}

static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
{
	u32 reg, portoff;
	struct device *d = dev_and_reg(&reg);

	/* Complain if they don't belong to a device. */
	if (!d)
		return false;

	/* They can do 1 byte writes, etc. */
	portoff = port - PCI_CONFIG_DATA;

	/*
	 * PCI uses a weird way to determine the BAR size: the OS
	 * writes all 1's, and sees which ones stick.
	 */
	if (&d->config_words[reg] == &d->config.bar[0]) {
		int i;

		iowrite(portoff, val, mask, &d->config.bar[0]);
		for (i = 0; (1 << i) < d->mmio_size; i++)
			d->config.bar[0] &= ~(1 << i);
		return true;
	} else if ((&d->config_words[reg] > &d->config.bar[0]
		    && &d->config_words[reg] <= &d->config.bar[6])
		   || &d->config_words[reg] == &d->config.expansion_rom_addr) {
		/* Allow writing to any other BAR, or expansion ROM */
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
		/* We let them override latency timer and cacheline size */
	} else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
		/* Only let them change the first two fields. */
		if (mask == 0xFFFFFFFF)
			mask = 0xFFFF;
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	} else if (&d->config_words[reg] == (void *)&d->config.command
		   && mask == 0xFFFF) {
		/* Ignore command writes. */
		return true;
	}

	/* Complain about other writes. */
	return false;
}

static void pci_data_ioread(u16 port, u32 mask, u32 *val)
{
	u32 reg;
	struct device *d = dev_and_reg(&reg);

	if (!d)
		return;
	ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val);
}

/*L:216
 * This is where we emulate a handful of Guest instructions.  It's ugly
 * and we used to do it in the kernel but it grew over time.
 */

/*
 * We use the ptrace syscall's pt_regs struct to talk about registers
 * to lguest: these macros convert the names to the offsets.
 */
#define getreg(name) getreg_off(offsetof(struct user_regs_struct, name))
#define setreg(name, val) \
	setreg_off(offsetof(struct user_regs_struct, name), (val))

static u32 getreg_off(size_t offset)
{
	u32 r;
	unsigned long args[] = { LHREQ_GETREG, offset };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Getting register %u", offset);
	if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r))
		err(1, "Reading register %u", offset);

	return r;
}

static void setreg_off(size_t offset, u32 val)
{
	unsigned long args[] = { LHREQ_SETREG, offset, val };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Setting register %u", offset);
}
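
/*
 * For instance, getreg(eip) expands to getreg_off(offsetof(struct
 * user_regs_struct, eip)): a pwrite() asks the kernel for that register
 * slot of this cpu, and the following pread() fetches the value back
 * through /dev/lguest.
 */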

/* Get register by instruction encoding */
static u32 getreg_num(unsigned regnum, u32 mask)
{
	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4))
		return getreg_num(regnum & 0x3, 0xFFFF) >> 8;

	switch (regnum) {
	case 0: return getreg(eax) & mask;
	case 1: return getreg(ecx) & mask;
	case 2: return getreg(edx) & mask;
	case 3: return getreg(ebx) & mask;
	case 4: return getreg(esp) & mask;
	case 5: return getreg(ebp) & mask;
	case 6: return getreg(esi) & mask;
	case 7: return getreg(edi) & mask;
	}
	abort();
}

/* Set register by instruction encoding */
static void setreg_num(unsigned regnum, u32 val, u32 mask)
{
	/* Don't try to set bits out of range */
	assert(~(val & ~mask));

	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4)) {
		/* Construct the 16 bits we want. */
		val = (val << 8) | getreg_num(regnum & 0x3, 0xFF);
		setreg_num(regnum & 0x3, val, 0xFFFF);
		return;
	}

	switch (regnum) {
	case 0: setreg(eax, val | (getreg(eax) & ~mask)); return;
	case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return;
	case 2: setreg(edx, val | (getreg(edx) & ~mask)); return;
	case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return;
	case 4: setreg(esp, val | (getreg(esp) & ~mask)); return;
	case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return;
	case 6: setreg(esi, val | (getreg(esi) & ~mask)); return;
	case 7: setreg(edi, val | (getreg(edi) & ~mask)); return;
	}
	abort();
}

/* Get bytes of displacement appended to instruction, from r/m encoding */
static u32 insn_displacement_len(u8 mod_reg_rm)
{
	/* Switch on the mod bits */
	switch (mod_reg_rm >> 6) {
	case 0:
		/* If mod == 0, and r/m == 101, 16-bit displacement follows */
		if ((mod_reg_rm & 0x7) == 0x5)
			return 2;
		/* Normally, mod == 0 means no literal displacement */
		return 0;
	case 1:
		/* One byte displacement */
		return 1;
	case 2:
		/* Four byte displacement */
		return 4;
	case 3:
		/* Register mode */
		return 0;
	}
	abort();
}
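
/*
 * For example, insn_displacement_len(0x45) is 1 (mod == 1, a single
 * displacement byte), while anything 0xC0 or above has mod == 3, names a
 * register directly and carries no displacement at all.
 */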

static void emulate_insn(const u8 insn[])
{
	unsigned long args[] = { LHREQ_TRAP, 13 };
	unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access;
	unsigned int eax, port, mask;
	/*
	 * Default is to return all-ones on IO port reads, which traditionally
	 * means "there's nothing there".
	 */
	u32 val = 0xFFFFFFFF;

	/*
	 * This must be the Guest kernel trying to do something, not userspace!
	 * The bottom two bits of the CS segment register are the privilege
	 * level.
	 */
	if ((getreg(xcs) & 3) != 0x1)
		goto no_emulate;

	/* Decoding x86 instructions is icky. */

	/*
	 * Around 2.6.33, the kernel started using an emulation for the
	 * cmpxchg8b instruction in early boot on many configurations.  This
	 * code isn't paravirtualized, and it tries to disable interrupts.
	 * Ignore it, which will Mostly Work.
	 */
	if (insn[insnlen] == 0xfa) {
		/* "cli", or Clear Interrupt Enable instruction.  Skip it. */
		insnlen = 1;
		goto skip_insn;
	}

	/*
	 * 0x66 is an "operand prefix".  It means a 16, not 32 bit in/out.
	 */
	if (insn[insnlen] == 0x66) {
		small_operand = 1;
		/* The instruction is 1 byte so far, read the next byte. */
		insnlen = 1;
	}

	/* If the lower bit isn't set, it's a single byte access */
	byte_access = !(insn[insnlen] & 1);

	/*
	 * Now we can ignore the lower bit and decode the 4 opcodes
	 * we need to emulate.
	 */
	switch (insn[insnlen] & 0xFE) {
	case 0xE4: /* in     <next byte>,%al */
		port = insn[insnlen+1];
		insnlen += 2;
		in = 1;
		break;
	case 0xEC: /* in     (%dx),%al */
		port = getreg(edx) & 0xFFFF;
		insnlen += 1;
		in = 1;
		break;
	case 0xE6: /* out    %al,<next byte> */
		port = insn[insnlen+1];
		insnlen += 2;
		break;
	case 0xEE: /* out    %al,(%dx) */
		port = getreg(edx) & 0xFFFF;
		insnlen += 1;
		break;
	default:
		/* OK, we don't know what this is, can't emulate. */
		goto no_emulate;
	}

	/* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */
	if (byte_access)
		mask = 0xFF;
	else if (small_operand)
		mask = 0xFFFF;
	else
		mask = 0xFFFFFFFF;

	/*
	 * If it was an "IN" instruction, they expect the result to be read
	 * into %eax, so we change %eax.
	 */
	eax = getreg(eax);

	if (in) {
		/* This is the PS/2 keyboard status; 1 means ready for output */
		if (port == 0x64)
			val = 1;
		else if (is_pci_addr_port(port))
			pci_addr_ioread(port, mask, &val);
		else if (is_pci_data_port(port))
			pci_data_ioread(port, mask, &val);

		/* Clear the bits we're about to read */
		eax &= ~mask;
		/* Copy bits in from val. */
		eax |= val & mask;
		/* Now update the register. */
		setreg(eax, eax);
	} else {
		if (is_pci_addr_port(port)) {
			if (!pci_addr_iowrite(port, mask, eax))
				goto bad_io;
		} else if (is_pci_data_port(port)) {
			if (!pci_data_iowrite(port, mask, eax))
				goto bad_io;
		}
		/* There are many other ports, eg. CMOS clock, serial
		 * and parallel ports, so we ignore them all. */
	}

	verbose("IO %s of %x to %u: %#08x\n",
		in ? "IN" : "OUT", mask, port, eax);
skip_insn:
	/* Finally, we've "done" the instruction, so move past it. */
	setreg(eip, getreg(eip) + insnlen);
	return;

bad_io:
	warnx("Attempt to %s port %u (%#x mask)",
	      in ? "read from" : "write to", port, mask);

no_emulate:
	/* Inject trap into Guest. */
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip));
}
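
/*
 * To make emulate_insn() concrete: the two-byte sequence E4 64 is
 * "in $0x64,%al".  We decode port 0x64 and a byte-sized access (mask 0xFF),
 * fake the PS/2 status value 1, merge it into the low byte of %eax and
 * advance %eip by two, so the Guest never knows the port wasn't really
 * there.
 */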

static struct device *find_mmio_region(unsigned long paddr, u32 *off)
{
	unsigned int i;

	for (i = 1; i < MAX_PCI_DEVICES; i++) {
		struct device *d = devices.pci[i];

		if (!d)
			continue;
		if (paddr < d->mmio_addr)
			continue;
		if (paddr >= d->mmio_addr + d->mmio_size)
			continue;
		*off = paddr - d->mmio_addr;
		return d;
	}
	return NULL;
}

/* FIXME: Use vq array. */
static struct virtqueue *vq_by_num(struct device *d, u32 num)
{
	struct virtqueue *vq = d->vq;

	while (num-- && vq)
		vq = vq->next;

	return vq;
}

static void save_vq_config(const struct virtio_pci_common_cfg *cfg,
			   struct virtqueue *vq)
{
	vq->pci_config = *cfg;
}

static void restore_vq_config(struct virtio_pci_common_cfg *cfg,
			      struct virtqueue *vq)
{
	/* Only restore the per-vq part */
	size_t off = offsetof(struct virtio_pci_common_cfg, queue_size);

	memcpy((void *)cfg + off, (void *)&vq->pci_config + off,
	       sizeof(*cfg) - off);
}

/*
 * When they enable the virtqueue, we check that their setup is valid.
 */
static void enable_virtqueue(struct device *d, struct virtqueue *vq)
{
	/*
	 * Create stack for thread.  Since the stack grows downwards, we point
	 * the stack pointer to the end of this region.
	 */
	char *stack = malloc(32768);

	/* Because lguest is 32 bit, all the descriptor high bits must be 0 */
	if (vq->pci_config.queue_desc_hi
	    || vq->pci_config.queue_avail_hi
	    || vq->pci_config.queue_used_hi)
		errx(1, "%s: invalid 64-bit queue address", d->name);

	/* Initialize the virtqueue and check they're all in range. */
	vq->vring.num = vq->pci_config.queue_size;
	vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo,
				       sizeof(*vq->vring.desc) * vq->vring.num);
	vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo,
					sizeof(*vq->vring.avail)
					+ (sizeof(vq->vring.avail->ring[0])
					   * vq->vring.num));
	vq->vring.used = check_pointer(vq->pci_config.queue_used_lo,
				       sizeof(*vq->vring.used)
				       + (sizeof(vq->vring.used->ring[0])
					  * vq->vring.num));


	/* Create a zero-initialized eventfd. */
	vq->eventfd = eventfd(0, 0);
	if (vq->eventfd < 0)
		err(1, "Creating eventfd");

	/*
	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
	 * we get a signal if it dies.
	 */
	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
	if (vq->thread == (pid_t)-1)
		err(1, "Creating clone");
}

static void reset_pci_device(struct device *dev)
{
	/* FIXME */
}

static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
{
	struct virtqueue *vq;

	switch (off) {
	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
		if (val == 0)
			d->mmio->cfg.device_feature = d->features;
		else if (val == 1)
			d->mmio->cfg.device_feature = (d->features >> 32);
		else
			d->mmio->cfg.device_feature = 0;
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
		if (val > 1)
			errx(1, "%s: Unexpected driver select %u",
			     d->name, val);
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
		if (d->mmio->cfg.guest_feature_select == 0) {
			d->features_accepted &= ~((u64)0xFFFFFFFF);
			d->features_accepted |= val;
		} else {
			assert(d->mmio->cfg.guest_feature_select == 1);
			d->features_accepted &= ((u64)0xFFFFFFFF << 32);
			d->features_accepted |= ((u64)val) << 32;
		}
		if (d->features_accepted & ~d->features)
			errx(1, "%s: over-accepted features %#llx of %#llx",
			     d->name, d->features_accepted, d->features);
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.device_status):
		verbose("%s: device status -> %#x\n", d->name, val);
		if (val == 0)
			reset_pci_device(d);
		goto write_through8;
	case offsetof(struct virtio_pci_mmio, cfg.queue_select):
		vq = vq_by_num(d, val);
		/* Out of range?  Return size 0 */
		if (!vq) {
			d->mmio->cfg.queue_size = 0;
			goto write_through16;
		}
		/* Save registers for old vq, if it was a valid vq */
		if (d->mmio->cfg.queue_size)
			save_vq_config(&d->mmio->cfg,
				       vq_by_num(d, d->mmio->cfg.queue_select));
		/* Restore the registers for the queue they asked for */
		restore_vq_config(&d->mmio->cfg, vq);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_size):
		if (val & (val-1))
			errx(1, "%s: invalid queue size %u\n", d->name, val);
		if (d->mmio->cfg.queue_enable)
			errx(1, "%s: changing queue size on live device",
			     d->name);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
		errx(1, "%s: attempt to set MSIX vector to %u",
		     d->name, val);
	case offsetof(struct virtio_pci_mmio, cfg.queue_enable):
		if (val != 1)
			errx(1, "%s: setting queue_enable to %u", d->name, val);
		d->mmio->cfg.queue_enable = val;
		save_vq_config(&d->mmio->cfg,
			       vq_by_num(d, d->mmio->cfg.queue_select));
		enable_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select));
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
		errx(1, "%s: attempt to write to queue_notify_off", d->name);
	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi):
	case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi):
		if (d->mmio->cfg.queue_enable)
			errx(1, "%s: changing queue on live device",
			     d->name);
		goto write_through32;
	case offsetof(struct virtio_pci_mmio, notify):
		vq = vq_by_num(d, val);
		if (!vq)
			errx(1, "Invalid vq notification on %u", val);
		/* Notify the process handling this vq by adding 1 to eventfd */
		write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, isr):
		errx(1, "%s: Unexpected write to isr", d->name);
	default:
		errx(1, "%s: Unexpected write to offset %u", d->name, off);
	}

write_through32:
	if (mask != 0xFFFFFFFF) {
		errx(1, "%s: non-32-bit write to offset %u (%#x)",
		     d->name, off, getreg(eip));
		return;
	}
	memcpy((char *)d->mmio + off, &val, 4);
	return;

write_through16:
	if (mask != 0xFFFF)
		errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)",
		     d->name, mask, off, getreg(eip));
	memcpy((char *)d->mmio + off, &val, 2);
	return;

write_through8:
	if (mask != 0xFF)
		errx(1, "%s: non-8-bit write to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy((char *)d->mmio + off, &val, 1);
	return;
}
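
/*
 * Illustrative example of the feature windows above: our 64-bit feature set
 * always contains VIRTIO_F_VERSION_1 (bit 32), so a driver that writes 1 to
 * device_feature_select and then reads device_feature sees bit 0 set in that
 * upper window.  Device-specific feature bits such as VIRTIO_BLK_F_FLUSH
 * (bit 9) live in the lower window selected by writing 0.
 */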

static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
{
	u8 isr;
	u32 val = 0;

	switch (off) {
	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
	case offsetof(struct virtio_pci_mmio, cfg.device_feature):
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
		goto read_through32;
	case offsetof(struct virtio_pci_mmio, cfg.msix_config):
		errx(1, "%s: read of msix_config", d->name);
	case offsetof(struct virtio_pci_mmio, cfg.num_queues):
		goto read_through16;
	case offsetof(struct virtio_pci_mmio, cfg.device_status):
	case offsetof(struct virtio_pci_mmio, cfg.config_generation):
		goto read_through8;
	case offsetof(struct virtio_pci_mmio, notify):
		goto read_through16;
	case offsetof(struct virtio_pci_mmio, isr):
		if (mask != 0xFF)
			errx(1, "%s: non-8-bit read from offset %u (%#x)",
			     d->name, off, getreg(eip));
		/* Read resets the isr */
		isr = d->mmio->isr;
		d->mmio->isr = 0;
		return isr;
	case offsetof(struct virtio_pci_mmio, padding):
		errx(1, "%s: read from padding (%#x)",
		     d->name, getreg(eip));
	default:
		/* Read from device config space, beware unaligned overflow */
		if (off > d->mmio_size - 4)
			errx(1, "%s: read past end (%#x)",
			     d->name, getreg(eip));
		if (mask == 0xFFFFFFFF)
			goto read_through32;
		else if (mask == 0xFFFF)
			goto read_through16;
		else
			goto read_through8;
	}

read_through32:
	if (mask != 0xFFFFFFFF)
		errx(1, "%s: non-32-bit read to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&val, (char *)d->mmio + off, 4);
	return val;

read_through16:
	if (mask != 0xFFFF)
		errx(1, "%s: non-16-bit read to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&val, (char *)d->mmio + off, 2);
	return val;

read_through8:
	if (mask != 0xFF)
		errx(1, "%s: non-8-bit read to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&val, (char *)d->mmio + off, 1);
	return val;
}

static void emulate_mmio(unsigned long paddr, const u8 *insn)
{
	u32 val, off, mask = 0xFFFFFFFF, insnlen = 0;
	struct device *d = find_mmio_region(paddr, &off);
	unsigned long args[] = { LHREQ_TRAP, 14 };

	if (!d) {
		warnx("MMIO touching %#08lx (not a device)", paddr);
		goto reinject;
	}

	/* Prefix makes it a 16 bit op */
	if (insn[0] == 0x66) {
		mask = 0xFFFF;
		insnlen++;
	}

	/* iowrite */
	if (insn[insnlen] == 0x89) {
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[insnlen] == 0x8b) { /* ioread */
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[0] == 0x88) { /* 8-bit iowrite */
		mask = 0xff;
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = getreg_num((insn[1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else if (insn[0] == 0x8a) { /* 8-bit ioread */
		mask = 0xff;
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[1] >> 3) & 0x7, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else {
		warnx("Unknown MMIO instruction touching %#08lx:"
		     " %02x %02x %02x %02x at %u",
		     paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip));
	reinject:
		/* Inject trap into Guest. */
		if (write(lguest_fd, args, sizeof(args)) < 0)
			err(1, "Reinjecting trap 14 for fault at %#x",
			    getreg(eip));
		return;
	}

	/* Finally, we've "done" the instruction, so move past it. */
	setreg(eip, getreg(eip) + insnlen);
}
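
/*
 * A decoding example (illustrative): the Guest instruction bytes 66 89 01 are
 * "mov %ax,(%ecx)".  The 0x66 prefix shrinks the mask to 0xFFFF, 0x89 is the
 * register-to-memory move, and ModRM byte 0x01 has reg=0 (the %eax family)
 * with no displacement, so we fetch %ax via getreg_num(0, 0xFFFF), do a
 * 16-bit MMIO write, and advance %eip by 3 bytes.
 */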

/*L:190
 * Device Setup
 *
 * All devices need a descriptor so the Guest knows it exists, and a "struct
 * device" so the Launcher can keep track of it.  We have common helper
 * routines to allocate and manage them.
 */

/*
 * The layout of the device page is a "struct lguest_device_desc" followed by a
 * number of virtqueue descriptors, then two sets of feature bits, then an
 * array of configuration bytes.  This routine returns the configuration
 * pointer.
 */
static u8 *device_config(const struct device *dev)
{
	return (void *)(dev->desc + 1)
		+ dev->num_vq * sizeof(struct lguest_vqconfig)
		+ dev->feature_len * 2;
}

/*
 * This routine allocates a new "struct lguest_device_desc" from the
 * descriptor table page just above the Guest's normal memory.  It returns a
 * pointer to that descriptor.
 */
static struct lguest_device_desc *new_dev_desc(u16 type)
{
	struct lguest_device_desc d = { .type = type };
	void *p;

	/* Figure out where the next device config is, based on the last one. */
	if (devices.lastdev)
		p = device_config(devices.lastdev)
			+ devices.lastdev->desc->config_len;
	else
		p = devices.descpage;

	/* We only have one page for all the descriptors. */
	if (p + sizeof(d) > (void *)devices.descpage + getpagesize())
		errx(1, "Too many devices");

	/* p might not be aligned, so we memcpy in. */
	return memcpy(p, &d, sizeof(d));
}
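
/*
 * A worked example (illustrative): for the first device registered, p is the
 * very start of the descriptor page; once that device has, say, two
 * virtqueues and no feature or config bytes, device_config() points
 * sizeof(struct lguest_device_desc) + 2 * sizeof(struct lguest_vqconfig)
 * bytes in, and that is where the next device's descriptor begins.
 */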

/*
 * Each device descriptor is followed by the description of its virtqueues.  We
 * specify how many descriptors the virtqueue is to have.
 */
static void add_virtqueue(struct device *dev, unsigned int num_descs,
			  void (*service)(struct virtqueue *))
{
	unsigned int pages;
	struct virtqueue **i, *vq = malloc(sizeof(*vq));
	void *p;

	/* First we need some memory for this virtqueue. */
	pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1)
		/ getpagesize();
	p = get_pages(pages);

	/* Initialize the virtqueue */
	vq->next = NULL;
	vq->last_avail_idx = 0;
	vq->dev = dev;

	/*
	 * This is the routine the service thread will run, and its Process ID
	 * once it's running.
	 */
	vq->service = service;
	vq->thread = (pid_t)-1;

	/* Initialize the configuration. */
	vq->config.num = num_descs;
	vq->config.irq = devices.next_irq++;
	vq->config.pfn = to_guest_phys(p) / getpagesize();

	/* Initialize the vring. */
	vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN);

	/*
	 * Append virtqueue to this device's descriptor.  We use
	 * device_config() to get the end of the device's current virtqueues;
	 * we check that we haven't added any config or feature information
	 * yet, otherwise we'd be overwriting them.
	 */
	assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0);
	memcpy(device_config(dev), &vq->config, sizeof(vq->config));
	dev->num_vq++;
	dev->desc->num_vq++;

	verbose("Virtqueue page %#lx\n", to_guest_phys(p));

	/*
	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
	 * second.
	 */
	for (i = &dev->vq; *i; i = &(*i)->next);
	*i = vq;
}
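
/*
 * Sizing sketch (illustrative, using the standard vring layout): with
 * VIRTQUEUE_NUM = 256 descriptors the descriptor table is 256 * 16 = 4096
 * bytes, the available ring is 518 bytes and the used ring 2054 bytes; after
 * LGUEST_VRING_ALIGN padding, vring_size() comes to a bit over 10KB, so the
 * calculation above asks get_pages() for three pages.
 */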

static void add_pci_virtqueue(struct device *dev,
			      void (*service)(struct virtqueue *))
{
	struct virtqueue **i, *vq = malloc(sizeof(*vq));

	/* Initialize the virtqueue */
	vq->next = NULL;
	vq->last_avail_idx = 0;
	vq->dev = dev;

	/*
	 * This is the routine the service thread will run, and its Process ID
	 * once it's running.
	 */
	vq->service = service;
	vq->thread = (pid_t)-1;

	/* Initialize the configuration. */
	vq->pci_config.queue_size = VIRTQUEUE_NUM;
	vq->pci_config.queue_enable = 0;
	vq->pci_config.queue_notify_off = 0;

	/* Add one to the number of queues */
	vq->dev->mmio->cfg.num_queues++;

	/* FIXME: Do irq per virtqueue, not per device. */
	vq->config.irq = vq->dev->config.irq_line;

	/*
	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
	 * second.
	 */
	for (i = &dev->vq; *i; i = &(*i)->next);
	*i = vq;
}

/*
 * The first half of the feature bitmask is for us to advertise features.  The
 * second half is for the Guest to accept features.
 */
static void add_feature(struct device *dev, unsigned bit)
{
	u8 *features = get_feature_bits(dev);

	/* We can't extend the feature bits once we've added config bytes */
	if (dev->desc->feature_len <= bit / CHAR_BIT) {
		assert(dev->desc->config_len == 0);
		dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1;
	}

	features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT));
}

static void add_pci_feature(struct device *dev, unsigned bit)
{
	dev->features |= (1ULL << bit);
}

/*
 * This routine sets the configuration fields for an existing device's
 * descriptor.  It only works for the last device, but that's OK because that's
 * how we use it.
 */
static void set_config(struct device *dev, unsigned len, const void *conf)
{
	/* Check we haven't overflowed our single page. */
	if (device_config(dev) + len > devices.descpage + getpagesize())
		errx(1, "Too many devices");

	/* Copy in the config information, and store the length. */
	memcpy(device_config(dev), conf, len);
	dev->desc->config_len = len;

	/* Size must fit in config_len field (8 bits)! */
	assert(dev->desc->config_len == len);
}

/* For devices with no config. */
static void no_device_config(struct device *dev)
{
	dev->mmio_addr = get_mmio_region(dev->mmio_size);

	dev->config.bar[0] = dev->mmio_addr;
	/* Bottom 4 bits must be zero */
	assert(!(dev->config.bar[0] & 0xF));
}

/* This puts the device config into BAR0 */
static void set_device_config(struct device *dev, const void *conf, size_t len)
{
	/* Set up BAR 0 */
	dev->mmio_size += len;
	dev->mmio = realloc(dev->mmio, dev->mmio_size);
	memcpy(dev->mmio + 1, conf, len);

	/* Hook up device cfg */
	dev->config.cfg_access.cap.cap_next
		= offsetof(struct pci_config, device);

	/* Fix up device cfg field length. */
	dev->config.device.length = len;

	/* The rest is the same as the no-config case */
	no_device_config(dev);
}

static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
		     size_t bar_offset, size_t bar_bytes, u8 next)
{
	cap->cap_vndr = PCI_CAP_ID_VNDR;
	cap->cap_next = next;
	cap->cap_len = caplen;
	cap->cfg_type = type;
	cap->bar = 0;
	memset(cap->padding, 0, sizeof(cap->padding));
	cap->offset = bar_offset;
	cap->length = bar_bytes;
}

/*
 * This sets up the pci_config structure, as defined in the virtio 1.0
 * standard (and PCI standard).
 */
static void init_pci_config(struct pci_config *pci, u16 type,
			    u8 class, u8 subclass)
{
	size_t bar_offset, bar_len;

	/* Save typing: most things are happy being zero. */
	memset(pci, 0, sizeof(*pci));

	/* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
	pci->vendor_id = 0x1AF4;
	/* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
	pci->device_id = 0x1040 + type;

	/*
	 * PCI have specific codes for different types of devices.
	 * Linux doesn't care, but it's a good clue for people looking
	 * at the device.
	 *
	 * eg :
	 *  VIRTIO_ID_CONSOLE: class = 0x07, subclass = 0x00
	 *  VIRTIO_ID_NET: class = 0x02, subclass = 0x00
	 *  VIRTIO_ID_BLOCK: class = 0x01, subclass = 0x80
	 *  VIRTIO_ID_RNG: class = 0xff, subclass = 0
	 */
	pci->class = class;
	pci->subclass = subclass;

	/*
	 * 4.1.2.1 Non-transitional devices SHOULD have a PCI Revision
	 * ID of 1 or higher
	 */
	pci->revid = 1;

	/*
	 * 4.1.2.1 Non-transitional devices SHOULD have a PCI
	 * Subsystem Device ID of 0x40 or higher.
	 */
	pci->subsystem_device_id = 0x40;

	/* We use our dummy interrupt controller, and irq_line is the irq */
	pci->irq_line = devices.next_irq++;
	pci->irq_pin = 0;

	/* Support for extended capabilities. */
	pci->status = (1 << 4);

	/* Link them in. */
	pci->capabilities = offsetof(struct pci_config, common);

	bar_offset = offsetof(struct virtio_pci_mmio, cfg);
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
	init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, notify));

	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);
	/* FIXME: Use a non-zero notify_off, for per-queue notification? */
	init_cap(&pci->notify.cap, sizeof(pci->notify),
		 VIRTIO_PCI_CAP_NOTIFY_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, isr));

	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);
	init_cap(&pci->isr, sizeof(pci->isr),
		 VIRTIO_PCI_CAP_ISR_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, cfg_access));

	/* This doesn't have any presence in the BAR */
	init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
		 VIRTIO_PCI_CAP_PCI_CFG,
		 0, 0, 0);

	bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
	assert(bar_offset == sizeof(struct virtio_pci_mmio));

	/*
	 * This gets sewn in and length set in set_device_config().
	 * Some devices don't have a device configuration interface, so
	 * we never expose this if we don't call set_device_config().
	 */
	init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
		 bar_offset, 0, 0);
}
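
/*
 * The resulting BAR0 layout, as a rough sketch (offsets follow struct
 * virtio_pci_mmio, so treat the picture as illustrative):
 *
 *	+0x00	cfg	common config	(described by the "common" capability)
 *	 ...	notify	u16 doorbell	(the "notify" capability)
 *	 ...	isr	u8, read-to-clear (the "isr" capability)
 *	 ...	padding
 *	 ...	device-specific config appended by set_device_config()
 *		(the "device" capability, whose length is fixed up there)
 */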

/*
 * This routine does all the creation and setup of a new device, including
 * calling new_dev_desc() to allocate the descriptor and device memory.  We
 * don't actually start the service threads until later.
 *
 * See what I mean about userspace being boring?
 */
static struct device *new_device(const char *name, u16 type)
{
	struct device *dev = malloc(sizeof(*dev));

	/* Now we populate the fields one at a time. */
	dev->desc = new_dev_desc(type);
	dev->name = name;
	dev->vq = NULL;
	dev->feature_len = 0;
	dev->num_vq = 0;
	dev->running = false;
	dev->next = NULL;

	/*
	 * Append to device list.  Prepending to a single-linked list is
	 * easier, but the user expects the devices to be arranged on the bus
	 * in command-line order.  The first network device on the command line
	 * is eth0, the first block device /dev/vda, etc.
	 */
	if (devices.lastdev)
		devices.lastdev->next = dev;
	else
		devices.dev = dev;
	devices.lastdev = dev;

	return dev;
}

static struct device *new_pci_device(const char *name, u16 type,
				     u8 class, u8 subclass)
{
	struct device *dev = malloc(sizeof(*dev));

	/* Now we populate the fields one at a time. */
	dev->desc = NULL;
	dev->name = name;
	dev->vq = NULL;
	dev->feature_len = 0;
	dev->num_vq = 0;
	dev->running = false;
	dev->next = NULL;
	dev->mmio_size = sizeof(struct virtio_pci_mmio);
	dev->mmio = calloc(1, dev->mmio_size);
	dev->features = (u64)1 << VIRTIO_F_VERSION_1;
	dev->features_accepted = 0;

	if (devices.device_num + 1 >= 32)
		errx(1, "Can only handle 31 PCI devices");

	init_pci_config(&dev->config, type, class, subclass);
	assert(!devices.pci[devices.device_num+1]);
	devices.pci[++devices.device_num] = dev;

	return dev;
}

/*
 * Our first setup routine is the console.  It's a fairly simple device, but
 * UNIX tty handling makes it uglier than it could be.
 */
static void setup_console(void)
{
	struct device *dev;

	/* If we can save the initial standard input settings... */
	if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
		struct termios term = orig_term;
		/*
		 * Then we turn off echo, line buffering and ^C etc: We want a
		 * raw input stream to the Guest.
		 */
		term.c_lflag &= ~(ISIG|ICANON|ECHO);
		tcsetattr(STDIN_FILENO, TCSANOW, &term);
	}

	dev = new_device("console", VIRTIO_ID_CONSOLE);

	/* We store the console state in dev->priv, and initialize it. */
	dev->priv = malloc(sizeof(struct console_abort));
	((struct console_abort *)dev->priv)->count = 0;

	/*
	 * The console needs two virtqueues: the input then the output.  When
	 * they put something in the input queue, we make sure we're listening
	 * to stdin.  When they put something in the output queue, we write it
	 * to stdout.
	 */
	add_virtqueue(dev, VIRTQUEUE_NUM, console_input);
	add_virtqueue(dev, VIRTQUEUE_NUM, console_output);

	verbose("device %u: console\n", ++devices.device_num);
}
/*:*/

/*M:010
 * Inter-guest networking is an interesting area.  Simplest is to have a
 * --sharenet=<name> option which opens or creates a named pipe.  This can be
 * used to send packets to another guest in a 1:1 manner.
 *
 * More sophisticated is to use one of the tools developed for projects like
 * UML to do networking.
 *
 * Faster is to do virtio bonding in kernel.  Doing this 1:1 would be
 * completely generic ("here's my vring, attach to your vring") and would work
 * for any traffic.  Of course, namespace and permissions issues need to be
 * dealt with.  A more sophisticated "multi-channel" virtio_net.c could hide
 * multiple inter-guest channels behind one interface, although it would
 * require some manner of hotplugging new virtio channels.
 *
 * Finally, we could use a virtio network switch in the kernel, ie. vhost.
:*/

static u32 str2ip(const char *ipaddr)
{
	unsigned int b[4];

	if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
		errx(1, "Failed to parse IP address '%s'", ipaddr);
	return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

static void str2mac(const char *macaddr, unsigned char mac[6])
{
	unsigned int m[6];
	if (sscanf(macaddr, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6)
		errx(1, "Failed to parse mac address '%s'", macaddr);
	mac[0] = m[0];
	mac[1] = m[1];
	mac[2] = m[2];
	mac[3] = m[3];
	mac[4] = m[4];
	mac[5] = m[5];
}
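
/*
 * For example (illustrative): str2ip("192.168.19.1") yields 0xC0A81301, the
 * address in host byte order, which configure_device() below converts with
 * htonl() before handing it to the SIOCSIFADDR ioctl.
 */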

/*
 * This code is "adapted" from libbridge: it attaches the Host end of the
 * network device to the bridge device specified by the command line.
 *
 * This is yet another James Morris contribution (I'm an IP-level guy, so I
 * dislike bridging), and I just try not to break it.
 */
static void add_to_bridge(int fd, const char *if_name, const char *br_name)
{
	int ifidx;
	struct ifreq ifr;

	if (!*br_name)
		errx(1, "must specify bridge name");

	ifidx = if_nametoindex(if_name);
	if (!ifidx)
		errx(1, "interface %s does not exist!", if_name);

	strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
	ifr.ifr_name[IFNAMSIZ-1] = '\0';
	ifr.ifr_ifindex = ifidx;
	if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
		err(1, "can't add %s to bridge %s", if_name, br_name);
}

/*
 * This sets up the Host end of the network device with an IP address and
 * brings it up so packets will flow.
 */
static void configure_device(int fd, const char *tapif, u32 ipaddr)
{
	struct ifreq ifr;
	struct sockaddr_in sin;

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, tapif);

	/* Don't read these incantations.  Just cut & paste them like I did! */
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(ipaddr);
	memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
		err(1, "Setting %s interface address", tapif);
	ifr.ifr_flags = IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
		err(1, "Bringing interface %s up", tapif);
}

static int get_tun_device(char tapif[IFNAMSIZ])
{
	struct ifreq ifr;
	int netfd;

	/* Start with this zeroed.  Messy but sure. */
	memset(&ifr, 0, sizeof(ifr));

	/*
	 * We open the /dev/net/tun device and tell it we want a tap device.  A
	 * tap device is like a tun device, only somehow different.  To tell
	 * the truth, I completely blundered my way through this code, but it
	 * works now!
	 */
	netfd = open_or_die("/dev/net/tun", O_RDWR);
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring /dev/net/tun");

	if (ioctl(netfd, TUNSETOFFLOAD,
		  TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0)
		err(1, "Could not set features for tun device");

	/*
	 * We don't need checksums calculated for packets coming in this
	 * device: trust us!
	 */
	ioctl(netfd, TUNSETNOCSUM, 1);

	memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
	return netfd;
}

/*L:195
 * Our network is a Host<->Guest network.  This can either use bridging or
 * routing, but the principle is the same: it uses the "tun" device to inject
 * packets into the Host as if they came in from a normal network card.  We
 * just shunt packets between the Guest and the tun device.
 */
static void setup_tun_net(char *arg)
{
	struct device *dev;
	struct net_info *net_info = malloc(sizeof(*net_info));
	int ipfd;
	u32 ip = INADDR_ANY;
	bool bridging = false;
	char tapif[IFNAMSIZ], *p;
	struct virtio_net_config conf;

	net_info->tunfd = get_tun_device(tapif);

	/* First we create a new network device. */
	dev = new_device("net", VIRTIO_ID_NET);
	dev->priv = net_info;

	/* Network devices need a recv and a send queue, just like console. */
	add_virtqueue(dev, VIRTQUEUE_NUM, net_input);
	add_virtqueue(dev, VIRTQUEUE_NUM, net_output);

	/*
	 * We need a socket to perform the magic network ioctls to bring up the
	 * tap interface, connect to the bridge etc.  Any socket will do!
	 */
	ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	if (ipfd < 0)
		err(1, "opening IP socket");

	/* If the command line was --tunnet=bridge:<name> do bridging. */
	if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
		arg += strlen(BRIDGE_PFX);
		bridging = true;
	}

	/* A mac address may follow the bridge name or IP address */
	p = strchr(arg, ':');
	if (p) {
		str2mac(p+1, conf.mac);
		add_feature(dev, VIRTIO_NET_F_MAC);
		*p = '\0';
	}

	/* arg is now either an IP address or a bridge name */
	if (bridging)
		add_to_bridge(ipfd, tapif, arg);
	else
		ip = str2ip(arg);

	/* Set up the tun device. */
	configure_device(ipfd, tapif, ip);

	/* Expect Guest to handle everything except UFO */
	add_feature(dev, VIRTIO_NET_F_CSUM);
	add_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
	add_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
	add_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
	add_feature(dev, VIRTIO_NET_F_GUEST_ECN);
	add_feature(dev, VIRTIO_NET_F_HOST_TSO4);
	add_feature(dev, VIRTIO_NET_F_HOST_TSO6);
	add_feature(dev, VIRTIO_NET_F_HOST_ECN);
	/* We handle indirect ring entries */
	add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
	/* We're compliant with the damn spec. */
	add_feature(dev, VIRTIO_F_ANY_LAYOUT);
	set_config(dev, sizeof(conf), &conf);

	/* We don't need the socket any more; setup is done. */
	close(ipfd);

	devices.device_num++;

	if (bridging)
		verbose("device %u: tun %s attached to bridge: %s\n",
			devices.device_num, tapif, arg);
	else
		verbose("device %u: tun %s: %s\n",
			devices.device_num, tapif, arg);
}
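
/*
 * Illustrative argument forms accepted above: "--tunnet=192.168.19.1" routes
 * via a host IP, "--tunnet=bridge:br0" attaches the tap to bridge br0, and
 * either form may append ":<mac>" (e.g. ":0a:00:27:00:00:01") to set the
 * Guest's MAC via the VIRTIO_NET_F_MAC config field.
 */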
/*:*/

/* This hangs off device->priv. */
struct vblk_info {
	/* The size of the file. */
	off64_t len;

	/* The file descriptor for the file. */
	int fd;

};

/*L:210
 * The Disk
 *
 * The disk only has one virtqueue, so it only has one thread.  It is really
 * simple: the Guest asks for a block number and we read or write that position
 * in the file.
 *
 * Before we serviced each virtqueue in a separate thread, that was unacceptably
 * slow: the Guest waits until the read is finished before running anything
 * else, even if it could have been doing useful work.
 *
 * We could have used async I/O, except it's reputed to suck so hard that
 * characters actually go missing from your code when you try to use it.
 */
static void blk_request(struct virtqueue *vq)
{
	struct vblk_info *vblk = vq->dev->priv;
	unsigned int head, out_num, in_num, wlen;
	int ret, i;
	u8 *in;
	struct virtio_blk_outhdr out;
	struct iovec iov[vq->vring.num];
	off64_t off;

	/*
	 * Get the next request, where we normally wait.  It triggers the
	 * interrupt to acknowledge previously serviced requests (if any).
	 */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);

	/* Copy the output header from the front of the iov (adjusts iov) */
	iov_consume(iov, out_num, &out, sizeof(out));

	/* Find and trim end of iov input array, for our status byte. */
	in = NULL;
	for (i = out_num + in_num - 1; i >= out_num; i--) {
		if (iov[i].iov_len > 0) {
			in = iov[i].iov_base + iov[i].iov_len - 1;
			iov[i].iov_len--;
			break;
		}
	}
	if (!in)
		errx(1, "Bad virtblk cmd with no room for status");

	/*
	 * For historical reasons, block operations are expressed in 512 byte
	 * "sectors".
	 */
	off = out.sector * 512;

	/*
	 * In general the virtio block driver is allowed to try SCSI commands.
	 * It'd be nice if we supported eject, for example, but we don't.
	 */
	if (out.type & VIRTIO_BLK_T_SCSI_CMD) {
		fprintf(stderr, "Scsi commands unsupported\n");
		*in = VIRTIO_BLK_S_UNSUPP;
		wlen = sizeof(*in);
	} else if (out.type & VIRTIO_BLK_T_OUT) {
		/*
		 * Write
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to write past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out.sector);

		ret = writev(vblk->fd, iov, out_num);
		verbose("WRITE to sector %llu: %i\n", out.sector, ret);

		/*
		 * Grr... Now we know how long the descriptor they sent was, we
		 * make sure they didn't try to write over the end of the block
		 * file (possibly extending it).
		 */
		if (ret > 0 && off + ret > vblk->len) {
			/* Trim it back to the correct length */
			ftruncate64(vblk->fd, vblk->len);
			/* Die, bad Guest, die. */
			errx(1, "Write past end %llu+%u", off, ret);
		}

		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else if (out.type & VIRTIO_BLK_T_FLUSH) {
		/* Flush */
		ret = fdatasync(vblk->fd);
		verbose("FLUSH fdatasync: %i\n", ret);
		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else {
		/*
		 * Read
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to read past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out.sector);

		ret = readv(vblk->fd, iov + out_num, in_num);
		if (ret >= 0) {
			wlen = sizeof(*in) + ret;
			*in = VIRTIO_BLK_S_OK;
		} else {
			wlen = sizeof(*in);
			*in = VIRTIO_BLK_S_IOERR;
		}
	}

	/* Finished that request. */
	add_used(vq, head, wlen);
}

/*L:198 This actually sets up a virtual block device. */
static void setup_block_file(const char *filename)
{
	struct device *dev;
	struct vblk_info *vblk;
	struct virtio_blk_config conf;

	/* Create the device. */
	dev = new_device("block", VIRTIO_ID_BLOCK);

	/* The device has one virtqueue, where the Guest places requests. */
	add_virtqueue(dev, VIRTQUEUE_NUM, blk_request);

	/* Allocate the room for our own bookkeeping */
	vblk = dev->priv = malloc(sizeof(*vblk));

	/* First we open the file and store the length. */
	vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
	vblk->len = lseek64(vblk->fd, 0, SEEK_END);

	/* We support FLUSH. */
	add_feature(dev, VIRTIO_BLK_F_FLUSH);

	/* Tell Guest how many sectors this device has. */
	conf.capacity = cpu_to_le64(vblk->len / 512);

	/*
	 * Tell Guest not to put in too many descriptors at once: two are used
	 * for the in and out elements.
	 */
	add_feature(dev, VIRTIO_BLK_F_SEG_MAX);
	conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2);

	/* Don't try to put whole struct: we have 8 bit limit. */
	set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf);

	verbose("device %u: virtblock %llu sectors\n",
		++devices.device_num, le64_to_cpu(conf.capacity));
}
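
/*
 * For example (illustrative): a 1 GiB backing file gives vblk->len of
 * 1073741824 bytes, so conf.capacity is 2097152 sectors of 512 bytes each,
 * which is what the Guest's /dev/vda will report.
 */
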
/*L:211
 * Our random number generator device reads from /dev/urandom into the Guest's
 * input buffers.  The usual case is that the Guest doesn't want random numbers
 * and so has no buffers although /dev/urandom is still readable, whereas
 * console is the reverse.
 *
 * The same logic applies, however.
 */
struct rng_info {
	int rfd;
};

static void rng_input(struct virtqueue *vq)
{
	int len;
	unsigned int head, in_num, out_num, totlen = 0;
	struct rng_info *rng_info = vq->dev->priv;
	struct iovec iov[vq->vring.num];

	/* First we need a buffer from the Guest's virtqueue. */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
	if (out_num)
		errx(1, "Output buffers in rng?");

	/*
	 * Just like the console write, we loop to cover the whole iovec.
	 * In this case, short reads actually happen quite a bit.
	 */
	while (!iov_empty(iov, in_num)) {
		len = readv(rng_info->rfd, iov, in_num);
		if (len <= 0)
			err(1, "Read from /dev/urandom gave %i", len);
		iov_consume(iov, in_num, NULL, len);
		totlen += len;
	}

	/* Tell the Guest about the new input. */
	add_used(vq, head, totlen);
}

/*L:199
 * This creates a "hardware" random number device for the Guest.
 */
static void setup_rng(void)
{
	struct device *dev;
	struct rng_info *rng_info = malloc(sizeof(*rng_info));

	/* Our device's private info simply contains the /dev/urandom fd. */
	rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY);

	/* Create the new device. */
	dev = new_device("rng", VIRTIO_ID_RNG);
	dev->priv = rng_info;

	/* The device has one virtqueue, where the Guest places inbufs. */
	add_virtqueue(dev, VIRTQUEUE_NUM, rng_input);

	verbose("device %u: rng\n", devices.device_num++);
}
/* That's the end of device setup. */

/*L:230 Reboot is pretty easy: clean up and exec() the Launcher afresh. */
static void __attribute__((noreturn)) restart_guest(void)
{
	unsigned int i;

	/*
	 * Since we don't track all open fds, we simply close everything beyond
	 * stderr.
	 */
	for (i = 3; i < FD_SETSIZE; i++)
		close(i);

	/* Reset all the devices (kills all threads). */
	cleanup_devices();

	execv(main_args[0], main_args);
	err(1, "Could not exec %s", main_args[0]);
}

/*L:220
 * Finally we reach the core of the Launcher which runs the Guest, serves
 * its input and output, and finally, lays it to rest.
 */
static void __attribute__((noreturn)) run_guest(void)
{
	for (;;) {
		struct lguest_pending notify;
		int readval;

		/* We read from the /dev/lguest device to run the Guest. */
		readval = pread(lguest_fd, &notify, sizeof(notify), cpu_id);

		/* A full lguest_pending structure means the Guest trapped. */
		if (readval == sizeof(notify)) {
			if (notify.trap == 0x1F) {
				verbose("Notify on address %#08x\n",
					notify.addr);
				handle_output(notify.addr);
			} else if (notify.trap == 13) {
				verbose("Emulating instruction at %#x\n",
					getreg(eip));
				emulate_insn(notify.insn);
			} else if (notify.trap == 14) {
				verbose("Emulating MMIO at %#x\n",
					getreg(eip));
				emulate_mmio(notify.addr, notify.insn);
			} else
				errx(1, "Unknown trap %i addr %#08x\n",
				     notify.trap, notify.addr);
		/* ENOENT means the Guest died.  Reading tells us why. */
		} else if (errno == ENOENT) {
			char reason[1024] = { 0 };
			pread(lguest_fd, reason, sizeof(reason)-1, cpu_id);
			errx(1, "%s", reason);
		/* ERESTART means that we need to reboot the guest */
		} else if (errno == ERESTART) {
			restart_guest();
		/* Anything else means a bug or incompatible change. */
		} else
			err(1, "Running guest failed");
	}
}
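
/*
 * For reference (stated here as a reminder, not taken from elsewhere in this
 * file): trap 13 is the x86 General Protection Fault, which is how I/O port
 * instructions reach emulate_insn(); trap 14 is the Page Fault, which is how
 * device MMIO reaches emulate_mmio(); and 0x1F is the lguest hypercall trap,
 * which is how Guest notifications reach handle_output().
 */
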
/*L:240
 * This is the end of the Launcher.  The good news: we are over halfway
 * through!  The bad news: the most fiendish part of the code still lies ahead
 * of us.
 *
 * Are you ready?  Take a deep breath and join me in the core of the Host, in
 * "make Host".
:*/

static struct option opts[] = {
	{ "verbose", 0, NULL, 'v' },
	{ "tunnet", 1, NULL, 't' },
	{ "block", 1, NULL, 'b' },
	{ "rng", 0, NULL, 'r' },
	{ "initrd", 1, NULL, 'i' },
	{ "username", 1, NULL, 'u' },
	{ "chroot", 1, NULL, 'c' },
	{ NULL },
};
static void usage(void)
{
	errx(1, "Usage: lguest [--verbose] "
	     "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n"
	     "|--block=<filename>|--initrd=<filename>]...\n"
	     "<mem-in-mb> vmlinux [args...]");
}
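
/*
 * An illustrative invocation (one of many possible forms):
 *
 *	lguest 64 vmlinux --tunnet=192.168.19.1 --block=rootfile root=/dev/vda
 *
 * boots "vmlinux" with 64MB of memory, one network device and one block
 * device, passing "root=/dev/vda" on the kernel command line.
 */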

/*L:105 The main routine is where the real work begins: */
int main(int argc, char *argv[])
{
	/* Memory, code startpoint and size of the (optional) initrd. */
	unsigned long mem = 0, start, initrd_size = 0;
	/* Two temporaries. */
	int i, c;
	/* The boot information for the Guest. */
	struct boot_params *boot;
	/* If they specify an initrd file to load. */
	const char *initrd_name = NULL;

	/* Password structure for initgroups/setres[gu]id */
	struct passwd *user_details = NULL;

	/* Directory to chroot to */
	char *chroot_path = NULL;

	/* Save the args: we "reboot" by execing ourselves again. */
	main_args = argv;

	/*
	 * First we initialize the device list.  We keep a pointer to the last
	 * device, and the next interrupt number to use for devices (1:
	 * remember that 0 is used by the timer).
	 */
	devices.lastdev = NULL;
	devices.next_irq = 1;

	/* We're CPU 0.  In fact, that's the only CPU possible right now. */
	cpu_id = 0;

	/*
	 * We need to know how much memory so we can set up the device
	 * descriptor and memory pages for the devices as we parse the command
	 * line.  So we quickly look through the arguments to find the amount
	 * of memory now.
	 */
	for (i = 1; i < argc; i++) {
		if (argv[i][0] != '-') {
			mem = atoi(argv[i]) * 1024 * 1024;
			/*
			 * We start by mapping anonymous pages over all of the
			 * guest-physical memory range.  This fills it with 0,
			 * and ensures that the Guest won't be killed when it
			 * tries to access it.
			 */
			guest_base = map_zeroed_pages(mem / getpagesize()
						      + DEVICE_PAGES);
			guest_limit = mem;
			guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize();
			devices.descpage = get_pages(1);
			break;
		}
	}

	/* The options are fairly straight-forward */
	while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
		switch (c) {
		case 'v':
			verbose = true;
			break;
		case 't':
			setup_tun_net(optarg);
			break;
		case 'b':
			setup_block_file(optarg);
			break;
		case 'r':
			setup_rng();
			break;
		case 'i':
			initrd_name = optarg;
			break;
		case 'u':
			user_details = getpwnam(optarg);
			if (!user_details)
				err(1, "getpwnam failed, incorrect username?");
			break;
		case 'c':
			chroot_path = optarg;
			break;
		default:
			warnx("Unknown argument %s", argv[optind]);
			usage();
		}
	}
	/*
	 * After the other arguments we expect memory and kernel image name,
	 * followed by command line arguments for the kernel.
	 */
	if (optind + 2 > argc)
		usage();

	verbose("Guest base is at %p\n", guest_base);

	/* We always have a console device */
	setup_console();

	/* Now we load the kernel */
	start = load_kernel(open_or_die(argv[optind+1], O_RDONLY));

	/* Boot information is stashed at physical address 0 */
	boot = from_guest_phys(0);

	/* Map the initrd image if requested (at top of physical memory) */
	if (initrd_name) {
		initrd_size = load_initrd(initrd_name, mem);
		/*
		 * These are the locations in the Linux boot header where the
		 * start and size of the initrd are expected to be found.
		 */
		boot->hdr.ramdisk_image = mem - initrd_size;
		boot->hdr.ramdisk_size = initrd_size;
		/* The bootloader type 0xFF means "unknown"; that's OK. */
		boot->hdr.type_of_loader = 0xFF;
	}

	/*
	 * The Linux boot header contains an "E820" memory map: ours is a
	 * simple, single region.
	 */
	boot->e820_entries = 1;
	boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM });
	/*
	 * The boot header contains a command line pointer: we put the command
	 * line after the boot header.
	 */
	boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1);
	/* We use a simple helper to copy the arguments separated by spaces. */
	concat((char *)(boot + 1), argv+optind+2);

	/* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */
	boot->hdr.kernel_alignment = 0x1000000;

	/* Boot protocol version: 2.07 supports the fields for lguest. */
	boot->hdr.version = 0x207;

	/* The hardware_subarch value of "1" tells the Guest it's an lguest. */
	boot->hdr.hardware_subarch = 1;

	/* Tell the entry path not to try to reload segment registers. */
	boot->hdr.loadflags |= KEEP_SEGMENTS;

	/* We tell the kernel to initialize the Guest. */
	tell_kernel(start);

	/* Ensure that we terminate if a device-servicing child dies. */
	signal(SIGCHLD, kill_launcher);

	/* If we exit via err(), this kills all the threads, restores tty. */
	atexit(cleanup_devices);

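	/*
	 * Note on the layout set up above (illustrative numbers): booting
	 * with "lguest 64 vmlinux ..." gives mem = 64MB, so the single E820
	 * entry covers guest-physical 0 to 0x4000000 as RAM, while the
	 * DEVICE_PAGES mapped just above guest_limit never appear in the
	 * Guest's memory map.
	 */
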
	/* If requested, chroot to a directory */
	if (chroot_path) {
		if (chroot(chroot_path) != 0)
			err(1, "chroot(\"%s\") failed", chroot_path);

		if (chdir("/") != 0)
			err(1, "chdir(\"/\") failed");

		verbose("chroot done\n");
	}

	/* If requested, drop privileges */
	if (user_details) {
		uid_t u;
		gid_t g;

		u = user_details->pw_uid;
		g = user_details->pw_gid;

		if (initgroups(user_details->pw_name, g) != 0)
			err(1, "initgroups failed");

		if (setresgid(g, g, g) != 0)
			err(1, "setresgid failed");

		if (setresuid(u, u, u) != 0)
			err(1, "setresuid failed");

		verbose("Dropping privileges completed\n");
	}

	/* Finally, run the Guest.  This doesn't return. */
	run_guest();
}
/*:*/

/*M:999
 * Mastery is done: you now know everything I do.
 *
 * But surely you have seen code, features and bugs in your wanderings which
 * you now yearn to attack?  That is the real game, and I look forward to you
 * patching and forking lguest into the Your-Name-Here-visor.
 *
 * Farewell, and good coding!
 * Rusty Russell.
 */