lguest.c 94.6 KB
Newer Older
R
Rusty Russell 已提交
1 2 3 4 5 6
/*P:100
 * This is the Launcher code, a simple program which lays out the "physical"
 * memory for the new Guest by mapping the kernel image and the virtual
 * devices, then opens /dev/lguest to tell the kernel about the Guest and
 * control it.
:*/
7 8 9 10 11 12 13 14 15 16
#define _LARGEFILE64_SOURCE
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <err.h>
#include <stdint.h>
#include <stdlib.h>
#include <elf.h>
#include <sys/mman.h>
17
#include <sys/param.h>
18 19 20
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
21
#include <sys/eventfd.h>
22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
#include <fcntl.h>
#include <stdbool.h>
#include <errno.h>
#include <ctype.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <time.h>
#include <netinet/in.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/if_tun.h>
#include <sys/uio.h>
#include <termios.h>
#include <getopt.h>
R
Rusty Russell 已提交
37 38
#include <assert.h>
#include <sched.h>
39 40
#include <limits.h>
#include <stddef.h>
41
#include <signal.h>
42 43
#include <pwd.h>
#include <grp.h>
44
#include <sys/user.h>
45
#include <linux/pci_regs.h>
46

47 48 49 50
#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT		27
#endif

R
Rusty Russell 已提交
51
/*L:110
R
Rusty Russell 已提交
52
 * We can ignore the 43 include files we need for this program, but I do want
R
Rusty Russell 已提交
53
 * to draw attention to the use of kernel-style types.
54 55 56 57
 *
 * As Linus said, "C is a Spartan language, and so should your naming be."  I
 * like these abbreviations, so we define them here.  Note that u64 is always
 * unsigned long long, which works on all Linux systems: this means that we can
R
Rusty Russell 已提交
58 59
 * use %llu in printf for any u64.
 */
60 61 62 63
typedef unsigned long long u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
64
/*:*/
65

66
#define VIRTIO_CONFIG_NO_LEGACY
67
#define VIRTIO_PCI_NO_LEGACY
68
#define VIRTIO_BLK_NO_LEGACY
69 70 71

/* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */
#include "../../include/uapi/linux/virtio_config.h"
72
#include "../../include/uapi/linux/virtio_net.h"
73
#include "../../include/uapi/linux/virtio_blk.h"
74
#include "../../include/uapi/linux/virtio_console.h"
75
#include "../../include/uapi/linux/virtio_rng.h"
76
#include <linux/virtio_ring.h>
77
#include "../../include/uapi/linux/virtio_pci.h"
78 79 80
#include <asm/bootparam.h>
#include "../../include/linux/lguest_launcher.h"

81 82 83 84
#define BRIDGE_PFX "bridge:"
#ifndef SIOCBRADDIF
#define SIOCBRADDIF	0x89a2		/* add interface to bridge      */
#endif
85 86
/* We can have up to 256 pages for devices. */
#define DEVICE_PAGES 256
R
Rusty Russell 已提交
87 88
/* This will occupy 3 pages: it must be a power of 2. */
#define VIRTQUEUE_NUM 256
89

R
Rusty Russell 已提交
90 91 92 93
/*L:120
 * verbose is both a global flag and a macro.  The C preprocessor allows
 * this, and although I wouldn't recommend it, it works quite nicely here.
 */
94 95 96
static bool verbose;
#define verbose(args...) \
	do { if (verbose) printf(args); } while(0)
97 98
/*:*/

99 100 101
/* The pointer to the start of guest memory. */
static void *guest_base;
/* The maximum guest physical address allowed, and maximum possible. */
102
static unsigned long guest_limit, guest_max, guest_mmio;
103 104
/* The /dev/lguest file descriptor. */
static int lguest_fd;
105

106 107 108
/* a per-cpu variable indicating whose vcpu is currently running */
static unsigned int __thread cpu_id;

109 110 111
/* 5 bit device number in the PCI_CONFIG_ADDR => 32 only */
#define MAX_PCI_DEVICES 32

112
/* This is our list of devices. */
struct device_list {
	/* Counter to assign interrupt numbers. */
	unsigned int next_irq;

	/* Counter to print out convenient device numbers. */
	unsigned int device_num;

	/* PCI devices, indexed by the 5-bit PCI device number. */
	struct device *pci[MAX_PCI_DEVICES];
};

R
Rusty Russell 已提交
124 125 126
/* The list of Guest devices, based on command line arguments. */
static struct device_list devices;

127 128
/*
 * The VIRTIO_PCI_CAP_PCI_CFG capability: lets the driver reach BAR
 * registers indirectly through ordinary PCI config space accesses.
 */
struct virtio_pci_cfg_cap {
	struct virtio_pci_cap cap;
	u32 pci_cfg_data; /* Data for BAR access. */
};

/* Layout of our BAR0 MMIO region: common config, then notify and ISR. */
struct virtio_pci_mmio {
	struct virtio_pci_common_cfg cfg;
	u16 notify;  /* Presumably written by the Guest to kick a queue — handler not in this chunk. */
	u8 isr;      /* Set to 0x1 before we interrupt the Guest (see trigger_irq()). */
	u8 padding;
	/* Device-specific configuration follows this. */
};

140 141 142 143 144 145 146 147 148 149 150 151 152
/* This is the layout (little-endian) of the PCI config space. */
struct pci_config {
	/* These fields mirror the standard PCI type-0 configuration header. */
	u16 vendor_id, device_id;
	u16 command, status;
	u8 revid, prog_if, subclass, class;
	u8 cacheline_size, lat_timer, header_type, bist;
	u32 bar[6];
	u32 cardbus_cis_ptr;
	u16 subsystem_vendor_id, subsystem_device_id;
	u32 expansion_rom_addr;
	u8 capabilities, reserved1[3];
	u32 reserved2;
	u8 irq_line, irq_pin, min_grant, max_latency;

	/* Now, this is the linked capability list. */
	struct virtio_pci_cap common;
	struct virtio_pci_notify_cap notify;
	struct virtio_pci_cap isr;
	struct virtio_pci_cap device;
	struct virtio_pci_cfg_cap cfg_access;
};

162
/* The device structure describes a single device. */
struct device {
	/* The name of this device, for --verbose. */
	const char *name;

	/* Any queues attached to this device */
	struct virtqueue *vq;

	/* Is it operational */
	bool running;

	/* Has it written FEATURES_OK but not re-checked it? */
	bool wrote_features_ok;

	/*
	 * PCI configuration: the same bytes are visible either as the
	 * structured header or as an array of 32-bit words (handy for
	 * word-at-a-time config space accesses).
	 */
	union {
		struct pci_config config;
		u32 config_words[sizeof(struct pci_config) / sizeof(u32)];
	};

	/* Features we offer, and those accepted. */
	u64 features, features_accepted;

	/* Device-specific config hangs off the end of this. */
	struct virtio_pci_mmio *mmio;

	/* PCI MMIO resources (all in BAR0) */
	size_t mmio_size;
	u32 mmio_addr;

	/* Device-specific data. */
	void *priv;
};

R
Rusty Russell 已提交
196
/* The virtqueue structure describes a queue attached to a device. */
struct virtqueue {
	/* Next queue in the owning device's list of virtqueues. */
	struct virtqueue *next;

	/* Which device owns me. */
	struct device *dev;

	/* Name for printing errors. */
	const char *name;

	/* The actual ring of buffers. */
	struct vring vring;

	/* The information about this virtqueue (we only use queue_size on) */
	struct virtio_pci_common_cfg pci_config;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* How many are used since we sent last irq? */
	unsigned int pending_used;

	/* Eventfd where Guest notifications arrive. */
	int eventfd;

	/* Function for the thread which is servicing this virtqueue. */
	void (*service)(struct virtqueue *vq);
	pid_t thread;
};

B
Balaji Rao 已提交
226 227 228
/* Remember the arguments to the program so we can "reboot" */
static char **main_args;

229 230 231
/* The original tty settings to restore on exit. */
static struct termios orig_term;

R
Rusty Russell 已提交
232 233
/*
 * We have to be careful with barriers: our devices are all run in separate
234
 * threads and so we need to make sure that changes visible to the Guest happen
R
Rusty Russell 已提交
235 236
 * in precise order.
 */
237
#define wmb() __asm__ __volatile__("" : : : "memory")
R
Rusty Russell 已提交
238 239
#define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)" : : : "memory")
R
Rusty Russell 已提交
240

241 242 243
/* Wrapper for the last available index.  Makes it easier to change. */
#define lg_last_avail(vq)	((vq)->last_avail_idx)

R
Rusty Russell 已提交
244 245 246 247
/*
 * The virtio configuration space is defined to be little-endian.  x86 is
 * little-endian too, but it's nice to be explicit so we have these helpers.
 */
R
Rusty Russell 已提交
248 249 250 251 252
#define cpu_to_le16(v16) (v16)
#define cpu_to_le32(v32) (v32)
#define cpu_to_le64(v64) (v64)
#define le16_to_cpu(v16) (v16)
#define le32_to_cpu(v32) (v32)
253
#define le64_to_cpu(v64) (v64)
R
Rusty Russell 已提交
254

R
Rusty Russell 已提交
255 256 257 258 259 260 261 262 263 264 265 266
/* Does this iovec describe zero bytes in total? */
static bool iov_empty(const struct iovec iov[], unsigned int num_iov)
{
	unsigned int n;

	for (n = 0; n < num_iov; n++) {
		if (iov[n].iov_len > 0)
			return false;
	}
	return true;
}

/*
 * Strip "len" bytes off the front of the iovec, copying them into
 * "dest" if it's non-NULL.  Dies if the iovec holds fewer bytes.
 */
static void iov_consume(struct iovec iov[], unsigned num_iov,
			void *dest, unsigned len)
{
	unsigned int n;

	for (n = 0; n < num_iov; n++) {
		unsigned int take;

		/* Take as much of this element as "len" still needs. */
		take = (iov[n].iov_len < len) ? iov[n].iov_len : len;
		if (dest) {
			memcpy(dest, iov[n].iov_base, take);
			dest += take;
		}
		/* Advance this element past the consumed bytes. */
		iov[n].iov_base += take;
		iov[n].iov_len -= take;
		len -= take;
	}
	if (len != 0)
		errx(1, "iovec too short!");
}

R
Rusty Russell 已提交
288 289 290 291 292 293
/*L:100
 * The Launcher code itself takes us out into userspace, that scary place where
 * pointers run wild and free!  Unfortunately, like most userspace programs,
 * it's quite boring (which is why everyone likes to hack on the kernel!).
 * Perhaps if you make up an Lguest Drinking Game at this point, it will get
 * you through this section.  Or, maybe not.
294 295 296 297 298 299
 *
 * The Launcher sets up a big chunk of memory to be the Guest's "physical"
 * memory and stores it in "guest_base".  In other words, Guest physical ==
 * Launcher virtual with an offset.
 *
 * This can be tough to get your head around, but usually it just means that we
300
 * use these trivial conversion functions when the Guest gives us its
R
Rusty Russell 已提交
301 302
 * "physical" addresses:
 */
303 304 305 306 307 308 309 310 311 312
/* Turn a Guest "physical" address into a Launcher virtual pointer. */
static void *from_guest_phys(unsigned long addr)
{
	return guest_base + addr;
}

/* And the reverse: Launcher virtual pointer to Guest "physical" address. */
static unsigned long to_guest_phys(const void *addr)
{
	return (addr - guest_base);
}

313 314 315 316
/*L:130
 * Loading the Kernel.
 *
 * We start with couple of simple helper routines.  open_or_die() avoids
R
Rusty Russell 已提交
317 318
 * error-checking code cluttering the callers:
 */
319 320 321 322 323 324 325 326
/*
 * Open a file or die trying: keeps error-checking clutter out of
 * every call site.
 */
static int open_or_die(const char *name, int flags)
{
	int ret = open(name, flags);

	if (ret < 0)
		err(1, "Failed to open %s", name);
	return ret;
}

327 328
/* map_zeroed_pages() takes a number of pages. */
static void *map_zeroed_pages(unsigned int num)
{
	void *map;
	size_t page = getpagesize();
	int fd = open_or_die("/dev/zero", O_RDONLY);

	/*
	 * Private (copy-on-write) mapping of num+2 pages, initially all
	 * PROT_NONE: the first and last page stay inaccessible as guard
	 * pages, so any stray access just outside the allocation faults.
	 */
	map = mmap(NULL, page * (num + 2), PROT_NONE, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED)
		err(1, "Mmapping %u pages of /dev/zero", num);

	/* Open up the num pages between the guards for read/write. */
	if (mprotect(map + page, page * num, PROT_READ|PROT_WRITE) == -1)
		err(1, "mprotect rw %u pages failed", num);

	/* One neat mmap feature: the mapping survives closing the fd. */
	close(fd);

	/* Hand back the first usable page, just past the low guard. */
	return map + page;
}

358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374
/* Reserve "size" bytes of MMIO space: addresses never mapped into the Guest. */
static unsigned long get_mmio_region(size_t size)
{
	unsigned long start = guest_mmio;
	size_t rounded;

	if (!size)
		return start;

	/* Round the allocation up to a power of 2 (hence a multiple of 16). */
	for (rounded = 1; rounded < size; rounded <<= 1);

	guest_mmio += rounded;

	return start;
}

R
Rusty Russell 已提交
375 376
/*
 * Load part of a file into Guest memory at "addr": mmap if we can,
 * otherwise (eg. Plan 9's kernel file isn't nicely page-aligned) fall
 * back to reading it in.
 */
static void map_at(int fd, void *addr, unsigned long offset, unsigned long len)
{
	ssize_t got;

	/*
	 * Map writable even for segments marked read-only: the kernel
	 * really wants to patch its own instructions.  MAP_PRIVATE delays
	 * the copy until a write happens, so untouched memory stays
	 * shared between Guests.
	 */
	if (mmap(addr, len, PROT_READ|PROT_WRITE,
		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
		return;

	/* pread combines the seek and the read into one call. */
	got = pread(fd, addr, len, offset);
	if (got != len)
		err(1, "Reading offset %lu len %lu gave %zi", offset, len, got);
}

R
Rusty Russell 已提交
403 404
/*
 * This routine takes an open vmlinux image, which is in ELF, and maps it into
 * the Guest memory.  ELF = Embedded Linking Format, which is the format used
 * by all modern binaries on Linux including the kernel.
 *
 * The ELF headers give *two* addresses: a physical address, and a virtual
 * address.  We use the physical address; the Guest will map itself to the
 * virtual address.
 *
 * We return the starting address.
 */
static unsigned long map_elf(int elf_fd, const Elf32_Ehdr *ehdr)
{
	/* Variable-length array: one program header per e_phnum. */
	Elf32_Phdr phdr[ehdr->e_phnum];
	unsigned int i;

	/*
	 * Sanity checks on the main ELF header: an x86 executable with a
	 * reasonable number of correctly-sized program headers.
	 */
	if (ehdr->e_type != ET_EXEC
	    || ehdr->e_machine != EM_386
	    || ehdr->e_phentsize != sizeof(Elf32_Phdr)
	    || ehdr->e_phnum < 1 || ehdr->e_phnum > 65536U/sizeof(Elf32_Phdr))
		errx(1, "Malformed elf header");

	/*
	 * An ELF executable contains an ELF header and a number of "program"
	 * headers which indicate which parts ("segments") of the program to
	 * load where.
	 */

	/* We read in all the program headers at once: */
	if (lseek(elf_fd, ehdr->e_phoff, SEEK_SET) < 0)
		err(1, "Seeking to program headers");
	if (read(elf_fd, phdr, sizeof(phdr)) != sizeof(phdr))
		err(1, "Reading program headers");

	/*
	 * Try all the headers: there are usually only three.  A read-only one,
	 * a read-write one, and a "note" section which we don't load.
	 */
	for (i = 0; i < ehdr->e_phnum; i++) {
		/* If this isn't a loadable segment, we ignore it */
		if (phdr[i].p_type != PT_LOAD)
			continue;

		verbose("Section %i: size %i addr %p\n",
			i, phdr[i].p_memsz, (void *)phdr[i].p_paddr);

		/* We map this section of the file at its physical address. */
		map_at(elf_fd, from_guest_phys(phdr[i].p_paddr),
		       phdr[i].p_offset, phdr[i].p_filesz);
	}

	/* The entry point is given in the ELF header. */
	return ehdr->e_entry;
}

R
Rusty Russell 已提交
462 463 464 465
/*L:150
 * A bzImage, unlike an ELF file, is not meant to be loaded.  You're supposed
 * to jump into it and it will unpack itself.  We used to have to perform some
 * hairy magic because the unpacking code scared me.
466
 *
R
Rusty Russell 已提交
467 468
 * Fortunately, Jeremy Fitzhardinge convinced me it wasn't that hard and wrote
 * a small patch to jump over the tricky bits in the Guest, so now we just read
R
Rusty Russell 已提交
469 470
 * the funky header so we know where in the file to load, and away we go!
 */
471
static unsigned long load_bzimage(int fd)
472
{
473
	struct boot_params boot;
R
Rusty Russell 已提交
474 475 476 477
	int r;
	/* Modern bzImages get loaded at 1M. */
	void *p = from_guest_phys(0x100000);

R
Rusty Russell 已提交
478 479
	/*
	 * Go back to the start of the file and read the header.  It should be
P
Paul Bolle 已提交
480
	 * a Linux boot header (see Documentation/x86/boot.txt)
R
Rusty Russell 已提交
481
	 */
R
Rusty Russell 已提交
482
	lseek(fd, 0, SEEK_SET);
483
	read(fd, &boot, sizeof(boot));
R
Rusty Russell 已提交
484

485 486
	/* Inside the setup_hdr, we expect the magic "HdrS" */
	if (memcmp(&boot.hdr.header, "HdrS", 4) != 0)
R
Rusty Russell 已提交
487 488
		errx(1, "This doesn't look like a bzImage to me");

489 490
	/* Skip over the extra sectors of the header. */
	lseek(fd, (boot.hdr.setup_sects+1) * 512, SEEK_SET);
R
Rusty Russell 已提交
491 492 493 494 495

	/* Now read everything into memory. in nice big chunks. */
	while ((r = read(fd, p, 65536)) > 0)
		p += r;

496 497
	/* Finally, code32_start tells us where to enter the kernel. */
	return boot.hdr.code32_start;
498 499
}

R
Rusty Russell 已提交
500 501
/*L:140
 * Loading the kernel is easy when it's a "vmlinux", but most kernels
 * come wrapped up in the self-decompressing "bzImage" format.  With a little
 * work, we can load those, too.
 */
static unsigned long load_kernel(int fd)
{
	Elf32_Ehdr ehdr;

	/* Sniff the first bytes of the file. */
	if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
		err(1, "Reading kernel");

	/* The magic bytes "\177ELF" mean a vmlinux: map it directly. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0)
		return map_elf(fd, &ehdr);

	/* Otherwise we assume it's a bzImage, and try to load it. */
	return load_bzimage(fd);
}

R
Rusty Russell 已提交
521 522
/*
 * This is a trivial little helper to align pages.  Andi Kleen hated it because
 * it calls getpagesize() twice: "it's dumb code."
 *
 * Kernel guys get really het up about optimization, even when it's not
 * necessary.  I leave this code as a reaction against that.
 */
static inline unsigned long page_align(unsigned long addr)
{
	unsigned long mask = getpagesize() - 1;

	/* Round up by adding, then truncate down with the mask. */
	return (addr + mask) & ~mask;
}

R
Rusty Russell 已提交
534 535 536 537 538
/*L:180
 * An "initial ram disk" is a disk image loaded into memory along with the
 * kernel which the kernel can use to boot from without needing any drivers.
 * Most distributions now use this as standard: the initrd contains the code to
 * load the appropriate driver modules for the current machine.
 *
 * Importantly, James Morris works for RedHat, and Fedora uses initrds for its
 * kernels.  He sent me this (and tells me when I break it).
 */
static unsigned long load_initrd(const char *name, unsigned long mem)
{
	int ifd;
	struct stat st;
	unsigned long len;

	ifd = open_or_die(name, O_RDONLY);
	/* fstat() is needed to get the file size. */
	if (fstat(ifd, &st) < 0)
		err(1, "fstat() on initrd '%s'", name);

	/*
	 * We map the initrd at the top of memory, but mmap wants it to be
	 * page-aligned, so we round the size up for that.
	 */
	len = page_align(st.st_size);
	map_at(ifd, from_guest_phys(mem - len), 0, st.st_size);
	/*
	 * Once a file is mapped, you can close the file descriptor.  It's a
	 * little odd, but quite useful.
	 */
	close(ifd);
	verbose("mapped initrd %s size=%lu @ %p\n", name, len, (void*)mem-len);

	/* We return the initrd size. */
	return len;
}
R
Rusty Russell 已提交
570
/*:*/
571

R
Rusty Russell 已提交
572 573 574 575
/*
 * Simple routine to roll all the commandline arguments together with spaces
 * between them.
 */
static void concat(char *dst, char *args[])
{
	unsigned int i, used = 0;

	for (i = 0; args[i]; i++) {
		/* A single space separates consecutive arguments. */
		if (i) {
			strcat(dst+used, " ");
			used++;
		}
		strcpy(dst+used, args[i]);
		used += strlen(args[i]);
	}
	/* In case it's empty. */
	dst[used] = '\0';
}

R
Rusty Russell 已提交
592 593
/*L:185
 * This is where we actually tell the kernel to initialize the Guest.  We
 * saw the arguments it expects when we looked at initialize() in lguest_user.c:
 * the base of Guest "physical" memory, the top physical page to allow and the
 * entry point for the Guest.
 */
static void tell_kernel(unsigned long start)
{
	/* LHREQ_INITIALIZE, then base, page limit, entry and MMIO pages. */
	unsigned long args[] = { LHREQ_INITIALIZE,
				 (unsigned long)guest_base,
				 guest_limit / getpagesize(), start,
				 (guest_mmio+getpagesize()-1) / getpagesize() };
	verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n",
		guest_base, guest_base + guest_limit,
		guest_limit, guest_mmio);
	/* Writing this argument block to /dev/lguest creates the Guest. */
	lguest_fd = open_or_die("/dev/lguest", O_RDWR);
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Writing to /dev/lguest");
}
611
/*:*/
612

R
Rusty Russell 已提交
613
/*L:200
614 615
 * Device Handling.
 *
R
Rusty Russell 已提交
616
 * When the Guest gives us a buffer, it sends an array of addresses and sizes.
617
 * We need to make sure it's not trying to reach into the Launcher itself, so
R
Rusty Russell 已提交
618
 * we have a convenient routine which checks it and exits with an error message
619 620
 * if something funny is going on:
 */
621 622 623
/*
 * Convert a Guest address/size pair into a safe Launcher pointer, dying
 * (with the calling line number in the message) if the range is bad.
 */
static void *_check_pointer(unsigned long addr, unsigned int size,
			    unsigned int line)
{
	/*
	 * Check if the requested address and size exceeds the allocated memory,
	 * or addr + size wraps around.
	 */
	if ((addr + size) > guest_limit || (addr + size) < addr)
		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
	/*
	 * We return a pointer for the caller's convenience, now we know it's
	 * safe to use.
	 */
	return from_guest_phys(addr);
}
/* A macro which transparently hands the line number to the real function. */
#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__)

R
Rusty Russell 已提交
639 640
/*
 * Each buffer in the virtqueues is actually a chain of descriptors.  This
 * function returns the next descriptor in the chain, or vq->vring.num if we're
 * at the end.
 */
static unsigned next_desc(struct vring_desc *desc,
			  unsigned int i, unsigned int max)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc[i].flags & VRING_DESC_F_NEXT))
		return max;

	/* Check they're not leading us off end of descriptors. */
	next = desc[i].next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	wmb();

	/* The Guest controls "next": a bogus value is a fatal Guest bug. */
	if (next >= max)
		errx(1, "Desc next is %u", next);

	return next;
}

R
Rusty Russell 已提交
664 665 666 667
/*
 * This actually sends the interrupt for this virtqueue, if we've used a
 * buffer.
 */
static void trigger_irq(struct virtqueue *vq)
{
	/* The LHREQ_IRQ command plus the interrupt line, for /dev/lguest. */
	unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line };

	/* Don't inform them if nothing used. */
	if (!vq->pending_used)
		return;
	vq->pending_used = 0;

	/*
	 * 2.4.7.1:
	 *
	 *  If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
	 *    The driver MUST set flags to 0 or 1. 
	 */
	if (vq->vring.avail->flags > 1)
		errx(1, "%s: avail->flags = %u\n",
		     vq->dev->name, vq->vring.avail->flags);

	/*
	 * 2.4.7.2:
	 *
	 *  If the VIRTIO_F_EVENT_IDX feature bit is not negotiated:
	 *
	 *     - The device MUST ignore the used_event value.
	 *     - After the device writes a descriptor index into the used ring:
	 *         - If flags is 1, the device SHOULD NOT send an interrupt.
	 *         - If flags is 0, the device MUST send an interrupt.
	 */
	if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) {
		return;
	}

	/*
	 * 4.1.4.5.1:
	 *
	 *  If MSI-X capability is disabled, the device MUST set the Queue
	 *  Interrupt bit in ISR status before sending a virtqueue notification
	 *  to the driver.
	 */
	vq->dev->mmio->isr = 0x1;

	/* Send the Guest an interrupt tell them we used something up. */
	/* NOTE(review): success here is a 0 return from /dev/lguest, not a byte count. */
	if (write(lguest_fd, buf, sizeof(buf)) != 0)
		err(1, "Triggering irq %i", vq->dev->config.irq_line);
}

R
Rusty Russell 已提交
715
/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/*
	 * 2.4.7.1:
	 *
	 *   The driver MUST handle spurious interrupts from the device.
	 *
	 * That's why this is a while loop.
	 */

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/* 
	 * Make sure we read the descriptor number *after* we read the ring
	 * update; don't let the cpu or compiler change the order.
	 */
	rmb();

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * We have to read the descriptor after we read the descriptor number,
	 * but there's a data dependency there so the CPU shouldn't reorder
	 * that: no rmb() required.
	 */

	do {
		/*
		 * If this is an indirect entry, then this buffer contains a
		 * descriptor table which we handle as if it's any normal
		 * descriptor chain.
		 */
		if (desc[i].flags & VRING_DESC_F_INDIRECT) {
			/* 2.4.5.3.1:
			 *
			 *  The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
			 *  flag unless the VIRTIO_F_INDIRECT_DESC feature was
			 *  negotiated.
			 */
			if (!(vq->dev->features_accepted &
			      (1<<VIRTIO_RING_F_INDIRECT_DESC)))
				errx(1, "%s: vq indirect not negotiated",
				     vq->dev->name);

			/*
			 * 2.4.5.3.1:
			 *
			 *   The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT
			 *   flag within an indirect descriptor (ie. only one
			 *   table per descriptor).
			 */
			if (desc != vq->vring.desc)
				errx(1, "%s: Indirect within indirect",
				     vq->dev->name);

			/*
			 * Proposed update VIRTIO-134 spells this out:
			 *
			 *   A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT
			 *   and VIRTQ_DESC_F_NEXT in flags.
			 */
			if (desc[i].flags & VRING_DESC_F_NEXT)
				errx(1, "%s: indirect and next together",
				     vq->dev->name);

			/* The table must be a whole number of descriptors. */
			if (desc[i].len % sizeof(struct vring_desc))
				errx(1, "Invalid size for indirect buffer table");
			/*
			 * 2.4.5.3.2:
			 *
			 *  The device MUST ignore the write-only flag
			 *  (flags&VIRTQ_DESC_F_WRITE) in the descriptor that
			 *  refers to an indirect table.
			 *
			 * We ignore it here: :)
			 */

			/* Switch to walking the indirect table from its start. */
			max = desc[i].len / sizeof(struct vring_desc);
			desc = check_pointer(desc[i].addr, desc[i].len);
			i = 0;

			/* 2.4.5.3.1:
			 *
			 *  A driver MUST NOT create a descriptor chain longer
			 *  than the Queue Size of the device.
			 */
			if (max > vq->pci_config.queue_size)
				errx(1, "%s: indirect has too many entries",
				     vq->dev->name);
		}

		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}

R
Rusty Russell 已提交
895
/*
 * After we've used one of their buffers, we tell the Guest about it.  Sometime
 * later we'll want to send them an interrupt using trigger_irq(); note that
 * wait_for_vq_desc() does that for us if it has to wait.
 */
static void add_used(struct virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/*
	 * The virtqueue contains a ring of used buffers.  Get a pointer to the
	 * next entry in that used ring.
	 */
	used = &vq->vring.used->ring[vq->vring.used->idx % vq->vring.num];
	used->id = head;
	used->len = len;
	/* Make sure buffer is written before we update index. */
	wmb();
	vq->vring.used->idx++;
	/* Remember there's an un-notified used buffer for trigger_irq(). */
	vq->pending_used++;
}

R
Rusty Russell 已提交
917
/* And here's the combo meal deal.  Supersize me! */
/* Mark the buffer used, then immediately offer the Guest an interrupt. */
static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
{
	add_used(vq, head, len);
	trigger_irq(vq);
}

R
Rusty Russell 已提交
924 925 926
/*
 * The Console
 *
 * We associate some data with the console for our exit hack: pressing ^C
 * three times within a second kills the Launcher.
 */
struct console_abort {
	/* How many ^C's they've hit so far. */
	int count;
	/* When the first one of the streak arrived. */
	struct timeval start;
};

936
/* This is the routine which handles console input (ie. stdin). */
937
static void console_input(struct virtqueue *vq)
938 939
{
	int len;
R
Rusty Russell 已提交
940
	unsigned int head, in_num, out_num;
941 942
	struct console_abort *abort = vq->dev->priv;
	struct iovec iov[vq->vring.num];
943

R
Rusty Russell 已提交
944
	/* Make sure there's a descriptor available. */
945
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
946
	if (out_num)
R
Rusty Russell 已提交
947
		errx(1, "Output buffers in console in queue?");
948

R
Rusty Russell 已提交
949
	/* Read into it.  This is where we usually wait. */
950
	len = readv(STDIN_FILENO, iov, in_num);
951
	if (len <= 0) {
952
		/* Ran out of input? */
953
		warnx("Failed to get console input, ignoring console.");
R
Rusty Russell 已提交
954 955 956 957
		/*
		 * For simplicity, dying threads kill the whole Launcher.  So
		 * just nap here.
		 */
958 959
		for (;;)
			pause();
960 961
	}

R
Rusty Russell 已提交
962
	/* Tell the Guest we used a buffer. */
963
	add_used_and_trigger(vq, head, len);
964

R
Rusty Russell 已提交
965 966
	/*
	 * Three ^C within one second?  Exit.
967
	 *
968 969 970
	 * This is such a hack, but works surprisingly well.  Each ^C has to
	 * be in a buffer by itself, so they can't be too fast.  But we check
	 * that we get three within about a second, so they can't be too
R
Rusty Russell 已提交
971 972
	 * slow.
	 */
973
	if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) {
974
		abort->count = 0;
975 976
		return;
	}
977

978 979 980 981 982 983 984 985 986 987 988
	abort->count++;
	if (abort->count == 1)
		gettimeofday(&abort->start, NULL);
	else if (abort->count == 3) {
		struct timeval now;
		gettimeofday(&now, NULL);
		/* Kill all Launcher processes with SIGINT, like normal ^C */
		if (now.tv_sec <= abort->start.tv_sec+1)
			kill(0, SIGINT);
		abort->count = 0;
	}
989 990
}

991 992
/* This is the routine which handles console output (ie. stdout). */
static void console_output(struct virtqueue *vq)
993
{
R
Rusty Russell 已提交
994 995 996
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];

R
Rusty Russell 已提交
997
	/* We usually wait in here, for the Guest to give us something. */
998 999 1000
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in console output queue?");
R
Rusty Russell 已提交
1001 1002

	/* writev can return a partial write, so we loop here. */
1003 1004
	while (!iov_empty(iov, out)) {
		int len = writev(STDOUT_FILENO, iov, out);
1005 1006 1007 1008
		if (len <= 0) {
			warn("Write to stdout gave %i (%d)", len, errno);
			break;
		}
1009
		iov_consume(iov, out, NULL, len);
R
Rusty Russell 已提交
1010
	}
R
Rusty Russell 已提交
1011 1012 1013 1014 1015

	/*
	 * We're finished with that buffer: if we're going to sleep,
	 * wait_for_vq_desc() will prod the Guest with an interrupt.
	 */
1016
	add_used(vq, head, 0);
1017 1018
}

R
Rusty Russell 已提交
1019 1020 1021 1022
/*
 * The Network
 *
 * Network output is also simple: gather all the Guest's output buffers
 * and write them straight to /dev/net/tun.
 */
struct net_info {
	/* File descriptor of the open tun device. */
	int tunfd;
};

static void net_output(struct virtqueue *vq)
1030
{
1031 1032
	struct net_info *net_info = vq->dev->priv;
	unsigned int head, out, in;
R
Rusty Russell 已提交
1033
	struct iovec iov[vq->vring.num];
1034

R
Rusty Russell 已提交
1035
	/* We usually wait in here for the Guest to give us a packet. */
1036 1037 1038
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (in)
		errx(1, "Input buffers in net output queue?");
R
Rusty Russell 已提交
1039 1040 1041 1042
	/*
	 * Send the whole thing through to /dev/net/tun.  It expects the exact
	 * same format: what a coincidence!
	 */
1043
	if (writev(net_info->tunfd, iov, out) < 0)
1044
		warnx("Write to tun failed (%d)?", errno);
R
Rusty Russell 已提交
1045 1046 1047 1048 1049

	/*
	 * Done with that one; wait_for_vq_desc() will send the interrupt if
	 * all packets are processed.
	 */
1050
	add_used(vq, head, 0);
1051 1052
}

R
Rusty Russell 已提交
1053 1054 1055 1056 1057 1058
/*
 * Handling network input is a bit trickier, because I've tried to optimize it.
 *
 * First, a helper which tells us whether a read from this file descriptor
 * (ie. the /dev/net/tun device) would block:
 */
static bool will_block(int fd)
{
	struct timeval poll_now = { 0, 0 };
	fd_set readable;

	FD_ZERO(&readable);
	FD_SET(fd, &readable);
	/* Zero timeout makes select() a pure poll: 1 means data is ready. */
	return select(fd + 1, &readable, NULL, NULL, &poll_now) != 1;
}

R
Rusty Russell 已提交
1068 1069 1070 1071 1072
/*
 * This handles packets coming in from the tun device to our Guest.  Like all
 * service routines, it gets called again as soon as it returns, so you don't
 * see a while(1) loop here.
 */
1073
static void net_input(struct virtqueue *vq)
1074 1075
{
	int len;
1076 1077 1078 1079
	unsigned int head, out, in;
	struct iovec iov[vq->vring.num];
	struct net_info *net_info = vq->dev->priv;

R
Rusty Russell 已提交
1080 1081 1082 1083
	/*
	 * Get a descriptor to write an incoming packet into.  This will also
	 * send an interrupt if they're out of descriptors.
	 */
1084 1085 1086
	head = wait_for_vq_desc(vq, iov, &out, &in);
	if (out)
		errx(1, "Output buffers in net input queue?");
1087

R
Rusty Russell 已提交
1088 1089 1090 1091
	/*
	 * If it looks like we'll block reading from the tun device, send them
	 * an interrupt.
	 */
1092 1093 1094
	if (vq->pending_used && will_block(net_info->tunfd))
		trigger_irq(vq);

R
Rusty Russell 已提交
1095 1096 1097 1098
	/*
	 * Read in the packet.  This is where we normally wait (when there's no
	 * incoming network traffic).
	 */
1099
	len = readv(net_info->tunfd, iov, in);
1100
	if (len <= 0)
1101
		warn("Failed to read from tun (%d).", errno);
R
Rusty Russell 已提交
1102 1103 1104 1105 1106

	/*
	 * Mark that packet buffer as used, but don't interrupt here.  We want
	 * to wait until we've done as much work as we can.
	 */
1107
	add_used(vq, head, len);
1108
}
R
Rusty Russell 已提交
1109
/*:*/
1110

R
Rusty Russell 已提交
1111
/* This is the helper to create threads: run the service routine in a loop. */
1112 1113 1114
static int do_thread(void *_vq)
{
	struct virtqueue *vq = _vq;
R
Rusty Russell 已提交
1115

1116 1117 1118 1119
	for (;;)
		vq->service(vq);
	return 0;
}
R
Rusty Russell 已提交
1120

R
Rusty Russell 已提交
1121 1122 1123 1124
/*
 * When a child dies, we kill our entire process group with SIGTERM.  This
 * also has the side effect that the shell restores the console for us!
 * (The signal number is unused; renamed from "signal" to avoid shadowing
 * signal(3).)
 */
static void kill_launcher(int sig)
{
	kill(0, SIGTERM);
}

1130 1131 1132 1133 1134 1135
/* Restore a virtqueue's saved PCI config fields to their power-on defaults. */
static void reset_vq_pci_config(struct virtqueue *vq)
{
	vq->pci_config.queue_size = VIRTQUEUE_NUM;
	vq->pci_config.queue_enable = 0;
}

1136
static void reset_device(struct device *dev)
1137
{
1138 1139 1140 1141 1142
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/* Clear any features they've acked. */
1143
	dev->features_accepted = 0;
1144 1145 1146 1147

	/* We're going to be explicitly killing threads, so ignore them. */
	signal(SIGCHLD, SIG_IGN);

1148 1149 1150 1151 1152 1153 1154 1155 1156
	/*
	 * 4.1.4.3.1:
	 *
	 *   The device MUST present a 0 in queue_enable on reset. 
	 *
	 * This means we set it here, and reset the saved ones in every vq.
	 */
	dev->mmio->cfg.queue_enable = 0;

1157
	/* Get rid of the virtqueue threads */
1158
	for (vq = dev->vq; vq; vq = vq->next) {
1159 1160
		vq->last_avail_idx = 0;
		reset_vq_pci_config(vq);
1161 1162 1163 1164 1165 1166 1167
		if (vq->thread != (pid_t)-1) {
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			vq->thread = (pid_t)-1;
		}
	}
	dev->running = false;
1168
	dev->wrote_features_ok = false;
1169 1170 1171

	/* Now we care if threads die. */
	signal(SIGCHLD, (void *)kill_launcher);
1172 1173
}

1174
static void cleanup_devices(void)
R
Rusty Russell 已提交
1175
{
1176 1177
	unsigned int i;

1178 1179 1180 1181 1182
	for (i = 1; i < MAX_PCI_DEVICES; i++) {
		struct device *d = devices.pci[i];
		if (!d)
			continue;
		reset_device(d);
1183
	}
R
Rusty Russell 已提交
1184

1185 1186 1187 1188
	/* If we saved off the original terminal settings, restore them now. */
	if (orig_term.c_lflag & (ISIG|ICANON|ECHO))
		tcsetattr(STDIN_FILENO, TCSANOW, &orig_term);
}
R
Rusty Russell 已提交
1189

1190 1191 1192 1193 1194
/*L:217
 * We do PCI.  This is mainly done to let us test the kernel virtio PCI
 * code.
 */

1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205
/* Linux expects a PCI host bridge: ours is a dummy, and first on the bus. */
static struct device pci_host_bridge;

static void init_pci_host_bridge(void)
{
	pci_host_bridge.name = "PCI Host Bridge";
	pci_host_bridge.config.class = 0x06; /* bridge */
	pci_host_bridge.config.subclass = 0; /* host bridge */
	devices.pci[0] = &pci_host_bridge;
}

1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276
/* The IO ports used to read the PCI config space. */
#define PCI_CONFIG_ADDR 0xCF8
#define PCI_CONFIG_DATA 0xCFC

/*
 * Not really portable, but does help readability: this is what the Guest
 * writes to the PCI_CONFIG_ADDR IO port.
 */
union pci_config_addr {
	struct {
		unsigned mbz: 2;      /* Must be zero (dword-aligned access). */
		unsigned offset: 6;   /* Dword index into config space. */
		unsigned funcnum: 3;  /* PCI function number. */
		unsigned devnum: 5;   /* PCI device (slot) number. */
		unsigned busnum: 8;   /* PCI bus number. */
		unsigned reserved: 7;
		unsigned enabled : 1; /* Config-access enable bit. */
	} bits;
	u32 val;
};

/*
 * We cache what they wrote to the address port, so we know what they're
 * talking about when they access the data port.
 */
static union pci_config_addr pci_config_addr;

/* Look up a device by PCI slot number; NULL if that slot is empty. */
static struct device *find_pci_device(unsigned int index)
{
	return devices.pci[index];
}

/* PCI can do 1, 2 and 4 byte reads; we handle that here. */
/* Extract the mask-sized field at byte offset 'off' of word 'v' into *val. */
static void ioread(u16 off, u32 v, u32 mask, u32 *val)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	*val = (v >> (off * 8)) & mask;
}

/* PCI can do 1, 2 and 4 byte writes; we handle that here. */
/* Merge the mask-sized value 'v' into *dst at byte offset 'off'. */
static void iowrite(u16 off, u32 v, u32 mask, u32 *dst)
{
	assert(off < 4);
	assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF);
	/* Clear the target field, then OR in the new (masked) bits. */
	*dst &= ~(mask << (off * 8));
	*dst |= (v & mask) << (off * 8);
}

/*
 * Where PCI_CONFIG_DATA accesses depends on the previous write to
 * PCI_CONFIG_ADDR.
 */
static struct device *dev_and_reg(u32 *reg)
{
	/* Accesses are ignored unless the enable bit was set. */
	if (!pci_config_addr.bits.enabled)
		return NULL;

	/* We don't emulate multi-function devices. */
	if (pci_config_addr.bits.funcnum != 0)
		return NULL;

	/* Only bus 0 exists. */
	if (pci_config_addr.bits.busnum != 0)
		return NULL;

	/* Reject offsets beyond the config structure we emulate. */
	if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config))
		return NULL;

	*reg = pci_config_addr.bits.offset;
	return find_pci_device(pci_config_addr.bits.devnum);
}

R
Rusty Russell 已提交
1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298
/*
 * We can get invalid combinations of values while they're writing, so we
 * only fault if they try to write with some invalid bar/offset/length.
 */
static bool valid_bar_access(struct device *d,
			     struct virtio_pci_cfg_cap *cfg_access)
{
	/* We only have 1 bar (BAR0) */
	if (cfg_access->cap.bar != 0)
		return false;

	/* Check it's within BAR0. */
	if (cfg_access->cap.offset >= d->mmio_size
	    || cfg_access->cap.offset + cfg_access->cap.length > d->mmio_size)
		return false;

	/* Check length is 1, 2 or 4. */
	if (cfg_access->cap.length != 1
	    && cfg_access->cap.length != 2
	    && cfg_access->cap.length != 4)
		return false;

1299 1300 1301 1302 1303 1304
	/*
	 * 4.1.4.7.2:
	 *
	 *  The driver MUST NOT write a cap.offset which is not a multiple of
	 *  cap.length (ie. all accesses MUST be aligned).
	 */
R
Rusty Russell 已提交
1305 1306 1307 1308 1309 1310 1311
	if (cfg_access->cap.offset % cfg_access->cap.length != 0)
		return false;

	/* Return pointer into word in BAR0. */
	return true;
}

1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342
/* Is this accessing the PCI config address port? */
static bool is_pci_addr_port(u16 port)
{
	return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4;
}

/* Merge a (possibly partial) write into our cached config address; always
 * succeeds. */
static bool pci_addr_iowrite(u16 port, u32 mask, u32 val)
{
	iowrite(port - PCI_CONFIG_ADDR, val, mask,
		&pci_config_addr.val);
	verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n",
		pci_config_addr.bits.enabled ? "" : " DISABLED",
		val, mask,
		pci_config_addr.bits.busnum,
		pci_config_addr.bits.devnum,
		pci_config_addr.bits.funcnum,
		pci_config_addr.bits.offset);
	return true;
}

/* Let the Guest read back what it last wrote to PCI_CONFIG_ADDR. */
static void pci_addr_ioread(u16 port, u32 mask, u32 *val)
{
	ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val);
}

/* Is this accessing the PCI config data port? */
static bool is_pci_data_port(u16 port)
{
	return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4;
}

R
Rusty Russell 已提交
1343 1344
static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask);

1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384
/*
 * Handle a Guest write to the PCI config data port: returns false if the
 * write is one the spec forbids (we then fault the Guest).
 */
static bool pci_data_iowrite(u16 port, u32 mask, u32 val)
{
	u32 reg, portoff;
	struct device *d = dev_and_reg(&reg);

	/* Complain if they don't belong to a device. */
	if (!d)
		return false;

	/* They can do 1 byte writes, etc. */
	portoff = port - PCI_CONFIG_DATA;

	/*
	 * PCI uses a weird way to determine the BAR size: the OS
	 * writes all 1's, and sees which ones stick.
	 */
	if (&d->config_words[reg] == &d->config.bar[0]) {
		int i;

		iowrite(portoff, val, mask, &d->config.bar[0]);
		/* Low bits of BAR0 read back as 0, up to our MMIO size. */
		for (i = 0; (1 << i) < d->mmio_size; i++)
			d->config.bar[0] &= ~(1 << i);
		return true;
	}

	/* Allow writing to any other BAR, or expansion ROM */
	if ((&d->config_words[reg] > &d->config.bar[0]
	     && &d->config_words[reg] <= &d->config.bar[6])
	    || &d->config_words[reg] == &d->config.expansion_rom_addr) {
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	}

	/* We let them override latency timer and cacheline size */
	if (&d->config_words[reg] == (void *)&d->config.cacheline_size) {
		/* Only let them change the first two fields. */
		if (mask == 0xFFFFFFFF)
			mask = 0xFFFF;
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	}

	/* Ignore command writes. */
	if (&d->config_words[reg] == (void *)&d->config.command
	    && mask == 0xFFFF)
		return true;

	/*
	 * The VIRTIO_PCI_CAP_PCI_CFG capability provides a backdoor to
	 * access the MMIO regions without mapping them.  Weird, but useful.
	 */
	if (&d->config_words[reg] == (void *)&d->config.cfg_access.cap.bar
	    || &d->config_words[reg] == &d->config.cfg_access.cap.length
	    || &d->config_words[reg] == &d->config.cfg_access.cap.offset) {
		iowrite(portoff, val, mask, &d->config_words[reg]);
		return true;
	}

	if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
		u32 write_mask;

		/*
		 * 4.1.4.7.1:
		 *
		 *  Upon detecting driver write access to pci_cfg_data, the
		 *  device MUST execute a write access at offset cap.offset at
		 *  BAR selected by cap.bar using the first cap.length bytes
		 *  from pci_cfg_data.
		 */

		/* Must be bar 0 */
		if (!valid_bar_access(d, &d->config.cfg_access))
			return false;

		iowrite(portoff, val, mask, &d->config.cfg_access.pci_cfg_data);

		/*
		 * Now emulate a write.  The mask we use is set by
		 * len, *not* this write!
		 */
		write_mask = (1ULL << (8 * d->config.cfg_access.cap.length)) - 1;
		verbose("Window writing %#x/%#x to bar %u, offset %u len %u\n",
			d->config.cfg_access.pci_cfg_data, write_mask,
			d->config.cfg_access.cap.bar,
			d->config.cfg_access.cap.offset,
			d->config.cfg_access.cap.length);

		emulate_mmio_write(d, d->config.cfg_access.cap.offset,
				   d->config.cfg_access.pci_cfg_data,
				   write_mask);
		return true;
	}

	/*
	 * 4.1.4.1:
	 *
	 *  The driver MUST NOT write into any field of the capability
	 *  structure, with the exception of those with cap_type
	 *  VIRTIO_PCI_CAP_PCI_CFG...
	 */
	return false;
}

R
Rusty Russell 已提交
1445 1446
static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask);

1447 1448 1449 1450 1451 1452 1453
/* Handle a Guest read from the PCI config data port. */
static void pci_data_ioread(u16 port, u32 mask, u32 *val)
{
	u32 reg;
	struct device *d = dev_and_reg(&reg);

	if (!d)
		return;

	/* Read through the PCI MMIO access window is special */
	if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) {
		u32 read_mask;

		/*
		 * 4.1.4.7.1:
		 *
		 *  Upon detecting driver read access to pci_cfg_data, the
		 *  device MUST execute a read access of length cap.length at
		 *  offset cap.offset at BAR selected by cap.bar and store the
		 *  first cap.length bytes in pci_cfg_data.
		 */
		/* Must be bar 0 */
		if (!valid_bar_access(d, &d->config.cfg_access))
			errx(1, "Invalid cfg_access to bar%u, offset %u len %u",
			     d->config.cfg_access.cap.bar,
			     d->config.cfg_access.cap.offset,
			     d->config.cfg_access.cap.length);

		/*
		 * Read into the window.  The mask we use is set by
		 * len, *not* this read!
		 */
		read_mask = (1ULL << (8 * d->config.cfg_access.cap.length)) - 1;
		d->config.cfg_access.pci_cfg_data
			= emulate_mmio_read(d,
					    d->config.cfg_access.cap.offset,
					    read_mask);
		verbose("Window read %#x/%#x from bar %u, offset %u len %u\n",
			d->config.cfg_access.pci_cfg_data, read_mask,
			d->config.cfg_access.cap.bar,
			d->config.cfg_access.cap.offset,
			d->config.cfg_access.cap.length);
	}

	/* Either way, hand back the (possibly just refreshed) word. */
	ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val);
}

1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525
/*L:216
 * This is where we emulate a handful of Guest instructions.  It's ugly
 * and we used to do it in the kernel but it grew over time.
 */

/*
 * We use the ptrace syscall's pt_regs struct to talk about registers
 * to lguest: these macros convert the names to the offsets.
 */
#define getreg(name) getreg_off(offsetof(struct user_regs_struct, name))
#define setreg(name, val) \
	setreg_off(offsetof(struct user_regs_struct, name), (val))

/*
 * Read one Guest register, identified by its byte offset into
 * struct user_regs_struct, via the LHREQ_GETREG/pread protocol on
 * /dev/lguest.  Exits on any failure.
 */
static u32 getreg_off(size_t offset)
{
	u32 r;
	unsigned long args[] = { LHREQ_GETREG, offset };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Getting register %zu", offset);
	if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r))
		err(1, "Reading register %zu", offset);

	return r;
}

/*
 * Write one Guest register (byte offset into struct user_regs_struct)
 * via the LHREQ_SETREG protocol on /dev/lguest.  Exits on failure.
 */
static void setreg_off(size_t offset, u32 val)
{
	unsigned long args[] = { LHREQ_SETREG, offset, val };

	if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0)
		err(1, "Setting register %zu", offset);
}

1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596
/* Get register by instruction encoding */
/*
 * regnum is the 3-bit x86 register number from the instruction; mask selects
 * the access width (0xFF/0xFFFF/0xFFFFFFFF).  Aborts on impossible encodings.
 */
static u32 getreg_num(unsigned regnum, u32 mask)
{
	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4))
		return getreg_num(regnum & 0x3, 0xFFFF) >> 8;

	switch (regnum) {
	case 0: return getreg(eax) & mask;
	case 1: return getreg(ecx) & mask;
	case 2: return getreg(edx) & mask;
	case 3: return getreg(ebx) & mask;
	case 4: return getreg(esp) & mask;
	case 5: return getreg(ebp) & mask;
	case 6: return getreg(esi) & mask;
	case 7: return getreg(edi) & mask;
	}
	abort();
}

/* Set register by instruction encoding */
/*
 * Writes only the mask-covered bits of the register named by regnum;
 * the rest of the register is preserved via read-modify-write.
 */
static void setreg_num(unsigned regnum, u32 val, u32 mask)
{
	/* Don't try to set bits out of range */
	assert(~(val & ~mask));

	/* 8 bit ops use regnums 4-7 for high parts of word */
	if (mask == 0xFF && (regnum & 0x4)) {
		/* Construct the 16 bits we want. */
		val = (val << 8) | getreg_num(regnum & 0x3, 0xFF);
		setreg_num(regnum & 0x3, val, 0xFFFF);
		return;
	}

	switch (regnum) {
	case 0: setreg(eax, val | (getreg(eax) & ~mask)); return;
	case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return;
	case 2: setreg(edx, val | (getreg(edx) & ~mask)); return;
	case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return;
	case 4: setreg(esp, val | (getreg(esp) & ~mask)); return;
	case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return;
	case 6: setreg(esi, val | (getreg(esi) & ~mask)); return;
	case 7: setreg(edi, val | (getreg(edi) & ~mask)); return;
	}
	abort();
}

/* Get bytes of displacement appended to instruction, from r/m encoding */
static u32 insn_displacement_len(u8 mod_reg_rm)
{
	/* Switch on the mod bits */
	switch (mod_reg_rm >> 6) {
	case 0:
		/* If mod == 0, and r/m == 101, 16-bit displacement follows */
		/*
		 * NOTE(review): in 32-bit mode, mod==0/r-m==101 is normally a
		 * 4-byte (disp32) form; confirm returning 2 here is intended.
		 */
		if ((mod_reg_rm & 0x7) == 0x5)
			return 2;
		/* Normally, mod == 0 means no literal displacement */
		return 0;
	case 1:
		/* One byte displacement */
		return 1;
	case 2:
		/* Four byte displacement */
		return 4;
	case 3:
		/* Register mode */
		return 0;
	}
	abort();
}

1597 1598 1599 1600 1601 1602
static void emulate_insn(const u8 insn[])
{
	unsigned long args[] = { LHREQ_TRAP, 13 };
	unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access;
	unsigned int eax, port, mask;
	/*
1603
	 * Default is to return all-ones on IO port reads, which traditionally
1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684
	 * means "there's nothing there".
	 */
	u32 val = 0xFFFFFFFF;

	/*
	 * This must be the Guest kernel trying to do something, not userspace!
	 * The bottom two bits of the CS segment register are the privilege
	 * level.
	 */
	if ((getreg(xcs) & 3) != 0x1)
		goto no_emulate;

	/* Decoding x86 instructions is icky. */

	/*
	 * Around 2.6.33, the kernel started using an emulation for the
	 * cmpxchg8b instruction in early boot on many configurations.  This
	 * code isn't paravirtualized, and it tries to disable interrupts.
	 * Ignore it, which will Mostly Work.
	 */
	if (insn[insnlen] == 0xfa) {
		/* "cli", or Clear Interrupt Enable instruction.  Skip it. */
		insnlen = 1;
		goto skip_insn;
	}

	/*
	 * 0x66 is an "operand prefix".  It means a 16, not 32 bit in/out.
	 */
	if (insn[insnlen] == 0x66) {
		small_operand = 1;
		/* The instruction is 1 byte so far, read the next byte. */
		insnlen = 1;
	}

	/* If the lower bit isn't set, it's a single byte access */
	byte_access = !(insn[insnlen] & 1);

	/*
	 * Now we can ignore the lower bit and decode the 4 opcodes
	 * we need to emulate.
	 */
	switch (insn[insnlen] & 0xFE) {
	case 0xE4: /* in     <next byte>,%al */
		port = insn[insnlen+1];
		insnlen += 2;
		in = 1;
		break;
	case 0xEC: /* in     (%dx),%al */
		port = getreg(edx) & 0xFFFF;
		insnlen += 1;
		in = 1;
		break;
	case 0xE6: /* out    %al,<next byte> */
		port = insn[insnlen+1];
		insnlen += 2;
		break;
	case 0xEE: /* out    %al,(%dx) */
		port = getreg(edx) & 0xFFFF;
		insnlen += 1;
		break;
	default:
		/* OK, we don't know what this is, can't emulate. */
		goto no_emulate;
	}

	/* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */
	if (byte_access)
		mask = 0xFF;
	else if (small_operand)
		mask = 0xFFFF;
	else
		mask = 0xFFFFFFFF;

	/*
	 * If it was an "IN" instruction, they expect the result to be read
	 * into %eax, so we change %eax.
	 */
	eax = getreg(eax);

	if (in) {
1685 1686 1687 1688 1689 1690 1691 1692
		/* This is the PS/2 keyboard status; 1 means ready for output */
		if (port == 0x64)
			val = 1;
		else if (is_pci_addr_port(port))
			pci_addr_ioread(port, mask, &val);
		else if (is_pci_data_port(port))
			pci_data_ioread(port, mask, &val);

1693 1694 1695 1696 1697 1698
		/* Clear the bits we're about to read */
		eax &= ~mask;
		/* Copy bits in from val. */
		eax |= val & mask;
		/* Now update the register. */
		setreg(eax, eax);
1699 1700 1701 1702 1703 1704 1705 1706 1707 1708
	} else {
		if (is_pci_addr_port(port)) {
			if (!pci_addr_iowrite(port, mask, eax))
				goto bad_io;
		} else if (is_pci_data_port(port)) {
			if (!pci_data_iowrite(port, mask, eax))
				goto bad_io;
		}
		/* There are many other ports, eg. CMOS clock, serial
		 * and parallel ports, so we ignore them all. */
1709 1710 1711 1712 1713 1714 1715 1716 1717
	}

	verbose("IO %s of %x to %u: %#08x\n",
		in ? "IN" : "OUT", mask, port, eax);
skip_insn:
	/* Finally, we've "done" the instruction, so move past it. */
	setreg(eip, getreg(eip) + insnlen);
	return;

1718 1719 1720 1721
bad_io:
	warnx("Attempt to %s port %u (%#x mask)",
	      in ? "read from" : "write to", port, mask);

1722 1723 1724 1725 1726 1727
no_emulate:
	/* Inject trap into Guest. */
	if (write(lguest_fd, args, sizeof(args)) < 0)
		err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip));
}

1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746
/*
 * Map a Guest physical address to the PCI device whose MMIO region contains
 * it; *off is set to the offset within that region.  NULL if no device
 * claims the address.
 */
static struct device *find_mmio_region(unsigned long paddr, u32 *off)
{
	unsigned int i;

	/* Slot 0 is the host bridge, which has no MMIO region. */
	for (i = 1; i < MAX_PCI_DEVICES; i++) {
		struct device *d = devices.pci[i];

		if (!d)
			continue;
		if (paddr < d->mmio_addr)
			continue;
		if (paddr >= d->mmio_addr + d->mmio_size)
			continue;
		*off = paddr - d->mmio_addr;
		return d;
	}
	return NULL;
}

1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774
/* FIXME: Use vq array. */
/* Walk the device's virtqueue list to the num'th entry; NULL if too few. */
static struct virtqueue *vq_by_num(struct device *d, u32 num)
{
	struct virtqueue *vq = d->vq;

	while (num-- && vq)
		vq = vq->next;

	return vq;
}

/* Snapshot the common config so we can restore it when the vq is reselected. */
static void save_vq_config(const struct virtio_pci_common_cfg *cfg,
			   struct virtqueue *vq)
{
	vq->pci_config = *cfg;
}

/* Copy a virtqueue's saved config back into the visible common config. */
static void restore_vq_config(struct virtio_pci_common_cfg *cfg,
			      struct virtqueue *vq)
{
	/* Only restore the per-vq part */
	/* Everything from queue_size onwards is per-virtqueue state. */
	size_t off = offsetof(struct virtio_pci_common_cfg, queue_size);

	memcpy((void *)cfg + off, (void *)&vq->pci_config + off,
	       sizeof(*cfg) - off);
}

/*
1775 1776 1777 1778 1779
 * 4.1.4.3.2:
 *
 *  The driver MUST configure the other virtqueue fields before
 *  enabling the virtqueue with queue_enable.
 *
1780 1781
 * When they enable the virtqueue, we check that their setup is valid.
 */
1782
static void check_virtqueue(struct device *d, struct virtqueue *vq)
1783 1784 1785 1786 1787 1788 1789
{
	/* Because lguest is 32 bit, all the descriptor high bits must be 0 */
	if (vq->pci_config.queue_desc_hi
	    || vq->pci_config.queue_avail_hi
	    || vq->pci_config.queue_used_hi)
		errx(1, "%s: invalid 64-bit queue address", d->name);

1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801
	/*
	 * 2.4.1:
	 *
	 *  The driver MUST ensure that the physical address of the first byte
	 *  of each virtqueue part is a multiple of the specified alignment
	 *  value in the above table.
	 */
	if (vq->pci_config.queue_desc_lo % 16
	    || vq->pci_config.queue_avail_lo % 2
	    || vq->pci_config.queue_used_lo % 4)
		errx(1, "%s: invalid alignment in queue addresses", d->name);

1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813
	/* Initialize the virtqueue and check they're all in range. */
	vq->vring.num = vq->pci_config.queue_size;
	vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo,
				       sizeof(*vq->vring.desc) * vq->vring.num);
	vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo,
					sizeof(*vq->vring.avail)
					+ (sizeof(vq->vring.avail->ring[0])
					   * vq->vring.num));
	vq->vring.used = check_pointer(vq->pci_config.queue_used_lo,
				       sizeof(*vq->vring.used)
				       + (sizeof(vq->vring.used->ring[0])
					  * vq->vring.num));
1814 1815 1816 1817 1818 1819 1820 1821 1822 1823

	/*
	 * 2.4.9.1:
	 *
	 *   The driver MUST initialize flags in the used ring to 0
	 *   when allocating the used ring.
	 */
	if (vq->vring.used->flags != 0)
		errx(1, "%s: invalid initial used.flags %#x",
		     d->name, vq->vring.used->flags);
1824
}
1825

1826 1827 1828 1829 1830 1831 1832
/* Spin up the service thread (and its eventfd) for one virtqueue. */
static void start_virtqueue(struct virtqueue *vq)
{
	/*
	 * Create a stack for the thread.  clone() wants a pointer to the
	 * *top* of the region, since the x86 stack grows downwards.
	 */
	char *stack = malloc(32768);

	/* Unchecked malloc meant clone() got NULL+32768 on OOM. */
	if (!stack)
		err(1, "Allocating virtqueue thread stack");

	/* Create a zero-initialized eventfd. */
	vq->eventfd = eventfd(0, 0);
	if (vq->eventfd < 0)
		err(1, "Creating eventfd");

	/*
	 * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so
	 * we get a signal if it dies.
	 */
	vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq);
	if (vq->thread == (pid_t)-1)
		err(1, "Creating clone");
}

1848 1849 1850 1851 1852 1853 1854 1855 1856 1857
/* Start a service thread for each virtqueue the driver has enabled. */
static void start_virtqueues(struct device *d)
{
	struct virtqueue *vq;

	for (vq = d->vq; vq; vq = vq->next) {
		if (vq->pci_config.queue_enable)
			start_virtqueue(vq);
	}
}

1858 1859
/*
 * The Guest wrote to one of our devices' MMIO regions: decode which register
 * it hit (off) and enforce the virtio 1.0 rules before storing the value.
 * mask tells us the access width (0xFF / 0xFFFF / 0xFFFFFFFF).
 */
static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask)
{
	struct virtqueue *vq;

	switch (off) {
	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
		/*
		 * 4.1.4.3.1:
		 *
		 * The device MUST present the feature bits it is offering in
		 * device_feature, starting at bit device_feature_select * 32
		 * for any device_feature_select written by the driver
		 */
		if (val == 0)
			d->mmio->cfg.device_feature = d->features;
		else if (val == 1)
			d->mmio->cfg.device_feature = (d->features >> 32);
		else
			d->mmio->cfg.device_feature = 0;
		goto feature_write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
		if (val > 1)
			errx(1, "%s: Unexpected driver select %u",
			     d->name, val);
		goto feature_write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
		if (d->mmio->cfg.guest_feature_select == 0) {
			d->features_accepted &= ~((u64)0xFFFFFFFF);
			d->features_accepted |= val;
		} else {
			assert(d->mmio->cfg.guest_feature_select == 1);
			d->features_accepted &= 0xFFFFFFFF;
			d->features_accepted |= ((u64)val) << 32;
		}
		/*
		 * 2.2.1:
		 *
		 *   The driver MUST NOT accept a feature which the device did
		 *   not offer
		 */
		if (d->features_accepted & ~d->features)
			errx(1, "%s: over-accepted features %#llx of %#llx",
			     d->name, d->features_accepted, d->features);
		goto feature_write_through32;
	case offsetof(struct virtio_pci_mmio, cfg.device_status): {
		u8 prev;

		verbose("%s: device status -> %#x\n", d->name, val);
		/*
		 * 4.1.4.3.1:
		 *
		 *  The device MUST reset when 0 is written to device_status,
		 *  and present a 0 in device_status once that is done.
		 */
		if (val == 0) {
			reset_device(d);
			goto write_through8;
		}

		/* 2.1.1: The driver MUST NOT clear a device status bit. */
		if (d->mmio->cfg.device_status & ~val)
			errx(1, "%s: unset of device status bit %#x -> %#x",
			     d->name, d->mmio->cfg.device_status, val);

		/*
		 * 2.1.2:
		 *
		 *  The device MUST NOT consume buffers or notify the driver
		 *  before DRIVER_OK.
		 */
		if (val & VIRTIO_CONFIG_S_DRIVER_OK
		    && !(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
			start_virtqueues(d);

		/*
		 * 3.1.1:
		 *
		 *   The driver MUST follow this sequence to initialize a device:
		 *   - Reset the device.
		 *   - Set the ACKNOWLEDGE status bit: the guest OS has
		 *     noticed the device.
		 *   - Set the DRIVER status bit: the guest OS knows how
		 *     to drive the device.
		 *   - Read device feature bits, and write the subset
		 *     of feature bits understood by the OS and driver
		 *     to the device. During this step the driver MAY
		 *     read (but MUST NOT write) the device-specific
		 *     configuration fields to check that it can
		 *     support the device before accepting it.
		 *   - Set the FEATURES_OK status bit.  The driver
		 *     MUST not accept new feature bits after this
		 *     step.
		 *   - Re-read device status to ensure the FEATURES_OK
		 *     bit is still set: otherwise, the device does
		 *     not support our subset of features and the
		 *     device is unusable.
		 *   - Perform device-specific setup, including
		 *     discovery of virtqueues for the device,
		 *     optional per-bus setup, reading and possibly
		 *     writing the device's virtio configuration
		 *     space, and population of virtqueues.
		 *   - Set the DRIVER_OK status bit. At this point the
		 *     device is "live".
		 */
		prev = 0;
		switch (val & ~d->mmio->cfg.device_status) {
		case VIRTIO_CONFIG_S_DRIVER_OK:
			prev |= VIRTIO_CONFIG_S_FEATURES_OK; /* fall thru */
		case VIRTIO_CONFIG_S_FEATURES_OK:
			prev |= VIRTIO_CONFIG_S_DRIVER; /* fall thru */
		case VIRTIO_CONFIG_S_DRIVER:
			prev |= VIRTIO_CONFIG_S_ACKNOWLEDGE; /* fall thru */
		case VIRTIO_CONFIG_S_ACKNOWLEDGE:
			break;
		default:
			errx(1, "%s: unknown device status bit %#x -> %#x",
			     d->name, d->mmio->cfg.device_status, val);
		}
		if (d->mmio->cfg.device_status != prev)
			errx(1, "%s: unexpected status transition %#x -> %#x",
			     d->name, d->mmio->cfg.device_status, val);

		/* If they just wrote FEATURES_OK, we make sure they read */
		switch (val & ~d->mmio->cfg.device_status) {
		case VIRTIO_CONFIG_S_FEATURES_OK:
			d->wrote_features_ok = true;
			break;
		case VIRTIO_CONFIG_S_DRIVER_OK:
			if (d->wrote_features_ok)
				errx(1, "%s: did not re-read FEATURES_OK",
				     d->name);
			break;
		}
		goto write_through8;
	}
	case offsetof(struct virtio_pci_mmio, cfg.queue_select):
		vq = vq_by_num(d, val);
		/*
		 * 4.1.4.3.1:
		 *
		 *  The device MUST present a 0 in queue_size if the virtqueue
		 *  corresponding to the current queue_select is unavailable.
		 */
		if (!vq) {
			d->mmio->cfg.queue_size = 0;
			goto write_through16;
		}
		/* Save registers for old vq, if it was a valid vq */
		if (d->mmio->cfg.queue_size)
			save_vq_config(&d->mmio->cfg,
				       vq_by_num(d, d->mmio->cfg.queue_select));
		/* Restore the registers for the queue they asked for */
		restore_vq_config(&d->mmio->cfg, vq);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_size):
		/*
		 * 4.1.4.3.2:
		 *
		 *  The driver MUST NOT write a value which is not a power of 2
		 *  to queue_size.
		 */
		/* errx() appends its own newline; no "\n" in the message. */
		if (val & (val-1))
			errx(1, "%s: invalid queue size %u", d->name, val);
		if (d->mmio->cfg.queue_enable)
			errx(1, "%s: changing queue size on live device",
			     d->name);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector):
		errx(1, "%s: attempt to set MSIX vector to %u",
		     d->name, val);
	case offsetof(struct virtio_pci_mmio, cfg.queue_enable): {
		struct virtqueue *vq = vq_by_num(d, d->mmio->cfg.queue_select);

		/*
		 * 4.1.4.3.2:
		 *
		 *  The driver MUST NOT write a 0 to queue_enable.
		 */
		if (val != 1)
			errx(1, "%s: setting queue_enable to %u", d->name, val);

		/*
		 * 3.1.1:
		 *
		 *  7. Perform device-specific setup, including discovery of
		 *     virtqueues for the device, optional per-bus setup,
		 *     reading and possibly writing the device's virtio
		 *     configuration space, and population of virtqueues.
		 *  8. Set the DRIVER_OK status bit.
		 *
		 * All our devices require all virtqueues to be enabled, so
		 * they should have done that before setting DRIVER_OK.
		 */
		if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)
			errx(1, "%s: enabling vs after DRIVER_OK", d->name);

		d->mmio->cfg.queue_enable = val;
		save_vq_config(&d->mmio->cfg, vq);
		check_virtqueue(d, vq);
		goto write_through16;
	}
	case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off):
		errx(1, "%s: attempt to write to queue_notify_off", d->name);
	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi):
	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi):
	case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo):
	case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi):
		/*
		 * 4.1.4.3.2:
		 *
		 *  The driver MUST configure the other virtqueue fields before
		 *  enabling the virtqueue with queue_enable.
		 */
		if (d->mmio->cfg.queue_enable)
			errx(1, "%s: changing queue on live device",
			     d->name);

		/*
		 * 3.1.1:
		 *
		 *  The driver MUST follow this sequence to initialize a device:
		 *...
		 *  5. Set the FEATURES_OK status bit. The driver MUST not
		 *  accept new feature bits after this step.
		 */
		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK))
			errx(1, "%s: enabling vs before FEATURES_OK", d->name);

		/*
		 *  6. Re-read device status to ensure the FEATURES_OK bit is
		 *     still set...
		 */
		if (d->wrote_features_ok)
			errx(1, "%s: didn't re-read FEATURES_OK before setup",
			     d->name);

		goto write_through32;
	case offsetof(struct virtio_pci_mmio, notify):
		vq = vq_by_num(d, val);
		if (!vq)
			errx(1, "Invalid vq notification on %u", val);
		/* Notify the process handling this vq by adding 1 to eventfd */
		if (write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8) != 8)
			err(1, "Notifying virtqueue %s", vq->name);
		goto write_through16;
	case offsetof(struct virtio_pci_mmio, isr):
		errx(1, "%s: Unexpected write to isr", d->name);
	/* Weird corner case: write to emerg_wr of console */
	case sizeof(struct virtio_pci_mmio)
		+ offsetof(struct virtio_console_config, emerg_wr):
		if (strcmp(d->name, "console") == 0) {
			char c = val;
			write(STDOUT_FILENO, &c, 1);
			goto write_through32;
		}
		/* Fall through... */
	default:
		/*
		 * 4.1.4.3.2:
		 *
		 *   The driver MUST NOT write to device_feature, num_queues,
		 *   config_generation or queue_notify_off.
		 */
		errx(1, "%s: Unexpected write to offset %u", d->name, off);
	}

feature_write_through32:
	/*
	 * 3.1.1:
	 *
	 *   The driver MUST follow this sequence to initialize a device:
	 *...
	 *   - Set the DRIVER status bit: the guest OS knows how
	 *     to drive the device.
	 *   - Read device feature bits, and write the subset
	 *     of feature bits understood by the OS and driver
	 *     to the device.
	 *...
	 *   - Set the FEATURES_OK status bit. The driver MUST not
	 *     accept new feature bits after this step.
	 */
	if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
		errx(1, "%s: feature write before VIRTIO_CONFIG_S_DRIVER",
		     d->name);
	if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK)
		errx(1, "%s: feature write after VIRTIO_CONFIG_S_FEATURES_OK",
		     d->name);

	/*
	 * 4.1.3.1:
	 *
	 *  The driver MUST access each field using the "natural" access
	 *  method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for
	 *  16-bit fields and 8-bit accesses for 8-bit fields.
	 */
write_through32:
	if (mask != 0xFFFFFFFF)
		errx(1, "%s: non-32-bit write to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy((char *)d->mmio + off, &val, 4);
	return;

write_through16:
	if (mask != 0xFFFF)
		errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)",
		     d->name, mask, off, getreg(eip));
	memcpy((char *)d->mmio + off, &val, 2);
	return;

write_through8:
	if (mask != 0xFF)
		errx(1, "%s: non-8-bit write to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy((char *)d->mmio + off, &val, 1);
	return;
}

/*
 * The Guest read from one of our devices' MMIO regions: enforce the virtio
 * 1.0 rules for the register at 'off' and return the value of the requested
 * width (mask is 0xFF / 0xFFFF / 0xFFFFFFFF).
 */
static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask)
{
	u8 isr;
	u32 ret = 0;

	switch (off) {
	case offsetof(struct virtio_pci_mmio, cfg.device_feature_select):
	case offsetof(struct virtio_pci_mmio, cfg.device_feature):
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select):
	case offsetof(struct virtio_pci_mmio, cfg.guest_feature):
		/*
		 * 3.1.1: feature negotiation happens only after the driver
		 * has set the DRIVER status bit ("the guest OS knows how to
		 * drive the device"), so a feature read before that is a
		 * driver bug.
		 */
		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
			errx(1, "%s: feature read before VIRTIO_CONFIG_S_DRIVER",
			     d->name);
		goto read_through32;
	case offsetof(struct virtio_pci_mmio, cfg.msix_config):
		errx(1, "%s: read of msix_config", d->name);
	case offsetof(struct virtio_pci_mmio, cfg.num_queues):
		goto read_through16;
	case offsetof(struct virtio_pci_mmio, cfg.device_status):
		/* As they did read, any write of FEATURES_OK is now fine. */
		d->wrote_features_ok = false;
		goto read_through8;
	case offsetof(struct virtio_pci_mmio, cfg.config_generation):
		/*
		 * 4.1.4.3.1: config_generation must change after any
		 * device-specific config change the driver could observe.
		 * None of our devices ever change config, so it stays 0.
		 */
		goto read_through8;
	case offsetof(struct virtio_pci_mmio, notify):
		/*
		 * 3.1.1: the driver MUST NOT notify the device before
		 * setting DRIVER_OK.
		 */
		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK))
			errx(1, "%s: notify before VIRTIO_CONFIG_S_DRIVER_OK",
			     d->name);
		goto read_through16;
	case offsetof(struct virtio_pci_mmio, isr):
		if (mask != 0xFF)
			errx(1, "%s: non-8-bit read from offset %u (%#x)",
			     d->name, off, getreg(eip));
		isr = d->mmio->isr;
		/* 4.1.4.5.1: the device MUST reset ISR to 0 on driver read. */
		d->mmio->isr = 0;
		return isr;
	case offsetof(struct virtio_pci_mmio, padding):
		errx(1, "%s: read from padding (%#x)",
		     d->name, getreg(eip));
	default:
		/* Read from device config space, beware unaligned overflow */
		if (off > d->mmio_size - 4)
			errx(1, "%s: read past end (%#x)",
			     d->name, getreg(eip));

		/*
		 * 3.1.1 (steps 3-4): the driver MAY read device-specific
		 * config during feature negotiation, i.e. only once the
		 * DRIVER status bit is set.
		 */
		if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER))
			errx(1, "%s: config read before VIRTIO_CONFIG_S_DRIVER",
			     d->name);

		switch (mask) {
		case 0xFFFFFFFF:
			goto read_through32;
		case 0xFFFF:
			goto read_through16;
		default:
			goto read_through8;
		}
	}

	/*
	 * 4.1.3.1: each field must be accessed with its natural width, so we
	 * double-check the mask against the width of the field we landed on.
	 */
read_through32:
	if (mask != 0xFFFFFFFF)
		errx(1, "%s: non-32-bit read to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&ret, (char *)d->mmio + off, 4);
	return ret;

read_through16:
	if (mask != 0xFFFF)
		errx(1, "%s: non-16-bit read to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&ret, (char *)d->mmio + off, 2);
	return ret;

read_through8:
	if (mask != 0xFF)
		errx(1, "%s: non-8-bit read to offset %u (%#x)",
		     d->name, off, getreg(eip));
	memcpy(&ret, (char *)d->mmio + off, 1);
	return ret;
}

/*
 * The Guest faulted on an MMIO access at physical address paddr; insn points
 * at the faulting instruction bytes.  We decode the (small) set of x86 MOV
 * forms the Guest uses for MMIO, perform the read or write against the
 * device's emulated registers, then advance the Guest's eip past the
 * instruction.  Anything we can't decode (or a fault outside any device's
 * region) is reinjected into the Guest as a page fault (trap 14).
 */
static void emulate_mmio(unsigned long paddr, const u8 *insn)
{
	/* Default to a 32-bit access; the 0x66 prefix narrows it to 16. */
	u32 val, off, mask = 0xFFFFFFFF, insnlen = 0;
	/* Which device owns this address, and the offset within its region. */
	struct device *d = find_mmio_region(paddr, &off);
	/* Arguments for the LHREQ_TRAP write, should we need to reinject. */
	unsigned long args[] = { LHREQ_TRAP, 14 };

	if (!d) {
		warnx("MMIO touching %#08lx (not a device)", paddr);
		goto reinject;
	}

	/* Prefix makes it a 16 bit op */
	if (insn[0] == 0x66) {
		mask = 0xFFFF;
		insnlen++;
	}

	/* iowrite */
	if (insn[insnlen] == 0x89) {
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[insnlen] == 0x8b) { /* ioread */
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask);
		insnlen += 2 + insn_displacement_len(insn[insnlen+1]);
	} else if (insn[0] == 0x88) { /* 8-bit iowrite */
		mask = 0xff;
		/* Next byte is r/m byte: bits 3-5 are register. */
		val = getreg_num((insn[1] >> 3) & 0x7, mask);
		emulate_mmio_write(d, off, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else if (insn[0] == 0x8a) { /* 8-bit ioread */
		mask = 0xff;
		val = emulate_mmio_read(d, off, mask);
		setreg_num((insn[1] >> 3) & 0x7, val, mask);
		insnlen = 2 + insn_displacement_len(insn[1]);
	} else {
		warnx("Unknown MMIO instruction touching %#08lx:"
		     " %02x %02x %02x %02x at %u",
		     paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip));
	reinject:
		/* Inject trap into Guest. */
		if (write(lguest_fd, args, sizeof(args)) < 0)
			err(1, "Reinjecting trap 14 for fault at %#x",
			    getreg(eip));
		return;
	}

	/* Finally, we've "done" the instruction, so move past it. */
	setreg(eip, getreg(eip) + insnlen);
}
2363

/*L:190
 * Device Setup
 *
 * All devices need a descriptor so the Guest knows it exists, and a "struct
 * device" so the Launcher can keep track of it.  We have common helper
 * routines to allocate and manage them.
 */
static void add_pci_virtqueue(struct device *dev,
2372 2373
			      void (*service)(struct virtqueue *),
			      const char *name)
2374 2375 2376 2377 2378 2379 2380
{
	struct virtqueue **i, *vq = malloc(sizeof(*vq));

	/* Initialize the virtqueue */
	vq->next = NULL;
	vq->last_avail_idx = 0;
	vq->dev = dev;
2381
	vq->name = name;
2382 2383 2384 2385 2386 2387 2388 2389 2390

	/*
	 * This is the routine the service thread will run, and its Process ID
	 * once it's running.
	 */
	vq->service = service;
	vq->thread = (pid_t)-1;

	/* Initialize the configuration. */
2391
	reset_vq_pci_config(vq);
2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404
	vq->pci_config.queue_notify_off = 0;

	/* Add one to the number of queues */
	vq->dev->mmio->cfg.num_queues++;

	/*
	 * Add to tail of list, so dev->vq is first vq, dev->vq->next is
	 * second.
	 */
	for (i = &dev->vq; *i; i = &(*i)->next);
	*i = vq;
}

2405
/* The Guest accesses the feature bits via the PCI common config MMIO region */
2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428
static void add_pci_feature(struct device *dev, unsigned bit)
{
	dev->features |= (1ULL << bit);
}

/*
 * For devices with no config: just place the MMIO region and point BAR 0
 * at it.
 */
static void no_device_config(struct device *dev)
{
	dev->mmio_addr = get_mmio_region(dev->mmio_size);

	dev->config.bar[0] = dev->mmio_addr;
	/*
	 * Bottom 4 bits must be zero.  The old assert(~(bar & 0xF)) was a
	 * tautology (~x of a value <= 0xF is never 0), so it checked nothing.
	 */
	assert(!(dev->config.bar[0] & 0xF));
}

/* This puts the device config into BAR0 */
static void set_device_config(struct device *dev, const void *conf, size_t len)
{
	/* Set up BAR 0 */
	dev->mmio_size += len;
	dev->mmio = realloc(dev->mmio, dev->mmio_size);
	memcpy(dev->mmio + 1, conf, len);

2429 2430 2431 2432 2433 2434 2435
	/*
	 * 4.1.4.6:
	 *
	 *  The device MUST present at least one VIRTIO_PCI_CAP_DEVICE_CFG
	 *  capability for any device type which has a device-specific
	 *  configuration.
	 */
2436 2437 2438 2439
	/* Hook up device cfg */
	dev->config.cfg_access.cap.cap_next
		= offsetof(struct pci_config, device);

2440 2441 2442 2443 2444 2445 2446 2447
	/*
	 * 4.1.4.6.1:
	 *
	 *  The offset for the device-specific configuration MUST be 4-byte
	 *  aligned.
	 */
	assert(dev->config.cfg_access.cap.cap_next % 4 == 0);

2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476
	/* Fix up device cfg field length. */
	dev->config.device.length = len;

	/* The rest is the same as the no-config case */
	no_device_config(dev);
}

/*
 * Fill in one vendor-specific PCI capability: its type, where it sits in
 * the capability chain (next), and which slice of the BAR it describes.
 */
static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type,
		     size_t bar_offset, size_t bar_bytes, u8 next)
{
	/* All virtio capabilities are vendor-specific PCI capabilities. */
	cap->cap_vndr = PCI_CAP_ID_VNDR;
	cap->cap_len = caplen;
	cap->cap_next = next;
	cap->cfg_type = type;
	/* Everything we expose lives in BAR 0. */
	cap->bar = 0;
	memset(cap->padding, 0, sizeof(cap->padding));
	/* The slice of the BAR this capability covers. */
	cap->offset = bar_offset;
	cap->length = bar_bytes;
}

/*
 * This sets up the pci_config structure, as defined in the virtio 1.0
 * standard (and PCI standard).
 */
static void init_pci_config(struct pci_config *pci, u16 type,
			    u8 class, u8 subclass)
{
	size_t bar_offset, bar_len;

	/*
	 * Zeroing everything up front satisfies two spec requirements at
	 * once:
	 *
	 * 4.1.4.4.1: notify_off_multiplier must be an even power of 2, or 0.
	 * 2.1.2: device status must be 0 upon reset.
	 */
	memset(pci, 0, sizeof(*pci));

	/* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */
	pci->vendor_id = 0x1AF4;
	/* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */
	pci->device_id = 0x1040 + type;

	/*
	 * PCI has specific codes for different types of devices.  Linux
	 * doesn't care, but it's a good clue for people looking at the
	 * device.
	 */
	pci->class = class;
	pci->subclass = subclass;

	/*
	 * 4.1.2.1: Non-transitional devices SHOULD have a PCI Revision ID of
	 * 1 or higher.
	 */
	pci->revid = 1;

	/*
	 * 4.1.2.1: Non-transitional devices SHOULD have a PCI Subsystem
	 * Device ID of 0x40 or higher.
	 */
	pci->subsystem_device_id = 0x40;

	/* We use our dummy interrupt controller, and irq_line is the irq */
	pci->irq_line = devices.next_irq++;
	pci->irq_pin = 0;

	/* Support for extended capabilities. */
	pci->status = (1 << 4);

	/*
	 * Now chain the capabilities together.
	 *
	 * 4.1.4.3.1: the device MUST present at least one common
	 * configuration capability, at a 4-byte-aligned offset.
	 */
	pci->capabilities = offsetof(struct pci_config, common);
	assert(pci->capabilities % 4 == 0);

	bar_offset = offsetof(struct virtio_pci_mmio, cfg);
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg);
	init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, notify));

	/* 4.1.4.4.1: at least one notification capability is required. */
	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify);

	/* 4.1.4.4.1: the cap.offset MUST be 2-byte aligned. */
	assert(pci->common.cap_next % 2 == 0);

	/* FIXME: Use a non-zero notify_off, for per-queue notification? */
	/*
	 * 4.1.4.4.1: cap.length MUST be at least 2, and large enough for the
	 * queue notification offsets of every supported queue.
	 */
	assert(bar_len >= 2);

	init_cap(&pci->notify.cap, sizeof(pci->notify),
		 VIRTIO_PCI_CAP_NOTIFY_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, isr));

	/* 4.1.4.5.1: at least one VIRTIO_PCI_CAP_ISR_CFG capability. */
	bar_offset += bar_len;
	bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr);
	init_cap(&pci->isr, sizeof(pci->isr),
		 VIRTIO_PCI_CAP_ISR_CFG,
		 bar_offset, bar_len,
		 offsetof(struct pci_config, cfg_access));

	/*
	 * 4.1.4.7.1: at least one VIRTIO_PCI_CAP_PCI_CFG capability.
	 * This doesn't have any presence in the BAR.
	 */
	init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access),
		 VIRTIO_PCI_CAP_PCI_CFG,
		 0, 0, 0);

	bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding);
	assert(bar_offset == sizeof(struct virtio_pci_mmio));

	/*
	 * This gets sewn in and length set in set_device_config().
	 * Some devices don't have a device configuration interface, so
	 * we never expose this if we don't call set_device_config().
	 */
	init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG,
		 bar_offset, 0, 0);
}

R
Rusty Russell 已提交
2609
/*
2610 2611 2612 2613
 * This routine does all the creation and setup of a new device, but we don't
 * actually place the MMIO region until we know the size (if any) of the
 * device-specific config.  And we don't actually start the service threads
 * until later.
2614
 *
R
Rusty Russell 已提交
2615 2616
 * See what I mean about userspace being boring?
 */
2617 2618 2619 2620 2621 2622 2623 2624 2625
static struct device *new_pci_device(const char *name, u16 type,
				     u8 class, u8 subclass)
{
	struct device *dev = malloc(sizeof(*dev));

	/* Now we populate the fields one at a time. */
	dev->name = name;
	dev->vq = NULL;
	dev->running = false;
2626
	dev->wrote_features_ok = false;
2627 2628 2629 2630 2631
	dev->mmio_size = sizeof(struct virtio_pci_mmio);
	dev->mmio = calloc(1, dev->mmio_size);
	dev->features = (u64)1 << VIRTIO_F_VERSION_1;
	dev->features_accepted = 0;

2632
	if (devices.device_num + 1 >= MAX_PCI_DEVICES)
2633 2634 2635 2636 2637 2638 2639 2640 2641
		errx(1, "Can only handle 31 PCI devices");

	init_pci_config(&dev->config, type, class, subclass);
	assert(!devices.pci[devices.device_num+1]);
	devices.pci[++devices.device_num] = dev;

	return dev;
}

R
Rusty Russell 已提交
2642 2643 2644 2645
/*
 * Our first setup routine is the console.  It's a fairly simple device, but
 * UNIX tty handling makes it uglier than it could be.
 */
R
Rusty Russell 已提交
2646
static void setup_console(void)
2647 2648
{
	struct device *dev;
2649
	struct virtio_console_config conf;
2650

2651
	/* If we can save the initial standard input settings... */
2652 2653
	if (tcgetattr(STDIN_FILENO, &orig_term) == 0) {
		struct termios term = orig_term;
R
Rusty Russell 已提交
2654 2655 2656 2657
		/*
		 * Then we turn off echo, line buffering and ^C etc: We want a
		 * raw input stream to the Guest.
		 */
2658 2659 2660 2661
		term.c_lflag &= ~(ISIG|ICANON|ECHO);
		tcsetattr(STDIN_FILENO, TCSANOW, &term);
	}

2662
	dev = new_pci_device("console", VIRTIO_ID_CONSOLE, 0x07, 0x00);
2663

2664
	/* We store the console state in dev->priv, and initialize it. */
2665 2666 2667
	dev->priv = malloc(sizeof(struct console_abort));
	((struct console_abort *)dev->priv)->count = 0;

R
Rusty Russell 已提交
2668 2669
	/*
	 * The console needs two virtqueues: the input then the output.  When
2670 2671
	 * they put something the input queue, we make sure we're listening to
	 * stdin.  When they put something in the output queue, we write it to
R
Rusty Russell 已提交
2672 2673
	 * stdout.
	 */
2674 2675
	add_pci_virtqueue(dev, console_input, "input");
	add_pci_virtqueue(dev, console_output, "output");
2676

2677 2678 2679
	/* We need a configuration area for the emerg_wr early writes. */
	add_pci_feature(dev, VIRTIO_CONSOLE_F_EMERG_WRITE);
	set_device_config(dev, &conf, sizeof(conf));
R
Rusty Russell 已提交
2680

2681
	verbose("device %u: console\n", devices.device_num);
2682
}
R
Rusty Russell 已提交
2683
/*:*/

/*M:010
 * Inter-guest networking is an interesting area.  Simplest is to have a
 * --sharenet=<name> option which opens or creates a named pipe.  This can be
 * used to send packets to another guest in a 1:1 manner.
 *
 * More sophisticated is to use one of the tools developed for projects like
 * UML to do networking.
 *
 * Faster is to do virtio bonding in kernel.  Doing this 1:1 would be
 * completely generic ("here's my vring, attach to your vring") and would work
 * for any traffic.  Of course, namespace and permissions issues need to be
 * dealt with.  A more sophisticated "multi-channel" virtio_net.c could hide
 * multiple inter-guest channels behind one interface, although it would
 * require some manner of hotplugging new virtio channels.
 *
 * Finally, we could use a virtio network switch in the kernel, ie. vhost.
:*/
2702 2703 2704

/*
 * Parse a dotted-quad IPv4 address ("a.b.c.d") into a host-order u32.
 * Exits with an error on malformed input or out-of-range octets.
 */
static u32 str2ip(const char *ipaddr)
{
	unsigned int b[4];

	if (sscanf(ipaddr, "%u.%u.%u.%u", &b[0], &b[1], &b[2], &b[3]) != 4)
		errx(1, "Failed to parse IP address '%s'", ipaddr);
	/* %u happily accepts values > 255, which would corrupt the result. */
	if (b[0] > 255 || b[1] > 255 || b[2] > 255 || b[3] > 255)
		errx(1, "IP address '%s' has an octet > 255", ipaddr);
	return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
}

/*
 * Parse a colon-separated MAC address ("xx:xx:xx:xx:xx:xx", hex) into the
 * six-byte buffer 'mac'.  Exits with an error on malformed input.
 */
static void str2mac(const char *macaddr, unsigned char mac[6])
{
	unsigned int octets[6];
	int i;

	if (sscanf(macaddr, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &octets[0], &octets[1], &octets[2],
		   &octets[3], &octets[4], &octets[5]) != 6)
		errx(1, "Failed to parse mac address '%s'", macaddr);
	/* %02x caps each field at two hex digits, so each fits in a byte. */
	for (i = 0; i < 6; i++)
		mac[i] = octets[i];
}

R
Rusty Russell 已提交
2726 2727
/*
 * This code is "adapted" from libbridge: it attaches the Host end of the
2728 2729 2730
 * network device to the bridge device specified by the command line.
 *
 * This is yet another James Morris contribution (I'm an IP-level guy, so I
R
Rusty Russell 已提交
2731 2732
 * dislike bridging), and I just try not to break it.
 */
2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745
static void add_to_bridge(int fd, const char *if_name, const char *br_name)
{
	int ifidx;
	struct ifreq ifr;

	if (!*br_name)
		errx(1, "must specify bridge name");

	ifidx = if_nametoindex(if_name);
	if (!ifidx)
		errx(1, "interface %s does not exist!", if_name);

	strncpy(ifr.ifr_name, br_name, IFNAMSIZ);
2746
	ifr.ifr_name[IFNAMSIZ-1] = '\0';
2747 2748 2749 2750 2751
	ifr.ifr_ifindex = ifidx;
	if (ioctl(fd, SIOCBRADDIF, &ifr) < 0)
		err(1, "can't add %s to bridge %s", if_name, br_name);
}

R
Rusty Russell 已提交
2752 2753
/*
 * This sets up the Host end of the network device with an IP address, brings
2754
 * it up so packets will flow, the copies the MAC address into the hwaddr
R
Rusty Russell 已提交
2755 2756
 * pointer.
 */
2757
static void configure_device(int fd, const char *tapif, u32 ipaddr)
2758 2759
{
	struct ifreq ifr;
2760
	struct sockaddr_in sin;
2761 2762

	memset(&ifr, 0, sizeof(ifr));
2763 2764 2765
	strcpy(ifr.ifr_name, tapif);

	/* Don't read these incantations.  Just cut & paste them like I did! */
2766 2767 2768
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(ipaddr);
	memcpy(&ifr.ifr_addr, &sin, sizeof(sin));
2769
	if (ioctl(fd, SIOCSIFADDR, &ifr) != 0)
2770
		err(1, "Setting %s interface address", tapif);
2771 2772
	ifr.ifr_flags = IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) != 0)
2773 2774 2775 2776
		err(1, "Bringing interface %s up", tapif);
}

static int get_tun_device(char tapif[IFNAMSIZ])
2777 2778
{
	struct ifreq ifr;
2779
	int vnet_hdr_sz;
2780 2781 2782 2783
	int netfd;

	/* Start with this zeroed.  Messy but sure. */
	memset(&ifr, 0, sizeof(ifr));
2784

R
Rusty Russell 已提交
2785 2786
	/*
	 * We open the /dev/net/tun device and tell it we want a tap device.  A
2787 2788
	 * tap device is like a tun device, only somehow different.  To tell
	 * the truth, I completely blundered my way through this code, but it
R
Rusty Russell 已提交
2789 2790
	 * works now!
	 */
2791
	netfd = open_or_die("/dev/net/tun", O_RDWR);
2792
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
2793 2794 2795
	strcpy(ifr.ifr_name, "tap%d");
	if (ioctl(netfd, TUNSETIFF, &ifr) != 0)
		err(1, "configuring /dev/net/tun");
2796

2797 2798 2799 2800
	if (ioctl(netfd, TUNSETOFFLOAD,
		  TUN_F_CSUM|TUN_F_TSO4|TUN_F_TSO6|TUN_F_TSO_ECN) != 0)
		err(1, "Could not set features for tun device");

R
Rusty Russell 已提交
2801 2802 2803 2804
	/*
	 * We don't need checksums calculated for packets coming in this
	 * device: trust us!
	 */
2805 2806
	ioctl(netfd, TUNSETNOCSUM, 1);

2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
	/*
	 * In virtio before 1.0 (aka legacy virtio), we added a 16-bit
	 * field at the end of the network header iff
	 * VIRTIO_NET_F_MRG_RXBUF was negotiated.  For virtio 1.0,
	 * that became the norm, but we need to tell the tun device
	 * about our expanded header (which is called
	 * virtio_net_hdr_mrg_rxbuf in the legacy system).
	 */
	vnet_hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0)
		err(1, "Setting tun header size to %u", vnet_hdr_sz);

2819 2820 2821 2822
	memcpy(tapif, ifr.ifr_name, IFNAMSIZ);
	return netfd;
}

R
Rusty Russell 已提交
2823 2824
/*L:195
 * Our network is a Host<->Guest network.  This can either use bridging or
2825 2826
 * routing, but the principle is the same: it uses the "tun" device to inject
 * packets into the Host as if they came in from a normal network card.  We
R
Rusty Russell 已提交
2827 2828
 * just shunt packets between the Guest and the tun device.
 */
2829 2830 2831
static void setup_tun_net(char *arg)
{
	struct device *dev;
2832 2833
	struct net_info *net_info = malloc(sizeof(*net_info));
	int ipfd;
2834 2835 2836 2837 2838
	u32 ip = INADDR_ANY;
	bool bridging = false;
	char tapif[IFNAMSIZ], *p;
	struct virtio_net_config conf;

2839
	net_info->tunfd = get_tun_device(tapif);
2840

R
Rusty Russell 已提交
2841
	/* First we create a new network device. */
2842
	dev = new_pci_device("net", VIRTIO_ID_NET, 0x02, 0x00);
2843
	dev->priv = net_info;
2844

R
Rusty Russell 已提交
2845
	/* Network devices need a recv and a send queue, just like console. */
2846 2847
	add_pci_virtqueue(dev, net_input, "rx");
	add_pci_virtqueue(dev, net_output, "tx");
2848

R
Rusty Russell 已提交
2849 2850 2851 2852
	/*
	 * We need a socket to perform the magic network ioctls to bring up the
	 * tap interface, connect to the bridge etc.  Any socket will do!
	 */
2853 2854 2855 2856
	ipfd = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	if (ipfd < 0)
		err(1, "opening IP socket");

2857
	/* If the command line was --tunnet=bridge:<name> do bridging. */
2858
	if (!strncmp(BRIDGE_PFX, arg, strlen(BRIDGE_PFX))) {
2859 2860 2861 2862 2863 2864 2865 2866
		arg += strlen(BRIDGE_PFX);
		bridging = true;
	}

	/* A mac address may follow the bridge name or IP address */
	p = strchr(arg, ':');
	if (p) {
		str2mac(p+1, conf.mac);
2867
		add_pci_feature(dev, VIRTIO_NET_F_MAC);
2868 2869 2870 2871 2872 2873 2874
		*p = '\0';
	}

	/* arg is now either an IP address or a bridge name */
	if (bridging)
		add_to_bridge(ipfd, tapif, arg);
	else
2875 2876
		ip = str2ip(arg);

2877 2878
	/* Set up the tun device. */
	configure_device(ipfd, tapif, ip);
2879

2880
	/* Expect Guest to handle everything except UFO */
2881 2882 2883 2884 2885 2886 2887 2888
	add_pci_feature(dev, VIRTIO_NET_F_CSUM);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_CSUM);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO4);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO6);
	add_pci_feature(dev, VIRTIO_NET_F_GUEST_ECN);
	add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO4);
	add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO6);
	add_pci_feature(dev, VIRTIO_NET_F_HOST_ECN);
2889
	/* We handle indirect ring entries */
2890 2891
	add_pci_feature(dev, VIRTIO_RING_F_INDIRECT_DESC);
	set_device_config(dev, &conf, sizeof(conf));
2892

2893
	/* We don't need the socket any more; setup is done. */
2894 2895
	close(ipfd);

2896 2897 2898 2899 2900 2901
	if (bridging)
		verbose("device %u: tun %s attached to bridge: %s\n",
			devices.device_num, tapif, arg);
	else
		verbose("device %u: tun %s: %s\n",
			devices.device_num, tapif, arg);
2902
}
R
Rusty Russell 已提交
2903
/*:*/
R
Rusty Russell 已提交
2904

R
Rusty Russell 已提交
2905
/*
 * Per-device state for a virtual block device; hangs off device->priv.
 * Filled in by setup_block_file() and consumed by blk_request().
 */
struct vblk_info {
	/* The size of the backing file in bytes (bounds Guest writes). */
	off64_t len;

	/* The file descriptor for the backing file. */
	int fd;

};

R
Rusty Russell 已提交
2915 2916 2917
/*L:210
 * The Disk
 *
 * The disk only has one virtqueue, so it only has one thread.  It is really
 * simple: the Guest asks for a block number and we read or write that position
 * in the file.
 *
 * Before we serviced each virtqueue in a separate thread, that was unacceptably
 * slow: the Guest waits until the read is finished before running anything
 * else, even if it could have been doing useful work.
 *
 * We could have used async I/O, except it's reputed to suck so hard that
 * characters actually go missing from your code when you try to use it.
 */
static void blk_request(struct virtqueue *vq)
{
	struct vblk_info *vblk = vq->dev->priv;
	unsigned int head, out_num, in_num, wlen;
	int ret, i;
	u8 *in;
	struct virtio_blk_outhdr out;
	struct iovec iov[vq->vring.num];
	off64_t off;

	/*
	 * Get the next request, where we normally wait.  It triggers the
	 * interrupt to acknowledge previously serviced requests (if any).
	 */
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);

	/* Copy the output header from the front of the iov (adjusts iov) */
	iov_consume(iov, out_num, &out, sizeof(out));

	/*
	 * Find and trim end of iov input array, for our status byte: the
	 * final byte of the last non-empty input buffer is where the Guest
	 * expects the VIRTIO_BLK_S_* status to land.
	 *
	 * NOTE(review): i is signed but compared against unsigned out_num,
	 * so if out_num were 0 and all inputs empty, i would wrap to a huge
	 * unsigned value; presumably the header always supplies at least
	 * one out buffer — confirm against wait_for_vq_desc's contract.
	 */
	in = NULL;
	for (i = out_num + in_num - 1; i >= out_num; i--) {
		if (iov[i].iov_len > 0) {
			in = iov[i].iov_base + iov[i].iov_len - 1;
			iov[i].iov_len--;
			break;
		}
	}
	if (!in)
		errx(1, "Bad virtblk cmd with no room for status");

	/*
	 * For historical reasons, block operations are expressed in 512 byte
	 * "sectors".
	 */
	off = out.sector * 512;

	if (out.type & VIRTIO_BLK_T_OUT) {
		/*
		 * Write
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to write past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out.sector);

		/* The data to write is in the out (readable) buffers. */
		ret = writev(vblk->fd, iov, out_num);
		verbose("WRITE to sector %llu: %i\n", out.sector, ret);

		/*
		 * Grr... Now we know how long the descriptor they sent was, we
		 * make sure they didn't try to write over the end of the block
		 * file (possibly extending it).
		 */
		if (ret > 0 && off + ret > vblk->len) {
			/* Trim it back to the correct length */
			ftruncate64(vblk->fd, vblk->len);
			/* Die, bad Guest, die. */
			errx(1, "Write past end %llu+%u", off, ret);
		}

		/* Only the one status byte gets written back to the Guest. */
		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else if (out.type & VIRTIO_BLK_T_FLUSH) {
		/* Flush: force any written data down to the disk. */
		ret = fdatasync(vblk->fd);
		verbose("FLUSH fdatasync: %i\n", ret);
		wlen = sizeof(*in);
		*in = (ret >= 0 ? VIRTIO_BLK_S_OK : VIRTIO_BLK_S_IOERR);
	} else {
		/*
		 * Read
		 *
		 * Move to the right location in the block file.  This can fail
		 * if they try to read past end.
		 */
		if (lseek64(vblk->fd, off, SEEK_SET) != off)
			err(1, "Bad seek to sector %llu", out.sector);

		/* Read into the in (writable) buffers after the header. */
		ret = readv(vblk->fd, iov + out_num, in_num);
		if (ret >= 0) {
			/* We wrote the data bytes plus the status byte. */
			wlen = sizeof(*in) + ret;
			*in = VIRTIO_BLK_S_OK;
		} else {
			wlen = sizeof(*in);
			*in = VIRTIO_BLK_S_IOERR;
		}
	}

	/* Finished that request: hand it back with the bytes we wrote. */
	add_used(vq, head, wlen);
}

R
Rusty Russell 已提交
3023
/*L:198 This actually sets up a virtual block device. */
R
Rusty Russell 已提交
3024 3025 3026 3027
static void setup_block_file(const char *filename)
{
	struct device *dev;
	struct vblk_info *vblk;
3028
	struct virtio_blk_config conf;
R
Rusty Russell 已提交
3029

3030 3031
	/* Create the device. */
	dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80);
R
Rusty Russell 已提交
3032

R
Rusty Russell 已提交
3033
	/* The device has one virtqueue, where the Guest places requests. */
3034
	add_pci_virtqueue(dev, blk_request, "request");
R
Rusty Russell 已提交
3035 3036 3037 3038 3039 3040 3041 3042 3043

	/* Allocate the room for our own bookkeeping */
	vblk = dev->priv = malloc(sizeof(*vblk));

	/* First we open the file and store the length. */
	vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE);
	vblk->len = lseek64(vblk->fd, 0, SEEK_END);

	/* Tell Guest how many sectors this device has. */
3044
	conf.capacity = cpu_to_le64(vblk->len / 512);
R
Rusty Russell 已提交
3045

R
Rusty Russell 已提交
3046 3047 3048 3049
	/*
	 * Tell Guest not to put in too many descriptors at once: two are used
	 * for the in and out elements.
	 */
3050
	add_pci_feature(dev, VIRTIO_BLK_F_SEG_MAX);
3051 3052
	conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2);

3053
	set_device_config(dev, &conf, sizeof(struct virtio_blk_config));
R
Rusty Russell 已提交
3054 3055

	verbose("device %u: virtblock %llu sectors\n",
3056
		devices.device_num, le64_to_cpu(conf.capacity));
R
Rusty Russell 已提交
3057
}
R
Rusty Russell 已提交
3058

R
Rusty Russell 已提交
3059
/*L:211
 * Our random number generator device reads from /dev/urandom into the Guest's
 * input buffers.  The usual case is that the Guest doesn't want random numbers
 * and so has no buffers although /dev/urandom is still readable, whereas
 * console is the reverse.
 *
 * The same logic applies, however.
 */
struct rng_info {
	/* Read side of /dev/urandom, opened in setup_rng(). */
	int rfd;
};

3071
static void rng_input(struct virtqueue *vq)
R
Rusty Russell 已提交
3072 3073 3074
{
	int len;
	unsigned int head, in_num, out_num, totlen = 0;
3075 3076
	struct rng_info *rng_info = vq->dev->priv;
	struct iovec iov[vq->vring.num];
R
Rusty Russell 已提交
3077 3078

	/* First we need a buffer from the Guests's virtqueue. */
3079
	head = wait_for_vq_desc(vq, iov, &out_num, &in_num);
R
Rusty Russell 已提交
3080 3081 3082
	if (out_num)
		errx(1, "Output buffers in rng?");

R
Rusty Russell 已提交
3083
	/*
R
Rusty Russell 已提交
3084 3085
	 * Just like the console write, we loop to cover the whole iovec.
	 * In this case, short reads actually happen quite a bit.
R
Rusty Russell 已提交
3086
	 */
R
Rusty Russell 已提交
3087
	while (!iov_empty(iov, in_num)) {
3088
		len = readv(rng_info->rfd, iov, in_num);
R
Rusty Russell 已提交
3089
		if (len <= 0)
3090
			err(1, "Read from /dev/urandom gave %i", len);
3091
		iov_consume(iov, in_num, NULL, len);
R
Rusty Russell 已提交
3092 3093 3094 3095
		totlen += len;
	}

	/* Tell the Guest about the new input. */
3096
	add_used(vq, head, totlen);
R
Rusty Russell 已提交
3097 3098
}

R
Rusty Russell 已提交
3099 3100 3101
/*L:199
 * This creates a "hardware" random number device for the Guest.
 */
R
Rusty Russell 已提交
3102 3103 3104
static void setup_rng(void)
{
	struct device *dev;
3105
	struct rng_info *rng_info = malloc(sizeof(*rng_info));
R
Rusty Russell 已提交
3106

3107 3108
	/* Our device's private info simply contains the /dev/urandom fd. */
	rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY);
R
Rusty Russell 已提交
3109

R
Rusty Russell 已提交
3110
	/* Create the new device. */
3111
	dev = new_pci_device("rng", VIRTIO_ID_RNG, 0xff, 0);
3112
	dev->priv = rng_info;
R
Rusty Russell 已提交
3113 3114

	/* The device has one virtqueue, where the Guest places inbufs. */
3115
	add_pci_virtqueue(dev, rng_input, "input");
R
Rusty Russell 已提交
3116

3117 3118 3119 3120
	/* We don't have any configuration space */
	no_device_config(dev);

	verbose("device %u: rng\n", devices.device_num);
R
Rusty Russell 已提交
3121
}
3122
/* That's the end of device setup. */
B
Balaji Rao 已提交
3123

3124
/*L:230 Reboot is pretty easy: clean up and exec() the Launcher afresh. */
B
Balaji Rao 已提交
3125 3126 3127 3128
static void __attribute__((noreturn)) restart_guest(void)
{
	unsigned int i;

R
Rusty Russell 已提交
3129 3130 3131 3132
	/*
	 * Since we don't track all open fds, we simply close everything beyond
	 * stderr.
	 */
B
Balaji Rao 已提交
3133 3134
	for (i = 3; i < FD_SETSIZE; i++)
		close(i);
3135

3136 3137 3138
	/* Reset all the devices (kills all threads). */
	cleanup_devices();

B
Balaji Rao 已提交
3139 3140 3141
	execv(main_args[0], main_args);
	err(1, "Could not exec %s", main_args[0]);
}
3142

R
Rusty Russell 已提交
3143 3144 3145 3146
/*L:220
 * Finally we reach the core of the Launcher which runs the Guest, serves
 * its input and output, and finally, lays it to rest.
 */
static void __attribute__((noreturn)) run_guest(void)
{
	for (;;) {
		struct lguest_pending notify;
		int readval;

		/*
		 * We read from the /dev/lguest device to run the Guest.
		 * A full-sized read means the Guest stopped on a trap we
		 * must service; a failed read reports via errno below.
		 */
		readval = pread(lguest_fd, &notify, sizeof(notify), cpu_id);
		if (readval == sizeof(notify)) {
			/* Trap 13 is the x86 General Protection fault:
			 * an I/O instruction for us to emulate. */
			if (notify.trap == 13) {
				verbose("Emulating instruction at %#x\n",
					getreg(eip));
				emulate_insn(notify.insn);
			/* Trap 14 is the x86 Page Fault: an access to
			 * device MMIO space for us to emulate. */
			} else if (notify.trap == 14) {
				verbose("Emulating MMIO at %#x\n",
					getreg(eip));
				emulate_mmio(notify.addr, notify.insn);
			} else
				errx(1, "Unknown trap %i addr %#08x\n",
				     notify.trap, notify.addr);
		/* ENOENT means the Guest died.  Reading tells us why. */
		} else if (errno == ENOENT) {
			char reason[1024] = { 0 };
			pread(lguest_fd, reason, sizeof(reason)-1, cpu_id);
			errx(1, "%s", reason);
		/* ERESTART means that we need to reboot the guest */
		} else if (errno == ERESTART) {
			restart_guest();
		/* Anything else means a bug or incompatible change. */
		} else
			err(1, "Running guest failed");
	}
}
3180
/*L:240
R
Rusty Russell 已提交
3181 3182 3183
 * This is the end of the Launcher.  The good news: we are over halfway
 * through!  The bad news: the most fiendish part of the code still lies ahead
 * of us.
3184
 *
R
Rusty Russell 已提交
3185 3186
 * Are you ready?  Take a deep breath and join me in the core of the Host, in
 * "make Host".
R
Rusty Russell 已提交
3187
:*/
3188 3189 3190 3191 3192

/* The long options we accept; dispatched on the short-option char in main(). */
static struct option opts[] = {
	{ "verbose", 0, NULL, 'v' },
	{ "tunnet", 1, NULL, 't' },
	{ "block", 1, NULL, 'b' },
	{ "rng", 0, NULL, 'r' },
	{ "initrd", 1, NULL, 'i' },
	{ "username", 1, NULL, 'u' },
	{ "chroot", 1, NULL, 'c' },
	{ NULL },
};
/*
 * Print a usage summary and exit.  Lists every option opts[] actually
 * accepts (the old text omitted --rng, --username and --chroot).
 */
static void usage(void)
{
	errx(1, "Usage: lguest [--verbose] "
	     "[--tunnet=(<ipaddr>:<macaddr>|bridge:<bridgename>:<macaddr>)\n"
	     "|--block=<filename>|--rng|--initrd=<filename>\n"
	     "|--username=<name>|--chroot=<path>]...\n"
	     "<mem-in-mb> vmlinux [args...]");
}

3207
/*L:105 The main routine is where the real work begins: */
int main(int argc, char *argv[])
{
	/* Memory, code startpoint and size of the (optional) initrd. */
	unsigned long mem = 0, start, initrd_size = 0;
	/* Two temporaries. */
	int i, c;
	/* The boot information for the Guest. */
	struct boot_params *boot;
	/* If they specify an initrd file to load. */
	const char *initrd_name = NULL;

	/* Password structure for initgroups/setres[gu]id */
	struct passwd *user_details = NULL;

	/* Directory to chroot to */
	char *chroot_path = NULL;

	/* Save the args: we "reboot" by execing ourselves again. */
	main_args = argv;

	/*
	 * First we initialize the device list.  We remember next interrupt
	 * number to use for devices (1: remember that 0 is used by the timer).
	 */
	devices.next_irq = 1;

	/* We're CPU 0.  In fact, that's the only CPU possible right now. */
	cpu_id = 0;

	/*
	 * We need to know how much memory so we can set up the device
	 * descriptor and memory pages for the devices as we parse the command
	 * line.  So we quickly look through the arguments to find the amount
	 * of memory now.  (The first non-option argument is the memory size
	 * in megabytes; note that atoi() silently yields 0 on garbage.)
	 */
	for (i = 1; i < argc; i++) {
		if (argv[i][0] != '-') {
			mem = atoi(argv[i]) * 1024 * 1024;
			/*
			 * We start by mapping anonymous pages over all of
			 * guest-physical memory range.  This fills it with 0,
			 * and ensures that the Guest won't be killed when it
			 * tries to access it.
			 */
			guest_base = map_zeroed_pages(mem / getpagesize()
						      + DEVICE_PAGES);
			guest_limit = mem;
			guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize();
			break;
		}
	}

	/* We always have a console device, and it's always device 1. */
	setup_console();

	/* The options are fairly straight-forward */
	while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) {
		switch (c) {
		case 'v':
			verbose = true;
			break;
		case 't':
			setup_tun_net(optarg);
			break;
		case 'b':
			setup_block_file(optarg);
			break;
		case 'r':
			setup_rng();
			break;
		case 'i':
			initrd_name = optarg;
			break;
		case 'u':
			user_details = getpwnam(optarg);
			if (!user_details)
				err(1, "getpwnam failed, incorrect username?");
			break;
		case 'c':
			chroot_path = optarg;
			break;
		default:
			warnx("Unknown argument %s", argv[optind]);
			usage();
		}
	}
	/*
	 * After the other arguments we expect memory and kernel image name,
	 * followed by command line arguments for the kernel.
	 */
	if (optind + 2 > argc)
		usage();

	verbose("Guest base is at %p\n", guest_base);

	/* Initialize the (fake) PCI host bridge device. */
	init_pci_host_bridge();

	/* Now we load the kernel */
	start = load_kernel(open_or_die(argv[optind+1], O_RDONLY));

	/* Boot information is stashed at physical address 0 */
	boot = from_guest_phys(0);

	/* Map the initrd image if requested (at top of physical memory) */
	if (initrd_name) {
		initrd_size = load_initrd(initrd_name, mem);
		/*
		 * These are the location in the Linux boot header where the
		 * start and size of the initrd are expected to be found.
		 */
		boot->hdr.ramdisk_image = mem - initrd_size;
		boot->hdr.ramdisk_size = initrd_size;
		/* The bootloader type 0xFF means "unknown"; that's OK. */
		boot->hdr.type_of_loader = 0xFF;
	}

	/*
	 * The Linux boot header contains an "E820" memory map: ours is a
	 * simple, single region.
	 */
	boot->e820_entries = 1;
	boot->e820_map[0] = ((struct e820entry) { 0, mem, E820_RAM });
	/*
	 * The boot header contains a command line pointer: we put the command
	 * line after the boot header.
	 */
	boot->hdr.cmd_line_ptr = to_guest_phys(boot + 1);
	/* We use a simple helper to copy the arguments separated by spaces. */
	concat((char *)(boot + 1), argv+optind+2);

	/* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */
	boot->hdr.kernel_alignment = 0x1000000;

	/* Boot protocol version: 2.07 supports the fields for lguest. */
	boot->hdr.version = 0x207;

	/* The hardware_subarch value of "1" tells the Guest it's an lguest. */
	boot->hdr.hardware_subarch = 1;

	/* Tell the entry path not to try to reload segment registers. */
	boot->hdr.loadflags |= KEEP_SEGMENTS;

	/* We tell the kernel to initialize the Guest. */
	tell_kernel(start);

	/* Ensure that we terminate if a device-servicing child dies. */
	signal(SIGCHLD, kill_launcher);

	/* If we exit via err(), this kills all the threads, restores tty. */
	atexit(cleanup_devices);

	/* If requested, chroot to a directory */
	if (chroot_path) {
		if (chroot(chroot_path) != 0)
			err(1, "chroot(\"%s\") failed", chroot_path);

		if (chdir("/") != 0)
			err(1, "chdir(\"/\") failed");

		verbose("chroot done\n");
	}

	/* If requested, drop privileges (after chroot, while still root) */
	if (user_details) {
		uid_t u;
		gid_t g;

		u = user_details->pw_uid;
		g = user_details->pw_gid;

		/* Supplementary groups first, while we can still call it. */
		if (initgroups(user_details->pw_name, g) != 0)
			err(1, "initgroups failed");

		/* Drop gid before uid: the reverse would fail. */
		if (setresgid(g, g, g) != 0)
			err(1, "setresgid failed");

		if (setresuid(u, u, u) != 0)
			err(1, "setresuid failed");

		verbose("Dropping privileges completed\n");
	}

	/* Finally, run the Guest.  This doesn't return. */
	run_guest();
}
3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405
/*:*/

/*M:999
 * Mastery is done: you now know everything I do.
 *
 * But surely you have seen code, features and bugs in your wanderings which
 * you now yearn to attack?  That is the real game, and I look forward to you
 * patching and forking lguest into the Your-Name-Here-visor.
 *
 * Farewell, and good coding!
 * Rusty Russell.
 */