/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Length of the MMIO region for the AMD IOMMU */
#define MMIO_REGION_LENGTH       0x4000

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)
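
/*
 * Illustrative sketch (not part of the original header): how init code can
 * decode the device range capability register into the first and last
 * handled device. The "iommu" variable and the config-space read site are
 * assumptions; calc_devid() is defined at the bottom of this file.
 */
#if 0
	u32 range;

	pci_read_config_dword(iommu->dev, iommu->cap_ptr + MMIO_RANGE_OFFSET,
			      &range);
	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device  = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_LD(range));
#endif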

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK 0x01ULL
#define MMIO_EXCL_ALLOW_MASK  0x02ULL
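
/*
 * Sketch (assumption, not from this header): the exclusion range is
 * programmed by OR-ing the enable flag into the page-aligned start address
 * before writing it to the MMIO register.
 */
#if 0
	u64 entry = iommu->exclusion_start & PAGE_MASK;

	entry |= MMIO_EXCL_ENABLE_MASK;		/* turn the range on */
	memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
		    &entry, sizeof(entry));
#endif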

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET   0x0000
#define MMIO_CMD_BUF_OFFSET     0x0008
#define MMIO_EVT_BUF_OFFSET     0x0010
#define MMIO_CONTROL_OFFSET     0x0018
#define MMIO_EXCL_BASE_OFFSET   0x0020
#define MMIO_EXCL_LIMIT_OFFSET  0x0028
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020

/* MMIO status bits */
#define MMIO_STATUS_COM_WAIT_INT_MASK	0x04

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK	0xffff
#define EVENT_DOMID_SHIFT	0
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10
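
/*
 * Sketch (assumption): unpacking an event log entry with the masks and
 * shifts above. An entry is EVENT_ENTRY_SIZE bytes, read here as four u32s;
 * the "__evt" pointer into the event buffer is hypothetical.
 */
#if 0
	u32 *event = __evt;
	int type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 address = (((u64)event[3]) << 32) | event[2];
#endif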

/* feature control bits */
#define CONTROL_IOMMU_EN        0x00ULL
#define CONTROL_HT_TUN_EN       0x01ULL
#define CONTROL_EVT_LOG_EN      0x02ULL
#define CONTROL_EVT_INT_EN      0x03ULL
#define CONTROL_COMWAIT_EN      0x04ULL
#define CONTROL_PASSPW_EN       0x08ULL
#define CONTROL_RESPASSPW_EN    0x09ULL
#define CONTROL_COHERENT_EN     0x0aULL
#define CONTROL_ISOC_EN         0x0bULL
#define CONTROL_CMDBUF_EN       0x0cULL
#define CONTROL_PPFLOG_EN       0x0dULL
#define CONTROL_PPFINT_EN       0x0eULL
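
/*
 * Sketch (assumption): the CONTROL_* values above are bit positions, not
 * masks. Enabling a feature therefore means setting bit (1 << pos) in the
 * control register; this simplified version only touches the low 32 bits.
 */
#if 0
static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
{
	u32 ctrl;

	ctrl  = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
	ctrl |= (1 << bit);
	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
#endif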

/* command specific defines */
#define CMD_COMPL_WAIT          0x01
#define CMD_INV_DEV_ENTRY       0x02
#define CMD_INV_IOMMU_PAGES     0x03

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL
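
/*
 * Sketch (assumption): flushing a whole domain combines the defines above
 * into one INVALIDATE_IOMMU_PAGES command. The iommu_cmd structure and the
 * type-field shift live in the driver, not in this header.
 */
#if 0
	struct iommu_cmd cmd;				/* hypothetical */

	memset(&cmd, 0, sizeof(cmd));
	cmd.data[1]  = domid;				/* domain to flush */
	cmd.data[1] |= CMD_INV_IOMMU_PAGES << 28;	/* command type */
	cmd.data[2]  = CMD_INV_IOMMU_ALL_PAGES_ADDRESS & 0xffffffff;
	cmd.data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;	/* S bit: a range */
	cmd.data[3]  = CMD_INV_IOMMU_ALL_PAGES_ADDRESS >> 32;
#endif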

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID         0x00
#define DEV_ENTRY_TRANSLATION   0x01
#define DEV_ENTRY_IR            0x3d
#define DEV_ENTRY_IW            0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX            0x67
#define DEV_ENTRY_SYSMGT1       0x68
#define DEV_ENTRY_SYSMGT2       0x69
#define DEV_ENTRY_INIT_PASS     0xb8
#define DEV_ENTRY_EINT_PASS     0xb9
#define DEV_ENTRY_NMI_PASS      0xba
#define DEV_ENTRY_LINT0_PASS    0xbe
#define DEV_ENTRY_LINT1_PASS    0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09
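
/*
 * Sketch (assumption): how the mode field is placed when a domain is
 * attached to a device. The page table root and the mode share the first
 * two data words of the entry; the IOMMU_PTE_* flags used here are defined
 * further down in this file.
 */
#if 0
	u64 pte_root = virt_to_phys(domain->pt_root);

	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
		    << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
	amd_iommu_dev_table[devid].data[2] = domain->id;
#endif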

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE    8192
#define CMD_BUFFER_ENTRIES 512
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
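
/*
 * Sketch (assumption): the command buffer base register takes the buffer's
 * physical address with log2 of the entry count encoded at bit 56, which is
 * what MMIO_CMD_SIZE_512 (0x9, i.e. 2^9 = 512 entries) provides.
 */
#if 0
	u64 entry;

	entry  = (u64)virt_to_phys(iommu->cmd_buf);
	entry |= MMIO_CMD_SIZE_512;
	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
		    &entry, sizeof(entry));
#endif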

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)

#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03

#define IOMMU_PDE_NL_0   0x000ULL
#define IOMMU_PDE_NL_1   0x200ULL
#define IOMMU_PDE_NL_2   0x400ULL
#define IOMMU_PDE_NL_3   0x600ULL

#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)

#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
#define IOMMU_MAP_SIZE_L3 (1ULL << 39)

#define IOMMU_PTE_P  (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U  (1ULL << 59)
#define IOMMU_PTE_FC (1ULL << 60)
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)

#define IOMMU_L1_PDE(address) \
	((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define IOMMU_L2_PDE(address) \
	((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)

#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
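
/*
 * Sketch (assumption): walking the three page table levels with the index
 * macros above, allocating missing levels on the way down. Allocation
 * failure handling is omitted for brevity.
 */
#if 0
static u64 *alloc_pte(struct protection_domain *dom, unsigned long address)
{
	u64 *pte, *page;

	pte = &dom->pt_root[IOMMU_PTE_L2_INDEX(address)];
	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		*pte = IOMMU_L2_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	pte = &pte[IOMMU_PTE_L1_INDEX(address)];
	if (!IOMMU_PTE_PRESENT(*pte)) {
		page = (u64 *)get_zeroed_page(GFP_KERNEL);
		*pte = IOMMU_L1_PDE(virt_to_phys(page));
	}

	pte = IOMMU_PTE_PAGE(*pte);
	return &pte[IOMMU_PTE_L0_INDEX(address)];
}
#endif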

#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB   24
#define IOMMU_CAP_NPCACHE 26

#define MAX_DOMAIN_ID 65536

/* FIXME: move this macro to <linux/pci.h> */
#define PCI_BUS(x) (((x) >> 8) & 0xff)

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	spinlock_t lock; /* mostly used to lock the page table */
	u16 id;		 /* the domain id written to the device table */
	int mode;	 /* paging mode (0-6 levels) */
	u64 *pt_root;	 /* page table root pointer */
	void *priv;	 /* private data */
};

/*
 * Data container for a dma_ops specific protection domain
 */
struct dma_ops_domain {
	struct list_head list;

	/* generic protection domain information */
	struct protection_domain domain;

	/* size of the aperture for the mappings */
	unsigned long aperture_size;

	/* address we start to search for free addresses */
	unsigned long next_bit;

	/* address allocation bitmap */
	unsigned long *bitmap;

	/*
	 * Array of PTE pages for the aperture. In this array we save all the
	 * leaf pages of the domain page table used for the aperture. This way
	 * we don't need to walk the page table to find a specific PTE. We can
	 * just calculate its address in constant time.
	 */
	u64 **pte_pages;

	/* This will be set to true when TLB needs to be flushed */
	bool need_flush;

	/*
	 * if this is a preallocated domain, keep the device for which it was
	 * preallocated in this variable
	 */
	u16 target_dev;
};
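
/*
 * Sketch (assumption): what the pte_pages array buys us. Instead of a full
 * page table walk, the leaf page for an aperture address is picked by its
 * L1 index and the PTE within it by its L0 index, in constant time.
 */
#if 0
	u64 *pte;

	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
	pte += IOMMU_PTE_L0_INDEX(address);
#endif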

/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* locks the accesses to the hardware */
	spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* physical address of MMIO space */
	u64 mmio_phys;
	/* virtual address of MMIO space */
	u8 *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* PCI domain (segment) of this IOMMU */
	u16 pci_seg;

	/* first device this IOMMU handles, read from PCI */
	u16 first_device;
	/* last device this IOMMU handles, read from PCI */
	u16 last_device;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	/* size of command buffer */
	u32 cmd_buf_size;

	/* size of event buffer */
	u32 evt_buf_size;
	/* event buffer virtual address */
	u8 *evt_buf;
	/* MSI number for event interrupt */
	u16 evt_msi_num;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if non-zero, we need to send a completion wait command */
	int need_sync;

	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;
};

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u32 data[8];
};
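
/*
 * Sanity sketch (assumption): an entry is 256 bits, i.e. exactly
 * DEV_TABLE_ENTRY_SIZE bytes, so the two definitions must stay in sync.
 */
#if 0
	BUILD_BUG_ON(sizeof(struct dev_table_entry) != DEV_TABLE_ENTRY_SIZE);
#endif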

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because at runtime it is only
 * read. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;

/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* data structures for protection domain handling */
extern struct protection_domain **amd_iommu_pd_table;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/* will be 1 if device isolation is enabled */
extern int amd_iommu_isolate;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* takes a PCI device id and prints it out in a readable form */
static inline void print_devid(u16 devid, int nl)
{
	int bus = devid >> 8;
	int dev = (devid >> 3) & 0x1f;
	int fn  = devid & 0x07;

	printk("%02x:%02x.%x", bus, dev, fn);
	if (nl)
		printk("\n");
}

/* takes bus and device/function and returns the device id
 * FIXME: should that be in generic PCI code? */
static inline u16 calc_devid(u8 bus, u8 devfn)
{
	return (((u16)bus) << 8) | devfn;
}
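
/*
 * Sketch (assumption): a typical call site derives the device id directly
 * from a struct pci_dev.
 */
#if 0
	u16 devid = calc_devid(pdev->bus->number, pdev->devfn);
#endif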

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */