/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *          David Woodhouse <David.Woodhouse@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

#define VTD_STRIDE_SHIFT        (9)
#define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)

#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)

#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH 2
/* Extended context entry types */
#define CONTEXT_TT_PT_PASID	4
#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
#define CONTEXT_TT_MASK (7ULL << 2)

#define CONTEXT_DINVE		(1ULL << 8)
#define CONTEXT_PRS		(1ULL << 9)
#define CONTEXT_PASIDE		(1ULL << 11)

/*
 * Intel IOMMU register specification per version 1.0 public spec.
 */
#define	DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
#define	DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
#define	DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
#define	DMAR_GCMD_REG	0x18	/* Global command register */
#define	DMAR_GSTS_REG	0x1c	/* Global status register */
#define	DMAR_RTADDR_REG	0x20	/* Root entry table */
#define	DMAR_CCMD_REG	0x28	/* Context command reg */
#define	DMAR_FSTS_REG	0x34	/* Fault Status register */
#define	DMAR_FECTL_REG	0x38	/* Fault control register */
#define	DMAR_FEDATA_REG	0x3c	/* Fault event interrupt data register */
#define	DMAR_FEADDR_REG	0x40	/* Fault event interrupt addr register */
#define	DMAR_FEUADDR_REG 0x44	/* Upper address register */
#define	DMAR_AFLOG_REG	0x58	/* Advanced Fault control */
#define	DMAR_PMEN_REG	0x64	/* Enable Protected Memory Region */
#define	DMAR_PLMBASE_REG 0x68	/* PMRR Low addr */
#define	DMAR_PLMLIMIT_REG 0x6c	/* PMRR low limit */
#define	DMAR_PHMBASE_REG 0x70	/* pmrr high base addr */
#define	DMAR_PHMLIMIT_REG 0x78	/* pmrr high limit */
#define DMAR_IQH_REG	0x80	/* Invalidation queue head register */
#define DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
#define DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
#define DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
#define DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
#define DMAR_IRTA_REG	0xb8    /* Interrupt remapping table addr register */
#define DMAR_PQH_REG	0xc0	/* Page request queue head register */
#define DMAR_PQT_REG	0xc8	/* Page request queue tail register */
#define DMAR_PQA_REG	0xd0	/* Page request queue address register */
#define DMAR_PRS_REG	0xdc	/* Page request status register */
#define DMAR_PECTL_REG	0xe0	/* Page request event control register */
#define	DMAR_PEDATA_REG	0xe4	/* Page request event interrupt data register */
#define	DMAR_PEADDR_REG	0xe8	/* Page request event interrupt addr register */
#define	DMAR_PEUADDR_REG 0xec	/* Page request event Upper address register */
#define DMAR_MTRRCAP_REG 0x100	/* MTRR capability register */
#define DMAR_MTRRDEF_REG 0x108	/* MTRR default type register */
#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define DMAR_MTRR_FIX16K_80000_REG 0x128
#define DMAR_MTRR_FIX16K_A0000_REG 0x130
#define DMAR_MTRR_FIX4K_C0000_REG 0x138
#define DMAR_MTRR_FIX4K_C8000_REG 0x140
#define DMAR_MTRR_FIX4K_D0000_REG 0x148
#define DMAR_MTRR_FIX4K_D8000_REG 0x150
#define DMAR_MTRR_FIX4K_E0000_REG 0x158
#define DMAR_MTRR_FIX4K_E8000_REG 0x160
#define DMAR_MTRR_FIX4K_F0000_REG 0x168
#define DMAR_MTRR_FIX4K_F8000_REG 0x170
#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
#define DMAR_MTRR_PHYSMASK0_REG 0x188
#define DMAR_MTRR_PHYSBASE1_REG 0x190
#define DMAR_MTRR_PHYSMASK1_REG 0x198
#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define DMAR_MTRR_PHYSBASE8_REG 0x200
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_VCCAP_REG		0xe00 /* Virtual command capability register */
#define DMAR_VCMD_REG		0xe10 /* Virtual command register */
#define DMAR_VCRSP_REG		0xe20 /* Virtual command response register */

#define OFFSET_STRIDE		(9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)

#define DMAR_VER_MAJOR(v)		(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)		((v) & 0x0f)
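
/*
 * Example (editorial sketch, not part of the original header): the
 * version register is 32 bits wide, so a probe path can decode it as
 *
 *	u32 ver = readl(iommu->reg + DMAR_VER_REG);
 *	pr_info("DMAR version %d:%d\n",
 *		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
 *
 * where "iommu" stands for any struct intel_iommu with reg mapped.
 */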

/*
 * Decoding Capability Register
 */
#define cap_5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c)	(((c) >> 59) & 1)
#define cap_fl1gp_support(c)	(((c) >> 56) & 1)
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)
#define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
					* OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
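
/*
 * Example (editorial sketch): typical decoding of a capability value
 * latched at probe time; "iommu->cap" is assumed to already hold the
 * value of DMAR_CAP_REG read via dmar_readq().
 *
 *	unsigned long ndomains = cap_ndoms(iommu->cap);
 *	void __iomem *fault_regs = iommu->reg +
 *				   cap_fault_reg_offset(iommu->cap);
 */
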
/*
 * Extended Capability Register
 */

#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
#define ecap_slts(e)		(((e) >> 46) & 0x1)
#define ecap_smts(e)		(((e) >> 43) & 0x1)
#define ecap_dit(e)		((e >> 41) & 0x1)
#define ecap_pasid(e)		((e >> 40) & 0x1)
#define ecap_pss(e)		((e >> 35) & 0x1f)
#define ecap_eafs(e)		((e >> 34) & 0x1)
#define ecap_nwfs(e)		((e >> 33) & 0x1)
#define ecap_srs(e)		((e >> 31) & 0x1)
#define ecap_ers(e)		((e >> 30) & 0x1)
#define ecap_prs(e)		((e >> 29) & 0x1)
#define ecap_broken_pasid(e)	((e >> 28) & 0x1)
#define ecap_dis(e)		((e >> 27) & 0x1)
#define ecap_nest(e)		((e >> 26) & 0x1)
#define ecap_mts(e)		((e >> 25) & 0x1)
#define ecap_ecs(e)		((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) 	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	((e >> 6) & 0x1)
#define ecap_eim_support(e)	((e >> 4) & 0x1)
#define ecap_ir_support(e)	((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping Control */

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET  60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id)	(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET  61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)

/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)

/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR	((u32)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)
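
/*
 * Example (editorial sketch, mirroring how the driver enables DMA
 * translation): set a bit in the global command register, then poll
 * the global status register until hardware acknowledges it. The real
 * caller also holds iommu->register_lock around this sequence.
 *
 *	u32 sts;
 *
 *	writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 *		      (sts & DMA_GSTS_TES), sts);
 */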

#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5
#define QI_EIOTLB_TYPE		0x6
#define QI_PC_TYPE		0x7
#define QI_DEIOTLB_TYPE		0x8
#define QI_PGRP_RESP_TYPE	0x9
#define QI_PSTRM_RESP_TYPE	0xa

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)

#define QI_IOTLB_DID(did) 	(((u64)did) << 16)
#define QI_IOTLB_DR(dr) 	(((u64)dr) << 7)
#define QI_IOTLB_DW(dw) 	(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran) 	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am))

#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

#define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
#define QI_PC_DID(did)		(((u64)did) << 16)
#define QI_PC_GRAN(gran)	(((u64)gran) << 4)

#define QI_PC_ALL_PASIDS	(QI_PC_TYPE | QI_PC_GRAN(0))
#define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))

#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	(((u64)am))
#define QI_EIOTLB_PASID(pasid) 	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran) 	(((u64)gran) << 4)

#define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g)	((u64)g)
#define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 55)
#define QI_PGRP_PRIV(priv)	(((u64)(priv)) << 32)
#define QI_PGRP_RESP_CODE(res)	((u64)(res))
#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)
#define QI_PGRP_DID(did)	(((u64)(did)) << 16)
#define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)

#define QI_PSTRM_ADDR(addr)	(((u64)(addr)) & VTD_PAGE_MASK)
#define QI_PSTRM_DEVFN(devfn)	(((u64)(devfn)) << 4)
#define QI_PSTRM_RESP_CODE(res)	((u64)(res))
#define QI_PSTRM_IDX(idx)	(((u64)(idx)) << 55)
#define QI_PSTRM_PRIV(priv)	(((u64)(priv)) << 32)
#define QI_PSTRM_BUS(bus)	(((u64)(bus)) << 24)
#define QI_PSTRM_PASID(pasid)	(((u64)(pasid)) << 4)

#define QI_RESP_SUCCESS		0x0
#define QI_RESP_INVALID		0x1
#define QI_RESP_FAILURE		0xf

#define QI_GRAN_ALL_ALL			0
#define QI_GRAN_NONG_ALL		1
#define QI_GRAN_NONG_PASID		2
#define QI_GRAN_PSI_PASID		3

#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))

struct qi_desc {
	u64 qw0;
	u64 qw1;
	u64 qw2;
	u64 qw3;
};
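
/*
 * Example (editorial sketch): a domain-selective IOTLB invalidation
 * descriptor assembled from the QI_IOTLB_* fields above; qw2/qw3 are
 * used only by 256-bit (scalable-mode) descriptors and stay zero here.
 * "did" and "iommu" are assumptions of the sketch.
 *
 *	struct qi_desc desc;
 *
 *	desc.qw0 = QI_IOTLB_DID(did) |
 *		   QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) | QI_IOTLB_TYPE;
 *	desc.qw1 = 0;
 *	desc.qw2 = 0;
 *	desc.qw3 = 0;
 *	qi_submit_sync(&desc, iommu);
 */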

struct q_inval {
	raw_spinlock_t  q_lock;
	void		*desc;          /* invalidation queue */
	int             *desc_status;   /* desc status */
	int             free_head;      /* first free entry */
	int             free_tail;      /* last free entry */
	int             free_cnt;
};

#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER	8
#define INTR_REMAP_TABLE_REG_SIZE	0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64     lo;
	u64     hi;
};

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
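
/*
 * Example (editorial sketch) of pulling fields out of a context entry
 * per the layout above; the driver wraps such accesses in helpers like
 * context_present(), declared later in this header.
 *
 *	bool present = context->lo & 1;
 *	u16 did = (context->hi >> 8) & 0xffff;
 */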

struct dmar_domain {
	int	nid;			/* node id */

	unsigned	iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */


	u16		iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */

	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature*/
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

struct intel_iommu {
	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
	u64 		reg_phys; /* physical address of hw register set */
	u64		reg_size; /* size of hw register set */
	u64		cap;
	u64		ecap;
	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw; /* agaw of this iommu */
	int		msagaw; /* max sagaw of this iommu */
	unsigned int 	irq, pr_irq;
	u16		segment;     /* PCI segment# */
	unsigned char 	name[13];    /* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	unsigned long 	*domain_ids; /* bitmap of domains */
	struct dmar_domain ***domains; /* ptr to domains */
	spinlock_t	lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */

	struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
	/* These are large and need to be contiguous, so we allocate just
	 * one for now. We'll maybe want to rethink that if we truly give
	 * devices away to userspace processes (e.g. for DPDK) and don't
	 * want to trust that userspace will use *only* the PASID it was
	 * told to. But while it's all driver-arbitrated, we're fine. */
	struct pasid_state_entry *pasid_state_table;
	struct page_req_dsc *prq;
	unsigned char prq_name[16];    /* Name for PRQ interrupt */
	u32 pasid_max;
#endif
	struct q_inval  *qi;            /* Queued invalidation info */
	u32 *iommu_state; /* Store iommu states between suspend and resume.*/

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
	struct irq_domain *ir_domain;
	struct irq_domain *ir_msi_domain;
#endif
	struct iommu_device iommu;  /* IOMMU core code handle */
	int		node;
	u32		flags;      /* Software defined flags */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	struct list_head table;	/* link to pasid table */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}
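
/*
 * Usage note (editorial): callers invoke this after CPU writes to
 * IOMMU-visible structures (page tables, root/context tables, the
 * invalidation queue) so a non-snooping IOMMU observes the update:
 *
 *	pte->val = pteval;
 *	__iommu_flush_cache(iommu, pte, sizeof(*pte));
 */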

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};
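
/*
 * Example (editorial sketch): a leaf PTE is the page-aligned host
 * physical address OR'ed with permission bits, roughly what the
 * driver's mapping path does ("pfn" is an assumption of the sketch):
 *
 *	pte->val = ((u64)pfn << VTD_PAGE_SHIFT) |
 *		   DMA_PTE_READ | DMA_PTE_WRITE;
 */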

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK;
#else
	/* Must have a full atomic 64-bit read */
	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline int first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & ~VTD_PAGE_MASK);
}

extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			  unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data);
void iommu_flush_write_buffer(struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_SVM
int intel_svm_init(struct intel_iommu *iommu);
int intel_svm_exit(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);

struct svm_dev_ops;

struct intel_svm_dev {
	struct list_head list;
	struct rcu_head rcu;
	struct device *dev;
	struct svm_dev_ops *ops;
	int users;
	u16 did;
	u16 dev_iotlb:1;
	u16 sid, qdep;
};

struct intel_svm {
	struct mmu_notifier notifier;
	struct mm_struct *mm;
	struct intel_iommu *iommu;
	int flags;
	int pasid;
	struct list_head devs;
	struct list_head list;
};

extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif

#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
#else
static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */

extern const struct attribute_group *intel_iommu_groups[];
bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
extern int intel_iommu_tboot_noforce;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled	(1)
#define intel_iommu_enabled (0)
#endif

#endif