/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
				       void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)

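/*
 * Illustrative sketch (not part of this header's API contract): an IOMMU-API
 * user such as a VFIO-like driver typically allocates an UNMANAGED domain,
 * attaches a device and manages the mappings itself. Error handling is
 * simplified and "dev", "iova" and "paddr" are assumed to exist.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain || iommu_attach_device(domain, dev))
 *		goto err;
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
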
struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * Following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};

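/*
 * Illustrative sketch: querying the domain aperture through the attribute
 * interface before choosing IOVA ranges. "domain" is assumed to be a valid
 * domain; error handling is simplified.
 *
 *	struct iommu_domain_geometry geo;
 *
 *	if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo) &&
 *	    geo.force_aperture)
 *		pr_info("IOVAs must fall within [%pad, %pad]\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */
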
/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
};

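/*
 * Illustrative sketch: walking the reserved regions reported for a device.
 * The list is filled in by the IOMMU core/driver and must be released with
 * iommu_put_resv_regions() when no longer needed.
 *
 *	struct iommu_resv_region *region;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("reserved [%pa, +%zu) type %d\n",
 *			&region->start, region->length, region->type);
 *	iommu_put_resv_regions(dev, &resv_regions);
 */
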
/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *           @mm_exit returns, the device must not issue any more transactions
 *           with the PASID given as argument.
 *
 *           The @mm_exit handler is allowed to sleep. Be careful about the
 *           locks taken in @mm_exit, because they might lead to deadlocks if
 *           they are also held when dropping references to the mm. Consider the
 *           following call chain:
 *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *           Using mmput_async() prevents this scenario.
 *
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};

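/*
 * Illustrative sketch: binding the current process address space to an
 * SVA-capable device and retrieving the PASID to program into the hardware.
 * "my_mm_exit_handler" and "drvdata" are made-up names; error handling is
 * simplified.
 *
 *	static const struct iommu_sva_ops my_sva_ops = {
 *		.mm_exit	= my_mm_exit_handler,
 *	};
 *
 *	struct iommu_sva *handle;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, drvdata);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENODEV;
 *	iommu_sva_set_ops(handle, &my_sva_ops);
 *	pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 */
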
#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_range_add: Add a given iova range to the flush queue for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
		     size_t size);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_range_add)(struct iommu_domain *domain,
				unsigned long iova, size_t size);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	unsigned long pgsize_bitmap;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware node handle associated with this IOMMU instance
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

int  iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

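/*
 * Illustrative sketch: a hypothetical IOMMU driver registering its hardware
 * instance with the core during probe. "my_iommu_ops" and the "smmu"
 * structure embedding a struct iommu_device are made up for illustration.
 *
 *	iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
 *	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
 *
 *	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "%s",
 *				     dev_name(dev));
 *	if (ret)
 *		return ret;
 *	return iommu_device_register(&smmu->iommu);
 */
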
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_tlb_range_add(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
	if (domain->ops->iotlb_range_add)
		domain->ops->iotlb_range_add(domain, iova, size);
}

static inline void iommu_tlb_sync(struct iommu_domain *domain)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain);
}

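/*
 * Illustrative sketch: batching TLB invalidations when tearing down a large
 * range, instead of letting iommu_unmap() flush after every call. "domain",
 * "iova" and "size" are assumed to exist.
 *
 *	unmapped = iommu_unmap_fast(domain, iova, size);
 *	iommu_tlb_range_add(domain, iova, unmapped);
 *	iommu_tlb_sync(domain);
 */
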
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @iommu_priv: IOMMU driver private data for this device
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	void			*iommu_priv;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[1];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
	const struct iommu_sva_ops	*ops;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

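/*
 * Illustrative sketch: a driver's ->of_xlate() callback recording the master
 * ID from the firmware description into the per-device fwspec. A single-cell
 * "#iommu-cells" binding is assumed for illustration.
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */
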
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return dev->iommu_fwspec;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu_fwspec = fwspec;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_tlb_range_add(struct iommu_domain *domain,
				       unsigned long iova, size_t size)
{
}

static inline void iommu_tlb_sync(struct iommu_domain *domain)
{
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int  iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
					  struct device *parent,
					  const struct attribute_group **groups,
					  const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline bool
iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline int iommu_sva_set_ops(struct iommu_sva *handle,
				    const struct iommu_sva_ops *ops)
{
	return -EINVAL;
}

static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */