/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>

#define IOMMU_READ	(1)
#define IOMMU_WRITE	(2)
#define IOMMU_CACHE	(4) /* DMA cache coherency */

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
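
/*
 * Illustrative sketch (not part of this header; names are hypothetical):
 * a user interested in translation faults installs a handler of the above
 * type with iommu_set_fault_handler(), declared further below.  Returning
 * -ENOSYS from the handler keeps the driver's default fault behaviour.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu %s fault at iova 0x%lx\n",
 *			(flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, my_token);
 */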

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

struct iommu_domain {
	struct iommu_ops *ops;
	void *priv;
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
};

#define IOMMU_CAP_CACHE_COHERENCY	0x1
#define IOMMU_CAP_INTR_REMAP		0x2	/* isolates device intrs */

enum iommu_attr {
	DOMAIN_ATTR_MAX,
	DOMAIN_ATTR_GEOMETRY,
};

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_ops - iommu ops and capabilities
 * @domain_init: init iommu domain
 * @domain_destroy: destroy iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @iova_to_phys: translate iova to physical address
 * @domain_has_cap: domain capabilities query
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find the iommu group for a device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @pgsize_bitmap: bitmap of supported page sizes
 */
struct iommu_ops {
	int (*domain_init)(struct iommu_domain *domain);
	void (*domain_destroy)(struct iommu_domain *domain);
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
		     size_t size);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    unsigned long iova);
	int (*domain_has_cap)(struct iommu_domain *domain,
			      unsigned long cap);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	int (*device_group)(struct device *dev, unsigned int *groupid);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	unsigned long pgsize_bitmap;
};
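
/*
 * Illustrative sketch (not part of this header; the callback names and the
 * page-size mask are hypothetical placeholders): an IOMMU driver fills in
 * an iommu_ops structure and registers it for a bus type with
 * bus_set_iommu(), declared below.
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		.domain_init	= my_domain_init,
 *		.domain_destroy	= my_domain_destroy,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M,
 *	};
 *
 *	bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 */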

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
		       size_t size);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
				      unsigned long iova);
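
/*
 * Illustrative usage sketch (assumes a device "dev" on the PCI bus, an IOVA
 * "iova" chosen by the caller, and a page-aligned physical address "paddr";
 * error handling omitted):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	... the device performs DMA to/from iova ...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */
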
extern int iommu_domain_has_cap(struct iommu_domain *domain,
				unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
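
/*
 * Illustrative sketch (hypothetical names): a consumer of the group
 * interface can track devices entering or leaving a group by registering
 * a notifier; the device is passed as the notifier data.
 *
 *	static int my_group_notify(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *			dev_info(dev, "added to iommu group\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notify,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 */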

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
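
/*
 * Illustrative sketch: the mappable aperture of a domain can be queried
 * through the DOMAIN_ATTR_GEOMETRY attribute (error handling omitted):
 *
 *	struct iommu_domain_geometry geo;
 *
 *	iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
 *	if (geo.force_aperture)
 *		pr_info("aperture: 0x%llx - 0x%llx\n",
 *			(unsigned long long)geo.aperture_start,
 *			(unsigned long long)geo.aperture_end);
 */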

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
static inline int report_iommu_fault(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
						domain->handler_token);

	return ret;
}

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
					     unsigned long iova)
{
	return 0;
}

static inline int iommu_domain_has_cap(struct iommu_domain *domain,
				       unsigned long cap)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						   struct notifier_block *nb)
{
	return 0;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

#endif /* CONFIG_IOMMU_API */

#endif /* __LINUX_IOMMU_H */