#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

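/*
 * Per-implementation DMA operations (e.g. nommu, swiotlb or an IOMMU
 * driver).  map_single and map_sg are called unconditionally by the
 * inline wrappers below; the remaining callbacks are optional and are
 * skipped when NULL.
 */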
struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern struct dma_mapping_ops *dma_ops;

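/*
 * 32-bit always uses the global dma_ops table; 64-bit honours a
 * per-device override in dev->archdata.dma_ops and falls back to the
 * global table when there is no device or no override.
 */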
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/*
 * Keep the old 32-bit/64-bit behaviour: 32-bit mappings never fail,
 * 64-bit asks the implementation or falls back to comparing against
 * bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}

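/*
 * x86 has no non-coherent DMA memory, so the noncoherent API simply
 * maps onto the coherent one and dma_is_consistent() is always true.
 */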
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

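/*
 * Streaming mapping wrappers: validate the direction, translate the
 * kernel virtual address to a physical one and hand it to the active
 * implementation.
 */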
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}

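/*
 * Scatterlist mapping: returns the number of DMA segments actually
 * mapped, or 0 on failure.
 */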
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
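
/*
 * The dma_sync_* helpers hand ownership of a mapped region back to the
 * CPU or to the device; flush_write_buffers() makes sure pending CPU
 * writes are visible before the device touches the buffer.
 */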

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}
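
/*
 * Page-based mapping reuses map_single on the page's physical address;
 * dma_unmap_page is consequently just dma_unmap_single.
 */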

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

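/*
 * x86 DMA is cache coherent, so dma_cache_sync() only needs to flush
 * the CPU write buffers.
 */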
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

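/*
 * Derive the effective coherent mask for an allocation: the device's
 * coherent_dma_mask if set, otherwise 24 bits for GFP_DMA requests and
 * 32 bits for everything else.
 */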
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
#ifdef CONFIG_X86_64
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

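/*
 * Generic coherent allocation: try a per-device coherent area first,
 * route device-less callers through x86_dma_fallback_dev with GFP_DMA,
 * and give up early for devices that are not DMA capable.
 */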
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */