#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};
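
/*
 * Each DMA backend (nommu, software IOTLB, hardware IOMMU, ...) fills in
 * one of these tables.  As a hypothetical sketch (the example_* names are
 * illustrative only), an identity mapping for a machine without an IOMMU
 * could look like:
 *
 *	static dma_addr_t example_map_single(struct device *hwdev,
 *					     phys_addr_t paddr, size_t size,
 *					     int direction)
 *	{
 *		return paddr;
 *	}
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.is_phys	= 1,
 *	};
 *
 * The table in use is reached through dma_ops below, possibly overridden
 * per device via dev->archdata.dma_ops on 64-bit kernels.
 */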

extern struct dma_mapping_ops *dma_ops;
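
/*
 * 32-bit kernels use the single global table above; 64-bit kernels may
 * carry a per-device table in dev->archdata.dma_ops (set up by the IOMMU
 * code) and fall back to the global one when none is set.
 */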

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
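
/*
 * Typical streaming use of dma_map_single()/dma_unmap_single(), sketched
 * with made-up driver names (pdev, buf, len):
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, bus))
 *		return -ENOMEM;
 *	... hand "bus" to the hardware and wait for it to finish ...
 *	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */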

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
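
/*
 * Scatter-gather mapping, sketched for a driver that already built an sg
 * list; sglist, nents and program_hw_descriptor() are illustrative names:
 *
 *	int i, mapped;
 *	struct scatterlist *sg;
 *
 *	mapped = dma_map_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, mapped, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * The hardware must be programmed with the returned count (entries may
 * have been merged), while dma_unmap_sg() takes the original nents.
 */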

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
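
/*
 * When a mapping is kept and ownership bounces between device and CPU
 * (e.g. a reused receive buffer), each CPU access is bracketed by the two
 * helpers above; a sketch with illustrative names (pdev, bus, len):
 *
 *	dma_sync_single_for_cpu(&pdev->dev, bus, len, DMA_FROM_DEVICE);
 *	... CPU reads the freshly DMA'd data ...
 *	dma_sync_single_for_device(&pdev->dev, bus, len, DMA_FROM_DEVICE);
 *	... hand the buffer back to the hardware ...
 *
 * The direction must match the one used for the original mapping.
 */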

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
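
/*
 * dma_map_single() needs a kernel virtual address, so highmem pages go
 * through dma_map_page() instead; a sketch with illustrative names
 * (pdev, page, off, len):
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_page(&pdev->dev, page, off, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, bus))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */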

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}
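
/*
 * Possible use, e.g. to size a buffer up to a full cache line so it does
 * not share one with unrelated data (len is an illustrative name):
 *
 *	size_t sz = ALIGN(len, dma_get_cache_alignment());
 */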

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
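
/*
 * Worked example of the translation above: a device limited to 24-bit
 * addressing gets GFP_DMA added, a 32-bit-limited device on a 64-bit
 * kernel gets GFP_DMA32, and a fully 64-bit capable device keeps the
 * caller's flags unchanged.
 */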

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
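
/*
 * Consistent (coherent) allocation, sketched for a hypothetical descriptor
 * ring; RING_BYTES, ring and ring_dma are illustrative names:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... the device sees ring_dma, the CPU uses "ring" directly ...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */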

#endif