#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */
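/*
 * A minimal, hypothetical sketch of the streaming usage pattern this header
 * declares ("dev", "buf" and "len" are assumed to come from the caller);
 * DMA-API.txt remains the authoritative reference:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... program the device with "handle", wait for it to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */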

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

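/*
 * Hooks implemented by each DMA/IOMMU backend.  get_dma_ops() picks the
 * per-device table (dev->archdata.dma_ops) on 64-bit and falls back to
 * the global dma_ops otherwise.
 */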
struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern struct dma_mapping_ops *dma_ops;

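/*
 * 32-bit always uses the global dma_ops; 64-bit prefers the ops attached
 * to the device and falls back to the global table for NULL devices or
 * devices without their own ops.
 */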
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_64
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

#endif
	return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

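/*
 * Mask negotiation is implemented out of line; struct dma_mapping_ops
 * exposes a ->dma_supported hook for backends that need to veto a mask.
 */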
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

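/*
 * Streaming-mapping wrappers: validate the direction, then forward to the
 * backend hook.  Hooks a backend may leave NULL (unmap, sync) are simply
 * skipped.
 */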
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}

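/*
 * The sync_* helpers below are no-ops for backends that do not provide the
 * corresponding hook; flush_write_buffers() is still issued to order any
 * outstanding CPU writes.
 */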
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

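/* Page-based mapping reuses the backend's map_single hook via page_to_phys(). */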
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

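/*
 * x86 DMA is cache-coherent, so dma_cache_sync() only needs to make sure
 * pending CPU write buffers are drained.
 */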
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

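/*
 * Coherent allocations use the device's coherent_dma_mask when it is set;
 * otherwise fall back to a 24-bit mask for GFP_DMA requests and a 32-bit
 * mask for everything else.
 */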
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

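/*
 * Allocation path: try the per-device coherent memory pool first, then let
 * the backend allocate with GFP flags derived from the effective mask.
 * NULL devices are redirected to x86_dma_fallback_dev.
 */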
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

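/*
 * The free path mirrors the allocation path: release per-device coherent
 * memory if that is where the buffer came from, otherwise hand it back to
 * the backend.
 */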
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */