/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
				 size_t size, int direction);
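
/*
 * Usage sketch (illustrative, not part of this API): a driver on a
 * non-snooping core can either use the coherent allocator above, or
 * allocate normally and sync explicitly as the comment suggests.
 * "buf" and "len" are hypothetical names.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	if (buf)
 *		__dma_sync(buf, len, DMA_TO_DEVICE);	// make CPU writes visible
 */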

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		do { } while (0)
#define __dma_sync(addr, size, rw)		do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)	do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction);
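
/*
 * Usage sketch (hypothetical driver code): on ppc64 these calls go
 * through the bus's dma_mapping_ops (see the struct at the end of this
 * file), so every map should be paired with an unmap and checked for
 * failure.  "dev", "ptr" and "len" are illustrative.
 *
 *	dma_addr_t busaddr = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(busaddr))
 *		return -EIO;
 *	// ... start the transfer, wait for it to complete ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */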

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
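
/*
 * Example (sketch): a probe routine would typically set the mask before
 * doing any mapping; the 32-bit constant here is just illustrative.
 *
 *	if (dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 */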

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
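
/*
 * Example (sketch, illustrative names): a descriptor ring would be
 * allocated coherently and released with the matching free.
 * "RING_BYTES" is a hypothetical size.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 */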

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	do { } while (0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}
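
/*
 * Example (sketch): after dma_map_sg() the driver programs the device
 * from each entry's dma_address/length.  "program_hw_desc" is a
 * hypothetical helper.
 *
 *	int i, n = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < n; i++, sg++)
 *		program_hw_desc(sg->dma_address, sg->length);
 */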

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
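
/*
 * Example (sketch): with a long-lived streaming mapping the CPU must
 * bracket its accesses with the two sync calls; names are illustrative.
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	// ... CPU inspects the received data ...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 */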

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
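
/*
 * Example (sketch): mapping results should be checked before use, since
 * iommu-based ppc64 mappings can fail.  "busaddr" is illustrative.
 *
 *	if (dma_mapping_error(busaddr))
 *		goto unwind;
 */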

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)	(0)
#else
#define dma_is_consistent(d)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
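
/*
 * Example (sketch): when carving several DMA buffers out of one
 * allocation, padding each to this alignment keeps two buffers from
 * sharing a cache line.  "buf_len" is illustrative; ALIGN() comes from
 * linux/kernel.h.
 *
 *	size_t step = ALIGN(buf_len, dma_get_cache_alignment());
 */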

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
};
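
/*
 * Sketch of how a bus might supply an implementation (the instance and
 * function names here are hypothetical):
 *
 *	static struct dma_mapping_ops my_bus_dma_ops = {
 *		.alloc_coherent	= my_bus_alloc_coherent,
 *		.free_coherent	= my_bus_free_coherent,
 *		.map_single	= my_bus_map_single,
 *		.unmap_single	= my_bus_unmap_single,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *		.dma_supported	= my_bus_dma_supported,
 *	};
 */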

#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */