/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc
 * (the pci and vio busses).
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#ifdef CONFIG_PPC64
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#endif

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t flag,
					 unsigned long attrs);
extern void __dma_direct_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_handle,
				       unsigned long attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
				 size_t size, int direction);
extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

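/*
 * Illustrative sketch only (not part of this header; "buf", "len" and the
 * device kick are hypothetical): on a non-snooping core, a driver that DMAs
 * to/from a normally-allocated cached buffer brackets each transfer with
 * __dma_sync(), while __dma_alloc_coherent() instead hands back an uncached
 * region that needs no per-transfer sync.  On cache-coherent cores the stubs
 * above make the sync calls no-ops.
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	 flush before the device reads
 *	start_device_dma();			 hypothetical device kick
 *	...
 *	__dma_sync(buf, len, DMA_FROM_DEVICE);	 invalidate before the CPU reads
 */
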
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

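/*
 * Illustrative sketch only (hypothetical helper name): platform/bus setup
 * code installs the per-device ops once, and every later dma_map_*() call is
 * routed through get_dma_ops().  Devices sitting behind an iommu would get
 * dma_iommu_ops instead.
 *
 *	static void my_bus_setup_dma(struct device *dev)
 *	{
 *		set_dma_ops(dev, &dma_direct_ops);
 *		set_dma_offset(dev, PCI_DRAM_OFFSET);
 *	}
 */
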
/*
 * get_dma_offset()
 *
 * Get the dma offset on configurations where the dma address can be determined
 * from the physical address by looking at a simple offset.  Direct dma and
 * swiotlb use this function, but it is typically not used by implementations
 * with an iommu.
 */
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_offset;

	return PCI_DRAM_OFFSET;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_offset = off;
}

/* this will be removed soon */
#define flush_write_buffers()

#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);

extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;
#endif

	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}

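/*
 * Worked example (illustrative only): for a device with
 * *dev->dma_mask == 0x7fffffff, addr = 0x7ffff000 and size = 0x1000 pass
 * (0x7ffff000 + 0x1000 - 1 == 0x7fffffff <= mask), while size = 0x2000
 * fails.  The "- 1" keeps a transfer ending exactly on the mask boundary
 * from being rejected.
 */
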
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}

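/*
 * Worked example (illustrative only): with a per-device offset of 0x80000000
 * installed via set_dma_offset(), phys_to_dma() maps paddr 0x1000 to bus
 * address 0x80001000 and dma_to_phys() undoes it.  Devices behind an iommu
 * do not use this linear offset; their dma_map_ops construct the mapping
 * instead.
 */
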
#define ARCH_HAS_DMA_MMAP_COHERENT

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */