/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc, used by
 * the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

#ifdef CONFIG_PPC64
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
#endif

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t flag,
					 struct dma_attrs *attrs);
extern void __dma_direct_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_handle,
				       struct dma_attrs *attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, struct dma_attrs *attrs);

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
				 size_t size, int direction);
extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
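
/*
 * Illustrative sketch (not part of the API contract): on a non-snooping
 * core a driver can either use the uncached allocator,
 *
 *	buf = __dma_alloc_coherent(dev, len, &handle, GFP_KERNEL);
 *
 * or allocate normally and maintain coherency around each transfer, e.g.
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 */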

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(dev, size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern struct dma_map_ops dma_direct_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out-of-line call, but it is not needed yet: the
	 * only ISA DMA device we support is the floppy, and the floppy
	 * driver has a hack to get a device for us directly.
	 */
	if (unlikely(dev == NULL))
		return NULL;

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
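
/*
 * A minimal illustration (assumed platform probe code, not defined in
 * this header): bus setup typically picks one of the generic op sets
 * declared above for each device, e.g.
 *
 *	set_dma_ops(dev, &dma_direct_ops);
 *
 * or, for a device sitting behind an iommu on ppc64,
 *
 *	set_dma_ops(dev, &dma_iommu_ops);
 */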

/*
 * get_dma_offset()
 *
 * Get the dma offset on configurations where the dma address can be determined
 * from the physical address by looking at a simple offset.  Direct dma and
 * swiotlb use this function, but it is typically not used by implementations
 * with an iommu.
 */
static inline dma_addr_t get_dma_offset(struct device *dev)
{
	if (dev)
		return dev->archdata.dma_offset;

	return PCI_DRAM_OFFSET;
}

static inline void set_dma_offset(struct device *dev, dma_addr_t off)
{
	if (dev)
		dev->archdata.dma_offset = off;
}
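
/*
 * With the offset in place, the direct and swiotlb paths convert
 * addresses by simple addition: dma_addr = paddr + get_dma_offset(dev)
 * (see phys_to_dma()/dma_to_phys() below).  As a sketch, a bridge whose
 * DMA window starts at bus address 0x80000000 (an assumed example value)
 * would call
 *
 *	set_dma_offset(dev, 0x80000000ull);
 *
 * once at probe time.
 */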

/* this will be removed soon */
#define flush_write_buffers()

#include <asm-generic/dma-mapping-common.h>

extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
	struct dev_archdata *sd = &dev->archdata;

	if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
		return false;
#endif

	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}
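
/*
 * Worked example (assumed 32-bit mask): with *dev->dma_mask ==
 * DMA_BIT_MASK(32), a 0x1000-byte buffer at bus address 0xfffff800 is
 * rejected, since addr + size - 1 == 0x1000007ff exceeds the mask.
 */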

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr - get_dma_offset(dev);
}

#define ARCH_HAS_DMA_MMAP_COHERENT

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}
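
/*
 * Note that on cache-coherent cores __dma_sync() above is a no-op, so
 * dma_cache_sync() only does real work when CONFIG_NOT_COHERENT_CACHE
 * is set.
 */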

#endif /* __KERNEL__ */
#endif	/* _ASM_DMA_MAPPING_H */