#include <linux/prefetch.h>

/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint, passed through to @iommu_io_pdir_entry.
 * @iommu_io_pdir_entry: Callback which writes a single I/O Pdir entry.
 *
 * This function inserts the coalesced scatter/gather list chunks into the
 * I/O Controller's I/O Pdir.
 */ 
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, 
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

	/* Horrible hack.  For efficiency's sake, dma_sg starts one 
	 * entry below the true start (it is immediately incremented
	 * in the loop) */
	 dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %p/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt(startsg), startsg->length
		);


		/*
		** Look for the start of a new DMA stream
		*/
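		/* The coalescing pass tags the head of each coalesced
		 * chunk by setting PIDE_FLAG in its dma_address: the bits
		 * above IOVP_SHIFT hold the allocated Pdir index, those
		 * below it the byte offset into the first IO page. */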
		
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
			/* Pluto IOMMU IO Virt Address is not zero based */
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
			/* SBA, ccio, and dino are zero based.
			 * Trying to save a few CPU cycles for most users.
			 */
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}
		
		BUG_ON(pdirp == NULL);
		
		vaddr = (unsigned long)sg_virt(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
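		/* Write one Pdir entry per IO page spanned by this element;
		 * size includes the leading dma_offset so the page count
		 * rounds up correctly. */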
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE, 
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while(unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}
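
/*
 * A minimal sketch of the iommu_io_pdir_entry() callback shape expected
 * above, assuming a hypothetical Pdir format that holds just the
 * IO-page-aligned physical address plus a valid bit in bit 0.  Real
 * callbacks (e.g. sba_io_pdir_entry() in sba_iommu.c) also encode the
 * space ID, hint and coherence bits required by the hardware.
 */
static inline void
example_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		      unsigned long hints)
{
	u64 pa = virt_to_phys((void *) vba);	/* physical address of vba */

	/* Hypothetical layout: IO-page-aligned address | valid bit. */
	*pdir_ptr = (pa & IOVP_MASK) | 1ULL;
}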


/*
** First pass is to walk the SG list and determine where the breaks are
** in the DMA stream. Allocates PDIR entries but does not fill them.
** Returns the number of DMA chunks.
**
** Doing the fill separate from the coalescing/allocation keeps the
** code simpler. Future enhancement could make one pass through
** the sglist do both.
*/

static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;	   /* contig chunk head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);

	while (nents > 0) {

		/*
		** Prepare for first/next DMA stream
		*/
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = startsg->offset;

		/* PARANOID: clear entries */
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while(--nents > 0) {
			unsigned long prev_end, sg_start;

			prev_end = (unsigned long)sg_virt(startsg) +
							startsg->length;

			startsg++;
			sg_start = (unsigned long)sg_virt(startsg);

			/* PARANOID: clear entries */
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/   
			if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
					    IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;

			if (startsg->length + dma_len > max_seg_size)
				break;

			/*
			* Next see if we can append the next chunk, i.e.
			* the previous entry must end exactly where this
			* one starts, and that shared address must sit
			* on a page boundary.
			*/
			if (unlikely((prev_end != sg_start) ||
				((prev_end | sg_start) & ~PAGE_MASK)))
				break;
			
			dma_len += startsg->length;
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
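
/*
 * Sketch of how a driver drives the two passes, modelled on the callers
 * in sba_iommu.c and ccio-dma.c; my_alloc_range() and my_io_pdir_entry()
 * are stand-ins for the driver's own helpers:
 *
 *	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents,
 *					  my_alloc_range);
 *	filled = iommu_fill_pdir(ioc, sglist, nents, 0, my_io_pdir_entry);
 *
 * Both passes walk the same sglist, so the chunk counts they return
 * must agree.
 */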