/*
 * This file manages the translation entries for the IBM Calgary IOMMU.
 *
 * Derived from arch/powerpc/platforms/pseries/iommu.c
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Jon Mason <jdmason@us.ibm.com>
 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>

/* flush a tce at 'tceaddr' to main memory */
static inline void flush_tce(void* tceaddr)
{
	/* a single tce can't cross a cache line */
	if (cpu_has_clflush)
		clflush(tceaddr);
	else
		wbinvd();
}

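/*
 * tce_build - fill in @npages consecutive TCEs, starting at table
 * offset @index, for the buffer at kernel virtual address @uaddr.
 * Entries are always readable by the device and, unless @direction
 * is DMA_TO_DEVICE, also writable.
 */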
void tce_build(struct iommu_table *tbl, unsigned long index,
	unsigned int npages, unsigned long uaddr, int direction)
{
	u64* tp;
	u64 t;
	u64 rpn;

	t = (1 << TCE_READ_SHIFT);
	if (direction != DMA_TO_DEVICE)
		t |= (1 << TCE_WRITE_SHIFT);

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
		t &= ~TCE_RPN_MASK;
		t |= (rpn << TCE_RPN_SHIFT);

		*tp = cpu_to_be64(t);
		flush_tce(tp);

		uaddr += PAGE_SIZE;
		tp++;
	}
}

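/*
 * tce_free - clear (and flush) @npages TCEs starting at table
 * offset @index.
 */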
void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
{
	u64* tp;

	tp  = ((u64*)tbl->it_base) + index;

	while (npages--) {
		*tp = cpu_to_be64(0);
		flush_tce(tp);
		tp++;
	}
}

static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	/*
	 * size is the order of the table, 0-7; the smallest table has
	 * 8K entries, so shift the result by 13 to multiply by 8K
	 */
	return (1 << size) << 13;
}
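
/*
 * For example, specified_table_size == 3 gives (1 << 3) << 13 == 64K
 * entries; at 8 bytes per TCE that is a 512KB table covering
 * 64K * 4KB == 256MB of DMA space.
 */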

static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
{
	unsigned int bitmapsz;
	unsigned long bmppages;
	int ret;

	tbl->it_busno = dev->bus->number;

	/* set the tce table size - measured in entries */
	tbl->it_size = table_size_to_number_of_entries(specified_table_size);

	/*
	 * number of bytes needed for the bitmap: one bit per table
	 * entry
	 */
	bitmapsz = tbl->it_size / BITS_PER_BYTE;
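	/* e.g. a 64K-entry table needs an 8KB bitmap (an order-1, two page allocation) */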
	bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
	if (!bmppages) {
		printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
		ret = -ENOMEM;
		goto done;
	}

	tbl->it_map = (unsigned long*)bmppages;

	memset(tbl->it_map, 0, bitmapsz);

	tbl->it_hint = 0;

	spin_lock_init(&tbl->it_lock);

	return 0;

done:
	return ret;
}

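/*
 * build_tce_table - allocate and initialize the iommu_table for
 * @dev's bus and attach it to the bus via set_pci_iommu(); @bbar is
 * recorded in the table for later use by the Calgary code.
 */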
int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
{
	struct iommu_table *tbl;
	int ret;

	if (pci_iommu(dev->bus)) {
		printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
		       dev, pci_iommu(dev->bus));
		BUG();
	}

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl) {
		printk(KERN_ERR "Calgary: error allocating iommu_table\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = tce_table_setparms(dev, tbl);
	if (ret)
		goto free_tbl;

	tbl->bbar = bbar;

	set_pci_iommu(dev->bus, tbl);

	return 0;

free_tbl:
	kfree(tbl);
done:
	return ret;
}

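/*
 * alloc_tce_table - allocate one table of the configured size from
 * low boot memory, aligned to its own size.
 */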
void * __init alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	return __alloc_bootmem_low(size, size, 0);
}

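/*
 * free_tce_table - release a table obtained from alloc_tce_table()
 * back to the boot-time allocator.
 */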
void __init free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	free_bootmem(__pa(tbl), size);
}