/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00mmio
	Abstract: rt2x00 generic mmio device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "rt2x00.h"
#include "rt2x00mmio.h"

/*
 * Register access.
 */
int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
			    const unsigned int offset,
			    const struct rt2x00_field32 field,
			    u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00mmio_register_read(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	printk_once(KERN_ERR "%s() Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		    __func__, offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_regbusy_read);
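
/*
 * Example usage (illustrative sketch, not part of this file): a chipset
 * driver typically uses rt2x00mmio_regbusy_read() to wait for an
 * indirect register interface to go idle before issuing a new command.
 * The return value is 1 once the busy field clears and 0 on timeout or
 * when the device is no longer present.  The register and field names
 * below follow the rt2800pci pattern and are an assumption here, not
 * part of this file's API:
 *
 *	u32 reg;
 *
 *	if (!rt2x00mmio_regbusy_read(rt2x00dev, H2M_MAILBOX_CSR,
 *				     H2M_MAILBOX_CSR_OWNER, &reg))
 *		return -EBUSY;
 */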

bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_mmio *entry_priv;
	struct skb_frame_desc *skbdesc;
	int max_rx = 16;

	while (--max_rx) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		entry_priv = entry->priv_data;

		if (rt2x00dev->ops->lib->get_entry_state(entry))
			break;

		/*
		 * Fill in the descriptor fields of the skb descriptor.
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = entry_priv->desc;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * DMA is already done, notify rt2x00lib that
		 * it finished successfully.
		 */
		rt2x00lib_dmastart(entry);
		rt2x00lib_dmadone(entry);

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, GFP_ATOMIC);
	}

	return !max_rx;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_rxdone);
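
/*
 * Example (illustrative sketch): chipset drivers call
 * rt2x00mmio_rxdone() from their rxdone tasklet and reschedule the
 * tasklet while it keeps returning true, i.e. while the 16-frame
 * budget keeps being exhausted.  The helper name
 * example_enable_rx_interrupt() is hypothetical:
 *
 *	static void example_rxdone_tasklet(unsigned long data)
 *	{
 *		struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
 *
 *		if (rt2x00mmio_rxdone(rt2x00dev))
 *			tasklet_schedule(&rt2x00dev->rxdone_tasklet);
 *		else
 *			example_enable_rx_interrupt(rt2x00dev);
 *	}
 */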

void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
{
	unsigned int i;

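	/*
	 * Wait for the queue to drain: poll every 10ms, at most 10
	 * times, so up to ~100ms.  The drop argument is ignored by
	 * this generic implementation.
	 */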
	for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
		msleep(10);
}
EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);

/*
 * Device initialization handlers.
 */
static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				      struct data_queue *queue)
{
	struct queue_entry_priv_mmio *entry_priv;
	void *addr;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	addr = dma_alloc_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  &dma, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	memset(addr, 0, queue->limit * queue->desc_size);

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = addr + i * queue->desc_size;
		entry_priv->desc_dma = dma + i * queue->desc_size;
	}

	return 0;
}
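
/*
 * Layout note: the ring is one coherent allocation, so with (for
 * example) limit = 32 entries and desc_size = 32 bytes (hypothetical
 * numbers), 32 * 32 = 1024 bytes are allocated and entry i's
 * descriptor sits at addr + i * 32 on the CPU side and dma + i * 32
 * on the device side.
 */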

static void rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				      struct data_queue *queue)
{
	struct queue_entry_priv_mmio *entry_priv =
	    queue->entries[0].priv_data;

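	/*
	 * The descriptors were allocated as a single coherent block in
	 * rt2x00mmio_alloc_queue_dma(), with entry 0 holding its base
	 * address, so freeing entry 0 releases the whole queue.
	 */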
	if (entry_priv->desc)
		dma_free_coherent(rt2x00dev->dev,
				  queue->limit * queue->desc_size,
				  entry_priv->desc, entry_priv->desc_dma);
	entry_priv->desc = NULL;
}

int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA memory for each queue.
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(rt2x00dev->irq,
			     rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, rt2x00dev->name, rt2x00dev);
	if (status) {
		rt2x00_err(rt2x00dev, "IRQ %d allocation failed (error %d)\n",
			   rt2x00dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	queue_for_each(rt2x00dev, queue)
		rt2x00mmio_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00mmio_initialize);
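
/*
 * Example (sketch, assuming the standard rt2x00 driver layout): these
 * helpers are not called directly but are wired into the chipset
 * driver's rt2x00lib_ops, as the rt2x00 PCI drivers do; the struct
 * name example_rt2x00_ops is hypothetical:
 *
 *	static const struct rt2x00lib_ops example_rt2x00_ops = {
 *		...
 *		.initialize	= rt2x00mmio_initialize,
 *		.uninitialize	= rt2x00mmio_uninitialize,
 *		.flush_queue	= rt2x00mmio_flush_queue,
 *		...
 *	};
 */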

void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free the IRQ line.
	 */
	free_irq(rt2x00dev->irq, rt2x00dev);

	/*
	 * Free DMA memory for each queue.
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00mmio_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00mmio_uninitialize);

/*
 * rt2x00mmio module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 mmio library");
MODULE_LICENSE("GPL");