/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * TX data handlers.
 */
37
int rt2x00pci_write_tx_data(struct queue_entry *entry)
38
{
39
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
I
Ivo van Doorn 已提交
40
	struct skb_frame_desc *skbdesc;
41 42
	u32 word;

43
	rt2x00_desc_read(entry_priv->desc, 0, &word);
44

45 46 47 48 49 50 51 52 53
	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
		     rt2x00_get_field32(word, TXD_ENTRY_VALID))) {
		ERROR(entry->queue->rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
54
		      "Please file bug report to %s.\n",
55
		      entry->queue->qid, DRV_PROJECT);
56 57 58
		return -EINVAL;
	}

I
Ivo van Doorn 已提交
59 60 61
	/*
	 * Fill in skb descriptor
	 */
62
	skbdesc = get_skb_frame_desc(entry->skb);
63
	memset(skbdesc, 0, sizeof(*skbdesc));
64
	skbdesc->desc = entry_priv->desc;
65
	skbdesc->desc_len = entry->queue->desc_size;
I
Ivo van Doorn 已提交
66 67
	skbdesc->entry = entry;

68
	memcpy(entry_priv->data, entry->skb->data, entry->skb->len);
69 70 71 72 73 74

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
75
 * TX/RX data handlers.
76 77 78
 */
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
I
Ivo van Doorn 已提交
79 80
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
81
	struct queue_entry_priv_pci *entry_priv;
I
Ivo van Doorn 已提交
82 83
	struct skb_frame_desc *skbdesc;
	struct rxdone_entry_desc rxdesc;
84
	u32 word;
85 86

	while (1) {
I
Ivo van Doorn 已提交
87
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
88 89
		entry_priv = entry->priv_data;
		rt2x00_desc_read(entry_priv->desc, 0, &word);
90

91
		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
92 93
			break;

I
Ivo van Doorn 已提交
94 95
		memset(&rxdesc, 0, sizeof(rxdesc));
		rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
96 97

		/*
98
		 * Allocate the sk_buffer and copy all data into it.
99
		 */
100
		entry->skb = rt2x00queue_alloc_rxskb(queue);
I
Ivo van Doorn 已提交
101
		if (!entry->skb)
102 103
			return;

104 105
		memcpy(entry->skb->data, entry_priv->data, rxdesc.size);
		skb_trim(entry->skb, rxdesc.size);
106

I
Ivo van Doorn 已提交
107 108 109
		/*
		 * Fill in skb descriptor
		 */
I
Ivo van Doorn 已提交
110 111
		skbdesc = get_skb_frame_desc(entry->skb);
		memset(skbdesc, 0, sizeof(*skbdesc));
112
		skbdesc->desc = entry_priv->desc;
I
Ivo van Doorn 已提交
113
		skbdesc->desc_len = queue->desc_size;
I
Ivo van Doorn 已提交
114 115
		skbdesc->entry = entry;

116 117 118
		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
I
Ivo van Doorn 已提交
119
		rt2x00lib_rxdone(entry, &rxdesc);
120

I
Ivo van Doorn 已提交
121
		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
122
			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
123
			rt2x00_desc_write(entry_priv->desc, 0, word);
124 125
		}

I
Ivo van Doorn 已提交
126
		rt2x00queue_index_inc(queue, Q_INDEX);
127 128 129 130
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

I
Ivo van Doorn 已提交
131 132
void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
133
{
134
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
135
	enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
136 137
	u32 word;

I
Ivo van Doorn 已提交
138
	rt2x00lib_txdone(entry, txdesc);
139 140 141 142 143 144

	/*
	 * Make this entry available for reuse.
	 */
	entry->flags = 0;

145
	rt2x00_desc_read(entry_priv->desc, 0, &word);
146 147
	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
148
	rt2x00_desc_write(entry_priv->desc, 0, word);
149

150
	__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
I
Ivo van Doorn 已提交
151
	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
152 153

	/*
154 155
	 * If the data queue was below the threshold before the txdone
	 * handler we must make sure the packet queue in the mac80211 stack
156 157
	 * is reenabled when the txdone handler has finished.
	 */
158
	if (!rt2x00queue_threshold(entry->queue))
159
		ieee80211_wake_queue(rt2x00dev->hw, qid);
160 161 162 163

}
EXPORT_SYMBOL_GPL(rt2x00pci_txdone);

/*
 * Device initialization handlers.
 */

/* Number of bytes occupied by all descriptors of a queue. */
#define desc_size(__queue) \
	((__queue)->limit * (__queue)->desc_size)

/* Number of bytes occupied by all data buffers of a queue. */
#define data_size(__queue) \
	((__queue)->limit * (__queue)->data_size)

/* Total DMA allocation: data area followed by descriptor area. */
#define dma_size(__queue) \
	(data_size(__queue) + desc_size(__queue))

/*
 * Address of entry __i's descriptor; the descriptor area starts
 * directly behind the data area within the single DMA allocation.
 */
#define desc_offset(__queue, __base, __i) \
	((__base) + data_size(__queue) + ((__i) * (__queue)->desc_size))

/* Address of entry __i's data buffer at the start of the allocation. */
#define data_offset(__queue, __base, __i) \
	((__base) + ((__i) * (__queue)->data_size))

static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
196
{
I
Ivo van Doorn 已提交
197
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
198
	struct queue_entry_priv_pci *entry_priv;
199 200
	void *addr;
	dma_addr_t dma;
201 202 203 204 205
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
206 207
	addr = pci_alloc_consistent(pci_dev, dma_size(queue), &dma);
	if (!addr)
208 209
		return -ENOMEM;

210
	memset(addr, 0, dma_size(queue));
I
Ivo van Doorn 已提交
211

212
	/*
I
Ivo van Doorn 已提交
213
	 * Initialize all queue entries to contain valid addresses.
214
	 */
I
Ivo van Doorn 已提交
215
	for (i = 0; i < queue->limit; i++) {
216 217 218 219 220
		entry_priv = queue->entries[i].priv_data;
		entry_priv->desc = desc_offset(queue, addr, i);
		entry_priv->desc_dma = desc_offset(queue, dma, i);
		entry_priv->data = data_offset(queue, addr, i);
		entry_priv->data_dma = data_offset(queue, dma, i);
221 222 223 224 225
	}

	return 0;
}

I
Ivo van Doorn 已提交
226 227
static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
228
{
I
Ivo van Doorn 已提交
229
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
230 231
	struct queue_entry_priv_pci *entry_priv =
	    queue->entries[0].priv_data;
I
Ivo van Doorn 已提交
232

233
	if (entry_priv->data)
I
Ivo van Doorn 已提交
234
		pci_free_consistent(pci_dev, dma_size(queue),
235 236
				    entry_priv->data, entry_priv->data_dma);
	entry_priv->data = NULL;
237 238 239 240 241
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
I
Ivo van Doorn 已提交
242
	struct data_queue *queue;
243 244 245 246 247
	int status;

	/*
	 * Allocate DMA
	 */
I
Ivo van Doorn 已提交
248 249
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
250 251 252 253 254 255 256 257 258 259 260 261
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      pci_dev->irq, status);
262
		goto exit;
263 264 265 266 267
	}

	return 0;

exit:
268 269
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
270 271 272 273 274 275 276

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
I
Ivo van Doorn 已提交
277
	struct data_queue *queue;
278 279 280 281 282 283 284 285 286

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
I
Ivo van Doorn 已提交
287 288
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
289 290 291 292 293 294 295 296 297 298 299 300 301 302
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */

/*
 * Release the register caches and the CSR mapping. Safe to call on a
 * partially completed rt2x00pci_alloc_reg(): kfree() tolerates NULL
 * and the CSR base is only unmapped when the mapping succeeded.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}

/*
 * Map the CSR register window (BAR 0) and allocate the EEPROM and RF
 * register caches. On any failure everything already acquired is
 * released via rt2x00pci_free_reg().
 */
static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);

	rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0),
				      pci_resource_len(pci_dev, 0));
	if (!rt2x00dev->csr.base)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	/* Release whatever part of the allocation did succeed. */
	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

/*
 * Probe a rt2x00 PCI device: claim its PCI resources, configure DMA,
 * allocate the mac80211 hardware structure and register mappings, and
 * hand the device to rt2x00lib. On failure everything acquired so far
 * is released in reverse order through the goto chain below.
 */
int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

	pci_set_master(pci_dev);

	/* MWI is a performance optimization only; failure is non-fatal. */
	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

	/* Prefer a 64-bit DMA mask, fall back to 32-bit. */
	if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = pci_dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	/*
	 * NOTE(review): the device is deliberately left enabled when the
	 * error is -EBUSY — presumably because another owner still uses
	 * it; confirm before changing this condition.
	 */
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

/*
 * Tear down a rt2x00 PCI device in reverse order of probe: detach it
 * from rt2x00lib first, then release the register mappings and the
 * mac80211 structure, and finally give back the PCI resources.
 */
void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
/*
 * Suspend handler: let rt2x00lib quiesce the device first, then drop
 * the register mappings (they are re-created by rt2x00pci_resume())
 * and put the PCI device into the requested low-power state.
 */
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	rt2x00pci_free_reg(rt2x00dev);

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

/*
 * Resume handler: restore PCI power and configuration, re-create the
 * register mappings released at suspend time and let rt2x00lib bring
 * the device back up.
 */
int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	/*
	 * Each step runs only when the previous one succeeded,
	 * preserving the original short-circuit ordering.
	 */
	if (pci_set_power_state(pci_dev, PCI_D0))
		goto exit_eio;
	if (pci_enable_device(pci_dev))
		goto exit_eio;
	if (pci_restore_state(pci_dev))
		goto exit_eio;

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval) {
		rt2x00pci_free_reg(rt2x00dev);
		return retval;
	}

	return 0;

exit_eio:
	ERROR(rt2x00dev, "Failed to resume device.\n");
	return -EIO;
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
I
Ivo van Doorn 已提交
486
MODULE_DESCRIPTION("rt2x00 pci library");
487
MODULE_LICENSE("GPL");