rt2x00pci.c
/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * Beacon handlers.
 */
int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
			    struct ieee80211_tx_control *control)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct queue_entry_priv_pci_tx *priv_tx;
	struct skb_frame_desc *skbdesc;
	struct data_queue *queue;
	struct queue_entry *entry;

	/*
	 * Just in case mac80211 doesn't set this correctly;
	 * we need this queue to be set for the descriptor
	 * initialization.
	 */
	control->queue = IEEE80211_TX_QUEUE_BEACON;
	queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
	entry = rt2x00queue_get_entry(queue, Q_INDEX);
	priv_tx = entry->priv_data;

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->data = skb->data;
	skbdesc->data_len = queue->data_size;
	skbdesc->desc = priv_tx->desc;
	skbdesc->desc_len = queue->desc_size;
	skbdesc->entry = entry;

	memcpy(priv_tx->data, skb->data, skb->len);
	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);

	/*
	 * Enable beacon generation.
	 */
	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update);

/*
 * TX data handlers.
 */
int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
			    struct data_queue *queue, struct sk_buff *skb,
			    struct ieee80211_tx_control *control)
{
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
	struct skb_frame_desc *skbdesc;
	u32 word;

	if (rt2x00queue_full(queue))
		return -EINVAL;

	rt2x00_desc_read(priv_tx->desc, 0, &word);

	if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
	    rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
		ERROR(rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      control->queue, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->data = skb->data;
	skbdesc->data_len = queue->data_size;
	skbdesc->desc = priv_tx->desc;
	skbdesc->desc_len = queue->desc_size;
	skbdesc->entry = entry;

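	/* Copy the frame into the entry's pre-allocated DMA data buffer. */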
	memcpy(priv_tx->data, skb->data, skb->len);
	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);

	rt2x00queue_index_inc(queue, Q_INDEX);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 */
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_pci_rx *priv_rx;
	struct ieee80211_hdr *hdr;
	struct skb_frame_desc *skbdesc;
	struct rxdone_entry_desc rxdesc;
	int header_size;
	int align;
	u32 word;

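	/*
	 * Process completed RX entries until we reach one that is
	 * still owned by the NIC.
	 */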
	while (1) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		priv_rx = entry->priv_data;
		rt2x00_desc_read(priv_rx->desc, 0, &word);

		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
			break;

		memset(&rxdesc, 0, sizeof(rxdesc));
		rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

		hdr = (struct ieee80211_hdr *)priv_rx->data;
		header_size =
		    ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));

		/*
		 * The data behind the ieee80211 header must be
		 * aligned on a 4 byte boundary.
		 */
		align = header_size % 4;

		/*
		 * Allocate the sk_buff, initialize it and copy
		 * all data into it.
		 */
		entry->skb = dev_alloc_skb(rxdesc.size + align);
		if (!entry->skb)
			return;

		skb_reserve(entry->skb, align);
		memcpy(skb_put(entry->skb, rxdesc.size),
		       priv_rx->data, rxdesc.size);

		/*
		 * Fill in skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		memset(skbdesc, 0, sizeof(*skbdesc));
		skbdesc->data = entry->skb->data;
		skbdesc->data_len = queue->data_size;
		skbdesc->desc = priv_rx->desc;
		skbdesc->desc_len = queue->desc_size;
		skbdesc->entry = entry;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, &rxdesc);

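		/*
		 * Hand the descriptor back to the NIC so this entry can
		 * receive a new frame, but only while the radio is enabled.
		 */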
		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
190
			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
			rt2x00_desc_write(priv_rx->desc, 0, word);
		}

		rt2x00queue_index_inc(queue, Q_INDEX);
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
	u32 word;

	txdesc->control = &priv_tx->control;
	rt2x00lib_txdone(entry, txdesc);

	/*
	 * Make this entry available for reuse.
	 */
	entry->flags = 0;

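	/*
	 * Clear the ownership and valid bits so the hardware
	 * descriptor can be reused for a new frame.
	 */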
	rt2x00_desc_read(priv_tx->desc, 0, &word);
	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
	rt2x00_desc_write(priv_tx->desc, 0, word);

	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was full before the txdone handler,
	 * we must make sure the packet queue in the mac80211 stack
	 * is re-enabled when the txdone handler has finished.
	 */
	if (!rt2x00queue_full(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);

}
EXPORT_SYMBOL_GPL(rt2x00pci_txdone);

/*
 * Device initialization handlers.
 */
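/*
 * Each queue uses a single consistent DMA allocation: all descriptors
 * come first, followed by all data buffers. The macros below compute
 * the total allocation size and the per-entry offsets into that block.
 */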
#define dma_size(__queue)				\
({							\
	(__queue)->limit *				\
	    ((__queue)->desc_size + (__queue)->data_size);\
})

#define priv_offset(__queue, __base, __i)		\
({							\
	(__base) + ((__i) * (__queue)->desc_size);	\
})

#define data_addr_offset(__queue, __base, __i)		\
({							\
	(__base) +					\
	    ((__queue)->limit * (__queue)->desc_size) +	\
	    ((__i) * (__queue)->data_size);		\
})

#define data_dma_offset(__queue, __base, __i)		\
({							\
	(__base) +					\
	    ((__queue)->limit * (__queue)->desc_size) +	\
	    ((__i) * (__queue)->data_size);		\
})

static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct queue_entry_priv_pci_tx *priv_tx;
	void *data_addr;
	dma_addr_t data_dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	data_addr = pci_alloc_consistent(pci_dev, dma_size(queue), &data_dma);
	if (!data_addr)
		return -ENOMEM;

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		priv_tx = queue->entries[i].priv_data;
		priv_tx->desc = priv_offset(queue, data_addr, i);
		priv_tx->data = data_addr_offset(queue, data_addr, i);
		priv_tx->dma = data_dma_offset(queue, data_dma, i);
	}

	return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct queue_entry_priv_pci_tx *priv_tx = queue->entries[0].priv_data;

	if (priv_tx->data)
		pci_free_consistent(pci_dev, dma_size(queue),
				    priv_tx->data, priv_tx->dma);
	priv_tx->data = NULL;
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      pci_dev->irq, status);
		return status;
	}

	return 0;

exit:
	rt2x00pci_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr_addr) {
		iounmap(rt2x00dev->csr_addr);
		rt2x00dev->csr_addr = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);

	rt2x00dev->csr_addr = ioremap(pci_resource_start(pci_dev, 0),
				      pci_resource_len(pci_dev, 0));
	if (!rt2x00dev->csr_addr)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

	pci_set_master(pci_dev);

	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

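	/* Prefer 64-bit DMA addressing, fall back to 32-bit if unavailable. */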
	if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = pci_dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

	rt2x00pci_free_reg(rt2x00dev);

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev) ||
	    pci_restore_state(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");