/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	unsigned int frame_size;
	unsigned int reserved_size;
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * Reserve a few bytes extra headroom to allow drivers some moving
	 * space (e.g. for alignment), while keeping the skb aligned.
	 */
	reserved_size = 8;

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + reserved_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserved_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

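	/*
	 * Drivers that require DMA need the buffer mapped for the device
	 * up front, since the hardware DMAs the frame straight into it.
	 */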
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(rt2x00queue_alloc_rxskb);

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len,
					  DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_TO_DEVICE);
	}

	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL_GPL(rt2x00queue_free_skb);

void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from the queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/* The data length should be extended by 4 bytes for the CRC */
	data_length = entry->skb->len + 4;

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is an RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.retry_limit;
	if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine the IFS priority with which this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment, or
	 * when this fragment was preceded by RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		txdesc->ifs = IFS_SIFS;
	} else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else {
		txdesc->ifs = IFS_SIFS;
	}

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->signal = hwrate->plcp;
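	/* The 0x04 in the SERVICE field is the 802.11b locked-clocks bit. */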
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		__set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

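		/*
		 * For OFDM rates the PLCP LENGTH field carries the frame
		 * length in bytes, split into 6-bit high and low parts.
		 */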
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = get_duration_res(data_length, hwrate->bitrate);
		duration = get_duration(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension:
			 * at 11Mbps a rounded-up duration is ambiguous by
			 * one byte, which SERVICE field bit 7 resolves.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When a short preamble is used we should set the
		 * preamble bit for the signal.
		 */
		if (rt2x00_get_rate_preamble(rate->hw_value))
			txdesc->signal |= 0x08;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_create_tx_descriptor);

void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed; it is now
	 * ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick the beacon queue.
	 *	2) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS and CTS-to-self frames.
	 *	3) Rule 2 can be broken when the number of available
	 *	   entries in the queue drops below a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_write_tx_descriptor);

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;

	if (unlikely(rt2x00queue_full(queue)))
		return -EINVAL;

	if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc; after that
	 * we are free to use the skb->cb array for our own information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
		__clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		return -EIO;
	}

	__set_bit(ENTRY_DATA_PENDING, &entry->flags);

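	/*
	 * Advance Q_INDEX before writing the descriptor: writing it (and
	 * possibly kicking the queue) can hand this entry to the hardware
	 * immediately, so it must no longer be offered as the free entry.
	 */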
	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

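	/*
	 * TX qid values index the tx[] array directly (QID_AC_BE == 0);
	 * bcn[0] holds the beacon queue, bcn[1] the optional ATIM queue.
	 */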
	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

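	/*
	 * Q_INDEX is advanced when a frame is added to the queue and
	 * Q_INDEX_DONE when the device has finished one; the gap between
	 * them is the queue length, while count totals completed frames.
	 */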
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}
EXPORT_SYMBOL_GPL(rt2x00queue_index_inc);

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	unsigned int i;

	rt2x00queue_reset(queue);

	if (!rt2x00dev->ops->lib->init_rxentry)
		return;

	for (i = 0; i < queue->limit; i++)
		rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
						  &queue->entries[i]);
}

void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	txall_queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		if (!rt2x00dev->ops->lib->init_txentry)
			continue;

		for (i = 0; i < queue->limit; i++)
			rt2x00dev->ops->lib->init_txentry(rt2x00dev,
							  &queue->entries[i]);
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
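	/*
	 * Reserve roughly 10% of the entries as threshold: once the free
	 * entries drop below this, the queue is kicked even in the middle
	 * of a burst (see rt2x00queue_write_tx_descriptor).
	 */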
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

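/*
 * The entries array is followed by one contiguous block of driver private
 * data, qdesc->priv_size bytes per entry; this macro computes the address
 * of the private area that belongs to entry __index.
 */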
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			goto exit;
		queue->entries[i].skb = skb;
	}

	return 0;

exit:
	rt2x00queue_free_skbs(rt2x00dev, queue);

	return -ENOMEM;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}