/*
	Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware directly receives the frame into the skb buffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * which means we need at least 3 bytes of headroom for
	 * moving the frame into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure the frame now has the requested number of bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);
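
	/*
	 * Resulting buffer layout (sketch, ignoring the extra headroom
	 * dev_alloc_skb() reserves on its own):
	 *
	 *   [ head_size headroom ][ frame_size data ][ tail_size tailroom ]
	 *
	 * The headroom covers the alignment and IV/EIV space, the
	 * tailroom covers the ICV.
	 */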

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	/*
	 * If the device has requested headroom, we should make sure
	 * it is also mapped for DMA so it can be used for transferring
	 * additional descriptor information to the hardware.
	 */
	skb_push(skb, rt2x00dev->ops->extra_tx_headroom);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);

	/*
	 * Restore data pointer to original location again.
	 */
	skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		/*
		 * Add the headroom to the skb length; it has been removed
		 * by the driver, but it was included in the DMA mapping.
		 */
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len + rt2x00dev->ops->extra_tx_headroom,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}
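
/*
 * Illustration: ALIGN_SIZE(skb, offset) is defined in rt2x00.h as roughly
 * ((unsigned long)((skb)->data + (offset))) & 3, i.e. the number of bytes
 * by which skb->data + offset sits past the previous 4-byte boundary.
 * For example, with skb->data at an address ending in ...02 the align
 * value is 2: the frame is pushed 2 bytes, memmove()d down to the aligned
 * address and trimmed back to its original length.
 */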

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/*
	 * Move the header.
	 */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}
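
/*
 * Illustration: L2PAD_SIZE(hdrlen) is defined in rt2x00.h as roughly
 * -(hdrlen) & 3, the number of pad bytes needed after the header so that
 * the payload starts on a 4-byte boundary. E.g. for a 26-byte QoS data
 * header the L2 pad is 2 bytes:
 *
 *   before: [ 26-byte header ][ payload ]
 *   after:  [ 26-byte header ][ 2-byte pad ][ payload ]
 *
 * rt2x00queue_remove_l2pad() below undoes this transformation.
 */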

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
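
/*
 * Note on the 0x10 increment above: in the IEEE 802.11 sequence control
 * field the low 4 bits (IEEE80211_SCTL_FRAG) hold the fragment number and
 * the upper 12 bits hold the sequence number, so adding 0x10 advances the
 * sequence number by one while leaving the fragment bits untouched.
 */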

static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}
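
/*
 * Worked CCK example (a sketch, assuming the usual GET_DURATION*() helpers
 * from rt2x00.h, which compute size * 8 * 10 / bitrate with the bitrate in
 * 100kbps units): a 100-byte frame at 11 Mbps (bitrate == 110) gives
 * 8000 / 110 = 72 us with a residual of 80, so the duration is rounded up
 * to 73 us; since the residual is above 30, the length extension bit
 * (0x80 in the SERVICE field) stays cleared.
 */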

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for frames that have
	 * been injected through a monitor interface. The latter
	 * exception is needed for testing a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control)) &&
	    (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else
		txdesc->ifs = IFS_SIFS;

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed; this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue. There are however a few rules:
	 *	1) Don't kick the beacon queue.
	 *	2) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS and CTS-to-self frames.
	 *	3) Rule 2 can be broken when the number of available entries
	 *	   in the queue drops below a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}
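
/*
 * Note on rule 3 in rt2x00queue_write_tx_descriptor() above:
 * queue->threshold is set to one tenth of the queue size in
 * rt2x00queue_alloc_entries() below, so a nearly full queue is kicked
 * even when the current frame carries the burst flag.
 */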

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array;
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment is only valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It is possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
							       &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_map_txskb(queue->rt2x00dev, skb);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	__le32 desc[16];

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc;
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * For the descriptor we use a local array from where the
	 * driver can move it to the correct location required for
	 * the hardware.
	 */
	memset(desc, 0, sizeof(desc));

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->desc = desc;
	skbdesc->desc_len = intf->beacon->queue->desc_size;
	skbdesc->entry = intf->beacon;

	/*
	 * Write TX descriptor into reserved room in front of the beacon.
	 */
	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Send the beacon to the hardware and enable beacon generation.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )
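
	/*
	 * Layout of the single allocation above (sketch): the queue_entry
	 * array is followed by one driver private data blob per entry,
	 *
	 *   [ entry 0 | ... | entry limit-1 | priv 0 | ... | priv limit-1 ]
	 *
	 * so the macro skips past all entries and indexes into the priv area.
	 */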

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
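
	/*
	 * Resulting layout of the queue array (sketch):
	 *
	 *   queue[0]              = RX
	 *   queue[1..tx_queues]   = TX
	 *   queue[1 + tx_queues]  = beacon
	 *   queue[2 + tx_queues]  = ATIM (only when required)
	 */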

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}