recv.c 32.2 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

S
Sujith 已提交
17
#include "ath9k.h"
18
#include "ar9003_mac.h"
19

F
Felix Fietkau 已提交
20 21
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

22 23 24 25 26 27
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

28 29 30
/*
 * Map a received frame's destination address (addr1) to the virtual
 * wiphy it was sent to; fall back to the primary wiphy's hw when no
 * secondary wiphy matches.
 */
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
					     struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *found = sc->pri_wiphy->hw;
	int idx;

	spin_lock_bh(&sc->wiphy_lock);
	for (idx = 0; idx < sc->num_sec_wiphy; idx++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[idx];

		if (!aphy)
			continue;
		if (compare_ether_addr(hdr->addr1,
				       aphy->hw->wiphy->perm_addr) == 0) {
			found = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);

	return found;
}

49 50 51 52 53 54 55 56 57 58
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
59
	struct ath_hw *ah = sc->sc_ah;
60
	struct ath_common *common = ath9k_hw_common(ah);
61 62 63 64 65 66
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
S
Sujith 已提交
67
	ds->ds_link = 0; /* link to null */
68 69
	ds->ds_data = bf->bf_buf_addr;

S
Sujith 已提交
70
	/* virtual addr of the beginning of the buffer. */
71
	skb = bf->bf_mpdu;
72
	BUG_ON(skb == NULL);
73 74
	ds->ds_vdata = skb->data;

75 76
	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
77
	 * how much data it can DMA to us and that we are prepared
78 79
	 * to process
	 */
S
Sujith 已提交
80
	ath9k_hw_setuprxdesc(ah, ds,
81
			     common->rx_bufsize,
82 83
			     0);

S
Sujith 已提交
84
	if (sc->rx.rxlink == NULL)
85 86
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
S
Sujith 已提交
87
		*sc->rx.rxlink = bf->bf_daddr;
88

S
Sujith 已提交
89
	sc->rx.rxlink = &ds->ds_link;
90 91 92
	ath9k_hw_rxena(ah);
}

S
Sujith 已提交
93 94 95 96
/* Program the default rx antenna and reset antenna-diversity state. */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);

	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

101 102
static void ath_opmode_init(struct ath_softc *sc)
{
103
	struct ath_hw *ah = sc->sc_ah;
104 105
	struct ath_common *common = ath9k_hw_common(ah);

106 107 108 109 110 111 112
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
113
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
114
		ath_hw_setbssidmask(common);
115 116 117 118 119 120 121 122 123

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

F
Felix Fietkau 已提交
124 125
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
126
{
F
Felix Fietkau 已提交
127 128
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
129 130 131
	struct sk_buff *skb;
	struct ath_buf *bf;

F
Felix Fietkau 已提交
132 133 134
	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;
135

F
Felix Fietkau 已提交
136 137
	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);
138

F
Felix Fietkau 已提交
139 140 141 142 143 144
	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				ah->caps.rx_status_len, DMA_TO_DEVICE);
145

F
Felix Fietkau 已提交
146 147 148
	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);
149

F
Felix Fietkau 已提交
150 151 152 153 154 155 156 157 158 159 160 161
	return true;
}

/* Push up to @size free buffers into the @qtype EDMA rx FIFO. */
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 queued = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		queued++;

		/* stop when the FIFO is full or the quota is reached */
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;
		if (queued >= size)
			break;
	}
}

/* Drain the @qtype FIFO, returning each buffer to the free list. */
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		struct ath_buf *bf = SKB_CB_ATHBUF(skb);

		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

198
	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
F
Felix Fietkau 已提交
199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

/* Initialize one EDMA rx queue: empty lists plus the hw FIFO depth. */
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	rx_edma->rx_fifo_hwsize = size;
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;


	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
247
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
F
Felix Fietkau 已提交
248
		if (!skb) {
249
			error = -ENOMEM;
F
Felix Fietkau 已提交
250
			goto rx_init_fail;
251 252
		}

F
Felix Fietkau 已提交
253
		memset(skb->data, 0, common->rx_bufsize);
254
		bf->bf_mpdu = skb;
F
Felix Fietkau 已提交
255

256
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
257
						 common->rx_bufsize,
F
Felix Fietkau 已提交
258
						 DMA_BIDIRECTIONAL);
259
		if (unlikely(dma_mapping_error(sc->dev,
F
Felix Fietkau 已提交
260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294
						bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

295
	ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_SCANNING));
F
Felix Fietkau 已提交
296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328
}

/* Return all queued EDMA rx buffers (both FIFOs) to the free list. */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
				common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
329
				"rx", nbufs, 1, 0);
F
Felix Fietkau 已提交
330
		if (error != 0) {
331
			ath_print(common, ATH_DBG_FATAL,
F
Felix Fietkau 已提交
332 333
				  "failed to allocate rx descriptors: %d\n",
				  error);
334 335
			goto err;
		}
F
Felix Fietkau 已提交
336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
							bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
361
	}
362

363
err:
364 365 366 367 368 369 370 371
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
372 373
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
374 375 376
	struct sk_buff *skb;
	struct ath_buf *bf;

F
Felix Fietkau 已提交
377 378 379 380 381 382 383 384 385 386 387 388
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						common->rx_bufsize,
						DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
389
		}
390

F
Felix Fietkau 已提交
391 392 393
		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
S
Sujith 已提交
418

419 420 421 422 423 424 425
	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
426
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
427 428
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

429 430 431 432 433
	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
434
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
S
Sujith 已提交
435
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
436
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
437 438
		rfilt |= ATH9K_RX_FILTER_PROM;

439 440 441
	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

442 443 444 445
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
446 447
		rfilt |= ATH9K_RX_FILTER_BEACON;

448 449 450 451
	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
452
		rfilt |= ATH9K_RX_FILTER_PSPOLL;
S
Sujith 已提交
453

S
Sujith 已提交
454 455 456
	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

457
	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
458 459
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
460 461 462
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
463 464 465
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

466
	return rfilt;
S
Sujith 已提交
467

468 469 470 471 472
#undef RX_FILTER_PRESERVE
}

int ath_startrecv(struct ath_softc *sc)
{
473
	struct ath_hw *ah = sc->sc_ah;
474 475
	struct ath_buf *bf, *tbf;

F
Felix Fietkau 已提交
476 477 478 479 480
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

S
Sujith 已提交
481 482
	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
483 484
		goto start_recv;

S
Sujith 已提交
485 486
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
487 488 489 490
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
S
Sujith 已提交
491
	if (list_empty(&sc->rx.rxbuf))
492 493
		goto start_recv;

S
Sujith 已提交
494
	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
495
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
S
Sujith 已提交
496
	ath9k_hw_rxena(ah);
497 498

start_recv:
S
Sujith 已提交
499
	spin_unlock_bh(&sc->rx.rxbuflock);
S
Sujith 已提交
500
	ath_opmode_init(sc);
501
	ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_SCANNING));
S
Sujith 已提交
502

503 504 505 506 507
	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
508
	struct ath_hw *ah = sc->sc_ah;
509 510
	bool stopped;

S
Sujith 已提交
511 512 513
	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
F
Felix Fietkau 已提交
514 515 516 517 518

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
S
Sujith 已提交
519

520 521 522 523 524
	return stopped;
}

void ath_flushrecv(struct ath_softc *sc)
{
S
Sujith 已提交
525
	spin_lock_bh(&sc->rx.rxflushlock);
S
Sujith 已提交
526
	sc->sc_flags |= SC_OP_RXFLUSH;
F
Felix Fietkau 已提交
527 528 529
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
S
Sujith 已提交
530
	sc->sc_flags &= ~SC_OP_RXFLUSH;
S
Sujith 已提交
531
	spin_unlock_bh(&sc->rx.rxflushlock);
532 533
}

534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	u8 *pos = mgmt->u.beacon.variable;
	u8 *end = skb->data + skb->len;

	/* walk the information elements until we find the TIM */
	while (pos + 2 < end) {
		u8 id = *pos++;
		u8 elen = *pos++;

		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			struct ieee80211_tim_ie *tim;

			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *)pos;
			/* only the DTIM beacon itself announces CAB traffic */
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
569
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
570 571 572 573 574

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
575
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
576 577
		return; /* not from our current AP */

S
Sujith 已提交
578
	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
579

S
Sujith 已提交
580 581
	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
582 583 584
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
585 586 587
		ath_beacon_config(sc, NULL);
	}

588 589 590
	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
591 592 593 594
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
595
		 */
596 597
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
S
Sujith 已提交
598
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
599 600 601
		return;
	}

S
Sujith 已提交
602
	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
603 604 605 606 607
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
S
Sujith 已提交
608
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
609 610
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
611 612 613 614 615 616
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
617
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
618 619 620 621

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
622 623
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && ieee80211_is_beacon(hdr->frame_control))
624
		ath_rx_ps_beacon(sc, skb);
S
Sujith 已提交
625
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
626 627 628 629 630 631 632 633
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
S
Sujith 已提交
634
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
635 636
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
S
Sujith 已提交
637
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
638 639
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
S
Sujith 已提交
640
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
641 642
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
643
			  "PS-Poll data (0x%lx)\n",
S
Sujith 已提交
644 645 646 647
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
648 649 650
	}
}

651 652
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
653
				    struct ieee80211_rx_status *rxs)
654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
673 674 675
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
676
		}
677
		ieee80211_rx(sc->hw, skb);
678
	} else
679
		/* Deliver unicast frames based on receiver address */
680
		ieee80211_rx(hw, skb);
681 682
}

F
Felix Fietkau 已提交
683 684
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
685
{
F
Felix Fietkau 已提交
686 687 688 689 690 691 692 693 694 695 696 697 698 699
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

M
Ming Lei 已提交
700
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
F
Felix Fietkau 已提交
701 702 703
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
M
Ming Lei 已提交
704 705 706 707
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
F
Felix Fietkau 已提交
708
		return false;
M
Ming Lei 已提交
709
	}
F
Felix Fietkau 已提交
710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
726
		return true;
F
Felix Fietkau 已提交
727 728 729 730 731
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
732

F
Felix Fietkau 已提交
733 734 735 736 737 738
/*
 * Collect all completed buffers from the @qtype FIFO, then dequeue the
 * oldest one, fill @rs from its status area, and return its ath_buf
 * (NULL when nothing is ready).
 */
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;
	struct ath_buf *bf;

	while (ath_edma_get_buffers(sc, qtype))
		;

	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
756
	struct ath_desc *ds;
F
Felix Fietkau 已提交
757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
M
Ming Lei 已提交
818
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
F
Felix Fietkau 已提交
819 820 821 822 823 824
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}

825 826
/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;
	u8 rx_status_len = ah->caps.rx_status_len;
	__le16 fc = hdr->frame_control;

	if (!rx_stats->rs_datalen)
		return false;

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
		return false;

	/*
	 * rs_more indicates chained descriptors which can be used
	 * to link buffers together for a sort of scatter-gather
	 * operation.
	 * reject the frame, we don't support scatter-gather yet and
	 * the frame is probably corrupt anyway
	 */
	if (rx_stats->rs_more)
		return false;

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		if (rx_stats->rs_status & ATH9K_RXERR_CRC)
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
			else
				rxs->flag |= RX_FLAG_MMIC_ERROR;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				return false;
		} else {
			if (rx_stats->rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				return false;
			}
		}
	}

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
906
			      struct ieee80211_rx_status *rxs)
907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
		  "0x%02x using 1 Mbit\n", rx_stats->rs_rate);

	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
950
			       struct ieee80211_hdr *hdr,
951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996
			       struct ath_rx_status *rx_stats)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_sta *sta;
	struct ath_node *an;
	int last_rssi = ATH_RSSI_DUMMY_MARKER;
	__le16 fc;

	fc = hdr->frame_control;

	rcu_read_lock();
	/*
	 * XXX: use ieee80211_find_sta! This requires quite a bit of work
	 * under the current ath9k virtual wiphy implementation as we have
	 * no way of tying a vif to wiphy. Typically vifs are attached to
	 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
	 * wiphy you'd have to iterate over every wiphy and each sdata.
	 */
	sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
	if (sta) {
		an = (struct ath_node *) sta->drv_priv;
		if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
		   !rx_stats->rs_moreaggr)
			ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
		last_rssi = an->last_rssi;
	}
	rcu_read_unlock();

	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
					      ATH_RSSI_EP_MULTIPLIER);
	if (rx_stats->rs_rssi < 0)
		rx_stats->rs_rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	if (ieee80211_is_beacon(fc))
		ah->stats.avgbrssi = rx_stats->rs_rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ieee80211_channel *curchan = hw->conf.channel;

	memset(rx_status, 0, sizeof(*rx_status));

	/*
	 * Everything but the rate is checked here; the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	/* Fill in the mac80211 status fields for the accepted frame. */
	rx_status->band = curchan->band;
	rx_status->freq = curchan->center_freq;
	rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_TSFT;

	return 0;
}

/*
 * Strip any hardware-inserted MAC-header padding and translate the
 * hardware's decryption status into mac80211 rx flags.
 */
static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	int padpos, padsize;
	u8 keyix;
	__le16 fc = hdr->frame_control;

	/* See if any padding was done by the hw and remove it. */
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		/* Hardware reported a valid key index: frame is decrypted. */
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc) &&
		   !decrypt_error && skb->len >= hdrlen + 4) {
		/* No key index from hw; derive the group key index from
		 * the IV's KeyID octet and check our key map. */
		keyix = skb->data[hdrlen + 3] >> 6;
		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}

	/* Use software decrypt for management frames. */
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}
/*
 * Process completed rx frames from either the legacy or the EDMA rx
 * queue: preprocess, timestamp, hand frames to mac80211, and re-link a
 * fresh buffer to the hardware for each frame consumed.
 *
 * @sc:    driver state
 * @flush: non-zero when draining the queue; frames are requeued unprocessed
 * @hp:    on EDMA hardware, selects the high-priority rx queue
 *
 * Returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;

	/* EDMA buffers are written by both host and hardware. */
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
		rxs =  IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/* Extend the hardware's 32-bit rx timestamp to a full 64-bit
		 * TSF, compensating for wrap-around near the boundary. */
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_rx_skb_postprocess(common, skb, &rs,
					 rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
			  bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			/* Clear the stale error cookie so this buffer is
			 * never unmapped or linked with a bogus DMA address
			 * later on. */
			bf->bf_buf_addr = 0;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		if (unlikely(ath9k_check_auto_sleep(sc) ||
			     (sc->ps_flags & (PS_WAIT_FOR_BEACON |
					      PS_WAIT_FOR_CAB |
					      PS_WAIT_FOR_PSPOLL_DATA))))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}