/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_rxbuf **)__skb->cb))
F
Felix Fietkau 已提交
22

23 24 25 26 27 28
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

29 30 31 32 33 34 35 36
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
37
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
38
{
39
	struct ath_hw *ah = sc->sc_ah;
40
	struct ath_common *common = ath9k_hw_common(ah);
41 42 43 44
	struct ath_desc *ds;
	struct sk_buff *skb;

	ds = bf->bf_desc;
S
Sujith 已提交
45
	ds->ds_link = 0; /* link to null */
46 47
	ds->ds_data = bf->bf_buf_addr;

S
Sujith 已提交
48
	/* virtual addr of the beginning of the buffer. */
49
	skb = bf->bf_mpdu;
50
	BUG_ON(skb == NULL);
51 52
	ds->ds_vdata = skb->data;

53 54
	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
55
	 * how much data it can DMA to us and that we are prepared
56 57
	 * to process
	 */
S
Sujith 已提交
58
	ath9k_hw_setuprxdesc(ah, ds,
59
			     common->rx_bufsize,
60 61
			     0);

S
Sujith 已提交
62
	if (sc->rx.rxlink == NULL)
63 64
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
S
Sujith 已提交
65
		*sc->rx.rxlink = bf->bf_daddr;
66

S
Sujith 已提交
67
	sc->rx.rxlink = &ds->ds_link;
68 69
}

70
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
71 72 73 74 75 76 77
{
	if (sc->rx.buf_hold)
		ath_rx_buf_link(sc, sc->rx.buf_hold);

	sc->rx.buf_hold = bf;
}

S
Sujith 已提交
78 79 80 81
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
S
Sujith 已提交
82 83
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
S
Sujith 已提交
84 85
}

86 87
static void ath_opmode_init(struct ath_softc *sc)
{
88
	struct ath_hw *ah = sc->sc_ah;
89 90
	struct ath_common *common = ath9k_hw_common(ah);

91 92 93 94 95 96 97
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
98
	ath_hw_setbssidmask(common);
99 100 101 102 103 104 105 106 107

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

F
Felix Fietkau 已提交
108 109
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
110
{
F
Felix Fietkau 已提交
111 112
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
113
	struct sk_buff *skb;
114
	struct ath_rxbuf *bf;
115

F
Felix Fietkau 已提交
116 117 118
	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;
119

120
	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
F
Felix Fietkau 已提交
121
	list_del_init(&bf->list);
122

F
Felix Fietkau 已提交
123 124 125 126 127
	skb = bf->bf_mpdu;

	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				ah->caps.rx_status_len, DMA_TO_DEVICE);
128

F
Felix Fietkau 已提交
129 130
	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
131
	__skb_queue_tail(&rx_edma->rx_fifo, skb);
132

F
Felix Fietkau 已提交
133 134 135 136
	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
137
				  enum ath9k_rx_qtype qtype)
F
Felix Fietkau 已提交
138 139
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
140
	struct ath_rxbuf *bf, *tbf;
F
Felix Fietkau 已提交
141 142

	if (list_empty(&sc->rx.rxbuf)) {
143
		ath_dbg(common, QUEUE, "No free rx buf available\n");
F
Felix Fietkau 已提交
144
		return;
145
	}
146

147
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
F
Felix Fietkau 已提交
148 149 150 151 152 153 154 155
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
156
	struct ath_rxbuf *bf;
F
Felix Fietkau 已提交
157 158 159 160 161
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

162
	while ((skb = __skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
F
Felix Fietkau 已提交
163 164 165 166 167 168 169 170
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
171 172
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
173
	struct ath_rxbuf *bf;
F
Felix Fietkau 已提交
174 175 176 177

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

178
	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
179 180 181 182
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					common->rx_bufsize,
					DMA_BIDIRECTIONAL);
F
Felix Fietkau 已提交
183
			dev_kfree_skb_any(bf->bf_mpdu);
184 185 186
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
F
Felix Fietkau 已提交
187 188 189 190 191
	}
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
192
	__skb_queue_head_init(&rx_edma->rx_fifo);
F
Felix Fietkau 已提交
193 194 195 196 197 198 199 200
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
201
	struct ath_rxbuf *bf;
F
Felix Fietkau 已提交
202 203 204 205 206 207 208 209 210 211 212
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

213
	size = sizeof(struct ath_rxbuf) * nbufs;
214
	bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
F
Felix Fietkau 已提交
215 216 217 218 219 220
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	for (i = 0; i < nbufs; i++, bf++) {
221
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
F
Felix Fietkau 已提交
222
		if (!skb) {
223
			error = -ENOMEM;
F
Felix Fietkau 已提交
224
			goto rx_init_fail;
225 226
		}

F
Felix Fietkau 已提交
227
		memset(skb->data, 0, common->rx_bufsize);
228
		bf->bf_mpdu = skb;
F
Felix Fietkau 已提交
229

230
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
231
						 common->rx_bufsize,
F
Felix Fietkau 已提交
232
						 DMA_BIDIRECTIONAL);
233
		if (unlikely(dma_mapping_error(sc->dev,
F
Felix Fietkau 已提交
234 235 236
						bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
237
				bf->bf_buf_addr = 0;
238
				ath_err(common,
F
Felix Fietkau 已提交
239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	ath9k_hw_rxena(sc->sc_ah);
257 258
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP);
F
Felix Fietkau 已提交
259
	ath_opmode_init(sc);
260
	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
F
Felix Fietkau 已提交
261 262 263 264 265 266 267 268 269 270 271 272
}

/* Reclaim all buffers from both EDMA RX FIFOs. */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
273
	struct ath_rxbuf *bf;
F
Felix Fietkau 已提交
274 275
	int error = 0;

276
	spin_lock_init(&sc->sc_pcu_lock);
F
Felix Fietkau 已提交
277

278 279 280
	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

281
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
F
Felix Fietkau 已提交
282 283
		return ath_rx_edma_init(sc, nbufs);

284 285
	ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
		common->cachelsz, common->rx_bufsize);
F
Felix Fietkau 已提交
286

287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302
	/* Initialize rx descriptors */

	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				  "rx", nbufs, 1, 0);
	if (error != 0) {
		ath_err(common,
			"failed to allocate rx descriptors: %d\n",
			error);
		goto err;
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize,
				      GFP_KERNEL);
		if (skb == NULL) {
			error = -ENOMEM;
303 304
			goto err;
		}
F
Felix Fietkau 已提交
305

306 307 308 309 310 311 312 313 314 315 316 317 318
		bf->bf_mpdu = skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto err;
F
Felix Fietkau 已提交
319
		}
320
	}
321
	sc->rx.rxlink = NULL;
322
err:
323 324 325 326 327 328 329 330
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
331 332
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
333
	struct sk_buff *skb;
334
	struct ath_rxbuf *bf;
335

F
Felix Fietkau 已提交
336 337 338
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
339 340 341 342 343 344 345 346 347 348 349
	}

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		skb = bf->bf_mpdu;
		if (skb) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
350
		}
F
Felix Fietkau 已提交
351
	}
352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

L
Luis R. Rodriguez 已提交
377 378 379
	if (config_enabled(CONFIG_ATH9K_TX99))
		return 0;

380
	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
381 382
		| ATH9K_RX_FILTER_MCAST;

383 384 385 386
	/* if operating on a DFS channel, enable radar pulse detection */
	if (sc->hw->conf.radar_enabled)
		rfilt |= ATH9K_RX_FILTER_PHYRADAR | ATH9K_RX_FILTER_PHYERR;

387
	if (sc->rx.rxfilter & FIF_PROBE_REQ)
388 389
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

390 391 392 393 394
	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
395
	if (sc->sc_ah->is_monitoring)
396 397
		rfilt |= ATH9K_RX_FILTER_PROM;

398 399 400
	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

401
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
402
	    (sc->nvifs <= 1) &&
403 404 405
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
406 407
		rfilt |= ATH9K_RX_FILTER_BEACON;

408
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
409
	    (sc->rx.rxfilter & FIF_PSPOLL))
410
		rfilt |= ATH9K_RX_FILTER_PSPOLL;
S
Sujith 已提交
411

S
Sujith 已提交
412 413 414
	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

415
	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
416 417
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
418
			rfilt |= ATH9K_RX_FILTER_PROM;
419 420 421
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

422
	if (AR_SREV_9550(sc->sc_ah) || AR_SREV_9531(sc->sc_ah))
423 424
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

425
	return rfilt;
S
Sujith 已提交
426

427 428 429 430
}

int ath_startrecv(struct ath_softc *sc)
{
431
	struct ath_hw *ah = sc->sc_ah;
432
	struct ath_rxbuf *bf, *tbf;
433

F
Felix Fietkau 已提交
434 435 436 437 438
	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

S
Sujith 已提交
439
	if (list_empty(&sc->rx.rxbuf))
440 441
		goto start_recv;

442
	sc->rx.buf_hold = NULL;
S
Sujith 已提交
443 444
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
445 446 447 448
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
S
Sujith 已提交
449
	if (list_empty(&sc->rx.rxbuf))
450 451
		goto start_recv;

452
	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
453
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
S
Sujith 已提交
454
	ath9k_hw_rxena(ah);
455 456

start_recv:
S
Sujith 已提交
457
	ath_opmode_init(sc);
458
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
S
Sujith 已提交
459

460 461 462
	return 0;
}

F
Felix Fietkau 已提交
463 464 465 466 467 468 469
static void ath_flushrecv(struct ath_softc *sc)
{
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
}

470 471
bool ath_stoprecv(struct ath_softc *sc)
{
472
	struct ath_hw *ah = sc->sc_ah;
473
	bool stopped, reset = false;
474

475
	ath9k_hw_abortpcurecv(ah);
S
Sujith 已提交
476
	ath9k_hw_setrxfilter(ah, 0);
477
	stopped = ath9k_hw_stopdmarecv(ah, &reset);
F
Felix Fietkau 已提交
478

F
Felix Fietkau 已提交
479 480
	ath_flushrecv(sc);

F
Felix Fietkau 已提交
481 482 483 484
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
S
Sujith 已提交
485

486 487
	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
488 489 490 491 492
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
493
	return stopped && !reset;
494 495
}

496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
530
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
531 532 533 534

	if (skb->len < 24 + 8 + 2 + 2)
		return;

S
Sujith 已提交
535
	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
536

S
Sujith 已提交
537 538
	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
539
		ath_dbg(common, PS,
S
Sujith Manoharan 已提交
540
			"Reconfigure beacon timers based on synchronized timestamp\n");
S
Sujith Manoharan 已提交
541
		ath9k_set_beacon(sc);
542 543 544

		if (sc->p2p_ps_vif)
			ath9k_update_p2p_ps(sc, sc->p2p_ps_vif->vif);
545 546
	}

547 548 549
	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
550 551 552 553
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
554
		 */
555
		ath_dbg(common, PS,
J
Joe Perches 已提交
556
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
S
Sujith 已提交
557
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
558 559 560
		return;
	}

S
Sujith 已提交
561
	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
562 563 564 565 566
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
S
Sujith 已提交
567
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
568
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
569 570 571
	}
}

572
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
573 574
{
	struct ieee80211_hdr *hdr;
575
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
576 577 578 579

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
580
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
S
Sujith Manoharan 已提交
581
	    && mybeacon) {
582
		ath_rx_ps_beacon(sc, skb);
S
Sujith Manoharan 已提交
583 584 585 586 587
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
588 589 590 591
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
592
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
593
		ath_dbg(common, PS,
J
Joe Perches 已提交
594
			"All PS CAB frames received, back to sleep\n");
S
Sujith 已提交
595
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
596 597
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
S
Sujith 已提交
598
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
599
		ath_dbg(common, PS,
J
Joe Perches 已提交
600
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
S
Sujith 已提交
601 602 603 604
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
605 606 607
	}
}

F
Felix Fietkau 已提交
608
static bool ath_edma_get_buffers(struct ath_softc *sc,
609 610
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
611
				 struct ath_rxbuf **dest)
612
{
F
Felix Fietkau 已提交
613 614 615 616
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
617
	struct ath_rxbuf *bf;
F
Felix Fietkau 已提交
618 619 620 621 622 623 624 625 626
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

M
Ming Lei 已提交
627
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
F
Felix Fietkau 已提交
628 629
				common->rx_bufsize, DMA_FROM_DEVICE);

630
	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
M
Ming Lei 已提交
631 632 633 634
	if (ret == -EINPROGRESS) {
		/*let device gain the buffer again*/
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);
F
Felix Fietkau 已提交
635
		return false;
M
Ming Lei 已提交
636
	}
F
Felix Fietkau 已提交
637 638 639 640 641 642 643

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

644 645 646 647 648 649 650 651 652
		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}
653 654

		bf = NULL;
F
Felix Fietkau 已提交
655 656
	}

657
	*dest = bf;
F
Felix Fietkau 已提交
658 659
	return true;
}
660

661
static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
F
Felix Fietkau 已提交
662 663 664
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
665
	struct ath_rxbuf *bf = NULL;
F
Felix Fietkau 已提交
666

667 668 669
	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;
F
Felix Fietkau 已提交
670

671 672 673
		return bf;
	}
	return NULL;
F
Felix Fietkau 已提交
674 675
}

676
static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
F
Felix Fietkau 已提交
677 678 679 680
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
681
	struct ath_desc *ds;
682
	struct ath_rxbuf *bf;
F
Felix Fietkau 已提交
683 684 685 686 687 688 689
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

690
	bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
691 692 693
	if (bf == sc->rx.buf_hold)
		return NULL;

F
Felix Fietkau 已提交
694 695 696 697 698 699 700 701 702 703 704 705 706
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
707
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
F
Felix Fietkau 已提交
708 709
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
710
		struct ath_rxbuf *tbf;
F
Felix Fietkau 已提交
711 712 713 714 715 716 717 718
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

719
		tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
F
Felix Fietkau 已提交
720 721 722 723 724 725 726 727 728 729 730 731 732

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
733
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
F
Felix Fietkau 已提交
734 735
		if (ret == -EINPROGRESS)
			return NULL;
736 737

		/*
738 739
		 * Re-check previous descriptor, in case it has been filled
		 * in the mean time.
740
		 */
741 742 743 744 745 746 747 748 749
		ret = ath9k_hw_rxprocdesc(ah, ds, rs);
		if (ret == -EINPROGRESS) {
			/*
			 * mark descriptor as zero-length and set the 'more'
			 * flag to ensure that both buffers get discarded
			 */
			rs->rs_datalen = 0;
			rs->rs_more = true;
		}
F
Felix Fietkau 已提交
750 751
	}

752
	list_del(&bf->list);
F
Felix Fietkau 已提交
753 754 755 756 757 758 759 760
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
M
Ming Lei 已提交
761
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
F
Felix Fietkau 已提交
762 763 764 765 766 767
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}

S
Sujith Manoharan 已提交
768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783
static void ath9k_process_tsf(struct ath_rx_status *rs,
			      struct ieee80211_rx_status *rxs,
			      u64 tsf)
{
	u32 tsf_lower = tsf & 0xffffffff;

	rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
	if (rs->rs_tstamp > tsf_lower &&
	    unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
		rxs->mactime -= 0x100000000ULL;

	if (rs->rs_tstamp < tsf_lower &&
	    unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
		rxs->mactime += 0x100000000ULL;
}

784 785 786 787 788
/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
789
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
790
				   struct sk_buff *skb,
791 792
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
S
Sujith Manoharan 已提交
793
				   bool *decrypt_error, u64 tsf)
794
{
795 796 797
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
798
	struct ieee80211_hdr *hdr;
799 800
	bool discard_current = sc->rx.discard_next;

801 802 803 804
	/*
	 * Discard corrupt descriptors which are marked in
	 * ath_get_next_rx_buf().
	 */
805
	if (discard_current)
806 807 808
		goto corrupt;

	sc->rx.discard_next = false;
809

810 811 812 813 814
	/*
	 * Discard zero-length packets.
	 */
	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
815
		goto corrupt;
816 817
	}

818 819 820 821 822
	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
823 824
	if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
		RX_STAT_INC(rx_len_err);
825
		goto corrupt;
826 827
	}

828 829 830 831
	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

832 833 834 835 836 837 838
	/*
	 * Return immediately if the RX descriptor has been marked
	 * as corrupt based on the various error bits.
	 *
	 * This is different from the other corrupt descriptor
	 * condition handled above.
	 */
839 840
	if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
		goto corrupt;
841

842 843
	hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);

S
Sujith Manoharan 已提交
844
	ath9k_process_tsf(rx_stats, rx_status, tsf);
845
	ath_debug_stat_rx(sc, rx_stats);
S
Sujith Manoharan 已提交
846

847 848 849 850 851 852 853 854 855
	/*
	 * Process PHY errors and return so that the packet
	 * can be dropped.
	 */
	if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
		ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
		if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
			RX_STAT_INC(rx_spectral);

856
		return -EINVAL;
857 858
	}

859 860 861 862
	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
863
	if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
864
		return -EINVAL;
865

O
Oleksij Rempel 已提交
866 867 868 869
	if (ath_is_mybeacon(common, hdr)) {
		RX_STAT_INC(rx_beacons);
		rx_stats->is_mybeacon = true;
	}
870

871 872 873
	/*
	 * This shouldn't happen, but have a safety check anyway.
	 */
874 875
	if (WARN_ON(!ah->curchan))
		return -EINVAL;
876

877 878 879 880 881 882 883 884
	if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status)) {
		/*
		 * No valid hardware bitrate found -- we should not get here
		 * because hardware has already validated this frame as OK.
		 */
		ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
			rx_stats->rs_rate);
		RX_STAT_INC(rx_rate_err);
885
		return -EINVAL;
886
	}
887

888
	ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
889

890 891
	rx_status->band = ah->curchan->chan->band;
	rx_status->freq = ah->curchan->chan->center_freq;
892
	rx_status->antenna = rx_stats->rs_antenna;
893
	rx_status->flag |= RX_FLAG_MACTIME_END;
894

S
Sujith Manoharan 已提交
895 896 897 898 899 900
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	if (ieee80211_is_data_present(hdr->frame_control) &&
	    !ieee80211_is_qos_nullfunc(hdr->frame_control))
		sc->rx.num_pkts++;
#endif

901 902 903 904 905
	return 0;

corrupt:
	sc->rx.discard_next = rx_stats->rs_more;
	return -EINVAL;
906 907
}

S
Sujith Manoharan 已提交
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946
/*
 * Run the LNA combining algorithm only in these cases:
 *
 * Standalone WLAN cards with both LNA/Antenna diversity
 * enabled in the EEPROM.
 *
 * WLAN+BT cards which are in the supported card list
 * in ath_pci_id_table and the user has loaded the
 * driver with "bt_ant_diversity" set to true.
 */
static void ath9k_antenna_check(struct ath_softc *sc,
				struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
		return;

	/*
	 * Change the default rx antenna if rx diversity
	 * chooses the other antenna 3 times in a row.
	 */
	if (sc->rx.defant != rs->rs_antenna) {
		if (++sc->rx.rxotherant >= 3)
			ath_setdefantenna(sc, rs->rs_antenna);
	} else {
		sc->rx.rxotherant = 0;
	}

	if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
		if (common->bt_ant_diversity)
			ath_ant_comb_scan(sc, rs);
	} else {
		ath_ant_comb_scan(sc, rs);
	}
}

947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964
static void ath9k_apply_ampdu_details(struct ath_softc *sc,
	struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
	if (rs->rs_isaggr) {
		rxs->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;

		rxs->ampdu_reference = sc->rx.ampdu_ref;

		if (!rs->rs_moreaggr) {
			rxs->flag |= RX_FLAG_AMPDU_IS_LAST;
			sc->rx.ampdu_ref++;
		}

		if (rs->rs_flags & ATH9K_RX_DELIM_CRC_PRE)
			rxs->flag |= RX_FLAG_AMPDU_DELIM_CRC_ERROR;
	}
}

F
Felix Fietkau 已提交
965 966
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
967
	struct ath_rxbuf *bf;
968
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
969
	struct ieee80211_rx_status *rxs;
970
	struct ath_hw *ah = sc->sc_ah;
971
	struct ath_common *common = ath9k_hw_common(ah);
972
	struct ieee80211_hw *hw = sc->hw;
973
	int retval;
974
	struct ath_rx_status rs;
F
Felix Fietkau 已提交
975 976 977
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
978
	u64 tsf = 0;
979
	unsigned long flags;
980
	dma_addr_t new_buf_addr;
S
Sujith 已提交
981

F
Felix Fietkau 已提交
982 983
	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
984 985
	else
		dma_type = DMA_FROM_DEVICE;
F
Felix Fietkau 已提交
986 987

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
988

989 990
	tsf = ath9k_hw_gettsf64(ah);

991
	do {
992
		bool decrypt_error = false;
993

994
		memset(&rs, 0, sizeof(rs));
F
Felix Fietkau 已提交
995 996 997 998
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);
999

F
Felix Fietkau 已提交
1000 1001
		if (!bf)
			break;
1002 1003

		skb = bf->bf_mpdu;
S
Sujith 已提交
1004
		if (!skb)
1005 1006
			continue;

1007 1008 1009 1010 1011 1012 1013 1014 1015
		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

1016
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
1017 1018
		memset(rxs, 0, sizeof(struct ieee80211_rx_status));

1019
		retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
S
Sujith Manoharan 已提交
1020
						 &decrypt_error, tsf);
1021 1022 1023
		if (retval)
			goto requeue_drop_frag;

1024 1025
		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
1026
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
1027 1028 1029

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
S
Sujith 已提交
1030
		 * skb and put it at the tail of the sc->rx.rxbuf list for
1031
		 * processing. */
B
Ben Greear 已提交
1032 1033
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
1034
			goto requeue_drop_frag;
B
Ben Greear 已提交
1035
		}
1036

1037 1038 1039 1040 1041 1042 1043 1044
		/* We will now give hardware our shiny new allocated skb */
		new_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
					      common->rx_bufsize, dma_type);
		if (unlikely(dma_mapping_error(sc->dev, new_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			goto requeue_drop_frag;
		}

1045
		/* Unmap the frame */
1046
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
1047
				 common->rx_bufsize, dma_type);
1048

S
Sujith Manoharan 已提交
1049 1050 1051
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = new_buf_addr;

F
Felix Fietkau 已提交
1052 1053 1054
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);
S
Sujith 已提交
1055

1056
		if (!rs.rs_more)
1057 1058
			ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
						     rxs, decrypt_error);
S
Sujith 已提交
1059

1060
		if (rs.rs_more) {
B
Ben Greear 已提交
1061
			RX_STAT_INC(rx_frags);
1062 1063 1064 1065 1066 1067 1068 1069 1070
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
B
Ben Greear 已提交
1071
				RX_STAT_INC(rx_too_many_frags_err);
1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
B
Ben Greear 已提交
1083
				RX_STAT_INC(rx_oom_err);
1084 1085 1086
				goto requeue_drop_frag;
			}

1087 1088
			sc->rx.frag = NULL;

1089 1090 1091 1092 1093 1094
			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

1095 1096
		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);
1097

1098 1099 1100 1101 1102 1103 1104
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1105

S
Sujith Manoharan 已提交
1106
		ath9k_antenna_check(sc, &rs);
1107
		ath9k_apply_ampdu_details(sc, &rs, rxs);
1108
		ath_debug_rate_stats(sc, &rs, skb);
1109

1110
		ieee80211_rx(hw, skb);
1111

1112 1113 1114 1115 1116
requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
1117
requeue:
1118 1119 1120 1121
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		if (flush)
			continue;

F
Felix Fietkau 已提交
1122 1123 1124
		if (edma) {
			ath_rx_edma_buf_link(sc, qtype);
		} else {
1125
			ath_rx_buf_relink(sc, bf);
1126
			ath9k_hw_rxena(ah);
F
Felix Fietkau 已提交
1127
		}
S
Sujith 已提交
1128 1129
	} while (1);

1130 1131
	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
1132
		ath9k_hw_set_interrupts(ah);
1133 1134
	}

1135 1136
	return 0;
}