recv.c 24.2 KB
Newer Older
1
/*
2
 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 4 5 6 7 8 9 10 11 12 13 14 15 16
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

S
Sujith 已提交
17
#include "ath9k.h"
18
#include "ar9003_mac.h"
19

F
Felix Fietkau 已提交
20 21
#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

22 23 24
/*
 * Pick the mac80211 hw that should receive this frame.
 *
 * Unicast frames addressed to a secondary wiphy's permanent MAC
 * address are routed to that wiphy's hw; anything else goes to the
 * primary wiphy.  sc->wiphy_lock protects the sec_wiphy array.
 */
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
					     struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	int slot;

	spin_lock_bh(&sc->wiphy_lock);
	for (slot = 0; slot < sc->num_sec_wiphy; slot++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[slot];

		if (!aphy)
			continue;

		if (!compare_ether_addr(hdr->addr1,
					aphy->hw->wiphy->perm_addr)) {
			hw = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);

	return hw;
}

43 44 45 46 47 48 49 50 51 52
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	/* Clear software status bits before handing the buffer back to hw */
	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	/*
	 * First buffer in the chain: hand its DMA address straight to the
	 * hardware; otherwise append it to the previous descriptor's link.
	 */
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	/* Remember where the next descriptor must be chained in */
	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

S
Sujith 已提交
87 88 89 90
/*
 * Switch the default receive antenna.
 *
 * Programs the hardware first, then records the new default and
 * resets the vote counter used by rx antenna diversity (see
 * ath_rx_tasklet, which calls this after three consecutive votes
 * for the other antenna).
 */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

95 96
/*
 * Program hardware state that depends on the operating mode:
 * rx filter, BSSID mask, opmode, MAC address and multicast filter.
 * Called when (re)starting reception.
 */
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, common->macaddr);

	/* calculate and install multicast filter (accept-all) */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

F
Felix Fietkau 已提交
121 122
/*
 * Push one free rx buffer into the hardware EDMA FIFO for @qtype.
 *
 * Takes the first buffer off the free list (sc->rx.rxbuf) — the caller
 * must guarantee the list is non-empty — clears its status area, hands
 * its DMA address to the hardware and tracks the skb in the software
 * FIFO mirror.  Returns false (without consuming a buffer) when the
 * FIFO is already at its hardware depth limit.
 */
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	/* Zero the status words so stale "done" bits can't be seen */
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				ah->caps.rx_status_len, DMA_TO_DEVICE);

	/* Stash the ath_buf pointer in the skb cb for later lookup */
	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

/*
 * Refill the EDMA rx FIFO for @qtype with up to @size buffers taken
 * from the free list (sc->rx.rxbuf).  Stops early when the hardware
 * FIFO is full (ath_rx_edma_buf_link() returns false) or the free
 * list runs dry.
 *
 * Fix: the original declared and assigned a local `rx_edma` that was
 * never read (dead store) — removed.
 */
static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u32 nbuf = 0;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
		return;
	}

	while (!list_empty(&sc->rx.rxbuf)) {
		nbuf++;

		/* FIFO reached its hardware depth limit */
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

		if (nbuf >= size)
			break;
	}
}

/*
 * Drain the software FIFO mirror for @qtype, returning every backing
 * ath_buf to the free list (sc->rx.rxbuf).
 */
static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		struct ath_buf *bf = SKB_CB_ATHBUF(skb);

		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

197
	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
F
Felix Fietkau 已提交
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245
		if (bf->bf_mpdu)
			dev_kfree_skb_any(bf->bf_mpdu);
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

/* Prepare one EDMA rx queue: record the hw depth, empty both skb lists. */
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	rx_edma->rx_fifo_hwsize = size;
	skb_queue_head_init(&rx_edma->rx_fifo);
	skb_queue_head_init(&rx_edma->rx_buffers);
}

/*
 * Allocate and map rx buffers for EDMA-capable (AR9003+) hardware.
 *
 * Sizes the rx buffer (MPDU + status area, rounded to the cache line),
 * programs the hardware buffer size, initializes both priority queues,
 * then allocates @nbufs ath_buf entries each backed by a DMA-mapped skb.
 * On any failure the partially built state is torn down via
 * ath_rx_edma_cleanup() and a negative errno is returned.
 */
static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;


	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
				     ah->caps.rx_status_len,
				     min(common->cachelsz, (u16)64));

	/* The hardware DMAs the status area separately from the frame */
	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				    ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		/* Bidirectional: hw writes frames, driver writes status */
		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
						bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

/*
 * Start reception on EDMA hardware: enable rx DMA, fill both priority
 * FIFOs to their hardware depth, program opmode-dependent state and
 * finally start the PCU receive engine.
 */
static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	spin_unlock_bh(&sc->rx.rxbuflock);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah);
}

/*
 * Stop reception on EDMA hardware: reclaim all buffers from both
 * priority FIFOs back onto the free list, under the rx buffer lock.
 */
static void ath_edma_stop_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	spin_unlock_bh(&sc->rx.rxbuflock);
}

/*
 * Allocate the receive path: locks, descriptors and DMA-mapped
 * buffers.  EDMA-capable hardware is delegated entirely to
 * ath_rx_edma_init() (which cleans up after itself on failure);
 * legacy hardware sets up a descriptor ring here.
 *
 * Returns 0 on success or a negative errno; on failure any
 * partially built legacy state is released via ath_rx_cleanup().
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->rx.rxflushlock);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_lock_init(&sc->rx.rxbuflock);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		/* Round the buffer up to the cache line size */
		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
				min(common->cachelsz, (u16)64));

		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
				common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
				"rx", nbufs, 1, 0);
		if (error != 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "failed to allocate rx descriptors: %d\n",
				  error);
			goto err;
		}

		/* Back every descriptor with a DMA-mapped skb */
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					common->rx_bufsize,
					DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
							bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				ath_print(common, ATH_DBG_FATAL,
					  "dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
371 372
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
373 374 375
	struct sk_buff *skb;
	struct ath_buf *bf;

F
Felix Fietkau 已提交
376 377 378 379 380 381 382 383 384 385 386 387
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						common->rx_bufsize,
						DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
			}
388
		}
389

F
Felix Fietkau 已提交
390 391 392
		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
S
Sujith 已提交
417

418 419 420 421 422 423 424
	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
425
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
426 427
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

428 429 430 431 432
	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
433
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
S
Sujith 已提交
434
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
435
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR))
436 437
		rfilt |= ATH9K_RX_FILTER_PROM;

438 439 440
	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

441 442 443 444
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
445 446
		rfilt |= ATH9K_RX_FILTER_BEACON;

447 448 449 450
	if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
	    AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
	    (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
	    (sc->rx.rxfilter & FIF_PSPOLL))
451
		rfilt |= ATH9K_RX_FILTER_PSPOLL;
S
Sujith 已提交
452

S
Sujith 已提交
453 454 455
	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

456
	if (sc->sec_wiphy || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
457 458
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
459 460 461
		/* The following may also be needed for other older chips */
		if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
462 463 464
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

465
	return rfilt;
S
Sujith 已提交
466

467 468 469 470 471
#undef RX_FILTER_PRESERVE
}

/*
 * (Re)start reception.  EDMA hardware takes the dedicated path; for
 * legacy hardware the descriptor chain is rebuilt from the free list,
 * the first descriptor handed to the hardware, and the PCU started.
 * Always returns 0.
 */
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	/* Rebuild the whole descriptor chain from scratch */
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

/*
 * Stop reception: halt the PCU, clear the rx filter, then stop rx DMA.
 * Afterwards the software rx state is reset (EDMA FIFOs drained, or the
 * legacy descriptor link pointer cleared).
 *
 * Returns the result of ath9k_hw_stopdmarecv(): whether rx DMA
 * actually stopped.
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;

	return stopped;
}

/*
 * Flush all pending rx frames: run the rx tasklet in flush mode so
 * each buffer is requeued without being processed.  SC_OP_RXFLUSH
 * makes any concurrent interrupt-driven tasklet invocation bail out.
 * On EDMA hardware both the high- and low-priority queues are flushed.
 */
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}

533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567
/*
 * Check whether a Beacon frame carries a DTIM announcing buffered
 * broadcast/multicast (CAB) traffic: walk the information elements,
 * find the TIM, and on a DTIM beacon (dtim_count == 0) return the
 * multicast bit of bitmap_ctrl.
 */
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	u8 *end = skb->data + skb->len;
	u8 *pos = mgmt->u.beacon.variable;

	while (pos + 2 < end) {
		u8 id = *pos++;
		u8 elen = *pos++;

		/* Truncated element: stop parsing */
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			struct ieee80211_tim_ie *tim;

			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			/* Only a DTIM beacon can announce CAB traffic */
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

/*
 * Handle a received beacon while in powersave: clear the wait-for-beacon
 * flag, resynchronize beacon timers if requested, and decide whether to
 * stay awake for buffered broadcast/multicast (CAB) frames.
 */
static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_mgmt *mgmt;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	/* 24-byte header + timestamp(8) + beacon interval(2) + capability(2) */
	if (skb->len < 24 + 8 + 2 + 2)
		return;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
		return; /* not from our current AP */

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_print(common, ATH_DBG_PS,
			  "Reconfigure Beacon timers based on "
			  "timestamp from the AP\n");
		ath_beacon_config(sc, NULL);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
			  "buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "PS wait for CAB frames timed out\n");
	}
}

/*
 * Powersave bookkeeping for a received frame: dispatch beacons to
 * ath_rx_ps_beacon(), detect the end of a CAB burst (last bc/mc frame
 * without the more-data bit), and detect completed PS-Poll exchanges,
 * clearing the corresponding ps_flags so the chip may go back to sleep.
 */
static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
	    ieee80211_is_beacon(hdr->frame_control))
		ath_rx_ps_beacon(sc, skb);
	else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		 (ieee80211_is_data(hdr->frame_control) ||
		  ieee80211_is_action(hdr->frame_control)) &&
		 is_multicast_ether_addr(hdr->addr1) &&
		 !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_print(common, ATH_DBG_PS,
			  "All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_print(common, ATH_DBG_PS,
			  "Going back to sleep after having received "
			  "PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

650 651
static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
				    struct ath_softc *sc, struct sk_buff *skb,
652
				    struct ieee80211_rx_status *rxs)
653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Send the frame to mac80211 */
	if (is_multicast_ether_addr(hdr->addr1)) {
		int i;
		/*
		 * Deliver broadcast/multicast frames to all suitable
		 * virtual wiphys.
		 */
		/* TODO: filter based on channel configuration */
		for (i = 0; i < sc->num_sec_wiphy; i++) {
			struct ath_wiphy *aphy = sc->sec_wiphy[i];
			struct sk_buff *nskb;
			if (aphy == NULL)
				continue;
			nskb = skb_copy(skb, GFP_ATOMIC);
672 673 674
			if (!nskb)
				continue;
			ieee80211_rx(aphy->hw, nskb);
675
		}
676
		ieee80211_rx(sc->hw, skb);
677
	} else
678
		/* Deliver unicast frames based on receiver address */
679
		ieee80211_rx(hw, skb);
680 681
}

F
Felix Fietkau 已提交
682 683
static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
684
{
F
Felix Fietkau 已提交
685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
	if (ret == -EINPROGRESS)
		return false;

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
		skb = skb_peek(&rx_edma->rx_fifo);
		if (!skb)
			return true;

		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);

		__skb_unlink(skb, &rx_edma->rx_fifo);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);
	}
	skb_queue_tail(&rx_edma->rx_buffers, skb);

	return true;
}
726

F
Felix Fietkau 已提交
727 728 729 730 731 732
/*
 * Return the next completed EDMA rx buffer for @qtype, or NULL when
 * none is ready.  Drains the hardware FIFO into rx_buffers first, then
 * dequeues one frame and fills in its rx status.
 */
static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_buf *bf;
	struct sk_buff *skb;

	/* Harvest every frame the hardware has completed so far */
	while (ath_edma_get_buffers(sc, qtype))
		;

	skb = __skb_dequeue(&rx_edma->rx_buffers);
	if (!skb)
		return NULL;

	bf = SKB_CB_ATHBUF(skb);
	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
	return bf;
}

/*
 * Return the next completed legacy (descriptor-ring) rx buffer, or
 * NULL when the hardware is still working.  Handles the corrupted
 * descriptor status case by peeking at the following descriptor.
 *
 * Fix: before the CPU touches the frame data the buffer must be
 * synced *for the CPU*; the original used
 * dma_sync_single_for_device(), which is the wrong ownership
 * direction for a read by the CPU.
 */
static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on.  All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
			common->rx_bufsize,
			DMA_FROM_DEVICE);

	return bf;
}


/*
 * Receive bottom half.  Pulls completed buffers from the hardware
 * (EDMA FIFO or legacy descriptor ring, chosen by @hp/@flush mode),
 * pre/post-processes each frame, swaps in a freshly allocated skb so
 * the buffer can be requeued immediately, and hands the old skb up to
 * mac80211.  With @flush set, every pending buffer is requeued without
 * processing.  Always returns 0.
 */
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	/*
	 * The hw can technically differ from common->hw when using ath9k
	 * virtual wiphy so to account for that we iterate over the active
	 * wiphys and find the appropriate wiphy and therefore hw.
	 */
	struct ieee80211_hw *hw = NULL;
	struct ieee80211_hdr *hdr;
	int retval;
	bool decrypt_error = false;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;

	/* EDMA buffers are mapped FROM_DEVICE, legacy ones BIDIRECTIONAL */
	if (edma)
		dma_type = DMA_FROM_DEVICE;
	else
		dma_type = DMA_BIDIRECTIONAL;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	do {
		/* If handling rx interrupt and flush is in progress => exit */
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		hdr = (struct ieee80211_hdr *) skb->data;
		rxs =  IEEE80211_SKB_RXCB(skb);

		hw = ath_get_virt_hw(sc, hdr);

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (flush)
			goto requeue;

		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
						     rxs, &decrypt_error);
		if (retval)
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
		 * skb and put it at the tail of the sc->rx.rxbuf list for
		 * processing. */
		if (!requeue_skb)
			goto requeue;

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		/* Expose the frame data; strip the leading hw status area */
		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
					     rxs, decrypt_error);

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
			  bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			ath_print(common, ATH_DBG_FATAL,
				  "dma_mapping_error() on RX\n");
			/* Still deliver the frame we already unmapped */
			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
			break;
		}
		bf->bf_dmacontext = bf->bf_buf_addr;

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
		if (sc->rx.defant != rs.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
				ath_setdefantenna(sc, rs.rs_antenna);
		} else {
			sc->rx.rxotherant = 0;
		}

		/* Update powersave state before handing the frame up */
		if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
					     PS_WAIT_FOR_CAB |
					     PS_WAIT_FOR_PSPOLL_DATA)))
			ath_rx_ps(sc, skb);

		ath_rx_send_to_mac80211(hw, sc, skb, rxs);

requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}