/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"

#include <linux/log2.h>

/* slightly larger than one large A-MPDU */
#define HTT_RX_RING_SIZE_MIN 128

/* roughly 20 ms @ 1 Gbps of 1500B MSDUs */
#define HTT_RX_RING_SIZE_MAX 2048

#define HTT_RX_AVG_FRM_BYTES 1000

/* ms, very conservative */
#define HTT_RX_HOST_LATENCY_MAX_MS 20

/* ms, conservative */
#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10

/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);

static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
{
	int size;

	/*
	 * It is expected that the host CPU will typically be able to
	 * service the rx indication from one A-MPDU before the rx
	 * indication from the subsequent A-MPDU happens, roughly 1-2 ms
	 * later. However, the rx ring should be sized very conservatively,
	 * to accommodate the worst reasonable delay before the host CPU
	 * services a rx indication interrupt.
	 *
	 * The rx ring need not be kept full of empty buffers. In theory,
	 * the htt host SW can dynamically track the low-water mark in the
	 * rx ring, and dynamically adjust the level to which the rx ring
	 * is filled with empty buffers, to dynamically meet the desired
	 * low-water mark.
	 *
	 * In contrast, it's difficult to resize the rx ring itself, once
	 * it's in use. Thus, the ring itself should be sized very
	 * conservatively, while the degree to which the ring is filled
	 * with empty buffers should be sized moderately conservatively.
	 */

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000  /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;

	if (size < HTT_RX_RING_SIZE_MIN)
		size = HTT_RX_RING_SIZE_MIN;

	if (size > HTT_RX_RING_SIZE_MAX)
		size = HTT_RX_RING_SIZE_MAX;

	size = roundup_pow_of_two(size);

	return size;
}
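
/*
 * Worked example of the sizing math above (illustrative only; the
 * 800 Mbps figure is an assumed max_throughput_mbps, not a value
 * defined in this file): 800 * 1000 / (8 * HTT_RX_AVG_FRM_BYTES) =
 * 100 frames per ms, so a 20 ms worst-case host latency calls for
 * 100 * 20 = 2000 buffers, which is below HTT_RX_RING_SIZE_MAX and
 * rounds up to a 2048-entry ring.
 */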

static int ath10k_htt_rx_ring_fill_level(struct ath10k_htt *htt)
{
	int size;

	/* 1e6 bps/mbps / 1e3 ms per sec = 1000 */
	size =
	    htt->max_throughput_mbps *
	    1000  /
	    (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;

	/*
	 * Make sure the fill level is at least 1 less than the ring size.
	 * Leaving 1 element empty allows the SW to easily distinguish
	 * between a full ring vs. an empty ring.
	 */
	if (size >= htt->rx_ring.size)
		size = htt->rx_ring.size - 1;

	return size;
}
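
/*
 * Illustrative sketch (not driver code) of the full-vs-empty ambiguity
 * the comment above refers to: with masked indices a completely full
 * ring and a completely empty ring both satisfy rd == wr, unless one
 * slot is kept unused so "full" stops one short of wrapping:
 *
 *	bool ring_empty = (rd_idx == wr_idx);
 *	bool ring_full  = (((wr_idx + 1) & size_mask) == rd_idx);
 */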

static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *cb;
	int i;

	for (i = 0; i < htt->rx_ring.fill_cnt; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		cb = ATH10K_SKB_CB(skb);
		dma_unmap_single(htt->ar->dev, cb->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	htt->rx_ring.fill_cnt = 0;
}

static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	struct htt_rx_desc *rx_desc;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0, idx;

	idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
	while (num > 0) {
		skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
		if (!skb) {
			ret = -ENOMEM;
			goto fail;
		}

		if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
			skb_pull(skb,
				 PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
				 skb->data);

		/* Clear rx_desc attention word before posting to Rx ring */
		rx_desc = (struct htt_rx_desc *)skb->data;
		rx_desc->attention.flags = __cpu_to_le32(0);

		paddr = dma_map_single(htt->ar->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
			dev_kfree_skb_any(skb);
			ret = -ENOMEM;
			goto fail;
		}

		ATH10K_SKB_CB(skb)->paddr = paddr;
		htt->rx_ring.netbufs_ring[idx] = skb;
		htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
		htt->rx_ring.fill_cnt++;

		num--;
		idx++;
		idx &= htt->rx_ring.size_mask;
	}

fail:
	*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
	return ret;
}

static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
	lockdep_assert_held(&htt->rx_ring.lock);
	return __ath10k_htt_rx_ring_fill_n(htt, num);
}

static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
	int ret, num_deficit, num_to_fill;

	/* Refilling the whole RX ring buffer proves to be a bad idea. The
	 * reason is RX may take up significant amount of CPU cycles and starve
	 * other tasks, e.g. TX on an ethernet device while acting as a bridge
	 * with ath10k wlan interface. This ended up with very poor performance
	 * once the host system CPU was overwhelmed with RX on ath10k.
	 *
	 * By limiting the number of refills the replenishing occurs
	 * progressively. This in turn makes use of the fact that tasklets are
	 * processed in FIFO order. This means actual RX processing can starve
	 * out refilling. If there aren't enough buffers on the RX ring the FW
	 * will not report RX until the ring is refilled with enough buffers.
	 * This automatically balances load with respect to CPU power.
	 *
	 * This probably comes at a cost of lower maximum throughput but
	 * improves the average and stability. */
	spin_lock_bh(&htt->rx_ring.lock);
	num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
	num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
	num_deficit -= num_to_fill;
	ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
	if (ret == -ENOMEM) {
		/*
		 * Failed to fill it to the desired level -
		 * we'll start a timer and try again next time.
		 * As long as enough buffers are left in the ring for
		 * another A-MPDU rx, no special recovery is needed.
		 */
		mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
			  msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
	} else if (num_deficit > 0) {
		tasklet_schedule(&htt->rx_replenish_task);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}

static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)arg;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < htt->rx_ring.size; i++) {
		skb = htt->rx_ring.netbufs_ring[i];
		if (!skb)
			continue;

		dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		htt->rx_ring.netbufs_ring[i] = NULL;
	}
}

void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
	del_timer_sync(&htt->rx_ring.refill_retry_timer);
	tasklet_kill(&htt->rx_replenish_task);
	tasklet_kill(&htt->txrx_compl_task);

	skb_queue_purge(&htt->tx_compl_q);
	skb_queue_purge(&htt->rx_compl_q);

	ath10k_htt_rx_ring_clean_up(htt);

	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);

	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);

	kfree(htt->rx_ring.netbufs_ring);
}

static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int idx;
	struct sk_buff *msdu;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_ring.fill_cnt == 0) {
		ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
		return NULL;
	}

	idx = htt->rx_ring.sw_rd_idx.msdu_payld;
	msdu = htt->rx_ring.netbufs_ring[idx];
	htt->rx_ring.netbufs_ring[idx] = NULL;

	idx++;
	idx &= htt->rx_ring.size_mask;
	htt->rx_ring.sw_rd_idx.msdu_payld = idx;
	htt->rx_ring.fill_cnt--;

	trace_ath10k_htt_rx_pop_msdu(ar, msdu->data, msdu->len +
				     skb_tailroom(msdu));

	return msdu;
}

static void ath10k_htt_rx_free_msdu_chain(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		dev_kfree_skb_any(skb);
		skb = next;
	}
}

/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
				   u8 **fw_desc, int *fw_desc_len,
				   struct sk_buff **head_msdu,
				   struct sk_buff **tail_msdu,
				   u32 *attention)
{
	struct ath10k *ar = htt->ar;
	int msdu_len, msdu_chaining = 0;
	struct sk_buff *msdu, *next;
	struct htt_rx_desc *rx_desc;
	u32 tsf;

	lockdep_assert_held(&htt->rx_ring.lock);

	if (htt->rx_confused) {
		ath10k_warn(ar, "htt is confused. refusing rx\n");
		return -1;
	}

	msdu = *head_msdu = ath10k_htt_rx_netbuf_pop(htt);
	while (msdu) {
		int last_msdu, msdu_len_invalid, msdu_chained;

		dma_unmap_single(htt->ar->dev,
				 ATH10K_SKB_CB(msdu)->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
				msdu->data, msdu->len + skb_tailroom(msdu));

		rx_desc = (struct htt_rx_desc *)msdu->data;

		/* FIXME: we must report msdu payload since this is what caller
		 *        expects now */
		skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
		skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));

		/*
		 * Sanity check - confirm the HW is finished filling in the
		 * rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */
		if (!(__le32_to_cpu(rx_desc->attention.flags)
				& RX_ATTENTION_FLAGS_MSDU_DONE)) {
			ath10k_htt_rx_free_msdu_chain(*head_msdu);
			*head_msdu = NULL;
			msdu = NULL;
			ath10k_err(ar, "htt rx stopped. cannot recover\n");
			htt->rx_confused = true;
			break;
		}

		*attention |= __le32_to_cpu(rx_desc->attention.flags) &
					    (RX_ATTENTION_FLAGS_TKIP_MIC_ERR |
					     RX_ATTENTION_FLAGS_DECRYPT_ERR |
					     RX_ATTENTION_FLAGS_FCS_ERR |
					     RX_ATTENTION_FLAGS_MGMT_TYPE);
		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf. HL uses the
		 * same rx indication message definition as LL, and simply
		 * appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself). So, the offset into the rx
		 * indication message only has to account for the standard
		 * offset of the per-MSDU FW rx desc info within the
		 * message, and how many bytes of the per-MSDU FW rx desc
		 * info have already been consumed. (And the endianness of
		 * the host, since for a big-endian host, the rx ind
		 * message contents, including the per-MSDU rx desc bytes,
		 * were byteswapped during upload.)
		 */
		if (*fw_desc_len > 0) {
			rx_desc->fw_desc.info0 = **fw_desc;
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure, verify
			 * that the target has not attached extension data
			 * (e.g. LRO flow ID).
			 */

			/* or more, if there's extension data */
			(*fw_desc)++;
			(*fw_desc_len)--;
		} else {
			/*
			 * When an oversized A-MSDU happens, the FW will lose
			 * some of the MSDU status - in this case, the FW
			 * descriptors provided will be less than the
			 * actual MSDUs inside this MPDU. Mark the FW
			 * descriptors so that the frames will still be
			 * delivered to the upper stack, if there is no CRC
			 * error for this MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for
			 * MSDUs at the end of this A-MSDU instead of the
			 * beginning.
			 */
			rx_desc->fw_desc.info0 = 0;
		}

		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
			      RX_MSDU_START_INFO0_MSDU_LENGTH);
		msdu_chained = rx_desc->frag_info.ring2_more_count;

		if (msdu_len_invalid)
			msdu_len = 0;

		skb_trim(msdu, 0);
		skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
		msdu_len -= msdu->len;

		/* FIXME: Do chained buffers include htt_rx_desc or not? */
		while (msdu_chained--) {
			struct sk_buff *next = ath10k_htt_rx_netbuf_pop(htt);

			dma_unmap_single(htt->ar->dev,
					 ATH10K_SKB_CB(next)->paddr,
					 next->len + skb_tailroom(next),
					 DMA_FROM_DEVICE);

			ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
					"htt rx chained: ", next->data,
					next->len + skb_tailroom(next));

			skb_trim(next, 0);
			skb_put(next, min(msdu_len, HTT_RX_BUF_SIZE));
			msdu_len -= next->len;

			msdu->next = next;
			msdu = next;
			msdu_chaining = 1;
		}

		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
				RX_MSDU_END_INFO0_LAST_MSDU;

		tsf = __le32_to_cpu(rx_desc->ppdu_end.tsf_timestamp);
		trace_ath10k_htt_rx_desc(ar, tsf, &rx_desc->attention,
					 sizeof(*rx_desc) - sizeof(u32));
		if (last_msdu) {
			msdu->next = NULL;
			break;
		}

		next = ath10k_htt_rx_netbuf_pop(htt);
		msdu->next = next;
		msdu = next;
	}
	*tail_msdu = msdu;

	if (*head_msdu == NULL)
		msdu_chaining = -1;

	/*
	 * Don't refill the ring yet.
	 *
	 * First, the elements popped here are still in use - it is not
	 * safe to overwrite them until the matching call to
	 * mpdu_desc_list_next. Second, for efficiency it is preferable to
	 * refill the rx ring with 1 PPDU's worth of rx buffers (something
	 * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
	 * (something like 3 buffers). Consequently, we'll rely on the txrx
	 * SW to tell us when it is done pulling all the PPDU's rx buffers
	 * out of the rx ring, and then refill it just once.
	 */

	return msdu_chaining;
}

static void ath10k_htt_rx_replenish_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;

	ath10k_htt_rx_msdu_buff_replenish(htt);
}

int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	dma_addr_t paddr;
	void *vaddr;
	size_t size;
	struct timer_list *timer = &htt->rx_ring.refill_retry_timer;

	htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
	if (!is_power_of_2(htt->rx_ring.size)) {
		ath10k_warn(ar, "htt rx ring size is not power of 2\n");
		return -EINVAL;
	}

	htt->rx_ring.size_mask = htt->rx_ring.size - 1;

	/*
	 * Set the initial value for the level to which the rx ring
	 * should be filled, based on the max throughput and the
	 * worst likely latency for the host to fill the rx ring
	 * with new buffers. In theory, this fill level can be
	 * dynamically adjusted from the initial value set here, to
	 * reflect the actual host latency rather than a
	 * conservative assumption about the host latency.
	 */
	htt->rx_ring.fill_level = ath10k_htt_rx_ring_fill_level(htt);

	htt->rx_ring.netbufs_ring =
		kzalloc(htt->rx_ring.size * sizeof(struct sk_buff *),
			GFP_KERNEL);
	if (!htt->rx_ring.netbufs_ring)
		goto err_netbuf;

	size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);

	vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_ring;

	htt->rx_ring.paddrs_ring = vaddr;
	htt->rx_ring.base_paddr = paddr;

	vaddr = dma_alloc_coherent(htt->ar->dev,
				   sizeof(*htt->rx_ring.alloc_idx.vaddr),
				   &paddr, GFP_DMA);
	if (!vaddr)
		goto err_dma_idx;

	htt->rx_ring.alloc_idx.vaddr = vaddr;
	htt->rx_ring.alloc_idx.paddr = paddr;
	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
	*htt->rx_ring.alloc_idx.vaddr = 0;

	/* Initialize the Rx refill retry timer */
	setup_timer(timer, ath10k_htt_rx_ring_refill_retry, (unsigned long)htt);

	spin_lock_init(&htt->rx_ring.lock);

	htt->rx_ring.fill_cnt = 0;
	if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
		goto err_fill_ring;

	tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
		     (unsigned long)htt);

	skb_queue_head_init(&htt->tx_compl_q);
	skb_queue_head_init(&htt->rx_compl_q);

	tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
		     (unsigned long)htt);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
		   htt->rx_ring.size, htt->rx_ring.fill_level);
	return 0;

err_fill_ring:
	ath10k_htt_rx_ring_free(htt);
	dma_free_coherent(htt->ar->dev,
			  sizeof(*htt->rx_ring.alloc_idx.vaddr),
			  htt->rx_ring.alloc_idx.vaddr,
			  htt->rx_ring.alloc_idx.paddr);
err_dma_idx:
	dma_free_coherent(htt->ar->dev,
			  (htt->rx_ring.size *
			   sizeof(htt->rx_ring.paddrs_ring)),
			  htt->rx_ring.paddrs_ring,
			  htt->rx_ring.base_paddr);
err_dma_ring:
	kfree(htt->rx_ring.netbufs_ring);
err_netbuf:
	return -ENOMEM;
}

static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
					  enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_WEP128: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
	case HTT_RX_MPDU_ENCRYPT_WAPI: /* not tested */
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	case HTT_RX_MPDU_ENCRYPT_NONE:
		return 0;
	}

	ath10k_warn(ar, "unknown encryption type %d\n", type);
	return 0;
}

static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
					 enum htt_rx_mpdu_encrypt_type type)
{
	switch (type) {
	case HTT_RX_MPDU_ENCRYPT_NONE:
	case HTT_RX_MPDU_ENCRYPT_WEP40:
	case HTT_RX_MPDU_ENCRYPT_WEP104:
	case HTT_RX_MPDU_ENCRYPT_WEP128:
	case HTT_RX_MPDU_ENCRYPT_WAPI:
		return 0;
	case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
	case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
		return 4;
	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
		return 8;
	}

	ath10k_warn(ar, "unknown encryption type %d\n", type);
	return 0;
}
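
/*
 * Illustrative summary of the two helpers above (the values simply
 * restate the switch statements): the "param" length is the per-MPDU
 * crypto header that follows the 802.11 header (e.g. 8 bytes of
 * IV/Ext-IV for TKIP and AES-CCM), while the "tail" length is the
 * trailer in front of the FCS (e.g. 4 bytes of ICV for TKIP, 8 bytes
 * of MIC for AES-CCM). The TKIP Michael MIC on the last fragment is
 * accounted for separately in the fragmented rx path below.
 */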

/* Applies for first msdu in chain, before altering it. */
static struct ieee80211_hdr *ath10k_htt_rx_skb_get_hdr(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt == RX_MSDU_DECAP_RAW)
		return (void *)skb->data;

	return (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
}

/* This function only applies for first msdu in an msdu chain */
static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
{
	u8 *qc;

	if (ieee80211_is_data_qos(hdr->frame_control)) {
		qc = ieee80211_get_qos_ctl(hdr);
		if (qc[0] & 0x80)
			return true;
	}
	return false;
}

struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;
} __packed;

struct amsdu_subframe_hdr {
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	__be16 len;
} __packed;
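
/*
 * Illustrative layout (not taken from a spec text, just restating what
 * the RX_MSDU_DECAP_ETHERNET2_DIX case below rebuilds): the synthesized
 * ethernet header is stripped and the original headers are pushed back
 * in front of the payload, giving
 *
 *	[802.11 hdr][amsdu_subframe_hdr][rfc1042_hdr][payload]
 */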

static const u8 rx_legacy_rate_idx[] = {
	3,	/* 0x00  - 11Mbps  */
	2,	/* 0x01  - 5.5Mbps */
	1,	/* 0x02  - 2Mbps   */
	0,	/* 0x03  - 1Mbps   */
	3,	/* 0x04  - 11Mbps  */
	2,	/* 0x05  - 5.5Mbps */
	1,	/* 0x06  - 2Mbps   */
	0,	/* 0x07  - 1Mbps   */
	10,	/* 0x08  - 48Mbps  */
	8,	/* 0x09  - 24Mbps  */
	6,	/* 0x0A  - 12Mbps  */
	4,	/* 0x0B  - 6Mbps   */
	11,	/* 0x0C  - 54Mbps  */
	9,	/* 0x0D  - 36Mbps  */
	7,	/* 0x0E  - 18Mbps  */
	5,	/* 0x0F  - 9Mbps   */
};
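
/*
 * Example read of the table above (the HW rate code is the index, the
 * value indexes the rate table registered with mac80211, i.e.
 * ath10k_rates[]): HW code 0x0B (6Mbps OFDM) maps to index 4 and HW
 * code 0x00 (11Mbps CCK) maps to index 3.
 */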

static void ath10k_htt_rx_h_rates(struct ath10k *ar,
				  enum ieee80211_band band,
				  u8 info0, u32 info1, u32 info2,
				  struct ieee80211_rx_status *status)
{
	u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
	u8 preamble = 0;

	/* Check if valid fields */
	if (!(info0 & HTT_RX_INDICATION_INFO0_START_VALID))
		return;

	preamble = MS(info1, HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE);

	switch (preamble) {
	case HTT_RX_LEGACY:
		cck = info0 & HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK;
		rate = MS(info0, HTT_RX_INDICATION_INFO0_LEGACY_RATE);
		rate_idx = 0;

		if (rate < 0x08 || rate > 0x0F)
			break;

		switch (band) {
		case IEEE80211_BAND_2GHZ:
			if (cck)
				rate &= ~BIT(3);
			rate_idx = rx_legacy_rate_idx[rate];
			break;
		case IEEE80211_BAND_5GHZ:
			rate_idx = rx_legacy_rate_idx[rate];
			/* We are using the same rate table that was
			   registered with the HW - ath10k_rates[]. For
			   5GHz skip the CCK rates, hence the -4 here */
			rate_idx -= 4;
			break;
		default:
			break;
		}

		status->rate_idx = rate_idx;
		break;
	case HTT_RX_HT:
	case HTT_RX_HT_WITH_TXBF:
		/* HT-SIG - Table 20-11 in info1 and info2 */
		mcs = info1 & 0x1F;
		nss = mcs >> 3;
		bw = (info1 >> 7) & 1;
		sgi = (info2 >> 7) & 1;

		status->rate_idx = mcs;
		status->flag |= RX_FLAG_HT;
		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;
		if (bw)
			status->flag |= RX_FLAG_40MHZ;
		break;
	case HTT_RX_VHT:
	case HTT_RX_VHT_WITH_TXBF:
		/* VHT-SIG-A1 in info1, VHT-SIG-A2 in info2
		   TODO check this */
		mcs = (info2 >> 4) & 0x0F;
		nss = ((info1 >> 10) & 0x07) + 1;
		bw = info1 & 3;
		sgi = info2 & 1;

		status->rate_idx = mcs;
		status->vht_nss = nss;

		if (sgi)
			status->flag |= RX_FLAG_SHORT_GI;

		switch (bw) {
		/* 20MHZ */
		case 0:
			break;
		/* 40MHZ */
		case 1:
			status->flag |= RX_FLAG_40MHZ;
			break;
		/* 80MHZ */
		case 2:
			status->vht_flag |= RX_VHT_FLAG_80MHZ;
		}

		status->flag |= RX_FLAG_VHT;
		break;
	default:
		break;
	}
}

static void ath10k_htt_rx_h_protected(struct ath10k_htt *htt,
				      struct ieee80211_rx_status *rx_status,
				      struct sk_buff *skb,
				      enum htt_rx_mpdu_encrypt_type enctype,
				      enum rx_msdu_decap_format fmt,
				      bool dot11frag)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	rx_status->flag &= ~(RX_FLAG_DECRYPTED |
			     RX_FLAG_IV_STRIPPED |
			     RX_FLAG_MMIC_STRIPPED);

	if (enctype == HTT_RX_MPDU_ENCRYPT_NONE)
		return;

	/*
	 * There's no explicit rx descriptor flag to indicate whether a given
	 * frame has been decrypted or not. We're forced to use the decap
	 * format as an implicit indication. However fragmentation rx is always
	 * raw and it probably never reports undecrypted raws.
	 *
	 * This makes sure sniffed frames are reported as-is without stripping
	 * the protected flag.
	 */
	if (fmt == RX_MSDU_DECAP_RAW && !dot11frag)
		return;

	rx_status->flag |= RX_FLAG_DECRYPTED |
			   RX_FLAG_IV_STRIPPED |
			   RX_FLAG_MMIC_STRIPPED;
	hdr->frame_control = __cpu_to_le16(__le16_to_cpu(hdr->frame_control) &
					   ~IEEE80211_FCTL_PROTECTED);
}

static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
				    struct ieee80211_rx_status *status)
{
	struct ieee80211_channel *ch;

	spin_lock_bh(&ar->data_lock);
	ch = ar->scan_channel;
	if (!ch)
		ch = ar->rx_channel;
	spin_unlock_bh(&ar->data_lock);

	if (!ch)
		return false;

	status->band = ch->band;
	status->freq = ch->center_freq;

	return true;
}

static const char * const tid_to_ac[] = {
	"BE",
	"BK",
	"BK",
	"BE",
	"VI",
	"VI",
	"VO",
	"VO",
};

static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
	u8 *qc;
	int tid;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return "";

	qc = ieee80211_get_qos_ctl(hdr);
	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
	if (tid < 8)
		snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
	else
		snprintf(out, size, "tid %d", tid);

	return out;
}

static void ath10k_process_rx(struct ath10k *ar,
			      struct ieee80211_rx_status *rx_status,
			      struct sk_buff *skb)
{
	struct ieee80211_rx_status *status;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	char tid[32];

	status = IEEE80211_SKB_RXCB(skb);
	*status = *rx_status;

	ath10k_dbg(ar, ATH10K_DBG_DATA,
		   "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   skb,
		   skb->len,
		   ieee80211_get_SA(hdr),
		   ath10k_get_tid(hdr, tid, sizeof(tid)),
		   is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
							"mcast" : "ucast",
		   (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
		   status->flag == 0 ? "legacy" : "",
		   status->flag & RX_FLAG_HT ? "ht" : "",
		   status->flag & RX_FLAG_VHT ? "vht" : "",
		   status->flag & RX_FLAG_40MHZ ? "40" : "",
		   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
		   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->vht_nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
			skb->data, skb->len);

	ieee80211_rx(ar->hw, skb);
}

static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
{
	/* nwifi header is padded to 4 bytes. this fixes 4addr rx */
	return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
}
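
/*
 * Worked example for the helper above: a 3-address QoS data header is
 * 26 bytes and is padded to 28 in the nwifi decap format, while a
 * 4-address QoS data header is already 32 bytes and needs no padding.
 */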

static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
				struct ieee80211_rx_status *rx_status,
				struct sk_buff *skb_in)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_desc *rxd;
	struct sk_buff *skb = skb_in;
	struct sk_buff *first;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct ieee80211_hdr *hdr;
	u8 hdr_buf[64], da[ETH_ALEN], sa[ETH_ALEN], *qos;
	unsigned int hdr_len;

	rxd = (void *)skb->data - sizeof(*rxd);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);

	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	memcpy(hdr_buf, hdr, hdr_len);
	hdr = (struct ieee80211_hdr *)hdr_buf;

	first = skb;
	while (skb) {
		void *decap_hdr;
		int len;

		rxd = (void *)skb->data - sizeof(*rxd);
		fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
			 RX_MSDU_START_INFO1_DECAP_FORMAT);
		decap_hdr = (void *)rxd->rx_hdr_status;

		skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

		/* First frame in an A-MSDU chain has more decapped data. */
		if (skb == first) {
			len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
			len += round_up(ath10k_htt_rx_crypto_param_len(ar,
						enctype), 4);
			decap_hdr += len;
		}

		switch (fmt) {
		case RX_MSDU_DECAP_RAW:
			/* remove trailing FCS */
			skb_trim(skb, skb->len - FCS_LEN);
			break;
		case RX_MSDU_DECAP_NATIVE_WIFI:
			/* pull decapped header and copy SA & DA */
			hdr = (struct ieee80211_hdr *)skb->data;
			hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
			ether_addr_copy(da, ieee80211_get_DA(hdr));
			ether_addr_copy(sa, ieee80211_get_SA(hdr));
			skb_pull(skb, hdr_len);

			/* push original 802.11 header */
			hdr = (struct ieee80211_hdr *)hdr_buf;
			hdr_len = ieee80211_hdrlen(hdr->frame_control);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);

			/* original A-MSDU header has the bit set but we're
			 * not including A-MSDU subframe header */
			hdr = (struct ieee80211_hdr *)skb->data;
			qos = ieee80211_get_qos_ctl(hdr);
			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;

			/* original 802.11 header has a different DA and in
			 * case of 4addr it may also have different SA
			 */
			ether_addr_copy(ieee80211_get_DA(hdr), da);
			ether_addr_copy(ieee80211_get_SA(hdr), sa);
			break;
		case RX_MSDU_DECAP_ETHERNET2_DIX:
			/* strip ethernet header and insert decapped 802.11
			 * header, amsdu subframe header and rfc1042 header */

			len = 0;
			len += sizeof(struct rfc1042_hdr);
			len += sizeof(struct amsdu_subframe_hdr);

			skb_pull(skb, sizeof(struct ethhdr));
			memcpy(skb_push(skb, len), decap_hdr, len);
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		case RX_MSDU_DECAP_8023_SNAP_LLC:
			/* insert decapped 802.11 header making a single
			 * A-MSDU */
			memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
			break;
		}

		skb_in = skb;
		ath10k_htt_rx_h_protected(htt, rx_status, skb_in, enctype, fmt,
					  false);
		skb = skb->next;
		skb_in->next = NULL;

		if (skb)
			rx_status->flag |= RX_FLAG_AMSDU_MORE;
		else
			rx_status->flag &= ~RX_FLAG_AMSDU_MORE;

		ath10k_process_rx(htt->ar, rx_status, skb_in);
	}

	/* FIXME: It might be nice to re-assemble the A-MSDU when there's a
	 * monitor interface active for sniffing purposes. */
}

static void ath10k_htt_rx_msdu(struct ath10k_htt *htt,
			       struct ieee80211_rx_status *rx_status,
			       struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	struct htt_rx_desc *rxd;
	struct ieee80211_hdr *hdr;
	enum rx_msdu_decap_format fmt;
	enum htt_rx_mpdu_encrypt_type enctype;
	int hdr_len;
	void *rfc1042;

	/* This shouldn't happen. If it does then it may be a FW bug. */
	if (skb->next) {
		ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
		ath10k_htt_rx_free_msdu_chain(skb->next);
		skb->next = NULL;
	}

	rxd = (void *)skb->data - sizeof(*rxd);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);
	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);

	switch (fmt) {
	case RX_MSDU_DECAP_RAW:
		/* remove trailing FCS */
		skb_trim(skb, skb->len - FCS_LEN);
		break;
	case RX_MSDU_DECAP_NATIVE_WIFI:
		/* Pull decapped header */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
		skb_pull(skb, hdr_len);

		/* Push original header */
		hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_ETHERNET2_DIX:
		/* strip ethernet header and insert decapped 802.11 header and
		 * rfc1042 header */

		rfc1042 = hdr;
		rfc1042 += roundup(hdr_len, 4);
		rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
					enctype), 4);

		skb_pull(skb, sizeof(struct ethhdr));
		memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
		       rfc1042, sizeof(struct rfc1042_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	case RX_MSDU_DECAP_8023_SNAP_LLC:
		/* remove A-MSDU subframe header and insert
		 * decapped 802.11 header. rfc1042 header is already there */

		skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
		memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
		break;
	}

	ath10k_htt_rx_h_protected(htt, rx_status, skb, enctype, fmt, false);

	ath10k_process_rx(htt->ar, rx_status, skb);
}

static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
	struct htt_rx_desc *rxd;
	u32 flags, info;
	bool is_ip4, is_ip6;
	bool is_tcp, is_udp;
	bool ip_csum_ok, tcpudp_csum_ok;

	rxd = (void *)skb->data - sizeof(*rxd);
	flags = __le32_to_cpu(rxd->attention.flags);
	info = __le32_to_cpu(rxd->msdu_start.info1);

	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
	is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
	is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
	ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
	tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);

	if (!is_ip4 && !is_ip6)
		return CHECKSUM_NONE;
	if (!is_tcp && !is_udp)
		return CHECKSUM_NONE;
	if (!ip_csum_ok)
		return CHECKSUM_NONE;
	if (!tcpudp_csum_ok)
		return CHECKSUM_NONE;

	return CHECKSUM_UNNECESSARY;
}
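
/*
 * Usage sketch for the helper above (this mirrors what the rx paths in
 * this file already do): the result goes into skb->ip_summed, so the
 * stack only skips software checksum validation when the HW verified
 * both the IP and the TCP/UDP checksums:
 *
 *	skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
 */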

static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
{
	struct sk_buff *next = msdu_head->next;
	struct sk_buff *to_free = next;
	int space;
	int total_len = 0;

	/* TODO:  Might be able to optimize this by using
	 * skb_try_coalesce or similar method to
	 * decrease copying, or maybe get mac80211 to
	 * provide a way to just receive a list of
	 * skb?
	 */

	msdu_head->next = NULL;

	/* Allocate total length all at once. */
	while (next) {
		total_len += next->len;
		next = next->next;
	}

	space = total_len - skb_tailroom(msdu_head);
	if ((space > 0) &&
	    (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
		/* TODO:  bump some rx-oom error stat */
		/* put it back together so we can free the
		 * whole list at once.
		 */
		msdu_head->next = to_free;
		return -1;
	}

	/* Walk list again, copying contents into
	 * msdu_head
	 */
	next = to_free;
	while (next) {
		skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
					  next->len);
		next = next->next;
	}

	/* If here, we have consolidated skb.  Free the
	 * fragments and pass the main skb on up the
	 * stack.
	 */
	ath10k_htt_rx_free_msdu_chain(to_free);
	return 0;
}

static bool ath10k_htt_rx_amsdu_allowed(struct ath10k_htt *htt,
					struct sk_buff *head,
					enum htt_rx_mpdu_status status,
					bool channel_set,
					u32 attention)
{
	struct ath10k *ar = htt->ar;

	if (head->len == 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx dropping due to zero-len\n");
		return false;
	}

	if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx dropping due to decrypt-err\n");
		return false;
	}

	if (!channel_set) {
		ath10k_warn(ar, "no channel configured; ignoring frame!\n");
		return false;
	}

	/* Skip mgmt frames while we handle this in WMI */
	if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
	    attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
		return false;
	}

	if (status != HTT_RX_IND_MPDU_STATUS_OK &&
	    status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
	    status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
	    !htt->ar->monitor_started) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx ignoring frame w/ status %d\n",
			   status);
		return false;
	}

	if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "htt rx CAC running\n");
		return false;
	}

	return true;
}

static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
				  struct htt_rx_indication *rx)
{
	struct ath10k *ar = htt->ar;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct htt_rx_indication_mpdu_range *mpdu_ranges;
	struct htt_rx_desc *rxd;
	enum htt_rx_mpdu_status status;
	struct ieee80211_hdr *hdr;
	int num_mpdu_ranges;
	u32 attention;
	int fw_desc_len;
	u8 *fw_desc;
	bool channel_set;
	int i, j;
	int ret;

	lockdep_assert_held(&htt->rx_ring.lock);

	fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
	fw_desc = (u8 *)&rx->fw_desc;

	num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
			     HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
	mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);

	/* Fill this once, while this is per-ppdu */
	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_START_VALID) {
		memset(rx_status, 0, sizeof(*rx_status));
		rx_status->signal  = ATH10K_DEFAULT_NOISE_FLOOR +
				     rx->ppdu.combined_rssi;
	}

	if (rx->ppdu.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
		/* TSF available only in 32-bit */
		rx_status->mactime = __le32_to_cpu(rx->ppdu.tsf) & 0xffffffff;
		rx_status->flag |= RX_FLAG_MACTIME_END;
	}

	channel_set = ath10k_htt_rx_h_channel(htt->ar, rx_status);

	if (channel_set) {
		ath10k_htt_rx_h_rates(htt->ar, rx_status->band,
				      rx->ppdu.info0,
				      __le32_to_cpu(rx->ppdu.info1),
				      __le32_to_cpu(rx->ppdu.info2),
				      rx_status);
	}

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
			rx, sizeof(*rx) +
			(sizeof(struct htt_rx_indication_mpdu_range) *
				num_mpdu_ranges));

	for (i = 0; i < num_mpdu_ranges; i++) {
		status = mpdu_ranges[i].mpdu_range_status;

		for (j = 0; j < mpdu_ranges[i].mpdu_count; j++) {
			struct sk_buff *msdu_head, *msdu_tail;

			attention = 0;
			msdu_head = NULL;
			msdu_tail = NULL;
			ret = ath10k_htt_rx_amsdu_pop(htt,
						      &fw_desc,
						      &fw_desc_len,
						      &msdu_head,
						      &msdu_tail,
						      &attention);

			if (ret < 0) {
				ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
					    ret);
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			rxd = container_of((void *)msdu_head->data,
					   struct htt_rx_desc,
					   msdu_payload);

			if (!ath10k_htt_rx_amsdu_allowed(htt, msdu_head,
							 status,
							 channel_set,
							 attention)) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (ret > 0 &&
			    ath10k_unchain_msdu(msdu_head) < 0) {
				ath10k_htt_rx_free_msdu_chain(msdu_head);
				continue;
			}

			if (attention & RX_ATTENTION_FLAGS_FCS_ERR)
				rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
			else
				rx_status->flag &= ~RX_FLAG_FAILED_FCS_CRC;

			if (attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR)
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
			else
				rx_status->flag &= ~RX_FLAG_MMIC_ERROR;

			hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);

			if (ath10k_htt_rx_hdr_is_amsdu(hdr))
				ath10k_htt_rx_amsdu(htt, rx_status, msdu_head);
			else
				ath10k_htt_rx_msdu(htt, rx_status, msdu_head);
		}
	}

	tasklet_schedule(&htt->rx_replenish_task);
}

static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
				       struct htt_rx_fragment_indication *frag)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *msdu_head, *msdu_tail;
	enum htt_rx_mpdu_encrypt_type enctype;
	struct htt_rx_desc *rxd;
	enum rx_msdu_decap_format fmt;
	struct ieee80211_rx_status *rx_status = &htt->rx_status;
	struct ieee80211_hdr *hdr;
	int ret;
	bool tkip_mic_err;
	bool decrypt_err;
	u8 *fw_desc;
	int fw_desc_len, hdrlen, paramlen;
	int trim;
	u32 attention = 0;

	fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
	fw_desc = (u8 *)frag->fw_msdu_rx_desc;

	msdu_head = NULL;
	msdu_tail = NULL;

	spin_lock_bh(&htt->rx_ring.lock);
	ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
				      &msdu_head, &msdu_tail,
				      &attention);
	spin_unlock_bh(&htt->rx_ring.lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");

	if (ret) {
		ath10k_warn(ar, "failed to pop amsdu from htt rx ring for fragmented rx %d\n",
			    ret);
		ath10k_htt_rx_free_msdu_chain(msdu_head);
		return;
	}

	/* FIXME: implement signal strength */
	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	hdr = (struct ieee80211_hdr *)msdu_head->data;
	rxd = (void *)msdu_head->data - sizeof(*rxd);
	tkip_mic_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
	decrypt_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
	fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
		 RX_MSDU_START_INFO1_DECAP_FORMAT);

	if (fmt != RX_MSDU_DECAP_RAW) {
		ath10k_warn(ar, "we don't support non-raw fragmented rx yet\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
		     RX_MPDU_START_INFO0_ENCRYPT_TYPE);
	ath10k_htt_rx_h_protected(htt, rx_status, msdu_head, enctype, fmt,
				  true);
	msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);

	if (tkip_mic_err)
		ath10k_warn(ar, "tkip mic error\n");

	if (decrypt_err) {
		ath10k_warn(ar, "decryption err in fragmented rx\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
		hdrlen = ieee80211_hdrlen(hdr->frame_control);
		paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);

		/* It is more efficient to move the header than the payload */
		memmove((void *)msdu_head->data + paramlen,
			(void *)msdu_head->data,
			hdrlen);
		skb_pull(msdu_head, paramlen);
		hdr = (struct ieee80211_hdr *)msdu_head->data;
	}

	/* remove trailing FCS */
	trim  = 4;

	/* remove crypto trailer */
	trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);

	/* last fragment of TKIP frags has MIC */
	if (!ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
		trim += 8;

	if (trim > msdu_head->len) {
		ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
		dev_kfree_skb_any(msdu_head);
		goto end;
	}

	skb_trim(msdu_head, msdu_head->len - trim);

	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
			msdu_head->data, msdu_head->len);
	ath10k_process_rx(htt->ar, rx_status, msdu_head);

end:
	if (fw_desc_len > 0) {
		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "expecting more fragmented rx in one indication %d\n",
			   fw_desc_len);
	}
}

static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
				       struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;
	struct htt_tx_done tx_done = {};
	int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
	__le16 msdu_id;
	int i;

	lockdep_assert_held(&htt->tx_lock);

	switch (status) {
	case HTT_DATA_TX_STATUS_NO_ACK:
		tx_done.no_ack = true;
		break;
	case HTT_DATA_TX_STATUS_OK:
		break;
	case HTT_DATA_TX_STATUS_DISCARD:
	case HTT_DATA_TX_STATUS_POSTPONE:
	case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
		tx_done.discard = true;
		break;
	default:
		ath10k_warn(ar, "unhandled tx completion status %d\n", status);
		tx_done.discard = true;
		break;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
		   resp->data_tx_completion.num_msdus);

	for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
		msdu_id = resp->data_tx_completion.msdus[i];
		tx_done.msdu_id = __le16_to_cpu(msdu_id);
		ath10k_txrx_tx_unref(htt, &tx_done);
	}
}

static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_addba *ev = &resp->rx_addba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx addba tid %hu peer_id %hu size %hhu\n",
		   tid, peer_id, ev->window_size);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
		   peer->addr, tid, ev->window_size);

	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
	struct htt_rx_delba *ev = &resp->rx_delba;
	struct ath10k_peer *peer;
	struct ath10k_vif *arvif;
	u16 info0, tid, peer_id;

	info0 = __le16_to_cpu(ev->info0);
	tid = MS(info0, HTT_RX_BA_INFO0_TID);
	peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx delba tid %hu peer_id %hu\n",
		   tid, peer_id);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find_by_id(ar, peer_id);
	if (!peer) {
		ath10k_warn(ar, "received delba event for invalid peer_id: %hu\n",
			    peer_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	arvif = ath10k_get_arvif(ar, peer->vdev_id);
	if (!arvif) {
		ath10k_warn(ar, "received delba event for invalid vdev_id: %u\n",
			    peer->vdev_id);
		spin_unlock_bh(&ar->data_lock);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt rx stop rx ba session sta %pM tid %hu\n",
		   peer->addr, tid);

	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
	spin_unlock_bh(&ar->data_lock);
}

void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_resp *resp = (struct htt_resp *)skb->data;

	/* confirm alignment */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn(ar, "unaligned htt message, expect trouble\n");

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
		   resp->hdr.msg_type);
	switch (resp->hdr.msg_type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF: {
		htt->target_version_major = resp->ver_resp.major;
		htt->target_version_minor = resp->ver_resp.minor;
		complete(&htt->target_version_received);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_IND:
		spin_lock_bh(&htt->rx_ring.lock);
		__skb_queue_tail(&htt->rx_compl_q, skb);
		spin_unlock_bh(&htt->rx_ring.lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_PEER_MAP: {
		struct htt_peer_map_event ev = {
			.vdev_id = resp->peer_map.vdev_id,
			.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
		};
		memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
		ath10k_peer_map_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
		struct htt_peer_unmap_event ev = {
			.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
		};
		ath10k_peer_unmap_event(htt, &ev);
		break;
	}
	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
		struct htt_tx_done tx_done = {};
		int status = __le32_to_cpu(resp->mgmt_tx_completion.status);

		tx_done.msdu_id =
			__le32_to_cpu(resp->mgmt_tx_completion.desc_id);

		switch (status) {
		case HTT_MGMT_TX_STATUS_OK:
			break;
		case HTT_MGMT_TX_STATUS_RETRY:
			tx_done.no_ack = true;
			break;
		case HTT_MGMT_TX_STATUS_DROP:
			tx_done.discard = true;
			break;
		}

		spin_lock_bh(&htt->tx_lock);
		ath10k_txrx_tx_unref(htt, &tx_done);
		spin_unlock_bh(&htt->tx_lock);
		break;
	}
	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
		spin_lock_bh(&htt->tx_lock);
		__skb_queue_tail(&htt->tx_compl_q, skb);
		spin_unlock_bh(&htt->tx_lock);
		tasklet_schedule(&htt->txrx_compl_task);
		return;
	case HTT_T2H_MSG_TYPE_SEC_IND: {
		struct ath10k *ar = htt->ar;
		struct htt_security_indication *ev = &resp->security_indication;

		ath10k_dbg(ar, ATH10K_DBG_HTT,
			   "sec ind peer_id %d unicast %d type %d\n",
			  __le16_to_cpu(ev->peer_id),
			  !!(ev->flags & HTT_SECURITY_IS_UNICAST),
			  MS(ev->flags, HTT_SECURITY_TYPE));
		complete(&ar->install_key_done);
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
		break;
	}
	case HTT_T2H_MSG_TYPE_TEST:
		/* FIX THIS */
		break;
	case HTT_T2H_MSG_TYPE_STATS_CONF:
		trace_ath10k_htt_stats(ar, skb->data, skb->len);
		break;
	case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
		/* Firmware can return tx frames if it's unable to fully
		 * process them and suspects host may be able to fix it. ath10k
		 * sends all tx frames as already inspected so this shouldn't
		 * happen unless fw has a bug.
		 */
		ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
		break;
	case HTT_T2H_MSG_TYPE_RX_ADDBA:
		ath10k_htt_rx_addba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_RX_DELBA:
		ath10k_htt_rx_delba(ar, resp);
		break;
	case HTT_T2H_MSG_TYPE_PKTLOG: {
		struct ath10k_pktlog_hdr *hdr =
			(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;

		trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
					sizeof(*hdr) +
					__le16_to_cpu(hdr->size));
		break;
	}
	case HTT_T2H_MSG_TYPE_RX_FLUSH: {
		/* Ignore this event because mac80211 takes care of Rx
		 * aggregation reordering.
		 */
		break;
	}
	default:
		ath10k_warn(ar, "htt event (%d) not handled\n",
			    resp->hdr.msg_type);
		ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
				skb->data, skb->len);
		break;
	};

	/* Free the indication buffer */
	dev_kfree_skb_any(skb);
}

static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
	struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
	struct htt_resp *resp;
	struct sk_buff *skb;

	spin_lock_bh(&htt->tx_lock);
	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->tx_lock);

	spin_lock_bh(&htt->rx_ring.lock);
	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
		resp = (struct htt_resp *)skb->data;
		ath10k_htt_rx_handler(htt, &resp->rx_ind);
		dev_kfree_skb_any(skb);
	}
	spin_unlock_bh(&htt->rx_ring.lock);
}