/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

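/* Encode a tx queue depth given in bytes into the 8-bit HTT tx queue state
 * format: an exponent/factor pair where depth ~= factor * 128 * 8^exp.
 * Depths too large to encode saturate to 0xff and any non-zero depth is
 * reported with a non-zero factor.
 */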
static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

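/* Recalculate the HTT tx queue state entry for one mac80211 txq: resolve the
 * peer id and tid, encode the current queue depth and update the matching
 * count and bitmap entries in tx_q_state. The caller must hold htt->tx_lock;
 * the change is pushed to the device by __ath10k_htt_tx_txq_sync().
 */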
static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
			    peer_id, tid);
		return;
	}

	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
		   peer_id, tid, count);
}

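/* Commit pending tx queue state updates: bump the sequence number and sync
 * the whole tx_q_state buffer towards the device.
 */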
static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

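/* num_pending_tx counts frames currently handed to the firmware. When the
 * limit is hit the mac tx queues are paused with ATH10K_TX_PAUSE_Q_FULL and
 * unpaused again as soon as a slot frees up.
 */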
void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

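/* Allocate an msdu_id for a frame via the pending_tx IDR. The id is carried
 * in the HTT tx descriptor and is used to look the skb up again when the tx
 * completion arrives.
 */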
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr)
		return;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
	htt->txbuf.vaddr = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
	htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
					      GFP_KERNEL);
	if (!htt->txbuf.vaddr)
		return -ENOMEM;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
			sizeof(struct htt_msdu_ext_desc_64);

	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

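/* With peer flow control capable firmware, allocate the host-side tx queue
 * state and DMA-map it so its physical address can be handed to the firmware
 * in the frag desc bank config message.
 */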
static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = htt->tx_ops->htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	htt->tx_ops->htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_tx_free_cont_txbuf(htt);

	return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);

	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_tx_free_cont_txbuf(htt);
	ath10k_htt_tx_free_txq(htt);
	htt->tx_ops->htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

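/* Tell the firmware where the host-allocated fragment descriptor bank lives
 * (32-bit addressing variant) and, when peer flow control is used, where the
 * tx queue state buffer is.
 */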
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] =  __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

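/* The rx ring setup message carries the offsets of the individual sections
 * within struct htt_rx_desc, expressed in 4-byte words, describing the host
 * rx descriptor layout to the firmware.
 */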
static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
	struct htt_rx_ring_setup_ring32 *ring =
			(struct htt_rx_ring_setup_ring32 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
	struct htt_rx_ring_setup_ring64 *ring =
			(struct htt_rx_ring_setup_ring64 *)rx_ring;

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

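/* Pick the vdev id for an outgoing frame: off-channel frames use the scan
 * vdev, frames with a vif use that vif's vdev, otherwise fall back to the
 * monitor vdev (or 0).
 */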
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

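/* Data path tx: build the HTT tx descriptor and fragment list for an msdu
 * and hand them, together with a prefetch of the frame itself, to the HIF
 * layer as a scatter-gather transfer, bypassing HTC.
 */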
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
		  struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = &htt->txbuf.vaddr[msdu_id];
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf) * msdu_id);

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
		   txmode == ATH10K_HW_TXRX_RAW &&
		   ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* pass through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr =  htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
	return res;
}

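/* 32-bit and 64-bit descriptor variants of the tx helpers; the set matching
 * the target's addressing width is selected at init time.
 */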
static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}