/******************************************************************************
 *
 * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
#include "iwl-trans-pcie-int.h"
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					   struct iwl_tx_queue *txq,
					   u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;
	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;
	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}
			iwl_write_direct32(trans, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(trans, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

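/*
 * TFD transmit buffer (TB) helpers. Each TB entry packs a 36-bit DMA
 * address and a 12-bit length: the low 32 address bits live in 'lo',
 * while 'hi_n_len' carries the top 4 address bits in its low nibble and
 * the length in its upper 12 bits.
 */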
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

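/*
 * Unmap every DMA mapping referenced by a TFD: the first TB (the command
 * header, mapped bidirectionally) and any additional data chunks.
 */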
static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	lockdep_assert_held(&txq->lock);

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];
		/* Can be called from irqs-disabled context.
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

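/*
 * Reset the byte-count table entry for the TFD at read_ptr (and its
 * duplicate in the wrap-around region) to a minimal length of 1.
 */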
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

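/*
 * Map a receiver-address/TID pair to a scheduler queue. The translation
 * table packs two 16-bit entries per dword: even-numbered queues use the
 * low half-word, odd-numbered queues the high half-word.
 */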
static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(trans,
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

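/*
 * Point both the hardware write pointer and the scheduler's read pointer
 * of a Tx queue at the given index.
 */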
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	IWL_DEBUG_TX_QUEUES(trans, "Q %d  WrPtr: %d", txq_id, index & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
}

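/*
 * Bind a Tx queue to a Tx FIFO and (de)activate it in the scheduler;
 * scd_retry marks the queue as an aggregation (BA) queue.
 */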
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	if (active)
		IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
	else
		IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n",
			scd_retry ? "BA" : "AC/CMD", txq_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues);
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit, u16 ssn)
{
	int tx_fifo, txq_id;
	u16 ra_tid;
	unsigned long flags;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);
	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because queues 0-6 should already
 * be in use as EDCA AC (0-3), Command (4), and reserved (5, 6).
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	trans_pcie->agg_txq[sta_id][tid] = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	return 0;
}
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id));
	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);
	iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport layer data
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}
	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;
	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}
	spin_lock_bh(&txq->lock);
	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);
		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter queue is full\n");
			iwl_op_mode_nic_error(trans->op_mode);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	/* set up the header */
	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}
	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);
	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(trans->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}
	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif
	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);
 out:
	spin_unlock_bh(&txq->lock);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	lockdep_assert_held(&txq->lock);

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwl_op_mode_nic_error(trans->op_mode);
		}
	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}
	spin_lock(&txq->lock);

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];
	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);
	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
		meta->source->handler_status = handler_status;
	}
	iwl_hcmd_queue_reclaim(trans, txq_id, index);
	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}
	meta->flags = 0;
	spin_unlock(&txq->lock);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

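/* Fire-and-forget variant: enqueue the command and return immediately. */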
static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;


	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

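/*
 * Send a host command and block until the firmware responds or
 * HOST_COMPLETE_TIMEOUT expires; with CMD_WANT_SKB the response packet is
 * handed back through cmd->resp_pkt.
 */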
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);
	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		return -ECANCELED;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		return -EIO;
	}
	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans->shrd->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				 "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}
	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

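/*
 * Illustrative sketch only (not taken from this file): a typical caller
 * fills an iwl_host_cmd, optionally requests the response packet with
 * CMD_WANT_SKB, and releases it with iwl_free_resp(). The command id used
 * here (REPLY_ECHO) is just an example.
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_ECHO,
 *		.flags = CMD_WANT_SKB,
 *	};
 *	int ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 *	if (!ret)
 *		iwl_free_resp(&cmd);
 */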
int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);
	return iwl_send_cmd_sync(trans, cmd);
}

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;
	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	lockdep_assert_held(&txq->lock);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
1054
		return 0;
1055 1056 1057
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
1058
		return 0;
1059 1060 1061 1062 1063

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

1064
		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
1065 1066
			continue;

1067
		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
1068

1069
		txq->skbs[txq->q.read_ptr] = NULL;
1070

1071
		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
1072

1073
		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
1074
		freed++;
1075
	}
1076
	return freed;
1077
}