/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-trans-pcie-int.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/**
 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					   struct iwl_tx_queue *txq,
					   u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = tx_cmd->sta_id;
	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
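
/*
 * Illustrative note (added in this edit, not from the original driver):
 * bc_ent packs the 12-bit byte count into bits 0-11 and the station id
 * into bits 12-15, e.g. sta_id = 3 and len = 0x200 yield
 * bc_ent = cpu_to_le16(0x3200). Entries below TFD_QUEUE_SIZE_BC_DUP are
 * mirrored past TFD_QUEUE_SIZE_MAX, presumably so the scheduler can read
 * ahead in the table without wrapping.
 */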

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (hw_params(trans).shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(trans,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
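
/*
 * Worked example (added in this edit, not from the original driver):
 * for a 36-bit DMA address addr = 0x923456780 and len = 0x140,
 * iwl_tfd_set_tb() stores tb->lo = 0x23456780 and
 * tb->hi_n_len = (0x140 << 4) | 0x9 = 0x1409: the low nibble carries
 * address bits 32-35, the upper 12 bits carry the length.
 */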

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
		     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
	int i;
	int num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, this is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(bus(trans)->dev,
				dma_unmap_addr(meta, mapping),
				dma_unmap_len(meta, len),
				DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), dma_dir);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [index]
 * @trans - transport private data
 * @txq - tx queue
 * @index - the index of the TFD to be freed
 * @dma_dir - the direction of the DMA mapping
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index, enum dma_data_direction dma_dir)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir);

	/* free SKB */
	if (txq->skbs) {
		struct sk_buff *skb;

		skb = txq->skbs[index];

		/* Can be called from an irqs-disabled context. If the skb is
		 * not NULL, it means that the whole queue is being freed and
		 * that the queue is not empty - free the skb.
		 */
		if (skb) {
			iwl_free_skb(priv(trans), skb);
			txq->skbs[index] = NULL;
		}
	}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
		return -EINVAL;

	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(trans, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For the Tx queue, there are low-mark and high-mark limits. If, after
 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
 * is stopped. When reclaiming packets (on the 'Tx done' IRQ), if the free
 * space becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
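
/*
 * Worked example (added in this edit, not from the original driver):
 * with n_bd = 256 and n_window = 64, read_ptr = 250 and write_ptr = 5
 * (the writer has wrapped, 11 entries in flight) give
 * s = 245 - 256 = -11, then s += 64 -> 53, minus the reserve of 2,
 * so iwl_queue_space() returns 51 usable slots.
 */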

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	if (WARN_ON(!is_power_of_2(count)))
		return -EINVAL;

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
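
/*
 * Illustrative note (added in this edit, not from the original driver):
 * for a typical data queue with slots_num = 64 this yields
 * low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8; for the smallest
 * allowed windows the clamps keep low_mark >= 4 and high_mark >= 2.
 */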

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					  struct iwl_tx_queue *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans->shrd->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);

	return 0;
}
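
/*
 * Illustrative note (added in this edit, not from the original driver):
 * the translation table packs two 16-bit RA/TID map entries per 32-bit
 * word, so an odd txq_id lands in the upper half-word and an even one
 * in the lower half-word of the same target-memory dword.
 */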

static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(bus(trans),
		SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
				int txq_id, u32 index)
{
	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
}

void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int active =
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}

static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
				    u8 ctx, u16 tid)
{
	const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
		return ac_to_fifo[tid_to_ac[tid]];

	/* no support for TIDs 8-15 yet */
	return -EINVAL;
}

static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
{
	if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
		return false;
	return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
		hw_params(trans).num_ampdu_queues);
}

void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
				 enum iwl_rxon_context_id ctx, int sta_id,
				 int tid, int frame_limit, u16 ssn)
{
	int tx_fifo, txq_id;
	u16 ra_tid;
	unsigned long flags;

	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
		return;

	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
	if (WARN_ON(tx_fifo < 0)) {
		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
		return;
	}

	txq_id = trans_pcie->agg_txq[sta_id][tid];
	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return;
	}

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);

	trans_pcie->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].tid = tid;

	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}

/*
 * Find first available (lowest unused) Tx Queue, mark it "active".
 * Called only when finding queue for aggregation.
 * Should never return anything < 7, because they should already
 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
	return -1;
}

int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	txq_id = iwlagn_txq_ctx_activate_free(trans);
	if (txq_id == -1) {
		IWL_ERR(trans, "No free aggregation queue available\n");
		return -ENXIO;
	}

	trans_pcie->agg_txq[sta_id][tid] = txq_id;
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);

	return 0;
}

int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 txq_id = trans_pcie->agg_txq[sta_id][tid];

	if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
		IWL_ERR(trans,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			hw_params(trans).num_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));

	trans_pcie->agg_txq[sta_id][tid] = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);

	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the uCode command structure
 *
 * The function returns < 0 values to indicate that the operation
 * failed. On success, it returns the index of the command in the
 * command queue.
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	u32 idx;
	u16 copy_size, cmd_size;
	bool is_ct_kill = false;
	bool had_nocopy = false;
	int i;
	u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
	int trace_idx;
#endif

	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_WARN(trans, "fw recovery, no hcmd send\n");
		return -EIO;
	}

	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
	    !(cmd->flags & CMD_ON_DEMAND)) {
		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
		return -EIO;
	}

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy))
				return -EINVAL;
			copy_size += cmd->len[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
		return -EINVAL;

	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
		IWL_WARN(trans, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
		return -EIO;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_irqrestore(&trans->hcmd_lock, flags);

		IWL_ERR(trans, "No space in command queue\n");
		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
		if (!is_ct_kill) {
			IWL_ERR(trans, "Restarting adapter due to queue full\n");
			iwlagn_fw_error(priv(trans), false);
		}
		return -ENOSPC;
	}

	idx = get_cmd_index(q, q->write_ptr);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */

	out_cmd->hdr.cmd = cmd->id;
	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence =
		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
					 INDEX_TO_SEQ(q->write_ptr));

	/* and copy the data that needs to be copied */

	cmd_dest = out_cmd->payload;
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
			break;
		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
		cmd_dest += cmd->len[i];
	}

	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
			"%d bytes at %d[%d]:%d\n",
			get_cmd_string(out_cmd->hdr.cmd),
			out_cmd->hdr.cmd,
			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
			q->write_ptr, idx, trans->shrd->cmd_queue);

	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
		idx = -ENOMEM;
		goto out;
	}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, copy_size);

	iwlagn_txq_attach_buf_to_tfd(trans, txq,
					phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_bufs[0] = &out_cmd->hdr;
	trace_lens[0] = copy_size;
	trace_idx = 1;
#endif

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
		if (!cmd->len[i])
			continue;
		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
			continue;
		phys_addr = dma_map_single(bus(trans)->dev,
					   (void *)cmd->data[i],
					   cmd->len[i], DMA_BIDIRECTIONAL);
		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
			iwlagn_unmap_tfd(trans, out_meta,
					 &txq->tfds[q->write_ptr],
					 DMA_BIDIRECTIONAL);
			idx = -ENOMEM;
			goto out;
		}

		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
					     cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
		trace_bufs[trace_idx] = cmd->data[i];
		trace_lens[trace_idx] = cmd->len[i];
		trace_idx++;
#endif
	}

	out_meta->flags = cmd->flags;

	txq->need_update = 1;

	/* check that tracing gets all possible blocks */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
			       trace_bufs[0], trace_lens[0],
			       trace_bufs[1], trace_lens[1],
			       trace_bufs[2], trace_lens[2]);
#endif

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(trans, txq);

 out:
	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space becomes available. If
 * there is enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
				   int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "index %d is out of range [0-%d] %d %d.\n", __func__,
			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			iwlagn_fw_error(priv(trans), false);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 * @handler_status: return value of the handler of the command
 *	(put in setup_rx_handlers)
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  The attached skb (if present) will only be freed
 * if the callback returns 1.
 */
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb,
			 int handler_status)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	cmd_index = get_cmd_index(&txq->q, index);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	txq->time_stamp = jiffies;

	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
			 DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		meta->source->handler_status = handler_status;
		rxb->page = NULL;
	}

	spin_lock_irqsave(&trans->hcmd_lock, flags);

	iwl_hcmd_queue_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 get_cmd_string(cmd->hdr.cmd));
		}
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up(&trans->shrd->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;


	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;

	ret = iwl_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	lockdep_assert_held(&trans->shrd->mutex);

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
			get_cmd_string(cmd->id));

	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
		return -EBUSY;


	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
			       get_cmd_string(cmd->id));
		return -ECANCELED;
	}
	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
		IWL_ERR(trans, "Command %s failed: FW Error\n",
			       get_cmd_string(cmd->id));
		return -EIO;
	}
	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
			get_cmd_string(cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
		IWL_DEBUG_QUIET_RFKILL(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			  get_cmd_string(cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->shrd->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans->shrd->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Error sending %s: time out after %dms.\n",
				get_cmd_string(cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_DEBUG_QUIET_RFKILL(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
				 "%s\n", get_cmd_string(cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			  get_cmd_string(cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
	}

	if (cmd->reply_page) {
		iwl_free_pages(trans->shrd, cmd->reply_page);
		cmd->reply_page = 0;
	}

	return ret;
}

int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	if (cmd->flags & CMD_ASYNC)
		return iwl_send_cmd_async(trans, cmd);

	return iwl_send_cmd_sync(trans, cmd);
}
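
/*
 * Usage sketch (added in this edit, not from the original driver; the
 * command id and buffer names below are hypothetical). A caller fills an
 * iwl_host_cmd with up to IWL_MAX_CMD_TFDS chunks; large trailing chunks
 * can be mapped in place with IWL_HCMD_DFL_NOCOPY instead of being copied
 * into the command buffer:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_TXFIFO_FLUSH,
 *		.flags = CMD_SYNC,
 *		.len = { sizeof(flush_cmd), },
 *		.data = { &flush_cmd, },
 *	};
 *
 *	ret = iwl_trans_pcie_send_cmd(trans, &cmd);
 */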

/* Frees buffers until index _not_ inclusive */
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int freed = 0;

	/* This function is not meant to release cmd queue */
	if (WARN_ON(txq_id == trans->shrd->cmd_queue))
		return 0;

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used. */
	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);

	if ((index >= q->n_bd) ||
	   (iwl_queue_used(q, last_to_free) == 0)) {
		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
			  "last_to_free %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, last_to_free, q->n_bd,
			  q->write_ptr, q->read_ptr);
		return 0;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		return 0;

	for (;
	     q->read_ptr != index;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
			continue;

		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);

		txq->skbs[txq->q.read_ptr] = NULL;

		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);

		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
		freed++;
	}
	return freed;
}