/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* shadow register enabled */
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));
	} else {
		/* if we're trying to save power */
		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
			/* wake up nic if it's powered down ...
			 * uCode will wake up, and interrupt us again, so next
			 * time we'll skip this part. */
			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
				IWL_DEBUG_INFO(priv,
					"Tx queue %d requesting wakeup,"
					" GP1 = 0x%x\n", txq_id, reg);
				iwl_set_bit(priv, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
				return;
			}

			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

		/*
		 * else not in power-save mode,
		 * uCode will never sleep when we're
		 * trying to tx (during RFKILL, we're not trying to tx).
		 */
		} else
			iwl_write32(priv, HBUS_TARG_WRPTR,
				    txq->q.write_ptr | (txq_id << 8));
	}
	txq->need_update = 0;
}
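
/*
 * The value written to HBUS_TARG_WRPTR above packs the write index into
 * the low byte and the queue id into bits 8 and up: e.g. queue 4 with
 * write_ptr 17 is written as (17 | (4 << 8)) == 0x0411.
 */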

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
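
/*
 * Data queues are set up with TFD_TX_CMD_SLOTS slots by their callers,
 * which is why the loop above frees exactly that many command buffers.
 * The command queue allocates one extra "huge" buffer (see
 * iwl_tx_queue_init), so iwl_cmd_queue_free below frees index
 * TFD_CMD_SLOTS as well.
 */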

/**
 * iwl_cmd_queue_free - Deallocate DMA queue.
 * @priv: device private data; the queue freed is priv->txq[priv->cmd_queue].
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;
	bool huge = false;

	if (q->n_bd == 0)
		return;

	for (; q->read_ptr != q->write_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
		/* we have no way to tell if it is a huge cmd ATM */
		i = get_cmd_index(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
			huge = true;
			continue;
		}

		pci_unmap_single(priv->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}
	if (huge) {
		i = q->n_window;
		pci_unmap_single(priv->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When packets are reclaimed (on the 'tx done' IRQ), once free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
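
/*
 * Worked example for iwl_queue_space() below, assuming a data queue
 * with n_bd = n_window = 256: with read_ptr = 10 and write_ptr = 250,
 * s starts at 10 - 250 = -240, the wrap adds 256 giving 16 free
 * entries, and the 2-entry reserve leaves a reported space of 14.
 */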

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
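
/*
 * The wrap helpers used above come from iwl-helpers.h; because ring
 * sizes are powers of two (enforced in iwl_queue_init() below), they
 * reduce to simple masking.  A sketch of the expected definitions:
 *
 *	static inline int iwl_queue_inc_wrap(int index, int n_bd)
 *	{
 *		return ++index & (n_bd - 1);
 *	}
 *
 *	static inline int iwl_queue_dec_wrap(int index, int n_bd)
 *	{
 *		return --index & (n_bd - 1);
 *	}
 */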


/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;
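	/* e.g. a 256-slot window yields low_mark = 64 and high_mark = 32 */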

	q->write_ptr = q->read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != priv->cmd_queue) {
		txq->txb = kzalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kzalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4/#9), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will never have two scans in flight at the same time, so only one
	 * is needed.  For normal Tx queues (all other queues), no super-size
	 * command space is needed.
	 */
	if (txq_id == priv->cmd_queue)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len = IWL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
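
/*
 * Resulting command-queue slot layout: slots 0..slots_num-1 hold
 * regular commands of sizeof(struct iwl_device_cmd), while the extra
 * slot at index slots_num (== q->n_window) holds the single
 * IWL_MAX_CMD_SIZE "huge" command used for scans.
 */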

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	int actual_slots = slots_num;

	if (txq_id == priv->cmd_queue)
		actual_slots++;

	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate failure. On success, it
 * returns the index of the command in the command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;
	bool is_ct_kill = false;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
	 * we will need to increase the size of the TFD entries.
	 * Also check that the command buffer does not exceed the size
	 * of device_cmd and IWL_MAX_CMD_SIZE. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		if (priv->cfg->ops->lib->tt_ops.ct_kill_check) {
			is_ct_kill =
				priv->cfg->ops->lib->tt_ops.ct_kill_check(priv);
		}
		if (!is_ct_kill) {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	/* If this is a huge cmd, mark the huge flag also on the meta.flags
	 * of the _original_ cmd. This is used for DMA mapping clean up.
	 */
	if (cmd->flags & CMD_SIZE_HUGE) {
		idx = get_cmd_index(q, q->write_ptr, 0);
		txq->meta[idx].flags = CMD_SIZE_HUGE;
	}

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to 0 */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
			INDEX_TO_SEQ(q->write_ptr));
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, priv->cmd_queue);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}
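
/*
 * Minimal usage sketch (illustrative only; the initializer follows
 * struct iwl_host_cmd from iwl-dev.h, and real callers normally go
 * through the iwl_send_cmd_sync()/iwl_send_cmd_pdu() wrappers):
 *
 *	struct iwl_link_quality_cmd lq;	// filled in by rate scaling
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_TX_LINK_QUALITY_CMD,
 *		.len = sizeof(lq),
 *		.data = &lq,
 *		.flags = CMD_ASYNC,
 *	};
 *	int idx = iwl_enqueue_hcmd(priv, &cmd);
 *
 * A negative return means the command was not queued: -EIO under
 * RF/CT kill, -ENOSPC when the command queue is full.
 */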

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If an Rx buffer has an async callback associated with it, the callback
 * will be executed.  If CMD_WANT_SKB was set instead, the reply page is
 * handed back to the issuer via meta->source->reply_page.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != priv->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, priv->cmd_queue, sequence,
		  priv->txq[priv->cmd_queue].q.read_ptr,
		  priv->txq[priv->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	/* If this is a huge cmd, clear the huge flag on the meta.flags
	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
	 * the DMA buffer for the scan (huge) command.
	 */
	if (huge) {
		cmd_index = get_cmd_index(&txq->q, index, 0);
		txq->meta[cmd_index].flags = 0;
	}
	cmd_index = get_cmd_index(&txq->q, index, huge);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
	meta->flags = 0;
}
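
/*
 * Sketch of an async completion callback as invoked above; the
 * signature matches the callback member of struct iwl_host_cmd in
 * iwl-dev.h, the body is illustrative:
 *
 *	static void example_hcmd_callback(struct iwl_priv *priv,
 *					  struct iwl_device_cmd *cmd,
 *					  struct iwl_rx_packet *pkt)
 *	{
 *		if (pkt->hdr.flags & IWL_CMD_FAILED_MSK)
 *			IWL_ERR(priv, "command 0x%x failed\n", cmd->hdr.cmd);
 *	}
 */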