/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 */
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int txq_id = txq->q.id;

	if (txq->need_update == 0)
		return;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				      txq_id, reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return;
		}

		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				     txq->q.write_ptr | (txq_id << 8));

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq_id: index of the Tx queue to deallocate
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);

	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_tx_queue_free);

/**
 * iwl_cmd_queue_free - Deallocate the command queue (IWL_CMD_QUEUE_NUM).
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct device *dev = &priv->pci_dev->dev;
	int i;
	bool huge = false;

	if (q->n_bd == 0)
		return;

	for (; q->read_ptr != q->write_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
		/* we have no way to tell if it is a huge cmd ATM */
		i = get_cmd_index(q, q->read_ptr, 0);

		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
			huge = true;
			continue;
		}

		pci_unmap_single(priv->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}
	if (huge) {
		i = q->n_window;
		pci_unmap_single(priv->pci_dev,
				 dma_unmap_addr(&txq->meta[i], mapping),
				 dma_unmap_len(&txq->meta[i], len),
				 PCI_DMA_BIDIRECTIONAL);
	}

	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
				  txq->tfds, txq->q.dma_addr);

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
EXPORT_SYMBOL(iwl_cmd_queue_free);

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For Tx queues, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
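/*
 * Worked example (illustrative values only, not from hardware): with
 * n_bd = 256, n_window = 64, read_ptr = 10 and write_ptr = 70,
 * iwl_queue_space() below computes s = 10 - 70 = -60, adds n_window to
 * get 4, then subtracts the 2-entry reserve and reports 2 free slots.
 */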

int iwl_queue_space(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;

	if (s <= 0)
		s += q->n_window;
	/* keep some reserve to not confuse empty and full situations */
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}
EXPORT_SYMBOL(iwl_queue_space);


/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;
	q->last_read_ptr = 0;
	q->repeat_same_read_ptr = 0;

	return 0;
}

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERR(priv, "kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else {
		txq->txb = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int i, len;
	int ret;
	int actual_slots = slots_num;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space plus one big
	 * command for scan, since the scan command is very large; the system
	 * will never have two scans in flight at once, so only one such slot
	 * is needed. For normal Tx queues (all other queues), no super-size
	 * command space is needed.
	 */
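	/*
	 * Resulting layout (sketch, command queue case): slots
	 * 0..slots_num-1 each hold sizeof(struct iwl_device_cmd) bytes,
	 * while the extra slot at index slots_num holds IWL_MAX_CMD_SIZE
	 * bytes for the oversized (huge) scan command, as set up in the
	 * allocation loop below.
	 */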
	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
			    GFP_KERNEL);
	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
			   GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto out_free_arrays;

	len = sizeof(struct iwl_device_cmd);
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
			len = IWL_MAX_CMD_SIZE;

		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
			goto err;
	}

	/* Alloc driver data array and TFD circular buffer */
	ret = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (ret)
		goto err;

	txq->need_update = 0;

	/*
	 * Aggregation TX queues will get their ID when aggregation begins;
	 * they overwrite the setting done here. The command FIFO doesn't
	 * need an swq_id so don't set one to catch errors, all others can
	 * be set up to the identity mapping.
	 */
	if (txq_id != IWL_CMD_QUEUE_NUM)
		txq->swq_id = txq_id;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);

	return 0;
err:
	for (i = 0; i < actual_slots; i++)
		kfree(txq->cmd[i]);
out_free_arrays:
	kfree(txq->meta);
	kfree(txq->cmd);

	return -ENOMEM;
}
EXPORT_SYMBOL(iwl_tx_queue_init);

void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
			int slots_num, u32 txq_id)
{
	int actual_slots = slots_num;

	if (txq_id == IWL_CMD_QUEUE_NUM)
		actual_slots++;

	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);

	txq->need_update = 0;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	priv->cfg->ops->lib->txq_init(priv, txq);
}
EXPORT_SYMBOL(iwl_tx_queue_reset);

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns a value < 0 to indicate that the operation
 * failed. On success, it returns the (non-negative) index of the command
 * in the command queue.
 */
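/*
 * A minimal caller sketch (illustrative, not a real call site in this
 * file): the sync/async wrappers in iwl-hcmd.c fill a struct
 * iwl_host_cmd and funnel it here, roughly as:
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = REPLY_SCAN_CMD,
 *		.len = scan_len,	(hypothetical length variable)
 *		.flags = CMD_SIZE_HUGE,
 *		.data = scan,		(hypothetical scan buffer)
 *	};
 *	idx = iwl_enqueue_hcmd(priv, &cmd);
 */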
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
	int len;
	u32 idx;
	u16 fix_size;

	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command,
	 * we will need to increase the size of the TFD entries.
	 * Also check that the command buffer does not exceed the size
	 * of struct iwl_device_cmd and IWL_MAX_CMD_SIZE. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);

	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERR(priv, "No space in command queue\n");
		if (iwl_within_ct_kill_margin(priv))
			iwl_tt_enter_ct_kill(priv);
		else {
			IWL_ERR(priv, "Restarting adapter due to queue full\n");
			queue_work(priv->workqueue, &priv->restart);
		}
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	/* If this is a huge cmd, mark the huge flag also on the meta.flags
	 * of the _original_ cmd. This is used for DMA mapping clean up.
	 */
	if (cmd->flags & CMD_SIZE_HUGE) {
		idx = get_cmd_index(q, q->write_ptr, 0);
		txq->meta[idx].flags = CMD_SIZE_HUGE;
	}

	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to 0 */
	out_meta->flags = cmd->flags;
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;
	if (cmd->flags & CMD_ASYNC)
		out_meta->callback = cmd->callback;

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
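	/* Note: iwl_tx_cmd_complete() recovers the queue and index from
	 * this field via SEQ_TO_QUEUE()/SEQ_TO_INDEX(), so the sequence
	 * number is the round-trip token that ties a reply back to its
	 * command slot. */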
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
	if (idx == TFD_CMD_SLOTS)
		len = IWL_MAX_CMD_SIZE;

#ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
	case REPLY_TX_LINK_QUALITY_CMD:
	case SENSITIVITY_CMD:
		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
		break;
	default:
		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
				"%d bytes at %d[%d]:%d\n",
				get_cmd_string(out_cmd->hdr.cmd),
				out_cmd->hdr.cmd,
				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
	}
#endif
	txq->need_update = 1;

	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
		/* Set up entry in queue's byte count circular buffer */
		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
				   fix_size, PCI_DMA_BIDIRECTIONAL);
	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, fix_size);

	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return idx;
}

/**
 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
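/*
 * Illustration (hypothetical values): if read_ptr is 5 and the completed
 * command sits at index 5, the loop below advances read_ptr exactly once,
 * to 6, and nfreed ends at 1. Reclaiming more than one entry per
 * completion means queue accounting broke, hence the error and restart.
 */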
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
				   int idx, int cmd_idx)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;

	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
			  "is out of range [0-%d] %d %d.\n", txq_id,
			  idx, q->n_bd, q->write_ptr, q->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		if (nfreed++ > 0) {
			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
					q->write_ptr, q->read_ptr);
			queue_work(priv->workqueue, &priv->restart);
		}

	}
}

/**
 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 *
 * If the command was sent with CMD_WANT_SKB, ownership of the reply page
 * is handed to the waiting caller; otherwise, if the command has an async
 * callback associated with it, that callback is executed.
 */
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
		  txq_id, sequence,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		return;
	}

	/* If this is a huge cmd, clear the huge flag on the meta.flags
	 * of the _original_ cmd, so that iwl_cmd_queue_free won't unmap
	 * the DMA buffer for the scan (huge) command.
	 */
	if (huge) {
		cmd_index = get_cmd_index(&txq->q, index, 0);
		txq->meta[cmd_index].flags = 0;
	}
	cmd_index = get_cmd_index(&txq->q, index, huge);
	cmd = txq->cmd[cmd_index];
	meta = &txq->meta[cmd_index];

	pci_unmap_single(priv->pci_dev,
			 dma_unmap_addr(meta, mapping),
			 dma_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
		rxb->page = NULL;
	} else if (meta->callback)
		meta->callback(priv, cmd, pkt);

	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);

	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
	meta->flags = 0;
}
EXPORT_SYMBOL(iwl_tx_cmd_complete);

#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

const char *iwl_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
		TX_STATUS_POSTPONE(DELAY);
		TX_STATUS_POSTPONE(FEW_BYTES);
		TX_STATUS_POSTPONE(BT_PRIO);
		TX_STATUS_POSTPONE(QUIET_PERIOD);
		TX_STATUS_POSTPONE(CALC_TTAK);
		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
		TX_STATUS_FAIL(SHORT_LIMIT);
		TX_STATUS_FAIL(LONG_LIMIT);
		TX_STATUS_FAIL(FIFO_UNDERRUN);
		TX_STATUS_FAIL(DRAIN_FLOW);
		TX_STATUS_FAIL(RFKILL_FLUSH);
		TX_STATUS_FAIL(LIFE_EXPIRE);
		TX_STATUS_FAIL(DEST_PS);
		TX_STATUS_FAIL(HOST_ABORTED);
		TX_STATUS_FAIL(BT_RETRY);
		TX_STATUS_FAIL(STA_INVALID);
		TX_STATUS_FAIL(FRAG_DROPPED);
		TX_STATUS_FAIL(TID_DISABLE);
		TX_STATUS_FAIL(FIFO_FLUSHED);
		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
		TX_STATUS_FAIL(FW_DROP);
		TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
	}

	return "UNKNOWN";
}
EXPORT_SYMBOL(iwl_get_tx_fail_reason);
#endif /* CONFIG_IWLWIFI_DEBUG */