/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		  ah->txurn_interrupt_mask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		  "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {

		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 *	(true) or decreased (false)
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based on the DMA
 * request size. For example, if the DMA request size is set to 128 bytes,
 * the trigger level cannot exceed 6 * 64 = 384 bytes. This is because
 * there needs to be enough space in the TX FIFO for the requested transfer
 * size, hence the TX FIFO stops filling at 512 - 128 = 384 bytes. If we
 * set the threshold to a value beyond 6, the transmit will hang.
 *
 * Current dual-stream devices have a PCU TX FIFO size of 8 KB.
 * Current single-stream devices have a PCU TX FIFO size of 4 KB; however,
 * there is a hardware issue which forces us to use 2 KB instead, so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 * A worked example follows the function below.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
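
/*
 * Worked example of the trigger-level arithmetic described above
 * (an illustrative sketch only, not part of the driver): with a
 * 512-byte FIFO window and a 128-byte DMA request size, at most
 * (512 - 128) / 64 = 6 trigger-level units are usable before transmit
 * would hang. A hypothetical caller that reacts to a TX underrun
 * interrupt by raising the trigger level might do the following, where
 * 'isr_status' stands in for whatever the interrupt handler read:
 *
 *	if (isr_status & ATH9K_INT_TXURN)
 *		ath9k_hw_updatetxtriglevel(ah, true);
 *
 * The surrounding interrupt plumbing is an assumption made for this
 * example; only ath9k_hw_updatetxtriglevel() itself is defined here.
 */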

bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000    /* usec */
#define ATH9K_TIME_QUANTUM		100     /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_print(common, ATH_DBG_QUEUE,
			  "%s: Num of pending TX Frames %d on Q %d\n",
			  __func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				       AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_print(common, ATH_DBG_QUEUE,
				  "TSF has moved while trying to set "
				  "quiet time TSF: 0x%08x\n", tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_print(common, ATH_DBG_FATAL,
					  "Failed to stop TX DMA in %d msec "
					  "after killing last frame\n",
					  ATH9K_TX_STOP_DMA_TIMEOUT / 1000);
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_print(common, ATH_DBG_FATAL,
				  "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_FATAL,
			  "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}
	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "inactive queue: %u\n", q);
		return true;
	}

	ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);

	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		/* cwmin and cwmax should be 0 for beacon queue */
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started. A usage sketch follows the function
 * below.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
				  "RX failed to go idle in 10 ms RXSM=0x%x\n",
				  reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
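
/*
 * Usage sketch for ath9k_hw_setrxabort() (illustrative only, not taken
 * from this driver): a hypothetical reset path that quiesces RX before
 * reprogramming the hardware, where 'reconfigure_hw' is a placeholder
 * for whatever work requires RX to be idle:
 *
 *	if (!ath9k_hw_setrxabort(ah, true))
 *		return -EIO;
 *	reconfigure_hw(ah);
 *	ath9k_hw_setrxabort(ah, false);
 *
 * The error handling and reconfigure_hw() are assumptions made for the
 * example; the function above only toggles the RX abort/disable bits.
 */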

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
#define AH_RX_TIME_QUANTUM     100     /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_RX_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "DMA failed to stop in %d ms "
			  "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			  AH_RX_STOP_DMA_TIMEOUT / 1000,
			  REG_READ(ah, AR_CR),
			  REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
					      enum ath9k_int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

	if (omask & ATH9K_INT_GLOBAL) {
		ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
		REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
		(void) REG_READ(ah, AR_IER);
		if (!AR_SREV_9100(ah)) {
			REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
			(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

			REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
			(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
		}
	}

	/* TODO: global int Ref count */
	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		if (ah->txok_interrupt_mask)
			mask |= AR_IMR_TXOK;
		if (ah->txdesc_interrupt_mask)
			mask |= AR_IMR_TXDESC;
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	if (ints & ATH9K_INT_GLOBAL) {
		ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
		REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
		if (!AR_SREV_9100(ah)) {
			REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
				  AR_INTR_MAC_IRQ);
			REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

			REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
				  AR_INTR_SYNC_DEFAULT);
			REG_WRITE(ah, AR_INTR_SYNC_MASK,
				  AR_INTR_SYNC_DEFAULT);
		}
		ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
			  REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
	}

	return omask;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);