/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

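/*
 * Push the cached per-queue TX interrupt masks (TXOK/TXDESC/TXERR/TXEOL/
 * TXURN) into the secondary interrupt mask registers AR_IMR_S0/S1/S2.
 */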
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/* TXE still set for this queue: treat it as one pending frame */
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: true to raise the frame trigger level, false to lower it
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first)
 *
 * Caution must be taken to set the frame trigger level based on the
 * DMA request size. For example, if the DMA request size is set to
 * 128 bytes, the trigger level cannot exceed 6 * 64 = 384 bytes. This
 * is because there needs to be enough space in the TX FIFO for the
 * requested transfer size; hence the TX FIFO will stop at 512 - 128 =
 * 384 bytes. If we set the threshold to a value beyond 6, the transmit
 * will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
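 *
 * Returns true if the trigger level was actually changed.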
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
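
/*
 * Illustrative use (sketch, not taken from this file): callers typically
 * raise the trigger level when the hardware reports a TX FIFO underrun:
 *
 *	if (status & ATH9K_INT_TXURN)
 *		ath9k_hw_updatetxtriglevel(ah, true);
 */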

void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000    /* usec */
#define ATH9K_TIME_QUANTUM		100     /* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

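/*
 * Illustrative drain sequence (sketch, not taken from this file): a caller
 * stopping a single queue might try the targeted stop first and fall back
 * to aborting DMA on all queues if the queue refuses to go idle:
 *
 *	if (!ath9k_hw_stop_dma_queue(ah, q))
 *		ath9k_hw_abort_tx_dma(ah);
 */
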
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
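
/*
 * Illustrative setup sequence (sketch, not taken from this file): a driver
 * would typically zero an ath9k_tx_queue_info, fill in the fields it cares
 * about, then create and program the queue:
 *
 *	struct ath9k_tx_queue_info qi;
 *	int axq;
 *
 *	memset(&qi, 0, sizeof(qi));
 *	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
 *	axq = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
 *	if (axq >= 0)
 *		ath9k_hw_resettxqueue(ah, axq);
 *
 * ("axq" here is just an illustrative local variable.)
 */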

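/*
 * Mark a queue inactive and clear its TX interrupt mask bits; the queue's
 * hardware registers are left untouched until it is set up again.
 */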
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

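/*
 * Program the hardware registers for queue @q (IFS/CW parameters, retry
 * limits, ready time, per-type policy bits) and update the TX interrupt
 * masks from the queue's cached ath9k_tx_queue_info.
 */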
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for beacon queue
		 * but not for IBSS as we would create an imbalance
		 * on beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

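/*
 * Parse a completed RX descriptor into struct ath_rx_status. Returns
 * -EINPROGRESS if the hardware has not finished with the descriptor yet.
 */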
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
		if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_KEYMISS;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

/*
 * This can stop or re-enable RX.
 *
 * If @set is true, this will kill any frame currently being transferred
 * between the MAC and the baseband, and also prevent any new frames from
 * getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);

	if (((host_isr & AR_INTR_MAC_IRQ) ||
	     (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
	    (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

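/*
 * Interrupt disable/enable is reference counted via ah->intr_ref_cnt;
 * ath9k_hw_enable_interrupts() only re-enables the IER and the async/sync
 * masks once the count returns to zero.
 */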
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath_dbg(common, INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;
	u32 async_mask;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	async_mask = AR_INTR_MAC_IRQ;

	if (ah->imask & ATH9K_INT_MCI)
		async_mask |= AR_INTR_ASYNC_MASK_MCI;

	ath_dbg(common, INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);

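/*
 * Build and program the primary (AR_IMR) and secondary (AR_IMR_S2/S5)
 * interrupt masks from ah->imask and the cached per-queue TX masks.
 */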
void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
	enum ath9k_int ints = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
852 853 854 855 856 857
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

882 883 884
	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906
	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

907
	ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
908 909 910 911 912 913 914 915 916 917 918 919 920 921
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);