/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
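	/*
	 * The pending frame count can read zero while the queue is still
	 * enabled in AR_Q_TXE; report one pending frame in that case so
	 * callers do not mistake the queue for idle.
	 */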
	if (npend == 0) {

		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first)
 *
 * Caution must be taken to ensure that the frame trigger level is set
 * based on the DMA request size. For example, if the DMA request size is
 * set to 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is
 * because there needs to be enough space in the tx FIFO for the requested
 * transfer size. Hence the tx FIFO will stop filling at 512 - 128 = 384
 * bytes. If we set the threshold to a value beyond 6, the transmit will
 * hang.
 *
 * Current dual   stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
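/*
 * Illustrative usage (a sketch, not taken from this file): callers typically
 * bump the trigger level when the hardware reports a TX underrun, roughly
 *
 *	if (isr_status & ATH9K_INT_TXURN)
 *		ath9k_hw_updatetxtriglevel(ah, true);
 *
 * where isr_status stands in for the interrupt status word collected by the
 * caller's ISR.
 */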

void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000    /* usec */
#define ATH9K_TIME_QUANTUM		100     /* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
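	/*
	 * The hardware expects CWmin/CWmax as (2^n - 1), so round any
	 * requested contention window up to the next such boundary.
	 */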
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for beacon queue
		 * but not for IBSS as we would create an imbalance
		 * on beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
		if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_KEYMISS;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
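		/*
		 * Force the PCU receive state machine to abort and stay
		 * disabled, then poll AR_OBS_BUS_1 until its rx state field
		 * reads back as idle.
		 */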
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
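/*
 * Illustrative RX stop sequence (a sketch, not taken from this file): drivers
 * generally abort the PCU first and only then stop the RX DMA engine, e.g.
 *
 *	ath9k_hw_abortpcurecv(ah);
 *	stopped = ath9k_hw_stopdmarecv(ah, &reset);
 *
 * so that no new frames are accepted while the DMA engine drains.
 */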

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
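/*
 * Note on the *reset out-parameter of ath9k_hw_stopdmarecv(): it is only set
 * when pre-AR9300 hardware reports the same stuck DMA debug state (0x1c0)
 * twice in a row, in which case the caller should perform a full chip reset
 * instead of assuming RX DMA has stopped cleanly.
 */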

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
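/*
 * With an AIFS of 1 and CWmin/CWmax of 0 the beacon queue transmits without
 * random backoff, and no per-queue TX interrupts are requested for it.
 */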

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);

	if (((host_isr & AR_INTR_MAC_IRQ) ||
	     (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
	    (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
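/*
 * Note that ath9k_hw_intrpend() only peeks at the interrupt cause registers
 * to decide whether this device raised the IRQ; nothing is acknowledged or
 * cleared here.
 */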

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
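	/*
	 * Interrupt disabling nests: each call decrements intr_ref_cnt, and
	 * the matching ath9k_hw_enable_interrupts() only re-enables the IER
	 * once the reference count climbs back to zero.
	 */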

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;
	u32 async_mask;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, ATH_DBG_INTERRUPT,
			"Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	async_mask = AR_INTR_MAC_IRQ;

	if (ah->imask & ATH9K_INT_MCI)
		async_mask |= AR_INTR_ASYNC_MASK_MCI;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);

void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
	enum ath9k_int ints = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
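/*
 * Typical call pattern (a sketch, not taken from this file): a caller first
 * updates ah->imask, then programs the masks with ath9k_hw_set_interrupts(ah)
 * and finally re-arms the host interrupt with ath9k_hw_enable_interrupts(ah).
 */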