/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
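
/*
 * Program the per-queue TX interrupt masks into the secondary
 * interrupt mask registers (AR_IMR_S0/S1/S2).
 */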

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
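		/*
		 * The queue can still be enabled (TXE set) even though the
		 * pending-frame counter reads zero; report one outstanding
		 * frame in that case.
		 */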

		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 * (true) or decreased (false) by one step
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first)
 *
 * Caution must be taken to ensure to set the frame trigger level based
 * on the DMA request size. For example if the DMA request size is set to
 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
 * there needs to be enough space in the tx FIFO for the requested transfer
 * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set
 * the threshold to a value beyond 6, then the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;
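
	/*
	 * Note: interrupts are disabled across the AR_TXCFG read-modify-write
	 * (presumably so the update does not race with the TX interrupt path).
	 */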

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
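
/*
 * Force every QCU idle: assert the global TX disable, quiet the PCU
 * and ignore backoff, poll each queue until it drains (up to ~5 ms per
 * queue), then restore the modified registers.
 */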

void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);
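
/*
 * Disable DMA on a single TX queue and wait (up to 1 ms) for its
 * pending-frame count to reach zero.  Returns false on timeout.
 */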

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000    /* usec */
#define ATH9K_TIME_QUANTUM		100     /* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
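
/*
 * Copy the caller-supplied parameters into the driver's shadow state
 * for TX queue @q, clamping AIFS and the retry limits and rounding
 * CWmin/CWmax up to (2^n - 1).  The values are written to the hardware
 * later by ath9k_hw_resettxqueue().
 */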

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);
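
/*
 * Claim a hardware QCU for the given queue type.  Beacon, CAB and
 * UAPSD use fixed slots at the top of the queue range, PS-poll uses
 * queue 1, and data queues take the first inactive slot.  Returns the
 * queue number, or -1 on failure.
 */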

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
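
/*
 * Return a QCU to the inactive pool and clear its bits in all of the
 * per-queue TX interrupt masks.
 */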

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
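
/*
 * Program the hardware registers for queue @q from the shadow state
 * set up by ath9k_hw_set_txq_props(): IFS/CW parameters, retry limits,
 * ready-time and burst-time settings, the per-type special cases and
 * the per-queue interrupt masks.
 */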

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for beacon queue
		 * but not for IBSS as we would create an imbalance
		 * on beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
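
/*
 * Translate a completed RX descriptor into an ath_rx_status: length,
 * timestamp, per-chain RSSI, rate, aggregation flags and error status.
 * Returns -EINPROGRESS if the hardware has not finished with the
 * descriptor yet.
 */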

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
		else if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

/*
 * This can stop or re-enable RX.
 *
 * If "set" is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
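
		/*
		 * On pre-AR9003 chips, also sample the MAC state from
		 * AR_DMADBG_7: if it reads 0x1c0 on two consecutive polls,
		 * RX DMA is taken to be stuck and the caller is asked to
		 * perform a full reset.
		 */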

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
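
/*
 * Re-enable interrupts.  Enables are reference counted against
 * ath9k_hw_disable_interrupts(); the hardware IER is only written once
 * the count returns to zero.
 */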

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, ATH_DBG_INTERRUPT,
			"Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
			  AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);


		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
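
/*
 * Translate the driver-level interrupt mask (ah->imask) into the
 * primary and secondary IMR registers, honouring the RX/TX interrupt
 * mitigation settings and the per-queue TX interrupt masks.
 */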

void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
	enum ath9k_int ints = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);