/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {

		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first)
 *
 * Caution must be taken to ensure to set the frame trigger level based
 * on the DMA request size. For example if the DMA request size is set to
 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
 * there needs to be enough space in the tx FIFO for the requested transfer
 * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set
 * the threshold to a value beyond 6, then the transmit will hang.
 *
 * Current dual   stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
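
/*
 * Usage sketch (illustrative only, not taken from this file): a TX
 * completion path that observes FIFO underruns could call
 * ath9k_hw_updatetxtriglevel(ah, true) to raise the trigger level, and
 * ath9k_hw_updatetxtriglevel(ah, false) later to lower it again once
 * underruns stop; the surrounding caller and its underrun status flag
 * are assumptions here.
 */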

void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000    /* usec */
#define ATH9K_TIME_QUANTUM		100     /* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);

static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q)
{
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
}

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ath9k_hw_clear_queue_interrupts(ah, q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);

		/*
		 * cwmin and cwmax should be 0 for beacon queue
		 * but not for IBSS as we would create an imbalance
		 * on beaconing fairness for participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);

		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	ath9k_hw_clear_queue_interrupts(ah, q);
	if (qi->tqi_qflags & TXQ_FLAG_TXINT_ENABLE) {
		ah->txok_interrupt_mask |= 1 << q;
		ah->txerr_interrupt_mask |= 1 << q;
	}
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
						AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
						AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
	}

	if (ads.ds_rxstatus8 & AR_KeyMiss)
		rs->rs_status |= ATH9K_RXERR_KEYMISS;

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);

/*
 * This stops or re-enables RX.
 *
 * If 'set' is true, this will kill any frame currently being transferred
 * between the MAC and baseband and also prevent any new frames from
 * getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
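
/*
 * Illustrative usage (an assumption, not code from this file): a power-save
 * path could park the RX engine before going to sleep with
 * ath9k_hw_setrxabort(ah, true) and release it again on wakeup with
 * ath9k_hw_setrxabort(ah, false).
 */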

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		qi.tqi_qflags = TXQ_FLAG_TXINT_ENABLE;

	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);

	if (((host_isr & AR_INTR_MAC_IRQ) ||
	     (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
	    (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath_dbg(common, INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;
	u32 async_mask;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	async_mask = AR_INTR_MAC_IRQ;

	if (ah->imask & ATH9K_INT_MCI)
		async_mask |= AR_INTR_ASYNC_MASK_MCI;

	ath_dbg(common, INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);

void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
	enum ath9k_int ints = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);