/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/nl80211.h>

#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include <net/mac80211.h>
#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
#include "ath5k.h"
#include "../regd.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

static bool ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
								bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");


/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
								bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
		   IEEE80211_RATE_SUPPORTS_10MHZ },
};

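/*
 * Extend the 15-bit rx timestamp to a full 64-bit TSF: splice rstamp into
 * the current TSF and, if the low bits have already wrapped past rstamp,
 * step back by one 0x8000 period first.
 */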
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

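/* Return a printable chip name for the given srev type/value pair. */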
const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
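
/* Register access wrappers handed to the common ath layer via &ath_ops. */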
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static void ath5k_reg_notifier(struct wiphy *wiphy,
			       struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);

	ath_reg_notifier_apply(wiphy, request, regulatory);
}

/********************\
* Channel/mode setup *
\********************/

/*
 * Returns true for the channel numbers we expose: all of them when
 * CONFIG_ATH5K_TEST_CHANNELS is set, otherwise only the standard channels.
 */
#ifdef CONFIG_ATH5K_TEST_CHANNELS
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	return true;
}

#else
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
#endif

static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!ath5k_is_standard_channel(ch, band))
			continue;

		count++;
	}

	return count;
}

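/*
 * Build the reverse mapping from hardware rate codes (including short
 * preamble variants) to indices into the band's bitrate table.
 */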
static void
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		ah->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4-bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first clean up any pending DMA,
 * then restart everything, as ath5k_init does.
 *
 * Called with ah->lock.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chandef->chan->center_freq);

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20:
	case NL80211_CHAN_WIDTH_20_NOHT:
		ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
		break;
	case NL80211_CHAN_WIDTH_5:
		ah->ah_bwmode = AR5K_BWMODE_5MHZ;
		break;
	case NL80211_CHAN_WIDTH_10:
		ah->ah_bwmode = AR5K_BWMODE_10MHZ;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chandef->chan, true);
}

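/*
 * Interface iterator: narrow the BSSID mask, remember the first active MAC
 * address and derive the combined operating mode over all interfaces.
 */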
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (ether_addr_equal(iter_data->hw_macaddr, mac))
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

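/*
 * Recompute the BSSID mask, opmode and RX filter from the currently active
 * interfaces and program the results into the hardware.
 */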
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(
		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		ath5k_vif_iter, &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

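/* Map a hardware rate code to an index into the current band's rate table. */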
static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = ah->rate_idx[ah->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

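/* Map an skb's 802.11 frame type to the packet type used in tx descriptors. */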
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}

static struct ieee80211_rate *
ath5k_get_rate(const struct ieee80211_hw *hw,
	       const struct ieee80211_tx_info *info,
	       struct ath5k_buf *bf, int idx)
{
	/*
	 * Convert an ieee80211_tx_rate RC-table entry to
	 * the respective ieee80211_rate struct.
	 */
	if (bf->rates[idx].idx < 0)
		return NULL;

	return &hw->wiphy->bands[info->band]->bitrates[bf->rates[idx].idx];
}

static u16
ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
			const struct ieee80211_tx_info *info,
			struct ath5k_buf *bf, int idx)
{
	struct ieee80211_rate *rate;
	u16 hw_rate;
	u8 rc_flags;

	rate = ath5k_get_rate(hw, info, bf, idx);
	if (!rate)
		return 0;

	rc_flags = bf->rates[idx].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		   rate->hw_value_short : rate->hw_value;

	return hw_rate;
}

static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize,
		  struct ieee80211_tx_control *control)
{
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	if (dma_mapping_error(ah->dev, bf->skbaddr))
		return -ENOSPC;

	ieee80211_get_tx_rates(info->control.vif, (control) ? control->sta : NULL,
			       skb, bf->rates, ARRAY_SIZE(bf->rates));

	rate = ath5k_get_rate(ah->hw, info, bf, 0);

	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;

	hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
771 772
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
773
			info->control.vif, pktlen, info));
774 775 776
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
			info->control.vif, pktlen, info));
	}

	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
785
		(ah->ah_txpower.txp_requested * 2),
786
		hw_rate,
787
		bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
788 789 790 791
		cts_rate, duration);
	if (ret)
		goto err_unmap;

792 793 794 795
	/* Set up MRR descriptor */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		memset(mrr_rate, 0, sizeof(mrr_rate));
		memset(mrr_tries, 0, sizeof(mrr_tries));
796

797
		for (i = 0; i < 3; i++) {
798 799

			rate = ath5k_get_rate(ah->hw, info, bf, i);
800 801
			if (!rate)
				break;
802

803 804
			mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
			mrr_tries[i] = bf->rates[i].count;
805
		}
806

807 808 809 810 811
		ath5k_hw_setup_mrr_tx_desc(ah, ds,
			mrr_rate[0], mrr_tries[0],
			mrr_rate[1], mrr_tries[1],
			mrr_rate[2], mrr_tries[2]);
	}

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

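/*
 * Allocate a single coherent DMA block for all tx, rx and beacon descriptors
 * and carve it up into the rxbuf, txbuf and bcbuf lists.
 */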
static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}

void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	ieee80211_free_txskb(ah->hw, bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_hw *ah)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;

	kfree(ah->bufptr);
	ah->bufptr = NULL;
}


/**************\
* Queues setup *
\**************/

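/*
 * Set up a hardware tx queue of the given type/subtype and return the
 * matching entry of ah->txqs (or an ERR_PTR on failure).
 */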
static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
		int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	txq = &ah->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret)
		goto err;

	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (ah->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @ah: The &struct ath5k_hw
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(ah, bf);

				ath5k_txbuf_free_skb(ah, bf);

				spin_lock(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
				txq->txq_len--;
				spin_unlock(&ah->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq = ah->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(ah);
}

static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}


static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 bytes (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}

/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}

/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}

static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;
	struct ath_common *common = ath5k_hw_common(ah);

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;
	if (unlikely(rs->rs_status & AR5K_RXERR_CRC))
		rxs->flag |= RX_FLAG_FAILED_FCS_CRC;


	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 */
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_END;

	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;

	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		ah->stats.antenna_rx[rs->rs_antenna]++;
	else
		ah->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
	switch (ah->ah_bwmode) {
	case AR5K_BWMODE_5MHZ:
		rxs->flag |= RX_FLAG_5MHZ;
		break;
	case AR5K_BWMODE_10MHZ:
		rxs->flag |= RX_FLAG_10MHZ;
		break;
	default:
		break;
	}

	if (rs->rs_rate ==
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(ah, skb);

	if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
		ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);

		/* check beacons in IBSS mode */
		if (ah->opmode == NL80211_IFTYPE_ADHOC)
			ath5k_check_ibss_tsf(ah, skb, rxs);
	}

	ieee80211_rx(ah->hw, skb);
}

/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		unsigned int filters;

		if (rs->rs_status & AR5K_RXERR_CRC)
			ah->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			ah->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			ah->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;

			/*
			 * Treat packets that underwent a CCK or OFDM reset as having a bad CRC.
			 * These restarts happen when the radio resynchronizes to a stronger frame
			 * while receiving a weaker frame. Here we receive the prefix of the weak
			 * frame. Since these are incomplete packets, mark their CRC as invalid.
			 */
			if (rs->rs_phyerr == AR5K_RX_PHY_ERROR_OFDM_RESTART ||
			    rs->rs_phyerr == AR5K_RX_PHY_ERROR_CCK_RESTART) {
				rs->rs_status |= AR5K_RXERR_CRC;
				rs->rs_status &= ~AR5K_RXERR_PHY;
			} else {
				return false;
			}
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			ah->stats.rxerr_mic++;
			return true;
		}

		/*
		 * Reject any frames with non-crypto errors, and take into account the
		 * current FIF_* filters.
		 */
		filters = AR5K_RXERR_DECRYPT;
		if (ah->fif_filter_flags & FIF_FCSFAIL)
			filters |= AR5K_RXERR_CRC;

		if (rs->rs_status & ~filters)
			return false;
	}

	if (unlikely(rs->rs_more)) {
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

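/*
 * Update the interrupt mask: keep rx/tx interrupts masked while the
 * corresponding tasklet still has work pending.
 */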
static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}

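/*
 * RX tasklet: reap completed rx descriptors, hand the frames to mac80211
 * and re-arm each buffer with a fresh skb.
 */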
static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
}


/*************\
* TX Handling *
\*************/

void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq, struct ieee80211_tx_control *control)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	ieee80211_free_txskb(hw, skb);
}

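/*
 * Report the tx status of a completed frame back to mac80211 and update
 * the driver's statistics.
 */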
static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts,
			 struct ath5k_buf *bf)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;
	int size = 0;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
	memcpy(info->status.rates, bf->rates, size);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	* Remove MAC header padding before giving the frame
	* back to mac80211.
	*/
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}

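/*
 * Reap completed descriptors from a tx queue, pass finished frames to
 * ath5k_tx_frame_completed() and return their buffers to the free list.
 */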
static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = ah->bsent & 4 ? 2 : 1;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON,
			(ah->ah_txpower.txp_requested * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	avf = (void *)vif->drv_priv;
	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;
	int err;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)",
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
		return;
	}
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses",
			ah->bmisscount);
		ah->bmisscount = 0;
	}

	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
			ah->num_mesh_vifs > 1) ||
			ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
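		/*
		 * Staggered beacons: the beacon interval is divided into
		 * ATH_BCBUF slots; map the TU offset within the interval
		 * to a slot and pick the interface that owns it.
		 */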
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @ah: struct ath5k_hw pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = ah->bintval & AR5K_BEACON_PERIOD;
	if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
		+ ah->num_mesh_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(ah, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
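		/*
		 * For example, with intval = 100 TU, bc_tu = 12300 and
		 * hw_tu + FUDGE = 12345, this yields
		 * 12300 + roundup(45, 100) = 12400: still aligned to the
		 * sender's TBTT grid, but ahead of our local timer.
		 */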
	}
#undef FUDGE

	ah->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @ah: struct ath5k_hw pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
	spin_lock_bh(&ah->block);
	ah->bmisscount = 0;
	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (ah->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(ah);

		ah->imask |= AR5K_INT_SWBA;

		if (ah->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(ah);
		} else
			ath5k_beacon_update_timers(ah, -1);
	} else {
		ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
	}

	ath5k_hw_set_imr(ah, ah->imask);
	mmiowb();
	spin_unlock_bh(&ah->block);
}

static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(ah);
		ah->nexttbtt += ah->bintval;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				ah->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&ah->block);
		ath5k_beacon_send(ah);
		spin_unlock(&ah->block);
	}
}


/********************\
* Interrupt handling *
\********************/

static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run ANI only when calibration is not active */

		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run calibration only when another calibration
		 * is not running.
		 *
		 * Note: This is for both full/short calibration,
		 * if it's time for a full one, ath5k_calibrate_work will deal
		 * with it. */

		ah->ah_cal_next_short = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
		ieee80211_queue_work(ah->hw, &ah->calib_work);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

static void
ath5k_schedule_rx(struct ath5k_hw *ah)
{
	ah->rx_pending = true;
	tasklet_schedule(&ah->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_hw *ah)
{
	ah->tx_pending = true;
	tasklet_schedule(&ah->txtq);
}

static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_hw *ah = dev_id;
	enum ath5k_int status;
	unsigned int counter = 1000;
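	/* bound the ISR loop so a stuck interrupt source cannot keep
	 * us here forever (see the warning below when this hits zero) */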

	/*
	 * If hw is not ready (or detached) and we get an
	 * interrupt, or if we have no interrupts pending
	 * (that means it's not for us) skip it.
	 *
	 * NOTE: Group 0/1 PCI interface registers are not
	 * supported on WiSOCs, so we can't check for pending
	 * interrupts (ISR belongs to another register group
	 * so we are ok).
	 */
	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
			((ath5k_get_bus_type(ah) != ATH_AHB) &&
			!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	/** Main loop **/
	do {
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */

		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, ah->imask);

		/*
		 * Fatal hw error -> Log and reset
		 *
		 * Fatal errors are unrecoverable so we have to
		 * reset the card. These errors include bus and
		 * dma errors.
		 */
		if (unlikely(status & AR5K_INT_FATAL)) {

			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);

		/*
		 * RX Overrun -> Count and reset if needed
		 *
		 * Receive buffers are full. Either the bus is busy or
		 * the CPU is not fast enough to process all received
		 * frames.
		 */
		} else if (unlikely(status & AR5K_INT_RXORN)) {

			/*
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset;
			 * this guess is copied from the HAL.
			 */
			ah->stats.rxorn_intr++;

			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(ah->hw, &ah->reset_work);
			} else
				ath5k_schedule_rx(ah);

		} else {

			/* Software Beacon Alert -> Schedule beacon tasklet */
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&ah->beacontq);

			/*
			 * No more RX descriptors -> Just count
			 *
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			if (status & AR5K_INT_RXEOL)
				ah->stats.rxeol_intr++;


			/* TX Underrun -> Bump tx trigger level */
			if (status & AR5K_INT_TXURN)
				ath5k_hw_update_tx_triglevel(ah, true);

			/* RX -> Schedule rx tasklet */
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(ah);

			/* TX -> Schedule tx tasklet */
			if (status & (AR5K_INT_TXOK
					| AR5K_INT_TXDESC
					| AR5K_INT_TXERR
					| AR5K_INT_TXEOL))
				ath5k_schedule_tx(ah);

			/* Missed beacon -> TODO
			if (status & AR5K_INT_BMISS)
			*/

			/* MIB event -> Update counters and notify ANI */
			if (status & AR5K_INT_MIB) {
				ah->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}

			/* GPIO -> Notify RFKill layer */
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&ah->rf_kill.toggleq);

		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	/*
	 * Until we handle rx/tx interrupts mask them on IMR
	 *
	 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
	 * and unset after we've handled the interrupts.
	 */
	if (ah->rx_pending || ah->tx_pending)
		ath5k_set_current_imask(ah);

	if (unlikely(!counter))
		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");

	/* Fire up calibration poll */
	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_calibrate_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		calib_work);

	/* Should we run a full calibration ? */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {

		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

		ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
				"running full calibration\n");

		if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					"got new rfgain, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
	} else
		ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;


	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(ah->curchan->center_freq),
		ah->curchan->hw_value);

	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
		ATH5K_ERR(ah, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				ah->curchan->center_freq));

	/* Clear calibration flags */
	if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
	else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}


static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

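	/* ANI (Adaptive Noise Immunity) tuning runs separately from the
	 * periodic PHY calibration; the flag keeps the two from
	 * overlapping. */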
	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}


static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	if (!test_bit(ATH_STAT_STARTED, ah->status))
		return;

	mutex_lock(&ah->lock);

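	/*
	 * Heuristic: a queue counts as stuck when it still holds frames
	 * and its poll mark survived a whole poll interval, i.e. nothing
	 * completed on it between two runs of this worker.
	 */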
	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(ah, NULL, true);
	}

	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


/*************************\
* Initialization routines *
\*************************/

static const struct ieee80211_iface_limit if_limits[] = {
	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 4,	.types =
#ifdef CONFIG_MAC80211_MESH
				 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
				 BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination if_comb = {
	.limits = if_limits,
	.n_limits = ARRAY_SIZE(if_limits),
	.max_interfaces = 2048,
	.num_different_channels = 1,
};

int
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = ah->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, ah->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_MFP_CAPABLE |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS |
			IEEE80211_HW_SUPPORTS_RC_TABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->iface_combinations = &if_comb;
	hw->wiphy->n_iface_combinations = 1;

	/* SW support for IBSS_RSN is provided by mac80211 */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

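	/* Two bytes of headroom are (presumably) enough for the tx path
	 * to pad the 802.11 header out to a 4-byte boundary, the inverse
	 * of ath5k_remove_padding() above. */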
	hw->extra_tx_headroom = 2;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, ah->status);

	ah->opmode = NL80211_IFTYPE_STATION;
	ah->bintval = 1000;
	mutex_init(&ah->lock);
	spin_lock_init(&ah->rxbuflock);
	spin_lock_init(&ah->txbuflock);
	spin_lock_init(&ah->block);
	spin_lock_init(&ah->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
	if (ret) {
		ATH5K_ERR(ah, "request_irq failed\n");
		goto err;
	}

	common = ath5k_hw_common(ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = hw;
	common->priv = ah;
	common->clockrate = 40;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(ah);
	if (ret)
		goto err_irq;

	/* Set up multi-rate retry capabilities */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
					ah->ah_mac_srev,
					ah->ah_phy_revision);

	if (!ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (ah->ah_radio_5ghz_revision &&
			!ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(ah, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (ah->ah_radio_5ghz_revision &&
				ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_5ghz_revision),
					ah->ah_radio_5ghz_revision);
			ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_2ghz_revision),
					ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(ah);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, ah->status);

	return 0;
err_ah:
	ath5k_hw_deinit(ah);
err_irq:
	free_irq(ah->irq, ah);
err:
	return ret;
}

static int
ath5k_stop_locked(struct ath5k_hw *ah)
{

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, ah->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(ah->hw);

	if (!test_bit(ATH_STAT_INVALID, ah->status)) {
		ath5k_led_off(ah);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(ah->irq);
		ath5k_rx_stop(ah);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(ah);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

int ath5k_start(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&ah->lock);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether or not this is the first time through.
	 */
	ath5k_stop_locked(ah);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ah->curchan = ah->hw->conf.chandef.chan;
	ah->imask = AR5K_INT_RXOK
		| AR5K_INT_RXERR
		| AR5K_INT_RXEOL
		| AR5K_INT_RXORN
		| AR5K_INT_TXDESC
		| AR5K_INT_TXEOL
		| AR5K_INT_FATAL
		| AR5K_INT_GLOBAL
		| AR5K_INT_MIB;

	ret = ath5k_reset(ah, NULL, false);
	if (ret)
		goto done;

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
		ah->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&ah->lock);

	set_bit(ATH_STAT_STARTED, ah->status);
	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

static void ath5k_stop_tasklets(struct ath5k_hw *ah)
{
	ah->rx_pending = false;
	ah->tx_pending = false;
	tasklet_kill(&ah->rxtq);
	tasklet_kill(&ah->txtq);
	tasklet_kill(&ah->beacontq);
	tasklet_kill(&ah->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
void ath5k_stop(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	int ret;

	mutex_lock(&ah->lock);
	ret = ath5k_stop_locked(ah);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead Perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(ah);

		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&ah->lock);

	ath5k_stop_tasklets(ah);

	clear_bit(ATH_STAT_STARTED, ah->status);
	cancel_delayed_work_sync(&ah->tx_complete_work);

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_stop(ah);
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with ah->lock.
 */
static int
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(ah);
	if (chan)
		ah->curchan = chan;

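	/*
	 * A "fast" reset re-tunes the channel without a full chip reset;
	 * only attempt it when we are actually switching channels and
	 * the fastchanswitch module parameter allows it.
	 */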
	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	/*
	 * Set calibration intervals
	 *
	 * Note: We don't need to run calibration immediately
	 * since some initial calibration is done on reset
	 * even for fast channel switching. Also on scanning
	 * this will get set again and again and it won't get
	 * executed unless we connect somewhere and spend some
	 * time on the channel (that's what calibration needs
	 * anyway to be accurate).
	 */
	ah->ah_cal_next_full = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
	ah->ah_cal_next_ani = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
	ah->ah_cal_next_short = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);

	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&ah->survey, 0, sizeof(ah->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(ah, c); */

	ath5k_beacon_config(ah);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(ah->hw);

	return 0;
err:
	return ret;
}

static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		reset_work);

	mutex_lock(&ah->lock);
	ath5k_reset(ah, NULL, true);
	mutex_unlock(&ah->lock);
}

static int
ath5k_init(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;

	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	ah->bhalq = ret;
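	/* the CAB (content-after-beacon) queue carries frames buffered
	 * for powersave clients; ath5k_beacon_send() drains it right
	 * after each beacon */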
	ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(ah->cabq)) {
		ATH5K_ERR(ah, "can't setup cab queue\n");
		ret = PTR_ERR(ah->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		 * directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
	tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
	tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
	tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);

	INIT_WORK(&ah->reset_work, ath5k_reset_work);
	INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
	INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(ah, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(ah, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(ah, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(ah);

	ath5k_sysfs_register(ah);

	return 0;
err_queues:
	ath5k_txq_release(ah);
err_bhal:
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
err_desc:
	ath5k_desc_free(ah);
err:
	return ret;
}

void
ath5k_deinit_ah(struct ath5k_hw *ah)
{
	struct ieee80211_hw *hw = ah->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(ah);
	ath5k_txq_release(ah);
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
	ath5k_unregister_leds(ah);

	ath5k_sysfs_unregister(ah);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(ah);
	free_irq(ah->irq, ah);
}

bool
ath5k_any_vif_assoc(struct ath5k_hw *ah)
{
	struct ath5k_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(
		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		ath5k_vif_iter, &iter_data);
	return iter_data.any_assoc;
}

void
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_hw *ah = hw->priv;
	u32 rfilt;
	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ah->filter_flags = rfilt;
}

void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
		   const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (ah && ah->hw)
		printk("%s" pr_fmt("%s: %pV"),
		       level, wiphy_name(ah->hw->wiphy), &vaf);
	else
		printk("%s" pr_fmt("%pV"), level, &vaf);

	va_end(args);
}