/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");


/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
								bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
	/* XR missing */
};

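/*
 * The hardware reports only the low 15 bits of the rx timestamp.
 * Combine it with the current 64-bit TSF, stepping the TSF back by
 * one 0x8000 period if its low bits have already wrapped past the
 * reported value.
 */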
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

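/*
 * Return a printable name for the given MAC/PHY/radio revision,
 * based on the srev_names table above.
 */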
const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_softc *sc = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
/********************\
* Channel/mode setup *
\********************/
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, chfreq, freq, ch;
	enum ieee80211_band band;
	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;
		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;
		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;
		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].band = band;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}
		count++;
	}
	return count;
}

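/*
 * Build the reverse mapping from hardware rate codes to mac80211
 * bitrate indices for one band; unused codes stay at -1.
 */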
static void
ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
{
	u8 i;
	for (i = 0; i < AR5K_MAX_RATES; i++)
		sc->rate_idx[b->band][i] = -1;
	for (i = 0; i < b->n_bitrates; i++) {
		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;
	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(sc->channels);
	/* 2GHz band */
	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;
		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;
		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(sc, sband);
	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;
		sband->channels = &sc->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(sc, sband);

	ath5k_debug_dump_bands(sc);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart things, as in ath5k_init.
 *
 * Called with sc->lock.
 */
int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  sc->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(sc, chan, true);
}

void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

void
ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);

	sc->opmode = iter_data.opmode;
	if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		sc->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(sc->ah, sc->opmode);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  sc->opmode, ath_opmode_to_string(sc->opmode));
	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(sc->ah))
		ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		sc->filter_flags |= AR5K_RX_FILTER_PROM;
	}
	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(sc->ah, rfilt);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

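/*
 * Map a hardware rate code from an rx/tx status to the mac80211 rate
 * index for the current band, falling back to the base rate on
 * invalid input.
 */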
static inline int
ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
	int rix;
	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = sc->rate_idx[sc->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);
	if (!skb) {
		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(sc->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;
	if (!skb) {
		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}

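/*
 * Classify an outgoing frame so the hardware can apply the proper
 * timing rules (beacons, probe responses, ATIM and PS-Poll frames
 * are handled specially, everything else is "normal").
 */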
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;
	return htype;
}

static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;
	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
	/* XXX endianness */
	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	rate = ieee80211_get_tx_rate(sc->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;
	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;
	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(sc->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
		if (!rate)
			break;
		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);
	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;
	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;
	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

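/*
 * Allocate a single coherent DMA block for all tx/rx/beacon
 * descriptors and carve it up into ath5k_buf entries on the rxbuf,
 * txbuf and bcbuf lists.
 */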
static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;
	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
				&sc->desc_daddr, GFP_KERNEL);
	if (sc->desc == NULL) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = sc->desc;
	da = sc->desc_daddr;
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(sc, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;
	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}
	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&sc->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->bcbuf);
	}
	return 0;
err_free:
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
err:
	sc->desc = NULL;
	return ret;
}
void
ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;
	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->bcbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
	/* Free memory associated with all descriptors */
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
	sc->desc = NULL;
	sc->desc_daddr = 0;
	kfree(sc->bufptr);
	sc->bufptr = NULL;
}


/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_softc *sc,
		int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;
	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(sc->txqs)) {
		ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(sc->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &sc->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &sc->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};
	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq_info qi;
	int ret;
	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret)
		goto err;
	if (sc->opmode == NL80211_IFTYPE_AP ||
	    sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
	if (ret)
		goto err;
	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;
	qi.tqi_ready_time = (sc->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;
	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @sc: The &struct ath5k_softc
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;
	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(sc, bf);
				ath5k_txbuf_free_skb(sc, bf);
				spin_lock_bh(&sc->txbuflock);
				list_move_tail(&bf->list, &sc->txbuf);
				sc->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&sc->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq = sc->txqs;
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/
/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;
	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);
	spin_lock_bh(&sc->rxbuflock);
	sc->rxlink = NULL;
	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0) {
			spin_unlock_bh(&sc->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&sc->rxbuflock);
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
	ath5k_debug_printrxbuffs(sc, ah);
}

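/*
 * Report whether a received frame was decrypted by hardware, also
 * handling the case where a default key was used and the key index
 * has to be taken from the frame itself.
 */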
static unsigned int
ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;
	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;
	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}

static void
ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(sc->ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);
		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}
		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= sc->nexttbtt)
			ath5k_beacon_update_timers(sc, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(sc->ah, sc->bintval)) {
			ath5k_beacon_update_timers(sc, bc_tstamp);
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}
static void
ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;
	ewma_add(&ah->ah_beacon_rssi_avg, rssi);
	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;
	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		if (skb_headroom(skb) < padsize)
			return -1;
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}
	return 0;
}
/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}
	return 0;
}

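/*
 * Fill in the mac80211 rx status (timestamp, signal, rate and
 * decryption flags) for a completed frame and pass it up the stack.
 */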
static void
ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but I'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_MPDU;
	rxs->freq = sc->curchan->center_freq;
	rxs->band = sc->curchan->band;
	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
	rxs->antenna = rs->rs_antenna;
	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		sc->stats.antenna_rx[rs->rs_antenna]++;
	else
		sc->stats.antenna_rx[0]++; /* invalid */
	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;
	trace_ath5k_rx(sc, skb);
	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
	/* check beacons in IBSS mode */
	if (sc->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(sc, skb, rxs);
	ieee80211_rx(sc->hw, skb);
}
/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
{
	sc->stats.rx_all_count++;
	sc->stats.rx_bytes_count += rs->rs_datalen;
	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			sc->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			sc->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			sc->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			sc->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			sc->stats.rxerr_mic++;
			return true;
1433 1434
		}

1435 1436 1437 1438
		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}
	if (unlikely(rs->rs_more)) {
		sc->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

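/*
 * Mask off RX/TX interrupts while the corresponding tasklets still
 * have work pending, so the interrupt handler does not re-arm work
 * that is already scheduled; the tasklets clear their pending flag
 * and call this again to restore the bits.
 */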
static void
ath5k_set_current_imask(struct ath5k_softc *sc)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&sc->irqlock, flags);
	imask = sc->imask;
	if (sc->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (sc->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(sc->ah, imask);
	spin_unlock_irqrestore(&sc->irqlock, flags);
}

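/*
 * RX tasklet: walk the descriptor list, hand completed frames to
 * mac80211 and re-arm each buffer with a freshly allocated skb.
 */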
static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;
	spin_lock(&sc->rxbuflock);
	if (list_empty(&sc->rxbuf)) {
		ATH5K_WARN(sc, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;
		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
			break;
		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(sc, "error in processing rx descriptor\n");
			sc->stats.rxerr_proc++;
			break;
		}
		if (ath5k_receive_frame_ok(sc, &rs)) {
			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;
			dma_unmap_single(sc->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);
			skb_put(skb, rs.rs_datalen);
			ath5k_receive_frame(sc, skb, &rs);
			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
unlock:
	spin_unlock(&sc->rxbuflock);
	sc->rx_pending = false;
	ath5k_set_current_imask(sc);
}

/*************\
* TX Handling *
\*************/
void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;
	trace_ath5k_tx(sc, skb, txq);
	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}
	if (txq->txq_len >= txq->txq_max)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&sc->txbuflock, flags);
	if (list_empty(&sc->txbuf)) {
		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;
	if (list_empty(&sc->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&sc->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&sc->txbuflock, flags);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		goto drop_packet;
	}
	return;
drop_packet:
	dev_kfree_skb_any(skb);
}

static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	sc->stats.tx_all_count++;
	sc->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		sc->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			sc->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			sc->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			sc->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	* Remove MAC header padding before giving the frame
	* back to mac80211.
	*/
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		sc->stats.antenna_tx[ts->ts_antenna]++;
	else
		sc->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(sc, skb, txq, ts);
	ieee80211_tx_status(sc->hw, skb);
}

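/*
 * Reap completed descriptors from one tx queue, report their status
 * to mac80211 and return the buffers to the free list.
 */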
static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;
	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(sc,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(sc, skb, txq, &ts);
		}
		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
			spin_lock(&sc->txbuflock);
			list_move_tail(&bf->list, &sc->txbuf);
			sc->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&sc->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(sc->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_softc *sc = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
			ath5k_tx_processq(sc, &sc->txqs[i]);

	sc->tx_pending = false;
	ath5k_set_current_imask(sc);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;
	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(sc->dev, bf->skbaddr)) {
		ATH5K_ERR(sc, "beacon DMA mapping failed\n");
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
1767
	 * automatically, based on ACKed packets.
1768 1769 1770 1771 1772
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
B
Bob Copeland 已提交
1773 1774 1775 1776 1777
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
1778 1779 1780 1781
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = sc->bsent & 4 ? 2 : 1;

1782

1783 1784 1785
	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
1786
	ds->ds_data = bf->skbaddr;
1787
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1788
			ieee80211_get_hdrlen_from_skb(skb), padsize,
1789
			AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
1790
			ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1791
			1, AR5K_TXKEYIX_INVALID,
1792
			antenna, flags, 0, 0);
1793 1794 1795 1796 1797
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
1798
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
1799 1800 1801
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_txbuf_free_skb(sc, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(sc, avf->bbuf);
	if (ret)
		avf->bbuf->skb = NULL;
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
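/*
 * Illustrative note on the staggered multi-BSS slot computation below
 * (hypothetical numbers): assuming the default ATH_BCBUF of 4 and
 * bintval = 100 TU, a TSF offset of tsftu % bintval = 60 gives
 * slot = (60 * 4) / 100 = 2, so the interface in bslot[(2 + 1) % 4]
 * beacons next and each slot owns a 100 / 4 = 25 TU window.
 */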
static void
ath5k_beacon_send(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
		sc->bmisscount++;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", sc->bmisscount);
		if (sc->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				sc->bmisscount);
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		}
		return;
	}
	if (unlikely(sc->bmisscount != 0)) {
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			sc->bmisscount);
		sc->bmisscount = 0;
	}

	if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
			sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
		vif = sc->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, sc->bintval, slot, vif);
	} else /* only one interface */
		vif = sc->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;
	if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
		     sc->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
		return;
	}

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
		ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (sc->opmode == NL80211_IFTYPE_AP ||
	    sc->opmode == NL80211_IFTYPE_MESH_POINT)
		ath5k_beacon_update(sc->hw, vif);

	trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);

	ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, sc->bhalq);
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		sc->bhalq, (unsigned long long)bf->daddr, bf->desc);

	skb = ieee80211_get_buffered_bc(sc->hw, vif);
	while (skb) {
		ath5k_tx_queue(sc->hw, skb, sc->cabq);
		skb = ieee80211_get_buffered_bc(sc->hw, vif);
	}

	sc->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @sc: struct ath5k_softc pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
{
	struct ath5k_hw *ah = sc->ah;
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = sc->bintval & AR5K_BEACON_PERIOD;
	if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(sc, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */
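	/* Worked example (hypothetical numbers, assuming
	 * AR5K_TUNE_SW_BEACON_RESP is 10 so FUDGE is 13): with intval = 100 TU
	 * and hw_tu = 12345, the bc_tsf == -1 case below yields
	 * nexttbtt = roundup(12345 + 13, 100) = 12400 TU, i.e. the next
	 * interval boundary safely ahead of the current TU. */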

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE

	sc->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @sc: struct ath5k_softc pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	unsigned long flags;

	spin_lock_irqsave(&sc->block, flags);
	sc->bmisscount = 0;
	sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (sc->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(sc);

		sc->imask |= AR5K_INT_SWBA;

		if (sc->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(sc);
		} else
			ath5k_beacon_update_timers(sc, -1);
	} else {
		ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
	}

	ath5k_hw_set_imr(ah, sc->imask);
	mmiowb();
	spin_unlock_irqrestore(&sc->block, flags);
}

static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_softc *sc = (struct ath5k_softc *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(sc->ah);
		sc->nexttbtt += sc->bintval;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				sc->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&sc->block);
		ath5k_beacon_send(sc);
		spin_unlock(&sc->block);
	}
}


/********************\
* Interrupt handling *
\********************/

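/*
 * Calibration poll, run from the interrupt path: schedule the ANI
 * tasklet when its interval has elapsed (but only while no full
 * calibration is active) and the full calibration tasklet when its
 * own interval has elapsed.
 */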
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		/* run ANI only when full calibration is not active */
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ah_sc->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		tasklet_schedule(&ah->ah_sc->calib);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

static void
ath5k_schedule_rx(struct ath5k_softc *sc)
{
	sc->rx_pending = true;
	tasklet_schedule(&sc->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_softc *sc)
{
	sc->tx_pending = true;
	tasklet_schedule(&sc->txtq);
}

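/*
 * Main interrupt handler: read and clear the interrupt status in a loop
 * (a single pass on AHB parts), defer RX/TX/beacon processing to the
 * tasklets and schedule the reset worker on fatal errors or on RX
 * overruns on older MACs.
 */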
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_softc *sc = dev_id;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
		((ath5k_get_bus_type(ah) != ATH_AHB) &&
				!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	do {
		ath5k_hw_get_isr(ah, &status);		/* NB: clears IRQ too */
		ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, sc->imask);
		if (unlikely(status & AR5K_INT_FATAL)) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		} else if (unlikely(status & AR5K_INT_RXORN)) {
			/*
			 * Receive buffers are full. Either the bus is busy or
			 * the CPU is not fast enough to process all received
			 * frames.
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			sc->stats.rxorn_intr++;
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(sc->hw, &sc->reset_work);
			} else
				ath5k_schedule_rx(sc);
		} else {
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&sc->beacontq);

			if (status & AR5K_INT_RXEOL) {
				/*
				* NB: the hardware should re-read the link when
				*     RXE bit is written, but it doesn't work at
				*     least on older hardware revs.
				*/
				sc->stats.rxeol_intr++;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				ath5k_hw_update_tx_triglevel(ah, true);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_schedule_tx(sc);
			if (status & AR5K_INT_BMISS) {
				/* TODO */
			}
			if (status & AR5K_INT_MIB) {
				sc->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&sc->rf_kill.toggleq);

		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	if (sc->rx_pending || sc->tx_pending)
		ath5k_set_current_imask(sc);

	if (unlikely(!counter))
		ATH5K_WARN(sc, "too many interrupts, giving up for now\n");

	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_tasklet_calibrate(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	/* Only full calibration for now */
	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(sc->curchan->center_freq),
		sc->curchan->hw_value);

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
		ieee80211_queue_work(sc->hw, &sc->reset_work);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		ATH5K_ERR(sc, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				sc->curchan->center_freq));

	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
	 * doesn't.
	 * TODO: We should stop TX here, so that it doesn't interfere.
	 * Note that stopping the queues is not enough to stop TX! */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
		ah->ah_cal_next_nf = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
		ath5k_hw_update_noise_floor(ah);
	}

	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}


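/* Adaptive Noise Immunity (ANI) tasklet: run the periodic ANI calibration. */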
static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}


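/*
 * Periodic TX watchdog: if a queue still holds frames and its
 * txq_poll_mark has not been cleared by ath5k_tx_processq() since the
 * previous poll, consider the queue stuck and reset the device.
 */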
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	mutex_lock(&sc->lock);

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(sc, NULL, true);
	}

	mutex_unlock(&sc->lock);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


/*************************\
* Initialization routines *
\*************************/

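/*
 * Driver attach: set up the mac80211 hw capabilities, request the IRQ,
 * allocate and initialize the ath5k_hw state and report the detected
 * MAC/PHY/radio revisions.
 */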
int __devinit
ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, sc->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, sc->status);

	sc->opmode = NL80211_IFTYPE_STATION;
	sc->bintval = 1000;
	mutex_init(&sc->lock);
	spin_lock_init(&sc->rxbuflock);
	spin_lock_init(&sc->txbuflock);
	spin_lock_init(&sc->block);
	spin_lock_init(&sc->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
	if (ret) {
		ATH5K_ERR(sc, "request_irq failed\n");
		goto err;
	}

	/* If we passed the test, malloc an ath5k_hw struct */
	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
	if (!sc->ah) {
		ret = -ENOMEM;
		ATH5K_ERR(sc, "out of memory\n");
		goto err_irq;
	}

	sc->ah->ah_sc = sc;
	sc->ah->ah_iobase = sc->iobase;
	common = ath5k_hw_common(sc->ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = sc->ah;
	common->hw = hw;
	common->priv = sc;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(sc);
	if (ret)
		goto err_free_ah;

	/* set up multi-rate retry capabilities */
	if (sc->ah->ah_version == AR5K_AR5212) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
					sc->ah->ah_mac_srev,
					sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
			!sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(sc, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
				sc->ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_5ghz_revision),
					sc->ah->ah_radio_5ghz_revision);
			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_2ghz_revision),
					sc->ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(sc);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, sc->status);

	return 0;
err_ah:
	ath5k_hw_deinit(sc->ah);
err_free_ah:
	kfree(sc->ah);
err_irq:
	free_irq(sc->irq, sc);
err:
	return ret;
}

static int
ath5k_stop_locked(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, sc->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(sc->hw);

	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
		ath5k_led_off(sc);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(sc->irq);
		ath5k_rx_stop(sc);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(sc);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

int
ath5k_init_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&sc->lock);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether or not this is the first time through.
	 */
	ath5k_stop_locked(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->hw->conf.channel;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;

	ret = ath5k_reset(sc, NULL, false);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
		sc->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&sc->lock);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

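/* Make sure no RX/TX work is left pending and kill all driver tasklets. */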
static void ath5k_stop_tasklets(struct ath5k_softc *sc)
{
	sc->rx_pending = false;
	sc->tx_pending = false;
	tasklet_kill(&sc->rxtq);
	tasklet_kill(&sc->txtq);
	tasklet_kill(&sc->calib);
	tasklet_kill(&sc->beacontq);
	tasklet_kill(&sc->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	int ret;

	mutex_lock(&sc->lock);
	ret = ath5k_stop_locked(sc);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in a weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(sc->ah);

		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&sc->lock);

	ath5k_stop_tasklets(sc);

	cancel_delayed_work_sync(&sc->tx_complete_work);

	ath5k_rfkill_hw_stop(sc->ah);

	return ret;
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with sc->lock.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(sc->irq);
	ath5k_stop_tasklets(sc);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ah_sc->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(sc);
	if (chan)
		sc->curchan = chan;

	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	ah->ah_cal_next_full = jiffies;
	ah->ah_cal_next_ani = jiffies;
	ah->ah_cal_next_nf = jiffies;
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&sc->survey, 0, sizeof(sc->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(sc, c); */

	ath5k_beacon_config(sc);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(sc->hw);

	return 0;
err:
	return ret;
}

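/* Reset worker: perform a full chip reset under sc->lock. */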
static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
		reset_work);

	mutex_lock(&sc->lock);
	ath5k_reset(sc, NULL, true);
	mutex_unlock(&sc->lock);
}

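/*
 * Finish driver initialization: probe for multi-rate retry support,
 * collect the channel list, allocate descriptors and TX queues, set up
 * tasklets and work items, read the MAC address and register the
 * device with mac80211.
 */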
static int __devinit
ath5k_init(struct ieee80211_hw *hw)
{

	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;


	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MACs that don't have support will
	 * return false w/o doing anything.  MACs that do
	 * support it will return true w/o doing anything.
	 */
	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);

	if (ret < 0)
		goto err;
	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, sc->status);

	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	sc->bhalq = ret;
	sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(sc->cabq)) {
		ATH5K_ERR(sc, "can't setup cab queue\n");
		ret = PTR_ERR(sc->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		* directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);

	INIT_WORK(&sc->reset_work, ath5k_reset_work);
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(sc, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	memcpy(&sc->lladdr, mac, ETH_ALEN);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(sc, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(sc, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(sc);

	ath5k_sysfs_register(sc);

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_bhal:
	ath5k_hw_release_tx_queue(ah, sc->bhalq);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}

void
ath5k_deinit_softc(struct ath5k_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
	ath5k_unregister_leds(sc);

	ath5k_sysfs_unregister(sc);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(sc->ah);
	kfree(sc->ah);
	free_irq(sc->irq, sc);
}

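/* Check whether any active interface is currently associated. */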
bool
ath5k_any_vif_assoc(struct ath5k_softc *sc)
{
	struct ath5k_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
						   &iter_data);
	return iter_data.any_assoc;
}

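/*
 * Enable or disable reception of beacon frames by adjusting the RX
 * filter; the resulting filter flags are cached in sc->filter_flags.
 */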
void
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;
	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	sc->filter_flags = rfilt;
}