/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
								bool skip_pcu);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
	/* XR missing */
};

static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
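
/*
 * Worked example (editor's illustration, values hypothetical): with a
 * full TSF of 0x10000100 and a 15-bit rstamp of 0x7f00,
 * (tsf & 0x7fff) = 0x0100 is less than rstamp, i.e. the low bits have
 * already wrapped past the sample, so it belongs to the previous
 * 0x8000 window: stepping tsf back by 0x8000 gives 0x0fff8100, and
 * splicing rstamp into the low 15 bits yields 0x0fffff00.
 */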

const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
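
/*
 * Usage sketch (editor's addition; the exact srev constants live in
 * ath5k.h and are assumed here): ath5k_chip_name(AR5K_VERSION_MAC, srev)
 * first records a fuzzy (val & 0xf0) family match, then keeps scanning
 * for an exact (val & 0xff) match, which overrides the fuzzy name and
 * terminates the search.
 */
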
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_softc *sc = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}


/********************\
* Channel/mode setup *
\********************/

/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
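
/*
 * Editor's note (illustrative): the (chan & 3) tests above encode the
 * 20 MHz spacing of the 5 GHz band plans -- channels divisible by four
 * (36, 40, ..., 64 and 100, 104, ..., 140) satisfy (chan & 3) == 0,
 * while the UNII-3 set (149, 153, ..., 165) sits one channel number
 * higher, hence (chan & 3) == 1.
 */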

static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, chfreq, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].band = band;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
	}

	return count;
}

static void
ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		sc->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}
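
/*
 * Illustrative sketch (editor's addition; values taken from the rate
 * table above): after this runs for the 2 GHz band, both hardware codes
 * of a CCK rate resolve to the same mac80211 index, e.g.
 *
 *	sc->rate_idx[IEEE80211_BAND_2GHZ][ATH5K_RATE_CODE_11M] == 3
 *	sc->rate_idx[IEEE80211_BAND_2GHZ][ATH5K_RATE_CODE_11M |
 *					  AR5K_SET_SHORT_PREAMBLE] == 3
 *
 * while unused codes stay at -1 for ath5k_hw_to_driver_rix() to reject.
 */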

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(sc->channels);

	/* 2GHz band */
	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(sc, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &sc->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(sc, sband);

	ath5k_debug_dump_bands(sc);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first clean up any pending DMA,
 * then restart everything, a la ath5k_init.
 *
 * Called with sc->lock.
 */
int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  sc->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(sc, chan, true);
}

void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

void
ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);

	sc->opmode = iter_data.opmode;
	if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		sc->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(sc->ah, sc->opmode);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  sc->opmode, ath_opmode_to_string(sc->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(sc->ah))
		ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		sc->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(sc->ah, rfilt);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

static inline int
ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = sc->rate_idx[sc->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(sc->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}

static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}

static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(sc->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(sc->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
		if (!rate)
			break;

		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
				&sc->desc_daddr, GFP_KERNEL);
	if (sc->desc == NULL) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = sc->desc;
	da = sc->desc_daddr;
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, sc->desc_len, (unsigned long long)sc->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(sc, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
			da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&sc->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
err:
	sc->desc = NULL;
	return ret;
}
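
/*
 * Editor's sketch of the layout ath5k_desc_alloc() produces (ATH_*
 * sizes are the driver defaults; purely illustrative):
 *
 *	sc->desc -> [ ATH_RXBUF rx | ATH_TXBUF tx | ATH_BCBUF beacon | 1 ]
 *
 * Each ath5k_buf pairs a CPU-visible descriptor (bf->desc) with its bus
 * address (bf->daddr = sc->desc_daddr + offset), so the rx/tx/beacon
 * lists can be chained straight into the hardware's DMA engine.
 */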

void
ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->bcbuf, list)
		ath5k_txbuf_free_skb(sc, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
	sc->desc = NULL;
	sc->desc_daddr = 0;

	kfree(sc->bufptr);
	sc->bufptr = NULL;
}


/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_softc *sc,
		int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(sc->txqs)) {
		ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(sc->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &sc->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &sc->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret)
		goto err;

	if (sc->opmode == NL80211_IFTYPE_AP ||
		sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (sc->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}
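
/*
 * Editor's note (illustrative): with the common beacon interval of
 * 100 TU, ath5k_beaconq_config() above gives the CAB queue a ready
 * time of 80 TU, i.e. content-after-beacon traffic may only burst
 * during the first 80% of each beacon period.
 */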

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @sc: The &struct ath5k_softc
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(sc, bf);

				ath5k_txbuf_free_skb(sc, bf);

				spin_lock_bh(&sc->txbuflock);
				list_move_tail(&bf->list, &sc->txbuf);
				sc->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&sc->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq = sc->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&sc->rxbuflock);
	sc->rxlink = NULL;
	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0) {
			spin_unlock_bh(&sc->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&sc->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(sc, ah);
}

static unsigned int
ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}
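
/*
 * Editor's note (illustrative): skb->data[hlen + 3] is the 802.11
 * "Key ID" octet that follows the IV; its two most significant bits
 * carry the default-key index, so e.g. a value of 0x40 decodes to
 * keyix 1 (0x40 >> 6 == 1).
 */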

static void
ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(sc->ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= sc->nexttbtt)
			ath5k_beacon_update_timers(sc, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(sc->ah, sc->bintval)) {
			ath5k_beacon_update_timers(sc, bc_tstamp);
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}

static void
ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}

/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;
	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}
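
/*
 * Editor's worked example (illustrative): a QoS data frame without a
 * fourth address has padpos = 24 + 2 = 26, so padsize = 26 & 3 = 2 and
 * two pad bytes are inserted between header and payload; a plain
 * 24-byte header needs none (24 & 3 == 0).
 */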

/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}

static void
ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but i'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_MPDU;

	rxs->freq = sc->curchan->center_freq;
	rxs->band = sc->curchan->band;

	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		sc->stats.antenna_rx[rs->rs_antenna]++;
	else
		sc->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(sc, skb);

	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (sc->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(sc, skb, rxs);

	ieee80211_rx(sc->hw, skb);
}

/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
{
	sc->stats.rx_all_count++;
	sc->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			sc->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			sc->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			sc->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			sc->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			sc->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		sc->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

static void
ath5k_set_current_imask(struct ath5k_softc *sc)
{
	enum ath5k_int imask = sc->imask;
	unsigned long flags;

	spin_lock_irqsave(&sc->irqlock, flags);
	if (sc->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (sc->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(sc->ah, imask);
	spin_unlock_irqrestore(&sc->irqlock, flags);
}
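
/*
 * Editor's note (illustrative): while rx_pending/tx_pending is set, the
 * matching AR5K_INT_RX_ALL / AR5K_INT_TX_ALL bits stay masked above, so
 * the hardware cannot refire those interrupts before the tasklet bottom
 * half has drained; the tasklets clear the flag and call
 * ath5k_set_current_imask() again to unmask.
 */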

static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&sc->rxbuflock);
	if (list_empty(&sc->rxbuf)) {
		ATH5K_WARN(sc, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
			break;

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(sc, "error in processing rx descriptor\n");
			sc->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(sc, &rs)) {
			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(sc->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(sc, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
unlock:
	spin_unlock(&sc->rxbuflock);
	sc->rx_pending = false;
	ath5k_set_current_imask(sc);
}


/*************\
* TX Handling *
\*************/

void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(sc, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&sc->txbuflock, flags);
	if (list_empty(&sc->txbuf)) {
		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;
	if (list_empty(&sc->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&sc->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&sc->txbuflock, flags);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	dev_kfree_skb_any(skb);
}

static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	sc->stats.tx_all_count++;
	sc->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		sc->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			sc->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			sc->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			sc->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	* Remove MAC header padding before giving the frame
	* back to mac80211.
	*/
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		sc->stats.antenna_tx[ts->ts_antenna]++;
	else
		sc->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(sc, skb, txq, ts);
	ieee80211_tx_status(sc->hw, skb);
}
static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(sc,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(sc, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
			spin_lock(&sc->txbuflock);
			list_move_tail(&bf->list, &sc->txbuf);
			sc->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&sc->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(sc->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_softc *sc = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
			ath5k_tx_processq(sc, &sc->txqs[i]);

	sc->tx_pending = false;
	ath5k_set_current_imask(sc);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
1727
ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1728 1729
{
	struct sk_buff *skb = bf->skb;
J
Johannes Berg 已提交
1730
	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1731 1732
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds;
1733 1734
	int ret = 0;
	u8 antenna;
1735
	u32 flags;
1736
	const int padsize = 0;
1737

1738 1739
	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
1740 1741 1742
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);
1743 1744

	if (dma_mapping_error(sc->dev, bf->skbaddr)) {
1745 1746 1747 1748 1749
		ATH5K_ERR(sc, "beacon DMA mapping failed\n");
		return -EIO;
	}

	ds = bf->desc;
1750
	antenna = ah->ah_tx_ant;
1751 1752

	flags = AR5K_TXDESC_NOACK;
1753
	if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
1754 1755
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
1756
	} else
1757
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = sc->bsent & 4 ? 2 : 1;
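	/* sc->bsent & 4 toggles every four beacons sent, so this alternates
	 * between antenna 1 and antenna 2 as described above */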


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
			ieee80211_get_tx_rate(sc->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_vif *avf;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}
	avf = (void *)vif->drv_priv;

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_txbuf_free_skb(sc, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(sc, avf->bbuf);
	if (ret)
		avf->bbuf->skb = NULL;
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklet)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
		sc->bmisscount++;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", sc->bmisscount);
		if (sc->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				sc->bmisscount);
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		}
		return;
	}
	if (unlikely(sc->bmisscount != 0)) {
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			sc->bmisscount);
		sc->bmisscount = 0;
	}

	if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
			sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
		vif = sc->bslot[(slot + 1) % ATH_BCBUF];
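		/*
		 * Worked example, assuming ATH_BCBUF == 4 and bintval == 100:
		 * each interval splits into four 25 TU slots.  At
		 * tsftu % bintval == 60 we are in slot 2, and since SWBA
		 * fires just ahead of the next TBTT we beacon for the owner
		 * of the upcoming slot, bslot[3].
		 */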
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, sc->bintval, slot, vif);
	} else /* only one interface */
		vif = sc->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;
	if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
			sc->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
		return;
	}

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
		ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (sc->opmode == NL80211_IFTYPE_AP ||
			sc->opmode == NL80211_IFTYPE_MESH_POINT)
		ath5k_beacon_update(sc->hw, vif);

	trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);

	ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, sc->bhalq);
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		sc->bhalq, (unsigned long long)bf->daddr, bf->desc);

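	/* buffered multicast/broadcast frames go out on the Content After
	 * Beacon (CAB) queue, right behind the (DTIM) beacon */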
	skb = ieee80211_get_buffered_bc(sc->hw, vif);
	while (skb) {
		ath5k_tx_queue(sc->hw, skb, sc->cabq);
		skb = ieee80211_get_buffered_bc(sc->hw, vif);
	}

	sc->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @sc: struct ath5k_softc pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
{
	struct ath5k_hw *ah = sc->ah;
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = sc->bintval & AR5K_BEACON_PERIOD;
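	/* AR5K_BEACON_PERIOD masks off the flag bits (ENA, RESET_TSF)
	 * that share this register value and get OR-ed in below */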
	if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(sc, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
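		/*
		 * Worked example: with intval = 100 TU, bc_tu = 1000,
		 * hw_tu = 1234 and FUDGE = 13 this gives
		 * nexttbtt = 1000 + roundup(247, 100) = 1300, the first
		 * point on the beacon's TU grid that is at least FUDGE
		 * TUs ahead of our local timer.
		 */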
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE

	sc->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @sc: struct ath5k_softc pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	unsigned long flags;

	spin_lock_irqsave(&sc->block, flags);
	sc->bmisscount = 0;
	sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (sc->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(sc);

		sc->imask |= AR5K_INT_SWBA;

		if (sc->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(sc);
		} else
			ath5k_beacon_update_timers(sc, -1);
	} else {
		ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
	}

	ath5k_hw_set_imr(ah, sc->imask);
	mmiowb();
	spin_unlock_irqrestore(&sc->block, flags);
}

static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_softc *sc = (struct ath5k_softc *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(sc->ah);
		sc->nexttbtt += sc->bintval;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				sc->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&sc->block);
		ath5k_beacon_send(sc);
		spin_unlock(&sc->block);
	}
}


/********************\
* Interrupt handling *
\********************/

static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		/* run ANI only when full calibration is not active */
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ah_sc->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		tasklet_schedule(&ah->ah_sc->calib);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

static void
ath5k_schedule_rx(struct ath5k_softc *sc)
{
	sc->rx_pending = true;
	tasklet_schedule(&sc->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_softc *sc)
{
	sc->tx_pending = true;
	tasklet_schedule(&sc->txtq);
}

irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_softc *sc = dev_id;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
		((ath5k_get_bus_type(ah) != ATH_AHB) &&
				!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	do {
		ath5k_hw_get_isr(ah, &status);		/* NB: clears IRQ too */
		ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, sc->imask);
		if (unlikely(status & AR5K_INT_FATAL)) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(sc->hw, &sc->reset_work);
		} else if (unlikely(status & AR5K_INT_RXORN)) {
			/*
			 * Receive buffers are full. Either the bus is busy or
			 * the CPU is not fast enough to process all received
			 * frames.
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			sc->stats.rxorn_intr++;
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(sc->hw, &sc->reset_work);
			}
			else
				ath5k_schedule_rx(sc);
		} else {
			if (status & AR5K_INT_SWBA) {
				tasklet_hi_schedule(&sc->beacontq);
			}
			if (status & AR5K_INT_RXEOL) {
				/*
				* NB: the hardware should re-read the link when
				*     RXE bit is written, but it doesn't work at
				*     least on older hardware revs.
				*/
				sc->stats.rxeol_intr++;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				ath5k_hw_update_tx_triglevel(ah, true);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_schedule_tx(sc);
			if (status & AR5K_INT_BMISS) {
				/* TODO */
			}
			if (status & AR5K_INT_MIB) {
				sc->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&sc->rf_kill.toggleq);

		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	if (sc->rx_pending || sc->tx_pending)
		ath5k_set_current_imask(sc);

	if (unlikely(!counter))
		ATH5K_WARN(sc, "too many interrupts, giving up for now\n");

	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_tasklet_calibrate(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	/* Only full calibration for now */
	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(sc->curchan->center_freq),
		sc->curchan->hw_value);

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
		ieee80211_queue_work(sc->hw, &sc->reset_work);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		ATH5K_ERR(sc, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				sc->curchan->center_freq));

	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
	 * doesn't.
	 * TODO: We should stop TX here, so that it doesn't interfere.
	 * Note that stopping the queues is not enough to stop TX! */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
		ah->ah_cal_next_nf = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
		ath5k_hw_update_noise_floor(ah);
	}

	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}


static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}


static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	mutex_lock(&sc->lock);

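	/*
	 * Two-pass stuck detection: mark a busy queue now; the mark is
	 * cleared whenever ath5k_tx_processq() services that queue.  If
	 * it is still set on the next poll, nothing completed for a full
	 * ATH5K_TX_COMPLETE_POLL_INT and we declare the queue stuck.
	 */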
	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(sc, NULL, true);
	}

	mutex_unlock(&sc->lock);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


/*************************\
* Initialization routines *
\*************************/

int
ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, sc->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, sc->status);

	sc->opmode = NL80211_IFTYPE_STATION;
	sc->bintval = 1000;
	mutex_init(&sc->lock);
	spin_lock_init(&sc->rxbuflock);
	spin_lock_init(&sc->txbuflock);
	spin_lock_init(&sc->block);
	spin_lock_init(&sc->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
	if (ret) {
		ATH5K_ERR(sc, "request_irq failed\n");
		goto err;
	}

	/* If we passed the test, malloc an ath5k_hw struct */
	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
	if (!sc->ah) {
		ret = -ENOMEM;
		ATH5K_ERR(sc, "out of memory\n");
		goto err_irq;
	}

	sc->ah->ah_sc = sc;
	sc->ah->ah_iobase = sc->iobase;
	common = ath5k_hw_common(sc->ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = sc->ah;
	common->hw = hw;
	common->priv = sc;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */
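	/* the reported cache line size counts 32-bit words (PCI
	 * convention), hence the << 2 to get bytes */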

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(sc);
	if (ret)
		goto err_free_ah;

	/* set up multi-rate retry capabilities */
	if (sc->ah->ah_version == AR5K_AR5212) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
					sc->ah->ah_mac_srev,
					sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
			!sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(sc, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
				sc->ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_5ghz_revision),
					sc->ah->ah_radio_5ghz_revision);
			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_2ghz_revision),
					sc->ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(sc);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, sc->status);

	return 0;
err_ah:
	ath5k_hw_deinit(sc->ah);
err_free_ah:
	kfree(sc->ah);
err_irq:
	free_irq(sc->irq, sc);
err:
	return ret;
}

static int
ath5k_stop_locked(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, sc->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(sc->hw);

	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
		ath5k_led_off(sc);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(sc->irq);
		ath5k_rx_stop(sc);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(sc);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

int
ath5k_init_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&sc->lock);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath5k_stop_locked(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->hw->conf.channel;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;

	ret = ath5k_reset(sc, NULL, false);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
		sc->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&sc->lock);

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

static void stop_tasklets(struct ath5k_softc *sc)
{
	sc->rx_pending = false;
	sc->tx_pending = false;
	tasklet_kill(&sc->rxtq);
	tasklet_kill(&sc->txtq);
	tasklet_kill(&sc->calib);
	tasklet_kill(&sc->beacontq);
	tasklet_kill(&sc->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	int ret;

	mutex_lock(&sc->lock);
	ret = ath5k_stop_locked(sc);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in a weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(sc->ah);

		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&sc->lock);

	stop_tasklets(sc);

	cancel_delayed_work_sync(&sc->tx_complete_work);

	ath5k_rfkill_hw_stop(sc->ah);

	return ret;
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with sc->lock held.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(sc->irq);
	stop_tasklets(sc);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ah_sc->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(sc);
	if (chan)
		sc->curchan = chan;
	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
								skip_pcu);
	if (ret) {
		ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	ah->ah_cal_next_full = jiffies;
	ah->ah_cal_next_ani = jiffies;
	ah->ah_cal_next_nf = jiffies;
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
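	/* ewma_init(avg, factor, weight): factor 1024 is internal
	 * fixed-point scaling, weight 8 makes each new beacon RSSI
	 * sample count for roughly 1/8 of the running average */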

	/* clear survey data and cycle counters */
	memset(&sc->survey, 0, sizeof(sc->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(sc, c); */

	ath5k_beacon_config(sc);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(sc->hw);

	return 0;
err:
	return ret;
}

static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
		reset_work);

	mutex_lock(&sc->lock);
	ath5k_reset(sc, NULL, true);
	mutex_unlock(&sc->lock);
}

static int
ath5k_init(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MACs that don't have support will
	 * return false w/o doing anything.  MACs that do
	 * support it will return true w/o doing anything.
	 */
	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);

	if (ret < 0)
		goto err;
	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, sc->status);

	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	sc->bhalq = ret;
	sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(sc->cabq)) {
		ATH5K_ERR(sc, "can't setup cab queue\n");
		ret = PTR_ERR(sc->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		* directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);

	INIT_WORK(&sc->reset_work, ath5k_reset_work);
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(sc, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	memcpy(&sc->lladdr, mac, ETH_ALEN);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(sc, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(sc, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(sc);

	ath5k_sysfs_register(sc);

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_bhal:
	ath5k_hw_release_tx_queue(ah, sc->bhalq);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}

void
ath5k_deinit_softc(struct ath5k_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
	ath5k_unregister_leds(sc);

	ath5k_sysfs_unregister(sc);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(sc->ah);
	free_irq(sc->irq, sc);
}

bool
ath_any_vif_assoc(struct ath5k_softc *sc)
{
	struct ath5k_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
						   &iter_data);
	return iter_data.any_assoc;
}

void
set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	sc->filter_flags = rfilt;
}