base.c 80.1 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44
/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
45
#include <linux/dma-mapping.h>
J
Jiri Slaby 已提交
46
#include <linux/hardirq.h>
47
#include <linux/if.h>
J
Jiri Slaby 已提交
48
#include <linux/io.h>
49 50 51 52
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
53
#include <linux/slab.h>
54
#include <linux/etherdevice.h>
55 56 57 58 59 60 61 62

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
63
#include "ani.h"
64

65 66 67
#define CREATE_TRACE_POINTS
#include "trace.h"

68 69
int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
70
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
71

72
static int modparam_all_channels;
B
Bob Copeland 已提交
73
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
74 75
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

76 77 78 79 80
static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");


81 82 83 84 85 86 87
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

88
static int ath5k_init(struct ieee80211_hw *hw);
89 90
static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
								bool skip_pcu);
91 92
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
93 94

/* Known SREVs */
J
Jiri Slaby 已提交
95
static const struct ath5k_srev_name srev_names[] = {
F
Felix Fietkau 已提交
96 97 98 99 100 101 102 103 104
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
F
Felix Fietkau 已提交
123
#endif
124
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
125 126
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
127
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
128 129 130
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
131
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
132 133
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
134 135 136 137
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
138
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
F
Felix Fietkau 已提交
139 140 141 142
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
143 144 145
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

J
Jiri Slaby 已提交
146
static const struct ieee80211_rate ath5k_rates[] = {
B
Bruno Randolf 已提交
147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
	/* XR missing */
};

188 189 190 191 192 193 194 195 196 197
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

198
const char *
199 200 201 202 203 204 205 206
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;
207 208 209 210 211

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
212 213 214 215 216 217 218
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
L
Luis R. Rodriguez 已提交
219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
235

236 237 238 239 240
/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
241
{
242 243 244
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_softc *sc = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
245

246 247
	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
248

249 250 251
/********************\
* Channel/mode setup *
\********************/
252

253 254 255
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
256
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
257
{
258 259 260 261 262
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
263 264 265
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
266 267 268 269 270
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
271
}
272

273
static unsigned int
274 275
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
276
{
277
	unsigned int count, size, chfreq, freq, ch;
278
	enum ieee80211_band band;
279

280 281 282
	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
283
		size = 220;
284
		chfreq = CHANNEL_5GHZ;
285
		band = IEEE80211_BAND_5GHZ;
286 287 288 289 290
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		chfreq = CHANNEL_2GHZ;
291
		band = IEEE80211_BAND_2GHZ;
292 293 294 295
		break;
	default:
		ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
		return 0;
296 297
	}

298 299
	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
300 301 302 303
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;
304

305 306 307
		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;
308

309 310
		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
311
			continue;
312

313 314
		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
315
		channels[count].band = band;
316 317 318 319 320 321 322 323
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}
324

325 326
		count++;
	}
327

328 329
	return count;
}
330

331 332 333 334
static void
ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
{
	u8 i;
335

336 337
	for (i = 0; i < AR5K_MAX_RATES; i++)
		sc->rate_idx[b->band][i] = -1;
338

339 340 341 342
	for (i = 0; i < b->n_bitrates; i++) {
		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
343
	}
344
}
345

346 347 348 349 350 351 352 353
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;
354

355 356
	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(sc->channels);
357

358 359 360 361
	/* 2GHz band */
	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
362

363 364 365 366 367
	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;
368

369
		sband->channels = sc->channels;
370
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
371
					AR5K_MODE_11G, max_c);
372

373 374 375 376 377 378 379 380
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;
381

382 383 384 385 386 387 388 389 390 391
		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
392 393 394
			}
		}

395
		sband->channels = sc->channels;
396
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
397
					AR5K_MODE_11B, max_c);
398

399 400 401 402 403
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(sc, sband);
404

405 406 407 408 409
	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
410

411 412 413
		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;
414

415
		sband->channels = &sc->channels[count_c];
416
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
417
					AR5K_MODE_11A, max_c);
418

419 420 421 422 423
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(sc, sband);

	ath5k_debug_dump_bands(sc);
424 425 426 427

	return 0;
}

428 429 430 431 432 433 434
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff after a la  ath5k_init.
 *
 * Called with sc->lock.
 */
435
int
436 437 438 439 440 441
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  sc->curchan->center_freq, chan->center_freq);

442
	/*
443 444 445 446
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
447
	 */
448
	return ath5k_reset(sc, chan, true);
449 450
}

451
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
452
{
453
	struct ath5k_vif_iter_data *iter_data = data;
454
	int i;
455
	struct ath5k_vif *avf = (void *)vif->drv_priv;
456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}
475 476 477 478

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
B
Ben Greear 已提交
479
	 * interfaces is allowed.
480 481 482
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
483 484 485
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
486 487
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
488
	}
489 490
}

491 492 493
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
				   struct ieee80211_vif *vif)
494 495
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
496 497
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;
498 499 500 501 502 503 504 505 506

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
507
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
508
	iter_data.n_stas = 0;
509 510

	if (vif)
511
		ath5k_vif_iter(&iter_data, vif->addr, vif);
512 513

	/* Get list of all active MAC addresses */
514
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
515 516 517
						   &iter_data);
	memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);

518 519 520 521 522
	sc->opmode = iter_data.opmode;
	if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		sc->opmode = NL80211_IFTYPE_STATION;

B
Ben Greear 已提交
523 524 525
	ath5k_hw_set_opmode(sc->ah, sc->opmode);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  sc->opmode, ath_opmode_to_string(sc->opmode));
526

527 528 529
	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);

530 531
	if (ath5k_hw_hasbssidmask(sc->ah))
		ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
532

533 534 535 536 537 538 539 540
	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that probem.
		 */
		sc->filter_flags |= AR5K_RX_FILTER_PROM;
	}
541

542
	rfilt = sc->filter_flags;
543
	ath5k_hw_set_rx_filter(sc->ah, rfilt);
544 545
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
546

547 548 549 550
static inline int
ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
	int rix;
551

552 553 554 555 556
	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

557
	rix = sc->rate_idx[sc->curchan->band][hw_rix];
558 559 560 561 562 563 564 565 566 567 568 569 570 571 572
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct sk_buff *skb;
573 574

	/*
575 576
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
577
	 */
578 579 580
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);
581

582 583 584 585
	if (!skb) {
		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
586 587
	}

588
	*skb_addr = dma_map_single(sc->dev,
589
				   skb->data, common->rx_bufsize,
590 591 592
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
593 594 595
		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
596
	}
597 598
	return skb;
}
599

600 601 602 603 604 605 606
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;
607

608 609 610 611 612
	if (!skb) {
		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
613 614
	}

615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633
	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
634
	if (ret) {
635 636
		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
		return ret;
637 638
	}

639 640 641
	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
642 643 644
	return 0;
}

645
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
646
{
647 648 649
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;
650

651 652
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
653

654 655 656 657 658 659 660 661
	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
662
	else
663
		htype = AR5K_PKT_TYPE_NORMAL;
664

665
	return htype;
666 667
}

668 669 670
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
671
{
672 673 674 675 676 677 678 679 680 681 682 683
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;
684

685
	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
686

687
	/* XXX endianness */
688 689
	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
690

691
	rate = ieee80211_get_tx_rate(sc->hw, info);
692 693 694 695
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}
696

697 698
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;
699

700 701 702
	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;
703

704 705 706 707 708 709 710 711 712 713 714 715 716
	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
717
			info->control.vif, pktlen, info));
718 719 720 721 722
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
723
			info->control.vif, pktlen, info));
724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(sc->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
		if (!rate)
740
			break;
741

742 743
		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
744 745
	}

746 747 748 749
	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);
750

751 752
	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;
B
Bruno Randolf 已提交
753

754 755
	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
B
Bruno Randolf 已提交
756
	txq->txq_len++;
757 758 759 760
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;
B
Bruno Randolf 已提交
761

762 763 764 765 766 767 768
	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
769
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
770
	return ret;
B
Bruno Randolf 已提交
771 772
}

773 774 775 776
/*******************\
* Descriptors setup *
\*******************/

777
static int
778
ath5k_desc_alloc(struct ath5k_softc *sc)
779
{
780 781 782 783 784
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;
785

786 787 788
	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
789 790 791

	sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
				&sc->desc_daddr, GFP_KERNEL);
792 793 794 795 796 797 798 799 800
	if (sc->desc == NULL) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = sc->desc;
	da = sc->desc_daddr;
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
801

802 803 804 805 806 807 808 809
	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(sc, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;
810

811 812 813 814 815 816
	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}
817

818 819 820 821 822 823 824
	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
			da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
825 826
	}

827 828 829 830 831 832 833
	/* beacon buffers */
	INIT_LIST_HEAD(&sc->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->bcbuf);
	}
834

835 836
	return 0;
err_free:
837
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
838 839 840 841
err:
	sc->desc = NULL;
	return ret;
}
842

843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873
void
ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

874
static void
875
ath5k_desc_free(struct ath5k_softc *sc)
876 877
{
	struct ath5k_buf *bf;
878

879 880 881 882
	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free_skb(sc, bf);
883 884
	list_for_each_entry(bf, &sc->bcbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
885

886
	/* Free memory associated with all descriptors */
887
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
888 889
	sc->desc = NULL;
	sc->desc_daddr = 0;
890

891 892
	kfree(sc->bufptr);
	sc->bufptr = NULL;
893 894
}

895 896 897 898 899 900 901 902

/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_softc *sc,
		int qtype, int subtype)
903
{
904 905 906 907
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
908 909 910 911 912
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
913 914
	};
	int qnum;
915

916
	/*
917 918 919 920 921 922 923 924 925 926
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
927
	 */
928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(sc->txqs)) {
		ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(sc->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &sc->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
B
Bruno Randolf 已提交
951
		txq->txq_len = 0;
952
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
953
		txq->txq_poll_mark = false;
954
		txq->txq_stuck = 0;
955 956
	}
	return &sc->txqs[qnum];
957 958
}

959 960
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
961
{
962
	struct ath5k_txq_info qi = {
963 964 965 966 967
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
968 969 970
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};
971

972
	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
973 974
}

975 976
static int
ath5k_beaconq_config(struct ath5k_softc *sc)
977 978
{
	struct ath5k_hw *ah = sc->ah;
979 980
	struct ath5k_txq_info qi;
	int ret;
981

982 983 984
	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret)
		goto err;
985

986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000
	if (sc->opmode == NL80211_IFTYPE_AP ||
		sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
1001
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
1002
	}
1003

1004 1005 1006
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
1007

1008 1009 1010 1011 1012 1013 1014 1015 1016
	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
	if (ret)
		goto err;
1017

1018 1019 1020 1021
	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;
1022

1023 1024 1025 1026
	qi.tqi_ready_time = (sc->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;
1027

1028 1029 1030
	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
1031 1032
}

1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043
/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @sc The &struct ath5k_softc
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
1044
static void
1045
ath5k_drain_tx_buffs(struct ath5k_softc *sc)
1046
{
1047
	struct ath5k_txq *txq;
1048
	struct ath5k_buf *bf, *bf0;
1049
	int i;
1050

1051 1052 1053 1054 1055 1056
	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(sc, bf);
1057

1058
				ath5k_txbuf_free_skb(sc, bf);
1059

1060 1061 1062 1063 1064
				spin_lock_bh(&sc->txbuflock);
				list_move_tail(&bf->list, &sc->txbuf);
				sc->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&sc->txbuflock);
1065
			}
1066 1067 1068 1069
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
1070
	}
1071 1072
}

1073 1074
static void
ath5k_txq_release(struct ath5k_softc *sc)
1075
{
1076 1077
	struct ath5k_txq *txq = sc->txqs;
	unsigned int i;
1078

1079 1080 1081 1082 1083 1084
	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
			txq->setup = false;
		}
}
1085 1086


1087 1088 1089
/*************\
* RX Handling *
\*************/
1090

1091 1092 1093
/*
 * Enable the receive h/w following a reset.
 */
1094
static int
1095
ath5k_rx_start(struct ath5k_softc *sc)
1096 1097
{
	struct ath5k_hw *ah = sc->ah;
1098 1099 1100
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;
1101

1102
	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
1103

1104 1105
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);
1106

1107 1108 1109 1110 1111 1112 1113 1114
	spin_lock_bh(&sc->rxbuflock);
	sc->rxlink = NULL;
	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0) {
			spin_unlock_bh(&sc->rxbuflock);
			goto err;
		}
1115
	}
1116 1117 1118
	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&sc->rxbuflock);
1119

1120
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
1121
	ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
1122
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */
1123 1124

	return 0;
1125
err:
1126 1127 1128
	return ret;
}

1129
/*
1130 1131 1132 1133 1134
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
1135 1136 1137
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
1138
{
1139
	struct ath5k_hw *ah = sc->ah;
1140

1141
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
1142
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
1143

1144 1145
	ath5k_debug_printrxbuffs(sc, ah);
}
1146

1147 1148 1149 1150 1151 1152 1153 1154
static unsigned int
ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;
1155

1156 1157 1158
	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;
1159

1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171
	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}
1172 1173 1174 1175

	return 0;
}

1176

1177
static void
1178 1179
ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
1180
{
1181 1182 1183 1184
	struct ath_common *common = ath5k_hw_common(sc->ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1185

1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196
	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(sc->ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);
1197

1198 1199 1200 1201 1202 1203
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);
1204

1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222
		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}
1223

1224 1225 1226 1227 1228 1229 1230 1231
		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= sc->nexttbtt)
			ath5k_beacon_update_timers(sc, bc_tstamp);
B
Bruno Randolf 已提交
1232 1233 1234 1235 1236 1237 1238 1239 1240

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(sc->ah, sc->bintval)) {
			ath5k_beacon_update_timers(sc, bc_tstamp);
			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
1241 1242
	}
}
1243

1244 1245 1246 1247 1248 1249
static void
ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
1250

1251 1252 1253 1254
	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;
1255

B
Bruno Randolf 已提交
1256
	ewma_add(&ah->ah_beacon_rssi_avg, rssi);
1257

1258 1259 1260
	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
1261

1262 1263 1264 1265
/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
1266
{
1267 1268 1269
	struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;
1270

1271 1272
	if (ieee80211_has_a4(frame_control)) {
		padpos += ETH_ALEN;
1273
	}
1274 1275
	if (ieee80211_is_data_qos(frame_control)) {
		padpos += IEEE80211_QOS_CTL_LEN;
1276
	}
1277 1278

	return padpos;
1279 1280
}

1281 1282 1283 1284 1285
/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
1286
{
1287 1288
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
1289

1290
	if (padsize && skb->len>padpos) {
1291

1292 1293
		if (skb_headroom(skb) < padsize)
			return -1;
1294

1295 1296 1297 1298
		skb_push(skb, padsize);
		memmove(skb->data, skb->data+padsize, padpos);
		return padsize;
	}
B
Bob Copeland 已提交
1299

1300 1301
	return 0;
}
1302

1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319
/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
1320

1321 1322 1323 1324
	if (padsize && skb->len>=padpos+padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
1325
	}
B
Bob Copeland 已提交
1326

1327
	return 0;
1328 1329 1330
}

static void
1331 1332
ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
1333
{
1334 1335 1336 1337 1338 1339 1340 1341 1342
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;
1343 1344

	/*
1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but i'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
1363
	 */
1364
	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
J
Johannes Berg 已提交
1365
	rxs->flag |= RX_FLAG_MACTIME_MPDU;
1366

1367
	rxs->freq = sc->curchan->center_freq;
1368
	rxs->band = sc->curchan->band;
1369

1370
	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
1371

1372
	rxs->antenna = rs->rs_antenna;
1373

1374 1375 1376 1377
	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		sc->stats.antenna_rx[rs->rs_antenna]++;
	else
		sc->stats.antenna_rx[0]++; /* invalid */
1378

1379 1380
	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
1381

1382
	if (rxs->rate_idx >= 0 && rs->rs_rate ==
1383
	    sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
1384
		rxs->flag |= RX_FLAG_SHORTPRE;
1385

1386
	trace_ath5k_rx(sc, skb);
1387

1388
	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
1389

1390 1391 1392
	/* check beacons in IBSS mode */
	if (sc->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(sc, skb, rxs);
1393

1394 1395
	ieee80211_rx(sc->hw, skb);
}
1396

1397 1398 1399 1400
/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
1401
 */
1402 1403
static bool
ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
1404
{
1405
	sc->stats.rx_all_count++;
B
Ben Greear 已提交
1406
	sc->stats.rx_bytes_count += rs->rs_datalen;
1407

1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437
	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			sc->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			sc->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			sc->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			sc->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			sc->stats.rxerr_mic++;
			return true;
1438 1439
		}

1440 1441 1442 1443
		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}
1444

1445 1446 1447 1448 1449
	if (unlikely(rs->rs_more)) {
		sc->stats.rxerr_jumbo++;
		return false;
	}
	return true;
1450 1451
}

1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466
static void
ath5k_set_current_imask(struct ath5k_softc *sc)
{
	enum ath5k_int imask = sc->imask;
	unsigned long flags;

	spin_lock_irqsave(&sc->irqlock, flags);
	if (sc->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (sc->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(sc->ah, imask);
	spin_unlock_irqrestore(&sc->irqlock, flags);
}

1467
static void
1468
ath5k_tasklet_rx(unsigned long data)
1469
{
1470 1471 1472 1473
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_softc *sc = (void *)data;
L
Luis R. Rodriguez 已提交
1474 1475
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
1476 1477 1478
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;
1479

1480 1481 1482 1483 1484 1485 1486 1487 1488 1489
	spin_lock(&sc->rxbuflock);
	if (list_empty(&sc->rxbuf)) {
		ATH5K_WARN(sc, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;
1490

1491 1492 1493
		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
			break;
1494

1495 1496 1497 1498 1499 1500 1501 1502
		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(sc, "error in processing rx descriptor\n");
			sc->stats.rxerr_proc++;
			break;
		}
1503

1504 1505
		if (ath5k_receive_frame_ok(sc, &rs)) {
			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
1506

1507 1508 1509 1510 1511 1512
			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;
1513

1514
			dma_unmap_single(sc->dev, bf->skbaddr,
1515
					 common->rx_bufsize,
1516
					 DMA_FROM_DEVICE);
1517

1518
			skb_put(skb, rs.rs_datalen);
1519

1520
			ath5k_receive_frame(sc, skb, &rs);
1521

1522 1523
			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
1524
		}
1525 1526 1527 1528 1529
next:
		list_move_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
unlock:
	spin_unlock(&sc->rxbuflock);
1530 1531
	sc->rx_pending = false;
	ath5k_set_current_imask(sc);
1532 1533
}

B
Bruno Randolf 已提交
1534

1535 1536 1537
/*************\
* TX Handling *
\*************/
B
Bruno Randolf 已提交
1538

1539
void
1540 1541
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
1542 1543 1544 1545 1546
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;
B
Bruno Randolf 已提交
1547

1548
	trace_ath5k_tx(sc, skb, txq);
B
Bruno Randolf 已提交
1549

1550 1551 1552 1553 1554 1555 1556 1557 1558 1559
	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}
1560

1561
	if (txq->txq_len >= txq->txq_max)
B
Bruno Randolf 已提交
1562 1563
		ieee80211_stop_queue(hw, txq->qnum);

1564 1565 1566 1567
	spin_lock_irqsave(&sc->txbuflock, flags);
	if (list_empty(&sc->txbuf)) {
		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&sc->txbuflock, flags);
B
Bruno Randolf 已提交
1568
		ieee80211_stop_queues(hw);
1569
		goto drop_packet;
1570
	}
1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586
	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;
	if (list_empty(&sc->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&sc->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&sc->txbuflock, flags);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		spin_unlock_irqrestore(&sc->txbuflock, flags);
		goto drop_packet;
1587
	}
1588
	return;
1589

1590 1591
drop_packet:
	dev_kfree_skb_any(skb);
1592 1593
}

1594 1595
static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
1596
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
1597 1598
{
	struct ieee80211_tx_info *info;
1599
	u8 tries[3];
1600 1601 1602
	int i;

	sc->stats.tx_all_count++;
B
Ben Greear 已提交
1603
	sc->stats.tx_bytes_count += skb->len;
1604 1605
	info = IEEE80211_SKB_CB(skb);

1606 1607 1608 1609
	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

1610
	ieee80211_tx_info_clear_status(info);
1611 1612

	for (i = 0; i < ts->ts_final_idx; i++) {
1613 1614 1615
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

1616
		r->count = tries[i];
1617 1618
	}

1619
	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
1620
	info->status.rates[ts->ts_final_idx + 1].idx = -1;
1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634

	if (unlikely(ts->ts_status)) {
		sc->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			sc->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			sc->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			sc->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;
1635 1636 1637

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650
	}

	/*
	* Remove MAC header padding before giving the frame
	* back to mac80211.
	*/
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		sc->stats.antenna_tx[ts->ts_antenna]++;
	else
		sc->stats.antenna_tx[0]++; /* invalid */

1651
	trace_ath5k_tx_complete(sc, skb, txq, ts);
1652 1653
	ieee80211_tx_status(sc->hw, skb);
}
1654 1655 1656

static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1657
{
1658 1659 1660 1661
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
1662
	int ret;
1663

1664 1665
	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(sc,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;
1685 1686 1687

			dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
1688
			ath5k_tx_frame_completed(sc, skb, txq, &ts);
1689
		}
1690

1691 1692 1693
		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
1694 1695
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
1696
		 */
1697 1698 1699 1700 1701 1702
		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
			spin_lock(&sc->txbuflock);
			list_move_tail(&bf->list, &sc->txbuf);
			sc->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&sc->txbuflock);
1703
		}
1704 1705
	}
	spin_unlock(&txq->lock);
B
Bruno Randolf 已提交
1706
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
B
Bruno Randolf 已提交
1707
		ieee80211_wake_queue(sc->hw, txq->qnum);
1708 1709 1710 1711 1712
}

static void
ath5k_tasklet_tx(unsigned long data)
{
B
Bob Copeland 已提交
1713
	int i;
1714 1715
	struct ath5k_softc *sc = (void *)data;

B
Bob Copeland 已提交
1716 1717 1718
	for (i=0; i < AR5K_NUM_TX_QUEUES; i++)
		if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
			ath5k_tx_processq(sc, &sc->txqs[i]);
1719 1720 1721

	sc->tx_pending = false;
	ath5k_set_current_imask(sc);
1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
1733
ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1734 1735
{
	struct sk_buff *skb = bf->skb;
J
Johannes Berg 已提交
1736
	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1737 1738
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds;
1739 1740
	int ret = 0;
	u8 antenna;
1741
	u32 flags;
1742
	const int padsize = 0;
1743

1744 1745
	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
1746 1747 1748
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);
1749 1750

	if (dma_mapping_error(sc->dev, bf->skbaddr)) {
1751 1752 1753 1754 1755
		ATH5K_ERR(sc, "beacon DMA mapping failed\n");
		return -EIO;
	}

	ds = bf->desc;
1756
	antenna = ah->ah_tx_ant;
1757 1758

	flags = AR5K_TXDESC_NOACK;
1759
	if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
1760 1761
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
1762
	} else
1763
		ds->ds_link = 0;
1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automaticaly, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
B
Bob Copeland 已提交
1777 1778 1779 1780 1781
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
1782 1783 1784 1785
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = sc->bsent & 4 ? 2 : 1;

1786

1787 1788 1789
	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
1790
	ds->ds_data = bf->skbaddr;
1791
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1792
			ieee80211_get_hdrlen_from_skb(skb), padsize,
1793
			AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
1794
			ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1795
			1, AR5K_TXKEYIX_INVALID,
1796
			antenna, flags, 0, 0);
1797 1798 1799 1800 1801
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
1802
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
1803 1804 1805
	return ret;
}

1806 1807 1808 1809 1810 1811 1812
/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
1813
int
1814 1815 1816 1817
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_softc *sc = hw->priv;
1818
	struct ath5k_vif *avf = (void *)vif->drv_priv;
1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

1833 1834 1835
	ath5k_txbuf_free_skb(sc, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(sc, avf->bbuf);
1836
	if (ret)
1837
		avf->bbuf->skb = NULL;
1838 1839 1840 1841
out:
	return ret;
}

1842 1843 1844 1845 1846
/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
1847 1848
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
1849 1850 1851 1852 1853
 */
static void
ath5k_beacon_send(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
1854 1855 1856
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
1857
	struct sk_buff *skb;
1858

B
Bruno Randolf 已提交
1859
	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1860 1861 1862

	/*
	 * Check if the previous beacon has gone out.  If
B
Bob Copeland 已提交
1863
	 * not, don't don't try to post another: skip this
1864 1865 1866 1867 1868 1869
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
		sc->bmisscount++;
B
Bruno Randolf 已提交
1870
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
1871
			"missed %u consecutive beacons\n", sc->bmisscount);
N
Nick Kossifidis 已提交
1872
		if (sc->bmisscount > 10) {	/* NB: 10 is a guess */
B
Bruno Randolf 已提交
1873
			ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
1874 1875
				"stuck beacon time (%u missed)\n",
				sc->bmisscount);
1876 1877
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
1878
			ieee80211_queue_work(sc->hw, &sc->reset_work);
1879 1880 1881 1882
		}
		return;
	}
	if (unlikely(sc->bmisscount != 0)) {
B
Bruno Randolf 已提交
1883
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
1884 1885 1886 1887 1888
			"resume beacon xmit after %u misses\n",
			sc->bmisscount);
		sc->bmisscount = 0;
	}

1889 1890
	if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) ||
			sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval;
		vif = sc->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, sc->bintval, slot, vif);
	} else /* only one interface */
		vif = sc->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;
	if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
			sc->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
		return;
	}

1912 1913 1914 1915 1916
	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
1917
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
N
Nick Kossifidis 已提交
1918
		ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
1919 1920 1921
		/* NB: hw still stops DMA, so proceed */
	}

J
Javier Cardona 已提交
1922 1923 1924
	/* refresh the beacon for AP or MESH mode */
	if (sc->opmode == NL80211_IFTYPE_AP ||
			sc->opmode == NL80211_IFTYPE_MESH_POINT)
1925
		ath5k_beacon_update(sc->hw, vif);
B
Bob Copeland 已提交
1926

1927 1928
	trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);

N
Nick Kossifidis 已提交
1929 1930
	ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, sc->bhalq);
B
Bruno Randolf 已提交
1931
	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
1932 1933
		sc->bhalq, (unsigned long long)bf->daddr, bf->desc);

1934
	skb = ieee80211_get_buffered_bc(sc->hw, vif);
1935 1936
	while (skb) {
		ath5k_tx_queue(sc->hw, skb, sc->cabq);
1937
		skb = ieee80211_get_buffered_bc(sc->hw, vif);
1938 1939
	}

1940 1941 1942
	sc->bsent++;
}

1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954
/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @sc: struct ath5k_softc pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
1955
 * when a TSF update has been detected, but also when an new IBSS is created or
1956 1957 1958
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
1959
void
1960
ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
1961 1962
{
	struct ath5k_hw *ah = sc->ah;
1963 1964
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;
1965 1966

	intval = sc->bintval & AR5K_BEACON_PERIOD;
1967 1968 1969 1970 1971 1972
	if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(sc, "intval %u is too low, min 15\n",
				   intval);
	}
1973 1974 1975
	if (WARN_ON(!intval))
		return;

1976 1977
	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);
1978

1979 1980 1981
	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);
1982

1983 1984
#define FUDGE AR5K_TUNE_SW_BEACON_RESP + 3
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
L
Lucas De Marchi 已提交
1985
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
1986 1987
	 * configuration we need to make sure it is bigger than that. */

1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002
	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
L
Lucas De Marchi 已提交
2003
		 * beacon received, SW merge happened but HW TSF not yet updated.
2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE
2023

2024 2025
	sc->nexttbtt = nexttbtt;

2026 2027
	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon(ah, nexttbtt, intval);
2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
2044 2045 2046
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
2047 2048 2049 2050
	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
2051 2052
}

2053 2054 2055 2056
/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @sc: struct ath5k_softc pointer we are operating on
2057
 *
2058
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2059
 * interrupts to detect TSF updates only.
2060
 */
2061
void
2062 2063 2064
ath5k_beacon_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
2065
	unsigned long flags;
2066

2067
	spin_lock_irqsave(&sc->block, flags);
2068
	sc->bmisscount = 0;
J
Jiri Slaby 已提交
2069
	sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2070

2071
	if (sc->enable_beacon) {
2072
		/*
2073 2074
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
2075
		 * only once here.
2076
		 * We use the SWBA interrupt only to keep track of the beacon
2077
		 * timers in order to detect automatic TSF updates.
2078 2079 2080
		 */
		ath5k_beaconq_config(sc);

2081 2082
		sc->imask |= AR5K_INT_SWBA;

J
Jiri Slaby 已提交
2083
		if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2084
			if (ath5k_hw_hasveol(ah))
J
Jiri Slaby 已提交
2085 2086 2087
				ath5k_beacon_send(sc);
		} else
			ath5k_beacon_update_timers(sc, -1);
2088
	} else {
2089
		ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
2090 2091
	}

N
Nick Kossifidis 已提交
2092
	ath5k_hw_set_imr(ah, sc->imask);
2093 2094
	mmiowb();
	spin_unlock_irqrestore(&sc->block, flags);
2095 2096
}

N
Nick Kossifidis 已提交
2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125
static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_softc *sc = (struct ath5k_softc *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect wether
	 * automatic TSF updates happened.
	 */
	if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL suppported */
		u64 tsf = ath5k_hw_get_tsf64(sc->ah);
		sc->nexttbtt += sc->bintval;
		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				sc->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&sc->block);
		ath5k_beacon_send(sc);
		spin_unlock(&sc->block);
	}
}

2126 2127 2128 2129 2130

/********************\
* Interrupt handling *
\********************/

2131 2132 2133
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
2134 2135 2136 2137 2138 2139 2140 2141
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		/* run ANI only when full calibration is not active */
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ah_sc->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
2142 2143 2144 2145 2146 2147 2148 2149 2150
		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		tasklet_schedule(&ah->ah_sc->calib);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164
static void
ath5k_schedule_rx(struct ath5k_softc *sc)
{
	sc->rx_pending = true;
	tasklet_schedule(&sc->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_softc *sc)
{
	sc->tx_pending = true;
	tasklet_schedule(&sc->txtq);
}

2165
irqreturn_t
2166 2167 2168 2169 2170 2171 2172 2173
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_softc *sc = dev_id;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
2174 2175
		((ath5k_get_bus_type(ah) != ATH_AHB) &&
				!ath5k_hw_is_intr_pending(ah))))
2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186
		return IRQ_NONE;

	do {
		ath5k_hw_get_isr(ah, &status);		/* NB: clears IRQ too */
		ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, sc->imask);
		if (unlikely(status & AR5K_INT_FATAL)) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
2187 2188
			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
2189
			ieee80211_queue_work(sc->hw, &sc->reset_work);
2190
		} else if (unlikely(status & AR5K_INT_RXORN)) {
B
Bruno Randolf 已提交
2191 2192 2193 2194 2195 2196 2197 2198 2199 2200
			/*
			 * Receive buffers are full. Either the bus is busy or
			 * the CPU is not fast enough to process all received
			 * frames.
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			sc->stats.rxorn_intr++;
2201 2202 2203
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
2204
				ieee80211_queue_work(sc->hw, &sc->reset_work);
2205
			}
B
Bruno Randolf 已提交
2206
			else
2207
				ath5k_schedule_rx(sc);
2208 2209
		} else {
			if (status & AR5K_INT_SWBA) {
2210
				tasklet_hi_schedule(&sc->beacontq);
2211 2212 2213 2214 2215 2216 2217
			}
			if (status & AR5K_INT_RXEOL) {
				/*
				* NB: the hardware should re-read the link when
				*     RXE bit is written, but it doesn't work at
				*     least on older hardware revs.
				*/
B
Bruno Randolf 已提交
2218
				sc->stats.rxeol_intr++;
2219 2220 2221 2222 2223
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				ath5k_hw_update_tx_triglevel(ah, true);
			}
2224
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
2225
				ath5k_schedule_rx(sc);
2226 2227
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
2228
				ath5k_schedule_tx(sc);
2229
			if (status & AR5K_INT_BMISS) {
2230
				/* TODO */
2231 2232
			}
			if (status & AR5K_INT_MIB) {
2233
				sc->stats.mib_intr++;
B
Bruno Randolf 已提交
2234
				ath5k_hw_update_mib_counters(ah);
2235
				ath5k_ani_mib_intr(ah);
2236
			}
2237 2238
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&sc->rf_kill.toggleq);
B
Bob Copeland 已提交
2239

2240
		}
2241 2242 2243 2244

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

2245
	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2246

2247 2248 2249
	if (sc->rx_pending || sc->tx_pending)
		ath5k_set_current_imask(sc);

2250 2251 2252
	if (unlikely(!counter))
		ATH5K_WARN(sc, "too many interrupts, giving up for now\n");

2253
	ath5k_intr_calibration_poll(ah);
2254

2255 2256 2257 2258 2259 2260 2261 2262
	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
2263
ath5k_tasklet_calibrate(unsigned long data)
2264 2265 2266 2267
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

2268
	/* Only full calibration for now */
2269
	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2270

2271
	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
2272 2273
		ieee80211_frequency_to_channel(sc->curchan->center_freq),
		sc->curchan->hw_value);
2274

2275
	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
2276 2277 2278 2279 2280
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2281
		ieee80211_queue_work(sc->hw, &sc->reset_work);
2282 2283 2284
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		ATH5K_ERR(sc, "calibration of channel %u failed\n",
2285 2286
			ieee80211_frequency_to_channel(
				sc->curchan->center_freq));
2287

2288
	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
B
Bruno Randolf 已提交
2289 2290 2291
	 * doesn't.
	 * TODO: We should stop TX here, so that it doesn't interfere.
	 * Note that stopping the queues is not enough to stop TX! */
2292 2293 2294 2295 2296
	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
		ah->ah_cal_next_nf = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
		ath5k_hw_update_noise_floor(ah);
	}
2297

2298
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2299 2300 2301
}


2302 2303 2304 2305 2306 2307 2308 2309 2310
static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_softc *sc = (void *)data;
	struct ath5k_hw *ah = sc->ah;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
2311 2312 2313
}


2314 2315 2316 2317 2318 2319 2320 2321 2322
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

2323 2324
	mutex_lock(&sc->lock);

2325 2326 2327 2328
	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
2329
			if (txq->txq_len > 1) {
2330 2331 2332 2333 2334
				if (txq->txq_poll_mark) {
					ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
2335
					txq->txq_stuck++;
2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
2349
		ath5k_reset(sc, NULL, true);
2350 2351
	}

2352 2353
	mutex_unlock(&sc->lock);

2354 2355 2356 2357 2358
	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


2359 2360 2361
/*************************\
* Initialization routines *
\*************************/
2362

2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373
int
ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, sc->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2374 2375 2376
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2377 2378 2379 2380 2381 2382 2383

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

2384 2385 2386 2387
	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402
	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, sc->status);

	sc->opmode = NL80211_IFTYPE_STATION;
	sc->bintval = 1000;
	mutex_init(&sc->lock);
	spin_lock_init(&sc->rxbuflock);
	spin_lock_init(&sc->txbuflock);
	spin_lock_init(&sc->block);
2403
	spin_lock_init(&sc->irqlock);
2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445

	/* Setup interrupt handler */
	ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
	if (ret) {
		ATH5K_ERR(sc, "request_irq failed\n");
		goto err;
	}

	/* If we passed the test, malloc an ath5k_hw struct */
	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
	if (!sc->ah) {
		ret = -ENOMEM;
		ATH5K_ERR(sc, "out of memory\n");
		goto err_irq;
	}

	sc->ah->ah_sc = sc;
	sc->ah->ah_iobase = sc->iobase;
	common = ath5k_hw_common(sc->ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = sc->ah;
	common->hw = hw;
	common->priv = sc;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(sc);
	if (ret)
		goto err_free_ah;

	/* set up multi-rate retry capabilities */
	if (sc->ah->ah_version == AR5K_AR5212) {
		hw->max_rates = 4;
2446 2447
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
					sc->ah->ah_mac_srev,
					sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
			!sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5Ghz only cards) -> report 5Ghz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				sc->ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(sc, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						sc->ah->ah_radio_5ghz_revision),
						sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
				sc->ah->ah_radio_2ghz_revision){
			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_5ghz_revision),
					sc->ah->ah_radio_5ghz_revision);
			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					sc->ah->ah_radio_2ghz_revision),
					sc->ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(sc);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, sc->status);

	return 0;
err_ah:
	ath5k_hw_deinit(sc->ah);
err_free_ah:
	kfree(sc->ah);
err_irq:
	free_irq(sc->irq, sc);
err:
	return ret;
}

2521
static int
2522
ath5k_stop_locked(struct ath5k_softc *sc)
2523
{
2524
	struct ath5k_hw *ah = sc->ah;
2525

2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, sc->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(sc->hw);

	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
		ath5k_led_off(sc);
		ath5k_hw_set_imr(ah, 0);
2549
		synchronize_irq(sc->irq);
2550
		ath5k_rx_stop(sc);
2551 2552
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(sc);
2553 2554 2555 2556
		ath5k_hw_phy_disable(ah);
	}

	return 0;
2557 2558
}

2559
int
2560
ath5k_init_hw(struct ath5k_softc *sc)
2561
{
2562 2563 2564
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;
2565

2566 2567 2568
	mutex_lock(&sc->lock);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2569 2570

	/*
2571 2572
	 * Stop anything previously setup.  This is safe
	 * no matter this is the first time through or not.
2573
	 */
2574
	ath5k_stop_locked(sc);
2575

2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586
	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->hw->conf.channel;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2587

2588
	ret = ath5k_reset(sc, NULL, false);
2589 2590
	if (ret)
		goto done;
2591

2592 2593 2594 2595 2596 2597 2598 2599 2600
	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

N
Nick Kossifidis 已提交
2601 2602 2603
	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;
2604 2605 2606 2607

	for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
		sc->bslot[i] = NULL;

2608 2609 2610 2611
	ret = 0;
done:
	mmiowb();
	mutex_unlock(&sc->lock);
2612 2613 2614 2615

	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

2616 2617 2618 2619 2620
	return ret;
}

static void stop_tasklets(struct ath5k_softc *sc)
{
2621 2622
	sc->rx_pending = false;
	sc->tx_pending = false;
2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635
	tasklet_kill(&sc->rxtq);
	tasklet_kill(&sc->txtq);
	tasklet_kill(&sc->calib);
	tasklet_kill(&sc->beacontq);
	tasklet_kill(&sc->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
2636
int
2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667
ath5k_stop_hw(struct ath5k_softc *sc)
{
	int ret;

	mutex_lock(&sc->lock);
	ret = ath5k_stop_locked(sc);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results a weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead Perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(sc->ah);

		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
2668 2669
	}

2670 2671 2672 2673 2674
	mmiowb();
	mutex_unlock(&sc->lock);

	stop_tasklets(sc);

2675 2676
	cancel_delayed_work_sync(&sc->tx_complete_work);

2677 2678 2679
	ath5k_rfkill_hw_stop(sc->ah);

	return ret;
2680 2681
}

2682 2683 2684
/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
2685 2686
 *
 * This should be called with sc->lock.
2687
 */
2688
static int
2689 2690
ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
							bool skip_pcu)
2691 2692
{
	struct ath5k_hw *ah = sc->ah;
B
Bruno Randolf 已提交
2693
	struct ath_common *common = ath5k_hw_common(ah);
N
Nick Kossifidis 已提交
2694
	int ret, ani_mode;
2695
	bool fast;
2696 2697 2698

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");

2699
	ath5k_hw_set_imr(ah, 0);
2700
	synchronize_irq(sc->irq);
2701 2702
	stop_tasklets(sc);

L
Lucas De Marchi 已提交
2703
	/* Save ani mode and disable ANI during
N
Nick Kossifidis 已提交
2704 2705 2706 2707 2708
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ah_sc->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

2709 2710 2711 2712
	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(sc);
2713
	if (chan)
2714
		sc->curchan = chan;
2715 2716 2717 2718

	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, fast,
2719
								skip_pcu);
J
Jiri Slaby 已提交
2720
	if (ret) {
2721 2722 2723
		ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
		goto err;
	}
J
Jiri Slaby 已提交
2724

2725
	ret = ath5k_rx_start(sc);
J
Jiri Slaby 已提交
2726
	if (ret) {
2727 2728 2729
		ATH5K_ERR(sc, "can't start recv logic\n");
		goto err;
	}
J
Jiri Slaby 已提交
2730

N
Nick Kossifidis 已提交
2731
	ath5k_ani_init(ah, ani_mode);
2732

2733 2734
	ah->ah_cal_next_full = jiffies;
	ah->ah_cal_next_ani = jiffies;
2735
	ah->ah_cal_next_nf = jiffies;
2736
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
2737

B
Bruno Randolf 已提交
2738 2739
	/* clear survey data and cycle counters */
	memset(&sc->survey, 0, sizeof(sc->survey));
2740
	spin_lock_bh(&common->cc_lock);
B
Bruno Randolf 已提交
2741 2742 2743
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
2744
	spin_unlock_bh(&common->cc_lock);
B
Bruno Randolf 已提交
2745

2746
	/*
J
Jiri Slaby 已提交
2747 2748 2749 2750 2751
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
2752 2753 2754 2755 2756
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(sc, c); */

J
Jiri Slaby 已提交
2757 2758
	ath5k_beacon_config(sc);
	/* intrs are enabled by ath5k_beacon_config */
2759

B
Bruno Randolf 已提交
2760 2761
	ieee80211_wake_queues(sc->hw);

2762 2763 2764 2765 2766
	return 0;
err:
	return ret;
}

2767 2768 2769 2770 2771 2772
static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
		reset_work);

	mutex_lock(&sc->lock);
2773
	ath5k_reset(sc, NULL, true);
2774 2775 2776
	mutex_unlock(&sc->lock);
}

2777
static int
2778
ath5k_init(struct ieee80211_hw *hw)
2779
{
2780

2781
	struct ath5k_softc *sc = hw->priv;
2782 2783
	struct ath5k_hw *ah = sc->ah;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
B
Bruno Randolf 已提交
2784
	struct ath5k_txq *txq;
2785
	u8 mac[ETH_ALEN] = {};
2786 2787 2788
	int ret;


2789 2790 2791 2792 2793 2794 2795 2796
	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MACs that don't have support will
	 * return false w/o doing anything.  MACs that do
	 * support it will return true w/o doing anything.
	 */
	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
J
Jiri Slaby 已提交
2797

2798 2799 2800 2801
	if (ret < 0)
		goto err;
	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, sc->status);
2802

2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813
	/*
	 * Collect the channel list.  The 802.11 layer
	 * is resposible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't get channels\n");
		goto err;
	}
J
Jiri Slaby 已提交
2814

2815 2816 2817
	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
2818
	ret = ath5k_desc_alloc(sc);
2819 2820 2821 2822
	if (ret) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		goto err;
	}
2823

2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841
	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	sc->bhalq = ret;
	sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(sc->cabq)) {
		ATH5K_ERR(sc, "can't setup cab queue\n");
		ret = PTR_ERR(sc->cabq);
		goto err_bhal;
	}
2842

2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882
	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		* directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(sc, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}
2883

2884 2885 2886 2887 2888
	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
2889

2890
	INIT_WORK(&sc->reset_work, ath5k_reset_work);
2891
	INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
2892

2893
	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
2894
	if (ret) {
2895
		ATH5K_ERR(sc, "unable to read address from EEPROM\n");
2896
		goto err_queues;
2897
	}
2898

2899
	SET_IEEE80211_PERM_ADDR(hw, mac);
2900
	memcpy(&sc->lladdr, mac, ETH_ALEN);
2901
	/* All MAC address bits matter for ACKs */
2902
	ath5k_update_bssid_mask_and_opmode(sc, NULL);
2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(sc, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(sc, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(sc);

	ath5k_sysfs_register(sc);

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_bhal:
	ath5k_hw_release_tx_queue(ah, sc->bhalq);
err_desc:
2930
	ath5k_desc_free(sc);
2931 2932 2933 2934
err:
	return ret;
}

2935 2936
void
ath5k_deinit_softc(struct ath5k_softc *sc)
2937
{
2938
	struct ieee80211_hw *hw = sc->hw;
2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
2954
	ath5k_desc_free(sc);
2955 2956 2957 2958 2959 2960 2961 2962 2963 2964
	ath5k_txq_release(sc);
	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
	ath5k_unregister_leds(sc);

	ath5k_sysfs_unregister(sc);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
2965 2966
	ath5k_hw_deinit(sc->ah);
	free_irq(sc->irq, sc);
2967 2968
}

2969 2970
bool
ath_any_vif_assoc(struct ath5k_softc *sc)
2971
{
2972
	struct ath5k_vif_iter_data iter_data;
2973 2974 2975 2976 2977
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

2978
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
2979 2980 2981 2982
						   &iter_data);
	return iter_data.any_assoc;
}

2983
void
2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996
set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;
	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	sc->filter_flags = rfilt;
}