base.c 81.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

43 44
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

45 46
#include <linux/module.h>
#include <linux/delay.h>
47
#include <linux/dma-mapping.h>
J
Jiri Slaby 已提交
48
#include <linux/hardirq.h>
49
#include <linux/if.h>
J
Jiri Slaby 已提交
50
#include <linux/io.h>
51 52 53 54
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
55
#include <linux/slab.h>
56
#include <linux/etherdevice.h>
57
#include <linux/nl80211.h>
58 59 60 61 62 63 64 65

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
66
#include "ani.h"
67 68
#include "ath5k.h"
#include "../regd.h"
69

70 71 72
#define CREATE_TRACE_POINTS
#include "trace.h"

73
bool ath5k_modparam_nohwcrypt;
74
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
75
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
76

77
static bool modparam_all_channels;
B
Bob Copeland 已提交
78
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
79 80
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

81
static bool modparam_fastchanswitch;
82 83 84
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

85
static bool ath5k_modparam_no_hw_rfkill_switch;
86 87 88 89
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
								bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");

90

91 92 93 94 95 96 97
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

98
static int ath5k_init(struct ieee80211_hw *hw);
99
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
100
								bool skip_pcu);
101 102

/* Known SREVs */
J
Jiri Slaby 已提交
103
/* Table mapping silicon revision (SREV) values to printable chip names.
 * Scanned linearly by ath5k_chip_name(); the "xxxxx"/AR5K_SREV_UNKNOWN
 * entries act as catch-alls for unrecognized MAC and radio revisions. */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	/* SoC (AHB) parts */
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	/* PCI(E) parts */
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	/* Radio (RF) revisions */
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

J
Jiri Slaby 已提交
154
/* Rate table handed to mac80211. .bitrate is in units of 100 kbit/s
 * (10 == 1 Mbit/s, matching the _1M hw code). The first four entries are
 * the CCK (B) rates; 2/5.5/11M also carry a short-preamble hw code.
 * ath5k_setup_bands() copies entries 0-11 for G, 0-3 for B, 4-11 for A. */
static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	/* OFDM rates (no short preamble variant) */
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
};

195 196 197 198 199 200 201 202 203 204
/*
 * Extend a 15-bit hardware RX timestamp to a full 64-bit TSF value by
 * splicing it into the current TSF read from the chip.  If the low TSF
 * bits have already wrapped past the timestamp, step back one 0x8000
 * period so the reconstructed value refers to the earlier wrap.
 */
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 full_tsf = ath5k_hw_get_tsf64(ah);
	u64 low_bits = full_tsf & 0x7fff;

	/* rstamp was latched before the low 15 TSF bits wrapped */
	if (low_bits < rstamp)
		full_tsf -= 0x8000;

	return (full_tsf & ~0x7fff) | rstamp;
}

205
const char *
206 207 208 209 210 211 212 213
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;
214 215 216 217 218

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
219 220 221 222 223 224 225
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
L
Luis R. Rodriguez 已提交
226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
/* ath_ops read hook: forward a register read to the ath5k register I/O. */
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = hw_priv;

	return ath5k_hw_reg_read(ah, reg_offset);
}

/* ath_ops write hook: forward a register write to the ath5k register I/O. */
static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = hw_priv;

	ath5k_hw_reg_write(ah, val, reg_offset);
}

/* Register access callbacks handed to the shared ath layer. */
static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
242

243 244 245 246 247
/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
248
{
249
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
250 251
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
252

253 254
	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
255

256 257 258
/********************\
* Channel/mode setup *
\********************/
259

260 261 262
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
263
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
264
{
265 266 267 268 269
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
270 271 272
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
273 274 275 276 277
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
278
}
279

280
/*
 * ath5k_setup_channels() - Populate a channel array for one PHY mode.
 * @ah: the hardware state
 * @channels: destination array
 * @mode: AR5K_MODE_11A / AR5K_MODE_11B / AR5K_MODE_11G
 * @max: capacity of @channels
 *
 * Walks every candidate channel number for the mode, converts it to a
 * frequency, and keeps only channels the chipset supports (and, unless
 * the all_channels modparam is set, only "standard" channel numbers).
 * Returns the number of entries written.
 */
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		/* Entry accepted; a rejected one is simply overwritten
		 * by the next candidate since count was not advanced. */
		count++;
	}

	return count;
}
328

329
static void
330
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
331 332
{
	u8 i;
333

334
	for (i = 0; i < AR5K_MAX_RATES; i++)
335
		ah->rate_idx[b->band][i] = -1;
336

337
	for (i = 0; i < b->n_bitrates; i++) {
338
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
339
		if (b->bitrates[i].hw_value_short)
340
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
341
	}
342
}
343

344 345 346
/*
 * ath5k_setup_bands() - Build the mac80211 band tables from chip
 * capabilities and register them with the wiphy.
 *
 * Copies the relevant slice of ath5k_rates[] and the supported channels
 * into ah->sbands for each band the hardware advertises, then builds
 * the reverse rate-index tables.  Returns 0 (cannot currently fail).
 */
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;	/* remaining / consumed channel slots */
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode: all 12 rates (CCK + OFDM) */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode: only the 4 CCK rates */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		/* OFDM-only rate slice (entries 4..11) */
		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		/* 5GHz channels go after the 2GHz ones in ah->channels */
		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	/* NOTE(review): when 11A is unsupported, sband still points at the
	 * 2GHz band here, so its rate index table is rebuilt a second time
	 * (harmless, same result). */
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

425 426 427 428 429
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff after a la  ath5k_init.
 *
430
 * Called with ah->lock.
431
 */
432
int
433
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
434
{
435
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
436
		  "channel set, resetting (%u -> %u MHz)\n",
437
		  ah->curchan->center_freq, chan->center_freq);
438

439
	/*
440 441 442 443
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
444
	 */
445
	return ath5k_reset(ah, chan, true);
446 447
}

448
/*
 * ath5k_vif_iter() - Per-interface iterator used to accumulate combined
 * state over all active vifs.
 * @data: a struct ath5k_vif_iter_data being filled in
 * @mac: the interface's MAC address
 * @vif: the interface
 *
 * Accumulates: the common-bits BSSID mask, the first active MAC seen,
 * whether the hardware address is already in use, whether any vif is
 * associated, the STA count, and the combined operating mode.
 */
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	/* Clear mask bits wherever this MAC differs from the reference
	 * hw_macaddr; only bits common to all addresses stay set. */
	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	/* Remember the first active interface's address */
	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	/* If some vif already uses the hardware address verbatim, the
	 * lladdr does not need to be (re)programmed. */
	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (ether_addr_equal(iter_data->hw_macaddr, mac))
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interfaces is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

488
/*
 * ath5k_update_bssid_mask_and_opmode() - Recompute and program the BSSID
 * mask, operating mode, hardware address and RX filter from the set of
 * active interfaces.
 * @ah: the hardware state
 * @vif: an extra vif to account for before iterating (may be NULL),
 *       useful while it is being added and not yet "active"
 */
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	/* Fold in the explicitly passed vif first */
	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	/* Program the lladdr only when no active vif already uses it */
	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
543

544
static inline int
545
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
546 547
{
	int rix;
548

549 550 551 552 553
	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

554
	rix = ah->rate_idx[ah->curchan->band][hw_rix];
555 556 557 558 559 560 561 562 563 564 565
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

/*
 * ath5k_rx_skb_alloc() - Allocate and DMA-map an RX skb.
 * @ah: the hardware state
 * @skb_addr: out-parameter receiving the mapped bus address
 *
 * Returns the skb on success, or NULL if either the allocation or the
 * DMA mapping fails (the skb is freed in the latter case).
 */
static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}
596

597
/*
 * ath5k_rxbuf_setup() - (Re)attach an skb to an RX buffer and link its
 * descriptor into the self-linked RX descriptor chain.
 * @ah: the hardware state
 * @bf: the RX buffer; bf->skb is allocated here if missing
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated, or the
 * error from descriptor setup.
 */
static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	/* Chain onto the previous descriptor (if any) and remember our
	 * link field as the new chain tail. */
	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

641
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
642
{
643 644 645
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;
646

647 648
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
649

650 651 652 653 654 655 656 657
	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
658
	else
659
		htype = AR5K_PKT_TYPE_NORMAL;
660

661
	return htype;
662 663
}

664
static int
665
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
666
		  struct ath5k_txq *txq, int padsize)
667
{
668 669 670 671 672 673 674 675 676 677 678
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;
679

680
	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
681

682
	/* XXX endianness */
683
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
684
			DMA_TO_DEVICE);
685

686
	rate = ieee80211_get_tx_rate(ah->hw, info);
687 688 689 690
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}
691

692 693
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;
694

695 696 697
	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;
698

699 700 701 702 703 704 705 706 707 708 709
	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
710 711
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
712
			info->control.vif, pktlen, info));
713 714 715
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
716 717
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
718
			info->control.vif, pktlen, info));
719 720 721 722
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
723
		(ah->power_level * 2),
724 725 726 727 728 729
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

730 731 732 733 734 735 736 737
	/* Set up MRR descriptor */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		memset(mrr_rate, 0, sizeof(mrr_rate));
		memset(mrr_tries, 0, sizeof(mrr_tries));
		for (i = 0; i < 3; i++) {
			rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
			if (!rate)
				break;
738

739 740 741
			mrr_rate[i] = rate->hw_value;
			mrr_tries[i] = info->control.rates[i + 1].count;
		}
742

743 744 745 746 747
		ath5k_hw_setup_mrr_tx_desc(ah, ds,
			mrr_rate[0], mrr_tries[0],
			mrr_rate[1], mrr_tries[1],
			mrr_rate[2], mrr_tries[2]);
	}
748

749 750
	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;
B
Bruno Randolf 已提交
751

752 753
	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
B
Bruno Randolf 已提交
754
	txq->txq_len++;
755 756 757 758
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;
B
Bruno Randolf 已提交
759

760 761 762 763 764 765 766
	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
767
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
768
	return ret;
B
Bruno Randolf 已提交
769 770
}

771 772 773 774
/*******************\
* Descriptors setup *
\*******************/

/*
 * ath5k_desc_alloc() - Allocate the coherent descriptor block and the
 * ath5k_buf array, and distribute descriptors over the RX, TX and
 * beacon buffer lists.
 * @ah: the hardware state
 *
 * Returns 0 on success or -ENOMEM; on failure ah->desc is left NULL.
 */
static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	/* Walk bf/ds/da in lockstep: each buffer gets one descriptor and
	 * its corresponding bus address within the coherent block. */
	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}
839

840
void
841
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
842 843 844 845
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
846
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
847 848 849 850 851 852 853 854
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
855
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
856 857 858 859 860 861
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
862
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
863 864 865 866 867 868 869
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

870
static void
871
ath5k_desc_free(struct ath5k_hw *ah)
872 873
{
	struct ath5k_buf *bf;
874

875 876 877 878 879 880
	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
881

882
	/* Free memory associated with all descriptors */
883 884 885
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;
886

887 888
	kfree(ah->bufptr);
	ah->bufptr = NULL;
889 890
}

891 892 893 894 895 896

/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
897
ath5k_txq_setup(struct ath5k_hw *ah,
898
		int qtype, int subtype)
899
{
900 901 902
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
903 904 905 906 907
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
908 909
	};
	int qnum;
910

911
	/*
912 913 914 915 916 917 918 919 920 921
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
922
	 */
923 924 925 926 927 928 929 930 931 932
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
933
	txq = &ah->txqs[qnum];
934 935 936 937 938 939
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
B
Bruno Randolf 已提交
940
		txq->txq_len = 0;
941
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
942
		txq->txq_poll_mark = false;
943
		txq->txq_stuck = 0;
944
	}
945
	return &ah->txqs[qnum];
946 947
}

948 949
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
950
{
951
	struct ath5k_txq_info qi = {
952 953 954 955 956
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
957 958 959
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};
960

961
	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
962 963
}

964
static int
965
ath5k_beaconq_config(struct ath5k_hw *ah)
966
{
967 968
	struct ath5k_txq_info qi;
	int ret;
969

970
	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
971 972
	if (ret)
		goto err;
973

974 975
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
976 977 978 979 980 981 982
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
983
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
984 985 986 987 988
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
989
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
990
	}
991

992
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
993 994
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
995

996
	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
997
	if (ret) {
998
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
999 1000 1001
			"hardware queue!\n", __func__);
		goto err;
	}
1002
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
1003 1004
	if (ret)
		goto err;
1005

1006 1007 1008 1009
	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;
1010

1011
	qi.tqi_ready_time = (ah->bintval * 80) / 100;
1012 1013 1014
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;
1015

1016 1017 1018
	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
1019 1020
}

1021 1022 1023
/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
1024
 * @ah The &struct ath5k_hw
1025 1026 1027 1028 1029 1030 1031
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
1032
static void
1033
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
1034
{
1035
	struct ath5k_txq *txq;
1036
	struct ath5k_buf *bf, *bf0;
1037
	int i;
1038

1039 1040 1041
	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
1042 1043
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1044
				ath5k_debug_printtxbuf(ah, bf);
1045

1046
				ath5k_txbuf_free_skb(ah, bf);
1047

1048 1049 1050
				spin_lock_bh(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
1051
				txq->txq_len--;
1052
				spin_unlock_bh(&ah->txbuflock);
1053
			}
1054 1055 1056 1057
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
1058
	}
1059 1060
}

1061
static void
1062
ath5k_txq_release(struct ath5k_hw *ah)
1063
{
1064
	struct ath5k_txq *txq = ah->txqs;
1065
	unsigned int i;
1066

1067
	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
1068
		if (txq->setup) {
1069
			ath5k_hw_release_tx_queue(ah, txq->qnum);
1070 1071 1072
			txq->setup = false;
		}
}
1073 1074


1075 1076 1077
/*************\
* RX Handling *
\*************/
1078

1079 1080 1081
/*
 * Enable the receive h/w following a reset.
 */
1082
static int
1083
ath5k_rx_start(struct ath5k_hw *ah)
1084
{
1085 1086 1087
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;
1088

1089
	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
1090

1091
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
1092
		  common->cachelsz, common->rx_bufsize);
1093

1094 1095 1096 1097
	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
1098
		if (ret != 0) {
1099
			spin_unlock_bh(&ah->rxbuflock);
1100 1101
			goto err;
		}
1102
	}
1103
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
1104
	ath5k_hw_set_rxdp(ah, bf->daddr);
1105
	spin_unlock_bh(&ah->rxbuflock);
1106

1107
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
1108
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
1109
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */
1110 1111

	return 0;
1112
err:
1113 1114 1115
	return ret;
}

1116
/*
 * Disable the receive logic on PCU (DRU) in preparation
 * for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	/* dump remaining rx buffers when debugging is enabled */
	ath5k_debug_printrxbuffs(ah);
}
1132

1133
static unsigned int
1134
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
1135 1136 1137 1138 1139
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;
1140

1141 1142 1143
	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;
1144

1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156
	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}
1157 1158 1159 1160

	return 0;
}

1161

1162
static void
1163
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
1164
		     struct ieee80211_rx_status *rxs)
1165
{
1166
	struct ath_common *common = ath5k_hw_common(ah);
1167 1168 1169
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1170

1171 1172
	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
1173
	    ether_addr_equal(mgmt->bssid, common->curbssid)) {
1174 1175 1176 1177 1178
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
1179
		tsf = ath5k_hw_get_tsf64(ah);
1180 1181
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);
1182

1183
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
1184 1185 1186 1187 1188
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);
1189

1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201
		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
1202
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
1203 1204 1205 1206 1207
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}
1208

1209 1210 1211 1212 1213 1214
		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
1215 1216
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);
B
Bruno Randolf 已提交
1217 1218 1219 1220

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
1221 1222 1223
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
B
Bruno Randolf 已提交
1224 1225
				"fixed beacon timers after beacon receive\n");
		}
1226 1227
	}
}
1228

1229
static void
1230
ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
1231 1232 1233
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath_common *common = ath5k_hw_common(ah);
1234

1235 1236
	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
1237
	    !ether_addr_equal(mgmt->bssid, common->curbssid))
1238
		return;
1239

B
Bruno Randolf 已提交
1240
	ewma_add(&ah->ah_beacon_rssi_avg, rssi);
1241

1242 1243 1244
	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
1245

1246 1247 1248 1249
/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
1250
{
1251
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1252 1253
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;
1254

1255
	if (ieee80211_has_a4(frame_control))
1256
		padpos += ETH_ALEN;
1257 1258

	if (ieee80211_is_data_qos(frame_control))
1259 1260 1261
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
1262 1263
}

1264 1265 1266 1267 1268
/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
1269
{
1270 1271
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
1272

1273
	if (padsize && skb->len > padpos) {
1274

1275 1276
		if (skb_headroom(skb) < padsize)
			return -1;
1277

1278
		skb_push(skb, padsize);
1279
		memmove(skb->data, skb->data + padsize, padpos);
1280 1281
		return padsize;
	}
B
Bob Copeland 已提交
1282

1283 1284
	return 0;
}
1285

1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302
/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
1303

1304
	if (padsize && skb->len >= padpos + padsize) {
1305 1306 1307
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
1308
	}
B
Bob Copeland 已提交
1309

1310
	return 0;
1311 1312 1313
}

static void
1314
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
1315
		    struct ath5k_rx_status *rs)
1316
{
1317 1318 1319 1320 1321 1322 1323 1324 1325
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;
1326 1327

	/*
1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
1340
	 * frame, but I'm not sure.
1341 1342 1343 1344 1345
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
1346
	 */
1347
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
J
Johannes Berg 已提交
1348
	rxs->flag |= RX_FLAG_MACTIME_MPDU;
1349

1350 1351
	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;
1352

1353
	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;
1354

1355
	rxs->antenna = rs->rs_antenna;
1356

1357
	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
1358
		ah->stats.antenna_rx[rs->rs_antenna]++;
1359
	else
1360
		ah->stats.antenna_rx[0]++; /* invalid */
1361

1362 1363
	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
1364

1365
	if (rxs->rate_idx >= 0 && rs->rs_rate ==
1366
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
1367
		rxs->flag |= RX_FLAG_SHORTPRE;
1368

1369
	trace_ath5k_rx(ah, skb);
1370

1371
	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);
1372

1373
	/* check beacons in IBSS mode */
1374 1375
	if (ah->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(ah, skb, rxs);
1376

1377
	ieee80211_rx(ah->hw, skb);
1378
}
1379

1380 1381 1382 1383
/** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
1384
 */
1385
static bool
1386
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
1387
{
1388 1389
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;
1390

1391 1392
	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
1393
			ah->stats.rxerr_crc++;
1394
		if (rs->rs_status & AR5K_RXERR_FIFO)
1395
			ah->stats.rxerr_fifo++;
1396
		if (rs->rs_status & AR5K_RXERR_PHY) {
1397
			ah->stats.rxerr_phy++;
1398
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
1399
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
1413
			ah->stats.rxerr_decrypt++;
1414 1415 1416 1417 1418
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
1419
			ah->stats.rxerr_mic++;
1420
			return true;
1421 1422
		}

1423 1424 1425 1426
		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}
1427

1428
	if (unlikely(rs->rs_more)) {
1429
		ah->stats.rxerr_jumbo++;
1430 1431 1432
		return false;
	}
	return true;
1433 1434
}

1435
static void
1436
ath5k_set_current_imask(struct ath5k_hw *ah)
1437
{
1438
	enum ath5k_int imask;
1439 1440
	unsigned long flags;

1441 1442 1443
	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
1444
		imask &= ~AR5K_INT_RX_ALL;
1445
	if (ah->tx_pending)
1446
		imask &= ~AR5K_INT_TX_ALL;
1447 1448
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
1449 1450
}

1451
static void
1452
ath5k_tasklet_rx(unsigned long data)
1453
{
1454 1455 1456
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
1457
	struct ath5k_hw *ah = (void *)data;
L
Luis R. Rodriguez 已提交
1458
	struct ath_common *common = ath5k_hw_common(ah);
1459 1460 1461
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;
1462

1463 1464 1465
	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
1466 1467 1468
		goto unlock;
	}
	do {
1469
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
1470 1471 1472
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;
1473

1474
		/* bail if HW is still using self-linked descriptor */
1475
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
1476
			break;
1477

1478
		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
1479 1480 1481
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
1482 1483
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
1484 1485
			break;
		}
1486

1487 1488
		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);
1489

1490 1491 1492 1493 1494 1495
			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;
1496

1497
			dma_unmap_single(ah->dev, bf->skbaddr,
1498
					 common->rx_bufsize,
1499
					 DMA_FROM_DEVICE);
1500

1501
			skb_put(skb, rs.rs_datalen);
1502

1503
			ath5k_receive_frame(ah, skb, &rs);
1504

1505 1506
			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
1507
		}
1508
next:
1509 1510
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
1511
unlock:
1512 1513 1514
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
1515 1516
}

B
Bruno Randolf 已提交
1517

1518 1519 1520
/*************\
* TX Handling *
\*************/
B
Bruno Randolf 已提交
1521

1522
void
1523 1524
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
1525
{
1526
	struct ath5k_hw *ah = hw->priv;
1527 1528 1529
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;
B
Bruno Randolf 已提交
1530

1531
	trace_ath5k_tx(ah, skb, txq);
B
Bruno Randolf 已提交
1532

1533 1534 1535 1536 1537 1538
	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
1539
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
1540 1541 1542
			  " headroom to pad");
		goto drop_packet;
	}
1543

1544 1545
	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
B
Bruno Randolf 已提交
1546 1547
		ieee80211_stop_queue(hw, txq->qnum);

1548 1549 1550 1551
	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
B
Bruno Randolf 已提交
1552
		ieee80211_stop_queues(hw);
1553
		goto drop_packet;
1554
	}
1555
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
1556
	list_del(&bf->list);
1557 1558
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
1559
		ieee80211_stop_queues(hw);
1560
	spin_unlock_irqrestore(&ah->txbuflock, flags);
1561 1562 1563

	bf->skb = skb;

1564
	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
1565
		bf->skb = NULL;
1566 1567 1568 1569
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
1570
		goto drop_packet;
1571
	}
1572
	return;
1573

1574 1575
drop_packet:
	dev_kfree_skb_any(skb);
1576 1577
}

1578
static void
1579
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
1580
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
1581 1582
{
	struct ieee80211_tx_info *info;
1583
	u8 tries[3];
1584 1585
	int i;

1586 1587
	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
1588 1589
	info = IEEE80211_SKB_CB(skb);

1590 1591 1592 1593
	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

1594
	ieee80211_tx_info_clear_status(info);
1595 1596

	for (i = 0; i < ts->ts_final_idx; i++) {
1597 1598 1599
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

1600
		r->count = tries[i];
1601 1602
	}

1603
	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
1604
	info->status.rates[ts->ts_final_idx + 1].idx = -1;
1605 1606

	if (unlikely(ts->ts_status)) {
1607
		ah->stats.ack_fail++;
1608 1609
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1610
			ah->stats.txerr_filt++;
1611 1612
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
1613
			ah->stats.txerr_retry++;
1614
		if (ts->ts_status & AR5K_TXERR_FIFO)
1615
			ah->stats.txerr_fifo++;
1616 1617 1618
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;
1619 1620 1621

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
1622 1623 1624 1625 1626 1627 1628 1629 1630
	}

	/*
	* Remove MAC header padding before giving the frame
	* back to mac80211.
	*/
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
1631
		ah->stats.antenna_tx[ts->ts_antenna]++;
1632
	else
1633
		ah->stats.antenna_tx[0]++; /* invalid */
1634

1635 1636
	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
1637
}
1638 1639

static void
1640
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
1641
{
1642 1643 1644 1645
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
1646
	int ret;
1647

1648 1649
	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1650 1651 1652 1653 1654 1655 1656

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

1657
			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
1658 1659 1660
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
1661
				ATH5K_ERR(ah,
1662 1663 1664 1665 1666 1667 1668
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;
1669

1670
			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
1671
					DMA_TO_DEVICE);
1672
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
1673
		}
1674

1675 1676 1677
		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
1678 1679
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
1680
		 */
1681 1682 1683 1684
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
1685
			txq->txq_len--;
1686
			spin_unlock(&ah->txbuflock);
1687
		}
1688 1689
	}
	spin_unlock(&txq->lock);
B
Bruno Randolf 已提交
1690
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
1691
		ieee80211_wake_queue(ah->hw, txq->qnum);
1692 1693 1694 1695 1696
}

static void
ath5k_tasklet_tx(unsigned long data)
{
B
Bob Copeland 已提交
1697
	int i;
1698
	struct ath5k_hw *ah = (void *)data;
1699

1700
	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
1701
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
1702
			ath5k_tx_processq(ah, &ah->txqs[i]);
1703

1704 1705
	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
1717
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1718 1719
{
	struct sk_buff *skb = bf->skb;
J
Johannes Berg 已提交
1720
	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1721
	struct ath5k_desc *ds;
1722 1723
	int ret = 0;
	u8 antenna;
1724
	u32 flags;
1725
	const int padsize = 0;
1726

1727
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
1728
			DMA_TO_DEVICE);
1729
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
1730 1731
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);
1732

1733 1734
	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
1735 1736
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
1737 1738 1739 1740
		return -EIO;
	}

	ds = bf->desc;
1741
	antenna = ah->ah_tx_ant;
1742 1743

	flags = AR5K_TXDESC_NOACK;
1744
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
1745 1746
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
1747
	} else
1748
		ds->ds_link = 0;
1749 1750 1751 1752 1753 1754 1755

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
1756
	 * automatically, based on ACKed packets.
1757 1758 1759 1760 1761
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
B
Bob Copeland 已提交
1762 1763 1764 1765 1766
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
1767 1768
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
1769
		antenna = ah->bsent & 4 ? 2 : 1;
1770

1771

1772 1773 1774
	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
1775
	ds->ds_data = bf->skbaddr;
1776
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1777
			ieee80211_get_hdrlen_from_skb(skb), padsize,
1778 1779
			AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1780
			1, AR5K_TXKEYIX_INVALID,
1781
			antenna, flags, 0, 0);
1782 1783 1784 1785 1786
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
1787
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
1788 1789 1790
	return ret;
}

1791 1792 1793 1794 1795 1796 1797
/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
1798
int
1799 1800 1801
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
1802
	struct ath5k_hw *ah = hw->priv;
1803
	struct ath5k_vif *avf = (void *)vif->drv_priv;
1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

1818
	ath5k_txbuf_free_skb(ah, avf->bbuf);
1819
	avf->bbuf->skb = skb;
1820
	ret = ath5k_beacon_setup(ah, avf->bbuf);
1821 1822 1823 1824
out:
	return ret;
}

1825 1826 1827 1828 1829
/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
1830 1831
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
1832 1833
 */
static void
1834
ath5k_beacon_send(struct ath5k_hw *ah)
1835
{
1836 1837 1838
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
1839
	struct sk_buff *skb;
1840
	int err;
1841

1842
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1843 1844 1845

	/*
	 * Check if the previous beacon has gone out.  If
B
Bob Copeland 已提交
1846
	 * not, don't don't try to post another: skip this
1847 1848 1849 1850
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
1851 1852 1853 1854 1855 1856
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1857
				"stuck beacon time (%u missed)\n",
1858 1859
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
1860
				  "stuck beacon, resetting\n");
1861
			ieee80211_queue_work(ah->hw, &ah->reset_work);
1862 1863 1864
		}
		return;
	}
1865 1866
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1867
			"resume beacon xmit after %u misses\n",
1868 1869
			ah->bmisscount);
		ah->bmisscount = 0;
1870 1871
	}

1872 1873
	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
			ah->num_mesh_vifs > 1) ||
1874
			ah->opmode == NL80211_IFTYPE_MESH_POINT) {
1875 1876
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
1877 1878 1879
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1880
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
1881
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
1882
	} else /* only one interface */
1883
		vif = ah->bslot[0];
1884 1885 1886 1887 1888 1889 1890

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

1891 1892 1893 1894 1895
	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
1896 1897
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
1898 1899 1900
		/* NB: hw still stops DMA, so proceed */
	}

J
Javier Cardona 已提交
1901
	/* refresh the beacon for AP or MESH mode */
1902
	if (ah->opmode == NL80211_IFTYPE_AP ||
1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}
B
Bob Copeland 已提交
1914

1915
	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
1916

1917 1918 1919 1920
	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);
1921

1922
	skb = ieee80211_get_buffered_bc(ah->hw, vif);
1923
	while (skb) {
1924
		ath5k_tx_queue(ah->hw, skb, ah->cabq);
1925

1926
		if (ah->cabq->txq_len >= ah->cabq->txq_max)
1927 1928
			break;

1929
		skb = ieee80211_get_buffered_bc(ah->hw, vif);
1930 1931
	}

1932
	ah->bsent++;
1933 1934
}

1935 1936 1937
/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @ah: struct ath5k_hw pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when an new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = ah->bintval & AR5K_BEACON_PERIOD;
	if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
		+ ah->num_mesh_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(ah, "intval %u is too low, min 15\n",
				   intval);
	}
	/* a zero beacon interval would make the timer setup meaningless */
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE

	ah->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @ah: struct ath5k_hw pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
	unsigned long flags;

	/* ah->block serializes beacon state against the SWBA tasklet */
	spin_lock_irqsave(&ah->block, flags);
	ah->bmisscount = 0;
	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (ah->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(ah);

		ah->imask |= AR5K_INT_SWBA;

		if (ah->opmode == NL80211_IFTYPE_ADHOC) {
			/* self-linked descriptor needs VEOL support */
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(ah);
		} else
			ath5k_beacon_update_timers(ah, -1);
	} else {
		ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
	}

	ath5k_hw_set_imr(ah, ah->imask);
	mmiowb();
	spin_unlock_irqrestore(&ah->block, flags);
}

/* SWBA (software beacon alert) tasklet - runs in softirq context. */
static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(ah);
		ah->nexttbtt += ah->bintval;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				ah->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		/* non-IBSS: actually transmit the beacon ourselves */
		spin_lock(&ah->block);
		ath5k_beacon_send(ah);
		spin_unlock(&ah->block);
	}
}


/********************\
* Interrupt handling *
\********************/

/* Called from the interrupt path to kick off ANI or short/full
 * calibration when their next-run deadlines have passed. */
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run ANI only when calibration is not active */

		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run calibration only when another calibration
		 * is not running.
		 *
		 * Note: This is for both full/short calibration,
		 * if it's time for a full one, ath5k_calibrate_work will deal
		 * with it. */

		ah->ah_cal_next_short = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
		ieee80211_queue_work(ah->hw, &ah->calib_work);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

/* Mark RX work pending, then schedule the RX tasklet.
 * The flag must be set first: ath5k_intr() reads rx_pending to keep
 * RX/TX interrupts masked until the tasklet has run. */
static void
ath5k_schedule_rx(struct ath5k_hw *ah)
{
	ah->rx_pending = true;
	tasklet_schedule(&ah->rxtq);
}

/* Mark TX work pending, then schedule the TX tasklet.
 * Mirrors ath5k_schedule_rx(): the flag is consulted by ath5k_intr()
 * when deciding whether to mask TX/RX interrupts. */
static void
ath5k_schedule_tx(struct ath5k_hw *ah)
{
	ah->tx_pending = true;
	tasklet_schedule(&ah->txtq);
}

/* Main interrupt handler. Reads and dispatches all pending interrupt
 * causes; heavy work is deferred to tasklets/workqueues. */
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_hw *ah = dev_id;
	enum ath5k_int status;
	unsigned int counter = 1000;	/* bound the handling loop */

	/*
	 * If hw is not ready (or detached) and we get an
	 * interrupt, or if we have no interrupts pending
	 * (that means it's not for us) skip it.
	 *
	 * NOTE: Group 0/1 PCI interface registers are not
	 * supported on WiSOCs, so we can't check for pending
	 * interrupts (ISR belongs to another register group
	 * so we are ok).
	 */
	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
			((ath5k_get_bus_type(ah) != ATH_AHB) &&
			!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	/** Main loop **/
	do {
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */

		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, ah->imask);

		/*
		 * Fatal hw error -> Log and reset
		 *
		 * Fatal errors are unrecoverable so we have to
		 * reset the card. These errors include bus and
		 * dma errors.
		 */
		if (unlikely(status & AR5K_INT_FATAL)) {

			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);

		/*
		 * RX Overrun -> Count and reset if needed
		 *
		 * Receive buffers are full. Either the bus is busy or
		 * the CPU is not fast enough to process all received
		 * frames.
		 */
		} else if (unlikely(status & AR5K_INT_RXORN)) {

			/*
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset
			 * this guess is copied from the HAL.
			 */
			ah->stats.rxorn_intr++;

			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(ah->hw, &ah->reset_work);
			} else
				ath5k_schedule_rx(ah);

		} else {

			/* Software Beacon Alert -> Schedule beacon tasklet */
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&ah->beacontq);

			/*
			 * No more RX descriptors -> Just count
			 *
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			if (status & AR5K_INT_RXEOL)
				ah->stats.rxeol_intr++;


			/* TX Underrun -> Bump tx trigger level */
			if (status & AR5K_INT_TXURN)
				ath5k_hw_update_tx_triglevel(ah, true);

			/* RX -> Schedule rx tasklet */
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(ah);

			/* TX -> Schedule tx tasklet */
			if (status & (AR5K_INT_TXOK
					| AR5K_INT_TXDESC
					| AR5K_INT_TXERR
					| AR5K_INT_TXEOL))
				ath5k_schedule_tx(ah);

			/* Missed beacon -> TODO
			if (status & AR5K_INT_BMISS)
			*/

			/* MIB event -> Update counters and notify ANI */
			if (status & AR5K_INT_MIB) {
				ah->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}

			/* GPIO -> Notify RFKill layer */
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&ah->rf_kill.toggleq);

		}

		/* AHB bus (WiSoC): cannot poll pending interrupts, handle once */
		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	/*
	 * Until we handle rx/tx interrupts mask them on IMR
	 *
	 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
	 * and unset after we 've handled the interrupts.
	 */
	if (ah->rx_pending || ah->tx_pending)
		ath5k_set_current_imask(ah);

	if (unlikely(!counter))
		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");

	/* Fire up calibration poll */
	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 * Runs from the calib_work workqueue item queued by
 * ath5k_intr_calibration_poll().
 */
static void
ath5k_calibrate_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		calib_work);

	/* Should we run a full calibration ? */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {

		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

		ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
				"running full calibration\n");

		if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					"got new rfgain, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
	} else
		ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;


	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(ah->curchan->center_freq),
		ah->curchan->hw_value);

	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
		ATH5K_ERR(ah, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				ah->curchan->center_freq));

	/* Clear calibration flags */
	if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
	else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}


/* ANI tasklet - runs the Adaptive Noise Immunity calibration
 * outside of hard interrupt context. */
static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

	/* flag ANI as in progress so other calibrations don't race with it */
	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}


/* Periodic watchdog: if a tx queue still holds frames that were already
 * marked on the previous pass, the queue is considered stuck and the
 * chip is reset. Re-arms itself at ATH5K_TX_COMPLETE_POLL_INT. */
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	mutex_lock(&ah->lock);

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					/* marked last pass and still busy */
					ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(ah, NULL, true);
	}

	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


/*************************\
* Initialization routines *
\*************************/
/* One-time device attach: set up mac80211 hw capabilities, locks, the
 * interrupt handler and the lower-level hw layer, then finish with
 * ath5k_init(). Interrupts stay disabled (ATH_STAT_INVALID) until the
 * very end. Returns 0 or a negative errno, unwinding via gotos. */
int __devinit
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = ah->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, ah->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* SW support for IBSS_RSN is provided by mac80211 */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, ah->status);

	ah->opmode = NL80211_IFTYPE_STATION;
	ah->bintval = 1000;
	mutex_init(&ah->lock);
	spin_lock_init(&ah->rxbuflock);
	spin_lock_init(&ah->txbuflock);
	spin_lock_init(&ah->block);
	spin_lock_init(&ah->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
	if (ret) {
		ATH5K_ERR(ah, "request_irq failed\n");
		goto err;
	}

	common = ath5k_hw_common(ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = hw;
	common->priv = ah;
	common->clockrate = 40;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(ah);
	if (ret)
		goto err_irq;

	/* Set up multi-rate retry capabilities */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
					ah->ah_mac_srev,
					ah->ah_phy_revision);

	if (!ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (ah->ah_radio_5ghz_revision &&
			!ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(ah, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (ah->ah_radio_5ghz_revision &&
				ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_5ghz_revision),
					ah->ah_radio_5ghz_revision);
			ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_2ghz_revision),
					ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(ah);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, ah->status);

	return 0;
err_ah:
	ath5k_hw_deinit(ah);
err_irq:
	free_irq(ah->irq, ah);
err:
	return ret;
}

/* Stop the device. Caller must hold ah->lock. Always returns 0. */
static int
ath5k_stop_locked(struct ath5k_hw *ah)
{

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, ah->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(ah->hw);

	if (!test_bit(ATH_STAT_INVALID, ah->status)) {
		ath5k_led_off(ah);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(ah->irq);
		ath5k_rx_stop(ah);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(ah);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

/* mac80211 start callback: bring the device from cold to operational.
 * Resets the chip, clears the key cache and arms the tx watchdog. */
int ath5k_start(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&ah->lock);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * no matter this is the first time through or not.
	 */
	ath5k_stop_locked(ah);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ah->curchan = ah->hw->conf.channel;
	ah->imask = AR5K_INT_RXOK
		| AR5K_INT_RXERR
		| AR5K_INT_RXEOL
		| AR5K_INT_RXORN
		| AR5K_INT_TXDESC
		| AR5K_INT_TXEOL
		| AR5K_INT_FATAL
		| AR5K_INT_GLOBAL
		| AR5K_INT_MIB;

	ret = ath5k_reset(ah, NULL, false);
	if (ret)
		goto done;

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	/* no beacon slots in use yet */
	for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
		ah->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&ah->lock);

	/* arm the stuck-tx watchdog */
	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

/* Clear the rx/tx pending flags and synchronously kill all tasklets. */
static void ath5k_stop_tasklets(struct ath5k_hw *ah)
{
	ah->rx_pending = false;
	ah->tx_pending = false;
	tasklet_kill(&ah->rxtq);
	tasklet_kill(&ah->txtq);
	tasklet_kill(&ah->beacontq);
	tasklet_kill(&ah->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
/* mac80211 stop callback: shut the device down and put it on hold
 * (warm reset) rather than full sleep - see the comment below. */
void ath5k_stop(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	int ret;

	mutex_lock(&ah->lock);
	ret = ath5k_stop_locked(ah);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results a weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead Perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(ah);

		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&ah->lock);

	ath5k_stop_tasklets(ah);

	cancel_delayed_work_sync(&ah->tx_complete_work);

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_stop(ah);
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with ah->lock.
 */
static int
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");

	/* quiesce: mask interrupts, wait for the handler, stop tasklets */
	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(ah);
	if (chan)
		ah->curchan = chan;

	/* fast channel switch only applies when actually changing channel */
	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	/*
	 * Set calibration intervals
	 *
	 * Note: We don't need to run calibration imediately
	 * since some initial calibration is done on reset
	 * even for fast channel switching. Also on scanning
	 * this will get set again and again and it won't get
	 * executed unless we connect somewhere and spend some
	 * time on the channel (that's what calibration needs
	 * anyway to be accurate).
	 */
	ah->ah_cal_next_full = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
	ah->ah_cal_next_ani = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
	ah->ah_cal_next_short = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);

	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&ah->survey, 0, sizeof(ah->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(ah, c); */

	ath5k_beacon_config(ah);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(ah->hw);

	return 0;
err:
	return ret;
}

/* Deferred chip reset, queued from interrupt context (fatal errors,
 * rx overruns, rfgain changes). Serialized via ah->lock. */
static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		reset_work);

	mutex_lock(&ah->lock);
	ath5k_reset(ah, NULL, true);
	mutex_unlock(&ah->lock);
}

/* Finish driver-private initialization: bands, descriptors, tx queues,
 * tasklets/work items, MAC address, regulatory, and mac80211
 * registration. Called from ath5k_init_ah(). Unwinds via gotos. */
static int __devinit
ath5k_init(struct ieee80211_hw *hw)
{

	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;


	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	ah->bhalq = ret;
	ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(ah->cabq)) {
		ATH5K_ERR(ah, "can't setup cab queue\n");
		ret = PTR_ERR(ah->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		* directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
	tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
	tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
	tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);

	INIT_WORK(&ah->reset_work, ath5k_reset_work);
	INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
	INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(ah, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(ah, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(ah, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(ah);

	ath5k_sysfs_register(ah);

	return 0;
err_queues:
	ath5k_txq_release(ah);
err_bhal:
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
err_desc:
	ath5k_desc_free(ah);
err:
	return ret;
}

/* Device detach: unregister from mac80211 first, then tear down
 * driver resources and the hw layer, finally release the IRQ. */
void
ath5k_deinit_ah(struct ath5k_hw *ah)
{
	struct ieee80211_hw *hw = ah->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(ah);
	ath5k_txq_release(ah);
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
	ath5k_unregister_leds(ah);

	ath5k_sysfs_unregister(ah);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(ah);
	free_irq(ah->irq, ah);
}

bool
3017
ath5k_any_vif_assoc(struct ath5k_hw *ah)
3018
{
3019
	struct ath5k_vif_iter_data iter_data;
3020 3021 3022 3023 3024
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

3025
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
3026 3027 3028 3029
						   &iter_data);
	return iter_data.any_assoc;
}

void
P
Pavel Roskin 已提交
3031
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3032
{
3033
	struct ath5k_hw *ah = hw->priv;
3034 3035 3036 3037 3038 3039 3040
	u32 rfilt;
	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
3041
	ah->filter_flags = rfilt;
3042
}

/* printk helper behind the ATH5K_* logging macros: prefixes the
 * message with the wiphy name when the hw handle is available. */
void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
		   const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!ah || !ah->hw)
		printk("%s" pr_fmt("%pV"), level, &vaf);
	else
		printk("%s" pr_fmt("%s: %pV"),
		       level, wiphy_name(ah->hw->wiphy), &vaf);

	va_end(args);
}