/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/nl80211.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
#include "ath5k.h"
#include "../regd.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
								bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
	/* XR missing */
};

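/*
 * Extend the 15-bit hw rx timestamp (rstamp) to a full 64-bit TSF.
 * If the low 15 bits of the current TSF have already wrapped past
 * rstamp, the frame arrived in the previous 32768us window, so step
 * back one period first.  E.g. (illustrative values) tsf = 0x100123,
 * rstamp = 0x7fff: 0x123 < 0x7fff, so the window base becomes
 * 0xf8000 and the result is 0xfffff.
 */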
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

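		/* Match on the revision family (upper nibble) first so
		 * minor revisions still get a name; an exact match below
		 * overrides it and ends the search. */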
		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}

/********************\
* Channel/mode setup *
\********************/

/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}

static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		count++;
	}

	return count;
}

static void
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		ah->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* The 5211 only supports B rates and uses 4-bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on the 5211 we
		 * have 0x0B); fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first clean up any pending DMA,
 * then restart things along the lines of ath5k_init.
 *
 * Called with ah->lock.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chan, true);
}

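/*
 * Interface iterator: collects the combined BSSID mask (clearing every
 * bit where an interface address differs from the hw MAC address), the
 * first active MAC address and the resulting operating mode.
 */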
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = ah->rate_idx[ah->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

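/*
 * Map the skb's 802.11 frame type to the packet type field the hw tx
 * descriptor expects (beacon, probe response, ATIM, PS-Poll or normal).
 */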
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}

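/*
 * Fill the tx descriptor for one frame (rates, RTS/CTS flags, key and
 * multi-rate-retry settings), link it into the queue and start tx DMA.
 */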
static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(ah->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(ah->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
		if (!rate)
			break;

		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

762 763 764 765
/*******************\
* Descriptors setup *
\*******************/

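/*
 * Allocate one coherent DMA region holding the descriptors for all rx,
 * tx and beacon buffers, then carve it up into the rxbuf, txbuf and
 * bcbuf lists.
 */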
static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}

void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_hw *ah)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;

	kfree(ah->bufptr);
	ah->bufptr = NULL;
}


/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
		int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(ah->txqs)) {
		ATH5K_ERR(ah, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(ah->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &ah->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret)
		goto err;

	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (ah->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @ah: The &struct ath5k_hw
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(ah, bf);

				ath5k_txbuf_free_skb(ah, bf);

				spin_lock_bh(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&ah->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq = ah->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on the PCU (DRU)
 * in preparation for a shutdown.
 *
 * Note: this doesn't stop rx DMA; ath5k_hw_dma_stop
 * does that.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(ah);
}

static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
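		/* The 2-bit Key ID sits in the top bits of the fourth
		 * IV byte, which follows the 802.11 header. */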
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}

static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}

static void
ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}

/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}

/*
 * The MAC header is padded to a 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
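
	/* e.g. (illustrative) a QoS data frame: padpos = 24 + 2 = 26,
	 * padsize = 26 & 3 = 2, so two pad bytes sit between the header
	 * and the payload and are stripped here. */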
	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}

static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but I'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_MPDU;

	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;

	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		ah->stats.antenna_rx[rs->rs_antenna]++;
	else
		ah->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(ah, skb);

	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (ah->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(ah, skb, rxs);

	ieee80211_rx(ah->hw, skb);
}

/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			ah->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			ah->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			ah->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			ah->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

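/*
 * Recompute and program the interrupt mask: while an rx or tx tasklet
 * is still pending, keep the corresponding interrupt sources masked so
 * the ISR doesn't re-schedule work that is already queued.
 */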
static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}

static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
}


/*************\
* TX Handling *
\*************/

void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	dev_kfree_skb_any(skb);
}

static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	 * Remove MAC header padding before giving the frame
	 * back to mac80211.
	 */
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}

static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
1714
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
1715 1716
{
	struct sk_buff *skb = bf->skb;
J
Johannes Berg 已提交
1717
	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1718
	struct ath5k_desc *ds;
1719 1720
	int ret = 0;
	u8 antenna;
1721
	u32 flags;
1722
	const int padsize = 0;
1723

1724
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
1725
			DMA_TO_DEVICE);
1726
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
1727 1728
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);
1729

1730 1731
	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
1732 1733 1734 1735
		return -EIO;
	}

	ds = bf->desc;
1736
	antenna = ah->ah_tx_ant;
1737 1738

	flags = AR5K_TXDESC_NOACK;
1739
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
1740 1741
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
1742
	} else
1743
		ds->ds_link = 0;
1744 1745 1746 1747 1748 1749 1750

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
1751
	 * automatically, based on ACKed packets.
1752 1753 1754 1755 1756
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
B
Bob Copeland 已提交
1757 1758 1759 1760 1761
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
1762 1763
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
1764
		antenna = ah->bsent & 4 ? 2 : 1;
1765

1766

1767 1768 1769
	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	/* don't touch vif->drv_priv before the NULL check above */
	avf = (void *)vif->drv_priv;

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
	if (ret)
		avf->bbuf->skb = NULL;
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklet) or
 * user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1853
				"stuck beacon time (%u missed)\n",
1854 1855
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
1856
				  "stuck beacon, resetting\n");
1857
			ieee80211_queue_work(ah->hw, &ah->reset_work);
1858 1859 1860
		}
		return;
	}
1861 1862
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
1863
			"resume beacon xmit after %u misses\n",
1864 1865
			ah->bmisscount);
		ah->bmisscount = 0;
1866 1867
	}

1868 1869
	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) ||
			ah->opmode == NL80211_IFTYPE_MESH_POINT) {
1870 1871
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
1872 1873 1874
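		/*
		 * Example (hypothetical numbers): with bintval = 100 TU and
		 * ATH_BCBUF = 4 slots, tsftu % 100 == 30 gives
		 * slot = 30 * 4 / 100 = 1, so the vif in bslot[2] sends
		 * this period's beacon.
		 */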
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;
	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
		return;
	}

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT)
		ath5k_beacon_update(ah->hw, vif);

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

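	/*
	 * Queue frames that mac80211 buffered for stations in power save:
	 * after a DTIM beacon they go out on the Content After Beacon
	 * (CAB) queue until it fills up.
	 */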
	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @ah: struct ath5k_hw pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = ah->bintval & AR5K_BEACON_PERIOD;
	if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(ah, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */
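	/*
	 * One TU is 1024us, so TSF_TO_TU() simply drops the low 10 bits
	 * of the 64-bit TSF.  Worked example (hypothetical numbers):
	 * intval = 100 TU, hw_tu = 12345, FUDGE = 13.  With no beacon
	 * received (bc_tsf == -1) below we get
	 * nexttbtt = roundup(12358, 100) = 12400; with a beacon received
	 * at bc_tu = 12250 we get
	 * nexttbtt = 12250 + roundup(108, 100) = 12450, the next TBTT on
	 * the sender's grid that is ahead of our own TSF.
	 */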

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE

	ah->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @ah: struct ath5k_hw pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&ah->block, flags);
	ah->bmisscount = 0;
	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (ah->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(ah);

		ah->imask |= AR5K_INT_SWBA;

		if (ah->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(ah);
		} else
			ath5k_beacon_update_timers(ah, -1);
	} else {
		ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
	}

	ath5k_hw_set_imr(ah, ah->imask);
	mmiowb();
	spin_unlock_irqrestore(&ah->block, flags);
}

static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(ah);
		ah->nexttbtt += ah->bintval;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				ah->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&ah->block);
		ath5k_beacon_send(ah);
		spin_unlock(&ah->block);
	}
}


/********************\
* Interrupt handling *
\********************/

static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
		/* run ANI only when full calibration is not active */
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		tasklet_schedule(&ah->calib);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

static void
ath5k_schedule_rx(struct ath5k_hw *ah)
{
	ah->rx_pending = true;
	tasklet_schedule(&ah->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_hw *ah)
{
	ah->tx_pending = true;
	tasklet_schedule(&ah->txtq);
}

static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_hw *ah = dev_id;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
		((ath5k_get_bus_type(ah) != ATH_AHB) &&
				!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	do {
		ath5k_hw_get_isr(ah, &status);		/* NB: clears IRQ too */
		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, ah->imask);
		if (unlikely(status & AR5K_INT_FATAL)) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		} else if (unlikely(status & AR5K_INT_RXORN)) {
			/*
			 * Receive buffers are full. Either the bus is busy or
			 * the CPU is not fast enough to process all received
			 * frames.
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			ah->stats.rxorn_intr++;
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(ah->hw, &ah->reset_work);
			} else
				ath5k_schedule_rx(ah);
		} else {
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&ah->beacontq);

			if (status & AR5K_INT_RXEOL) {
				/*
				* NB: the hardware should re-read the link when
				*     RXE bit is written, but it doesn't work at
				*     least on older hardware revs.
				*/
				ah->stats.rxeol_intr++;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				ath5k_hw_update_tx_triglevel(ah, true);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(ah);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_schedule_tx(ah);
			if (status & AR5K_INT_BMISS) {
				/* TODO */
			}
			if (status & AR5K_INT_MIB) {
				ah->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&ah->rf_kill.toggleq);

		}

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	if (ah->rx_pending || ah->tx_pending)
		ath5k_set_current_imask(ah);

	if (unlikely(!counter))
		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");

	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_tasklet_calibrate(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

	/* Only full calibration for now */
	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(ah->curchan->center_freq),
		ah->curchan->hw_value);

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
		ieee80211_queue_work(ah->hw, &ah->reset_work);
	}
	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
		ATH5K_ERR(ah, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				ah->curchan->center_freq));

	/* Noise floor calibration interrupts rx/tx path while I/Q calibration
	 * doesn't.
	 * TODO: We should stop TX here, so that it doesn't interfere.
	 * Note that stopping the queues is not enough to stop TX! */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
		ah->ah_cal_next_nf = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
		ath5k_hw_update_noise_floor(ah);
	}

	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
}


static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}


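/*
 * Watchdog for stuck TX queues: ath5k_tx_processq() clears
 * txq_poll_mark whenever it completes frames on a queue.  If a queue
 * still holds frames and the mark survived a whole poll interval, no
 * descriptor completed in that time and we reset the hardware.
 */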
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	mutex_lock(&ah->lock);

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(ah, NULL, true);
	}

	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


/*************************\
* Initialization routines *
\*************************/

int __devinit
2341
ath5k_init_softc(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2342
{
2343
	struct ieee80211_hw *hw = ah->hw;
2344 2345 2346 2347 2348
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
2349
	SET_IEEE80211_DEV(hw, ah->dev);
2350
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2351 2352 2353
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2354 2355 2356 2357 2358 2359 2360

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

2361 2362 2363 2364
	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

2365 2366 2367 2368 2369 2370 2371
	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
2372
	__set_bit(ATH_STAT_INVALID, ah->status);
2373

2374 2375 2376 2377 2378 2379 2380
	ah->opmode = NL80211_IFTYPE_STATION;
	ah->bintval = 1000;
	mutex_init(&ah->lock);
	spin_lock_init(&ah->rxbuflock);
	spin_lock_init(&ah->txbuflock);
	spin_lock_init(&ah->block);
	spin_lock_init(&ah->irqlock);
2381 2382

	/* Setup interrupt handler */
2383
	ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
2384
	if (ret) {
2385
		ATH5K_ERR(ah, "request_irq failed\n");
2386 2387 2388
		goto err;
	}

2389
	common = ath5k_hw_common(ah);
2390 2391
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
2392
	common->ah = ah;
2393
	common->hw = hw;
2394
	common->priv = ah;
2395
	common->clockrate = 40;
2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
2407
	ret = ath5k_hw_init(ah);
2408
	if (ret)
2409
		goto err_irq;
2410 2411

	/* set up multi-rate retry capabilities */
2412
	if (ah->ah_version == AR5K_AR5212) {
2413
		hw->max_rates = 4;
2414 2415
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
2416 2417 2418 2419 2420 2421 2422 2423 2424
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

2425 2426 2427 2428
	ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
					ah->ah_mac_srev,
					ah->ah_phy_revision);
2429

2430
	if (!ah->ah_single_chip) {
2431
		/* Single chip radio (!RF5111) */
2432 2433
		if (ah->ah_radio_5ghz_revision &&
			!ah->ah_radio_2ghz_revision) {
2434 2435
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
2436 2437
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
2438
					ath5k_chip_name(AR5K_VERSION_RAD,
2439 2440
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
2441
			/* No 2GHz support (5110 and some
2442
			 * 5GHz only cards) -> report 5GHz radio */
2443
			} else if (!test_bit(AR5K_MODE_11B,
2444 2445
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
2446
					ath5k_chip_name(AR5K_VERSION_RAD,
2447 2448
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
2449 2450
			/* Multiband radio */
			} else {
2451
				ATH5K_INFO(ah, "RF%s multiband radio found"
2452 2453
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
2454 2455
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
2456 2457 2458 2459
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
2460 2461 2462
		else if (ah->ah_radio_5ghz_revision &&
				ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
2463
				ath5k_chip_name(AR5K_VERSION_RAD,
2464 2465 2466
					ah->ah_radio_5ghz_revision),
					ah->ah_radio_5ghz_revision);
			ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
2467
				ath5k_chip_name(AR5K_VERSION_RAD,
2468 2469
					ah->ah_radio_2ghz_revision),
					ah->ah_radio_2ghz_revision);
2470 2471 2472
		}
	}

2473
	ath5k_debug_init_device(ah);
2474 2475

	/* ready to process interrupts */
2476
	__clear_bit(ATH_STAT_INVALID, ah->status);
2477 2478 2479

	return 0;
err_ah:
2480
	ath5k_hw_deinit(ah);
2481
err_irq:
2482
	free_irq(ah->irq, ah);
2483 2484 2485 2486
err:
	return ret;
}

static int
ath5k_stop_locked(struct ath5k_hw *ah)
{

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, ah->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(ah->hw);

	if (!test_bit(ATH_STAT_INVALID, ah->status)) {
		ath5k_led_off(ah);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(ah->irq);
		ath5k_rx_stop(ah);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(ah);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

int ath5k_start(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&ah->lock);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);

	/*
	 * Stop anything previously set up.  This is safe whether or not
	 * this is the first time through.
	 */
	ath5k_stop_locked(ah);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ah->curchan = ah->hw->conf.channel;
	ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;

	ret = ath5k_reset(ah, NULL, false);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
		ah->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

static void ath5k_stop_tasklets(struct ath5k_hw *ah)
{
	ah->rx_pending = false;
	ah->tx_pending = false;
	tasklet_kill(&ah->rxtq);
	tasklet_kill(&ah->txtq);
	tasklet_kill(&ah->calib);
	tasklet_kill(&ah->beacontq);
	tasklet_kill(&ah->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
void ath5k_stop(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	int ret;

	mutex_lock(&ah->lock);
	ret = ath5k_stop_locked(ah);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(ah);

		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&ah->lock);

	ath5k_stop_tasklets(ah);

	cancel_delayed_work_sync(&ah->tx_complete_work);

	ath5k_rfkill_hw_stop(ah);
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with ah->lock held.
 */
static int
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(ah);
	if (chan)
		ah->curchan = chan;

	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;
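	/* Fast channel switching (skipping the full chip reset) is only
	 * attempted when we are really changing channels and the
	 * fastchanswitch module parameter is set. */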

	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
	ah->ah_cal_next_ani = jiffies;
	ah->ah_cal_next_nf = jiffies;
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&ah->survey, 0, sizeof(ah->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(ah, c); */

	ath5k_beacon_config(ah);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(ah->hw);

	return 0;
err:
	return ret;
}

static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		reset_work);

	mutex_lock(&ah->lock);
	ath5k_reset(ah, NULL, true);
	mutex_unlock(&ah->lock);
}

static int __devinit
ath5k_init(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;


	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MACs that don't have support will
	 * return false w/o doing anything.  MACs that do
	 * support it will return true w/o doing anything.
	 */
	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);

	if (ret < 0)
		goto err;
	if (ret > 0)
		__set_bit(ATH_STAT_MRRETRY, ah->status);

	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	ah->bhalq = ret;
	ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(ah->cabq)) {
		ATH5K_ERR(ah, "can't setup cab queue\n");
		ret = PTR_ERR(ah->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		 * directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
	tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
	tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
	tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
	tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);

	INIT_WORK(&ah->reset_work, ath5k_reset_work);
	INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(ah, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(ah, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(ah, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(ah);

	ath5k_sysfs_register(ah);

	return 0;
err_queues:
	ath5k_txq_release(ah);
err_bhal:
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
err_desc:
	ath5k_desc_free(ah);
err:
	return ret;
}

void
ath5k_deinit_softc(struct ath5k_hw *ah)
{
	struct ieee80211_hw *hw = ah->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(ah);
	ath5k_txq_release(ah);
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
	ath5k_unregister_leds(ah);

	ath5k_sysfs_unregister(ah);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(ah);
	free_irq(ah->irq, ah);
}
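
/*
 * Report whether any active interface is currently associated:
 * ath5k_vif_iter() fills in iter_data.any_assoc as it walks the
 * active vifs.
 */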

bool
ath5k_any_vif_assoc(struct ath5k_hw *ah)
{
	struct ath5k_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	return iter_data.any_assoc;
}
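
/*
 * Enable/disable reception of beacon frames: toggle the
 * AR5K_RX_FILTER_BEACON bit in the hardware RX filter and cache the
 * result in ah->filter_flags.
 */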

void
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_hw *ah = hw->priv;
	u32 rfilt;
	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ah->filter_flags = rfilt;
}