base.c 81.5 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42
/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

43 44
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

45 46
#include <linux/module.h>
#include <linux/delay.h>
47
#include <linux/dma-mapping.h>
J
Jiri Slaby 已提交
48
#include <linux/hardirq.h>
49
#include <linux/if.h>
J
Jiri Slaby 已提交
50
#include <linux/io.h>
51 52 53 54
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
55
#include <linux/slab.h>
56
#include <linux/etherdevice.h>
57
#include <linux/nl80211.h>
58 59 60 61 62 63 64 65

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
66
#include "ani.h"
67 68
#include "ath5k.h"
#include "../regd.h"
69

70 71 72
#define CREATE_TRACE_POINTS
#include "trace.h"

73
/* Module parameters (all read-only after load, S_IRUGO) */

/* Disable hardware crypto engine; forces software encryption in mac80211. */
bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

/* Opt-in fast channel switch path for AR2413/AR5413 radios. */
static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

/* Ignore the hardware GPIO RFKill line (useful on boards that wire it wrong). */
static bool ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
								bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");


/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/* Forward declarations for routines defined later in this file. */
static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
								bool skip_pcu);
97 98

/* Known SREVs */
J
Jiri Slaby 已提交
99
/*
 * Known silicon revisions: maps MAC/RAD revision codes to printable chip
 * names.  Scanned linearly by ath5k_chip_name(); the "xxxxx" sentinel
 * entries terminate each (type, value) group with an unknown-revision match.
 */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

J
Jiri Slaby 已提交
150
/*
 * Rate table exposed to mac80211.  First 4 entries are the 11b CCK rates
 * (with short-preamble variants), the remaining 8 are the 11a/g OFDM
 * rates.  .bitrate is in units of 100 kbit/s; .hw_value is the AR5K
 * rate code programmed into tx descriptors.
 */
static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
};

191 192 193 194 195 196 197 198 199 200
/*
 * Extend a 15-bit hardware rx timestamp to a full 64-bit TSF value,
 * using the current TSF as the reference for the upper bits.  If the
 * low 15 TSF bits have already wrapped past the rx stamp, the frame
 * belongs to the previous 0x8000-tick period, so step back one period
 * before splicing the stamp in.
 */
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);
	u64 tsf_low = tsf & 0x7fff;

	if (tsf_low < rstamp)
		tsf -= 0x8000;		/* rx stamp is from the prior period */

	return (tsf & ~0x7fff) | rstamp;
}

201
/*
 * ath5k_chip_name() - Return a printable chip name for a srev code
 * @type: AR5K_VERSION_MAC or AR5K_VERSION_RAD (which table group to search)
 * @val: raw silicon revision value read from the hardware
 *
 * Walks srev_names[] and returns the matching name, or "xxxxx" if the
 * revision is unknown.  The 0xf0 masked compare records a partial
 * (family-level) match but keeps scanning; only an exact 0xff match
 * terminates the loop, so a precise entry later in the table wins over
 * an earlier family match.
 */
const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		/* family-level match: remember, but keep looking */
		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		/* exact match: done */
		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
L
Luis R. Rodriguez 已提交
222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
/*
 * Register access trampolines for the shared ath layer: adapt the
 * opaque hw_priv cookie back to our ath5k_hw and forward to the
 * native register accessors.
 */
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = hw_priv;

	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = hw_priv;

	ath5k_hw_reg_write(ah, val, reg_offset);
}

/* Ops vector handed to the common ath code for MMIO access. */
static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
238

239 240 241 242 243
/***********************\
* Driver Initialization *
\***********************/

/*
 * Regulatory-domain change notifier: forward the cfg80211 request to
 * the common ath regulatory code, which reprograms channel limits.
 */
static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
251

252 253 254
/********************\
* Channel/mode setup *
\********************/
255

256
/*
257
 * Returns true for the channel numbers used.
258
 */
259 260 261 262 263 264 265
#ifdef CONFIG_ATH5K_TEST_CHANNELS
/* Test build: accept every channel the hardware reports as usable. */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	return true;
}

#else
/*
 * Filter out non-standard channel numbers: accept all 2GHz channels
 * (1-14) and only the 5GHz channels allocated by the relevant
 * standards (UNII bands, ETSI midband, 802.11j).
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
#endif
283

284
/*
 * ath5k_setup_channels() - Populate a channel array for one PHY mode
 * @ah: the &struct ath5k_hw
 * @channels: destination array to fill
 * @mode: AR5K_MODE_11A/11B/11G, stored as each channel's hw_value
 * @max: capacity of @channels
 *
 * Iterates candidate channel numbers for the band implied by @mode,
 * keeping only channels that map to a valid frequency, are supported by
 * the chipset and pass the standard-channel filter.  Returns the number
 * of entries written.
 */
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!ath5k_is_standard_channel(ch, band))
			continue;

		count++;
	}

	return count;
}
331

332
static void
333
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
334 335
{
	u8 i;
336

337
	for (i = 0; i < AR5K_MAX_RATES; i++)
338
		ah->rate_idx[b->band][i] = -1;
339

340
	for (i = 0; i < b->n_bitrates; i++) {
341
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
342
		if (b->bitrates[i].hw_value_short)
343
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
344
	}
345
}
346

347 348 349
/*
 * ath5k_setup_bands() - Register supported bands/rates with mac80211
 * @hw: the &struct ieee80211_hw
 *
 * Fills ah->sbands from the chipset capability bits: the 2GHz band gets
 * either the full G rate set (12 rates) or the B subset (4 rates), the
 * 5GHz band gets the 8 OFDM rates.  Channel arrays are carved out of the
 * shared ah->channels buffer (2GHz first, 5GHz after, tracked via
 * count_c/max_c).  Also builds the per-band reverse rate index tables.
 * Always returns 0.
 */
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		/* A mode uses the 8 OFDM rates (skip the 4 CCK entries) */
		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		/* 5GHz channels follow the 2GHz ones in ah->channels */
		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

428 429 430 431 432
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff after a la  ath5k_init.
 *
 * Called with ah->lock.
 *
 * Returns the result of ath5k_reset() (0 on success, negative errno
 * otherwise).
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chan, true);
}

451
/*
 * ath5k_vif_iter() - Per-interface iterator callback
 * @data: a &struct ath5k_vif_iter_data accumulator
 * @mac: the interface's MAC address
 * @vif: the interface being visited
 *
 * Called for each active interface (and optionally once more for a new
 * one) to accumulate: the BSSID mask (bits common to all MACs and the
 * hardware address), the first active MAC, whether the hardware lladdr
 * still needs programming, any-association state, a STA count and the
 * combined operating mode.
 */
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	/* Clear mask bits wherever this MAC differs from the hw address */
	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	/* Remember the first active MAC we see */
	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	/* If some interface already uses the hw address, no lladdr update */
	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (ether_addr_equal(iter_data->hw_macaddr, mac))
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interfaces is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

491
/*
 * ath5k_update_bssid_mask_and_opmode() - Recompute BSSID mask, opmode
 * and RX filter from the current set of interfaces
 * @ah: the &struct ath5k_hw
 * @vif: an additional (possibly not-yet-active) interface to account
 *       for, or NULL
 *
 * Iterates all active interfaces with ath5k_vif_iter(), then programs
 * the resulting operating mode, hardware address, BSSID mask and RX
 * filter into the hardware.
 */
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	/* Account for @vif first; it may not be active yet */
	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(
		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		ath5k_vif_iter, &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
547

548
static inline int
549
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
550 551
{
	int rix;
552

553 554 555 556 557
	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

558
	rix = ah->rate_idx[ah->curchan->band][hw_rix];
559 560 561 562 563 564 565 566 567 568 569
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

/*
 * ath5k_rx_skb_alloc() - Allocate and DMA-map an rx skb
 * @ah: the &struct ath5k_hw
 * @skb_addr: out parameter receiving the DMA address of the buffer
 *
 * Returns the skb on success, or NULL on allocation/mapping failure
 * (the skb is freed on mapping failure).
 */
static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}
600

601
/*
 * ath5k_rxbuf_setup() - (Re)initialize an rx buffer and link its
 * descriptor onto the rx chain
 * @ah: the &struct ath5k_hw
 * @bf: the rx buffer to set up (an skb is allocated if it has none)
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated, or the
 * error from descriptor setup.
 */
static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	/* Chain onto the previous descriptor and advance the tail */
	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

645
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
646
{
647 648 649
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;
650

651 652
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
653

654 655 656 657 658 659 660 661
	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
662
	else
663
		htype = AR5K_PKT_TYPE_NORMAL;
664

665
	return htype;
666 667
}

668
static int
669
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
670
		  struct ath5k_txq *txq, int padsize)
671
{
672 673 674 675 676 677 678 679 680 681 682
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;
683

684
	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
685

686
	/* XXX endianness */
687
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
688
			DMA_TO_DEVICE);
689

690
	rate = ieee80211_get_tx_rate(ah->hw, info);
691 692 693 694
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}
695

696 697
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;
698

699 700 701
	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;
702

703 704 705 706 707 708 709 710 711 712 713
	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
714 715
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
716
			info->control.vif, pktlen, info));
717 718 719
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
720 721
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
722
			info->control.vif, pktlen, info));
723 724 725 726
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
727
		(ah->ah_txpower.txp_requested * 2),
728 729 730 731 732 733
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

734 735 736 737 738 739 740 741
	/* Set up MRR descriptor */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		memset(mrr_rate, 0, sizeof(mrr_rate));
		memset(mrr_tries, 0, sizeof(mrr_tries));
		for (i = 0; i < 3; i++) {
			rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
			if (!rate)
				break;
742

743 744 745
			mrr_rate[i] = rate->hw_value;
			mrr_tries[i] = info->control.rates[i + 1].count;
		}
746

747 748 749 750 751
		ath5k_hw_setup_mrr_tx_desc(ah, ds,
			mrr_rate[0], mrr_tries[0],
			mrr_rate[1], mrr_tries[1],
			mrr_rate[2], mrr_tries[2]);
	}
752

753 754
	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;
B
Bruno Randolf 已提交
755

756 757
	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
B
Bruno Randolf 已提交
758
	txq->txq_len++;
759 760 761 762
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;
B
Bruno Randolf 已提交
763

764 765 766 767 768 769 770
	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
771
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
772
	return ret;
B
Bruno Randolf 已提交
773 774
}

775 776 777 778
/*******************\
* Descriptors setup *
\*******************/

779
/*
 * ath5k_desc_alloc() - Allocate the DMA descriptor pool and buffer
 * bookkeeping structures
 * @ah: the &struct ath5k_hw
 *
 * One coherent DMA region holds all tx, rx and beacon descriptors
 * (plus one spare); a parallel kcalloc'ed array of ath5k_buf entries is
 * carved up onto the rxbuf, txbuf and bcbuf free lists, each buffer
 * pointing at its descriptor and its DMA address.  Returns 0 or
 * -ENOMEM (with everything freed on failure).
 */
static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	/* rx buffers */
	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	/* tx buffers */
	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}
843

844
/*
 * ath5k_txbuf_free_skb() - Unmap and release a tx buffer's skb
 * @ah: the &struct ath5k_hw
 * @bf: the tx buffer; a no-op if it holds no skb
 *
 * The skb is returned to mac80211 via ieee80211_free_txskb() so tx
 * status accounting stays correct; the buffer itself is cleared for
 * reuse.
 */
void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	ieee80211_free_txskb(ah->hw, bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

/*
 * ath5k_rxbuf_free_skb() - Unmap and free an rx buffer's skb
 * @ah: the &struct ath5k_hw
 * @bf: the rx buffer; a no-op if it holds no skb
 *
 * Unmaps the full rx_bufsize mapping and clears the buffer for reuse.
 */
void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

874
/*
 * ath5k_desc_free() - Tear down everything ath5k_desc_alloc() created
 * @ah: the &struct ath5k_hw
 *
 * Releases any skbs still attached to tx/rx/beacon buffers, then frees
 * the coherent descriptor region and the buffer array.
 */
static void
ath5k_desc_free(struct ath5k_hw *ah)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;

	kfree(ah->bufptr);
	ah->bufptr = NULL;
}

895 896 897 898 899 900

/**************\
* Queues setup *
\**************/

/*
 * ath5k_txq_setup() - Allocate a hardware tx queue and its driver state
 * @ah: the &struct ath5k_hw
 * @qtype: hardware queue type passed to ath5k_hw_setup_tx_queue()
 * @subtype: hardware queue subtype
 *
 * Returns a pointer into ah->txqs, or ERR_PTR(qnum) if the hardware has
 * no free queue of this type.  The driver-side state is initialized
 * only the first time a given qnum is handed out.
 */
static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
		int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	txq = &ah->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

952 953
/*
 * ath5k_beaconq_setup() - Allocate the hardware beacon queue
 * @ah: the &struct ath5k_hw
 *
 * Returns the hardware queue number, or a negative value from
 * ath5k_hw_setup_tx_queue() on failure.
 */
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

968
/*
 * Configure the beacon hardware queue (ah->bhalq) according to the
 * current operating mode, then reconfigure the CAB queue's ready time
 * to 80% of the beacon interval. Returns 0 on success or a negative
 * error code from the hw queue helpers.
 */
static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret)
		goto err;

	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (ah->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

1025 1026 1027
/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 * @ah: The &struct ath5k_hw
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(ah, bf);

				ath5k_txbuf_free_skb(ah, bf);

				/* return the buffer to the free pool;
				 * txbuflock nests inside the queue lock */
				spin_lock(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
				txq->txq_len--;
				spin_unlock(&ah->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

1065
static void
1066
ath5k_txq_release(struct ath5k_hw *ah)
1067
{
1068
	struct ath5k_txq *txq = ah->txqs;
1069
	unsigned int i;
1070

1071
	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
1072
		if (txq->setup) {
1073
			ath5k_hw_release_tx_queue(ah, txq->qnum);
1074 1075 1076
			txq->setup = false;
		}
}
1077 1078


1079 1080 1081
/*************\
* RX Handling *
\*************/
1082

1083 1084 1085
/*
 * Enable the receive h/w following a reset.
 *
 * Sizes the rx buffers, re-links all rx descriptors, points the
 * hardware at the first one and re-enables the rx DMA/PCU engines.
 * Returns 0 on success or the error from ath5k_rxbuf_setup().
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	/* buffer size must be a multiple of the cache line size */
	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

1120
/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	/* dump whatever is left in the rx buffers (debug builds only) */
	ath5k_debug_printrxbuffs(ah);
}
1136

1137
/*
 * Decide whether a received frame was decrypted by the hardware.
 * Returns RX_FLAG_DECRYPTED (to be OR'd into rx status flags) or 0.
 */
static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	/* hw reported a valid key index and no decrypt error */
	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		/* key id lives in the top two bits of the 4th IV byte */
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}

1165

1166
/*
 * Work around hardware TSF quirks when receiving an IBSS beacon from
 * our own BSSID: fix up a bogus rx mactime and update the beacon
 * timers if the local TSF has moved past the next TBTT.
 */
static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    ether_addr_equal(mgmt->bssid, common->curbssid)) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}
1232

1233
/*
 * Feed the RSSI of beacons received from our own BSSID into the
 * exponentially weighted moving average used for signal statistics.
 */
static void
ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    !ether_addr_equal(mgmt->bssid, common->curbssid))
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}
1249

1250 1251 1252 1253
/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
1254
{
1255
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1256 1257
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;
1258

1259
	if (ieee80211_has_a4(frame_control))
1260
		padpos += ETH_ALEN;
1261 1262

	if (ieee80211_is_data_qos(frame_control))
1263 1264 1265
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
1266 1267
}

1268 1269 1270 1271 1272
/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
1273
{
1274 1275
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
1276

1277
	if (padsize && skb->len > padpos) {
1278

1279 1280
		if (skb_headroom(skb) < padsize)
			return -1;
1281

1282
		skb_push(skb, padsize);
1283
		memmove(skb->data, skb->data + padsize, padpos);
1284 1285
		return padsize;
	}
B
Bob Copeland 已提交
1286

1287 1288
	return 0;
}
1289

1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306
/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;
1307

1308
	if (padsize && skb->len >= padpos + padsize) {
1309 1310 1311
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
1312
	}
B
Bob Copeland 已提交
1313

1314
	return 0;
1315 1316 1317
}

/*
 * Fill in the mac80211 rx status for a successfully received frame,
 * update driver statistics and hand the frame to mac80211.
 */
static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but I'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_START;

	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;

	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	/* per-antenna rx statistics; slot 0 counts invalid antennas */
	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		ah->stats.antenna_rx[rs->rs_antenna]++;
	else
		ah->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(ah, skb);

	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (ah->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(ah, skb, rxs);

	ieee80211_rx(ah->hw, skb);
}
1383

1384 1385 1386 1387
/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			ah->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			ah->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			ah->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
			/* PHY errors are never passed up */
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			/* pass up so mac80211 can handle the MIC failure */
			ah->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	/* multi-descriptor (jumbo) frames are not supported */
	if (unlikely(rs->rs_more)) {
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

1439
static void
1440
ath5k_set_current_imask(struct ath5k_hw *ah)
1441
{
1442
	enum ath5k_int imask;
1443 1444
	unsigned long flags;

1445 1446 1447
	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
1448
		imask &= ~AR5K_INT_RX_ALL;
1449
	if (ah->tx_pending)
1450
		imask &= ~AR5K_INT_TX_ALL;
1451 1452
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
1453 1454
}

1455
/*
 * RX tasklet: process completed rx descriptors, hand good frames to
 * mac80211 and re-arm the buffers for further reception.
 */
static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			/* hand the freshly allocated skb to the buffer */
			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	/* unmask rx interrupts again now that we are done */
	ath5k_set_current_imask(ah);
}

B
Bruno Randolf 已提交
1521

1522 1523 1524
/*************\
* TX Handling *
\*************/
B
Bruno Randolf 已提交
1525

1526
/*
 * Queue an skb for transmission on the given hardware tx queue:
 * pad the 802.11 header to a 4-byte boundary, grab a free tx buffer
 * and set up its descriptor. The skb is freed on any failure.
 */
void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	/* throttle a data queue that has grown too deep */
	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
		/* descriptor setup failed: return buffer to the free pool */
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	ieee80211_free_txskb(hw, skb);
}

1582
/*
 * Report the completion status of a transmitted frame back to
 * mac80211: fill in per-rate retry counts, ACK/filter/retry/fifo
 * statistics, strip the header padding and call ieee80211_tx_status().
 */
static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	/* save the requested retry counts before clearing the status */
	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	/* terminate the rate list after the final attempted rate */
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	* Remove MAC header padding before giving the frame
	* back to mac80211.
	*/
	ath5k_remove_padding(skb);

	/* per-antenna tx statistics; slot 0 counts invalid antennas */
	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}
1642 1643

/*
 * Reap completed tx descriptors from one queue, report their status
 * to mac80211 and return the buffers to the free pool. Wakes the
 * mac80211 queue again when enough room has been freed.
 */
static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	/* wake the mac80211 data queue once it has drained enough */
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

/*
 * TX tasklet: process every tx queue that was flagged by the
 * interrupt handler, then re-enable the tx interrupt sources.
 */
static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 *
 * Maps the beacon skb for DMA and fills in the tx descriptor.
 * Returns 0 on success, -EIO if the DMA mapping fails (the skb is
 * freed in that case) or the error from ah_setup_tx_desc().
 */
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = ah->bsent & 4 ? 2 : 1;


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON,
			(ah->ah_txpower.txp_requested * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

1796 1797 1798 1799 1800 1801 1802
/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 *
 * Returns 0 on success, -EINVAL if @vif is NULL, -ENOMEM if no
 * beacon could be obtained from mac80211, or the error from
 * ath5k_beacon_setup().
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	avf = (void *)vif->drv_priv;
	/* free the previously queued beacon before installing the new one */
	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
	return ret;
}

1831 1832 1833 1834 1835
/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;
	int err;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
		return;
	}
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			ah->bmisscount);
		ah->bmisscount = 0;
	}

	/* pick the interface whose beacon slot this SWBA belongs to */
	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
			ah->num_mesh_vifs > 1) ||
			ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

	/* flush buffered multicast/broadcast frames onto the CAB queue */
	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}

1941 1942 1943
/**
 * ath5k_beacon_update_timers - update beacon timers
 *
1944
 * @ah: struct ath5k_hw pointer we are operating on
1945 1946 1947 1948 1949 1950 1951 1952
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
1953
 * when a TSF update has been detected, but also when an new IBSS is created or
1954 1955 1956
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
1957
void
1958
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
1959
{
1960 1961
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;
1962

1963
	intval = ah->bintval & AR5K_BEACON_PERIOD;
1964 1965
	if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
		+ ah->num_mesh_vifs > 1) {
1966 1967
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
1968
			ATH5K_WARN(ah, "intval %u is too low, min 15\n",
1969 1970
				   intval);
	}
1971 1972 1973
	if (WARN_ON(!intval))
		return;

1974 1975
	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);
1976

1977 1978 1979
	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);
1980

1981
#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
1982
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
L
Lucas De Marchi 已提交
1983
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
1984 1985
	 * configuration we need to make sure it is bigger than that. */

1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000
	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
L
Lucas De Marchi 已提交
2001
		 * beacon received, SW merge happened but HW TSF not yet updated.
2002 2003 2004 2005 2006
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
2007
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STA.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE
2021

2022
	ah->nexttbtt = nexttbtt;
2023

2024
	intval |= AR5K_BEACON_ENA;
2025
	ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
2026 2027 2028 2029 2030 2031

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
2032
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2033 2034
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
2035
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2036 2037
			"reset HW TSF and timers\n");
	else
2038
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2039 2040
			"updated timers based on beacon TSF\n");

2041
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
2042 2043 2044
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
2045
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
2046 2047 2048
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
2049 2050
}

2051 2052 2053
/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
2054
 * @ah: struct ath5k_hw pointer we are operating on
2055
 *
2056
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2057
 * interrupts to detect TSF updates only.
2058
 */
2059
void
2060
ath5k_beacon_config(struct ath5k_hw *ah)
2061
{
2062
	spin_lock_bh(&ah->block);
2063 2064
	ah->bmisscount = 0;
	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2065

2066
	if (ah->enable_beacon) {
2067
		/*
2068 2069
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
2070
		 * only once here.
2071
		 * We use the SWBA interrupt only to keep track of the beacon
2072
		 * timers in order to detect automatic TSF updates.
2073
		 */
2074
		ath5k_beaconq_config(ah);
2075

2076
		ah->imask |= AR5K_INT_SWBA;
2077

2078
		if (ah->opmode == NL80211_IFTYPE_ADHOC) {
2079
			if (ath5k_hw_hasveol(ah))
2080
				ath5k_beacon_send(ah);
J
Jiri Slaby 已提交
2081
		} else
2082
			ath5k_beacon_update_timers(ah, -1);
2083
	} else {
2084
		ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
2085 2086
	}

2087
	ath5k_hw_set_imr(ah, ah->imask);
2088
	mmiowb();
2089
	spin_unlock_bh(&ah->block);
2090 2091
}

N
Nick Kossifidis 已提交
2092 2093
static void ath5k_tasklet_beacon(unsigned long data)
{
2094
	struct ath5k_hw *ah = (struct ath5k_hw *) data;
N
Nick Kossifidis 已提交
2095 2096 2097 2098 2099 2100

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
2101
	 * transmission time) in order to detect whether
N
Nick Kossifidis 已提交
2102 2103
	 * automatic TSF updates happened.
	 */
2104
	if (ah->opmode == NL80211_IFTYPE_ADHOC) {
2105
		/* XXX: only if VEOL supported */
2106 2107 2108
		u64 tsf = ath5k_hw_get_tsf64(ah);
		ah->nexttbtt += ah->bintval;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
N
Nick Kossifidis 已提交
2109 2110
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
2111
				ah->nexttbtt,
N
Nick Kossifidis 已提交
2112 2113 2114
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
2115 2116 2117
		spin_lock(&ah->block);
		ath5k_beacon_send(ah);
		spin_unlock(&ah->block);
N
Nick Kossifidis 已提交
2118 2119 2120
	}
}

2121 2122 2123 2124 2125

/********************\
* Interrupt handling *
\********************/

2126 2127 2128
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
2129
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
N
Nick Kossifidis 已提交
2130 2131 2132 2133 2134
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run ANI only when calibration is not active */

2135 2136
		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2137
		tasklet_schedule(&ah->ani_tasklet);
2138

N
Nick Kossifidis 已提交
2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152
	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run calibration only when another calibration
		 * is not running.
		 *
		 * Note: This is for both full/short calibration,
		 * if it's time for a full one, ath5k_calibrate_work will deal
		 * with it. */

		ah->ah_cal_next_short = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
		ieee80211_queue_work(ah->hw, &ah->calib_work);
2153 2154 2155 2156 2157 2158
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

2159
static void
2160
ath5k_schedule_rx(struct ath5k_hw *ah)
2161
{
2162 2163
	ah->rx_pending = true;
	tasklet_schedule(&ah->rxtq);
2164 2165 2166
}

static void
2167
ath5k_schedule_tx(struct ath5k_hw *ah)
2168
{
2169 2170
	ah->tx_pending = true;
	tasklet_schedule(&ah->txtq);
2171 2172
}

P
Pavel Roskin 已提交
2173
static irqreturn_t
2174 2175
ath5k_intr(int irq, void *dev_id)
{
2176
	struct ath5k_hw *ah = dev_id;
2177 2178 2179
	enum ath5k_int status;
	unsigned int counter = 1000;

N
Nick Kossifidis 已提交
2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190

	/*
	 * If hw is not ready (or detached) and we get an
	 * interrupt, or if we have no interrupts pending
	 * (that means it's not for us) skip it.
	 *
	 * NOTE: Group 0/1 PCI interface registers are not
	 * supported on WiSOCs, so we can't check for pending
	 * interrupts (ISR belongs to another register group
	 * so we are ok).
	 */
2191
	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
N
Nick Kossifidis 已提交
2192 2193
			((ath5k_get_bus_type(ah) != ATH_AHB) &&
			!ath5k_hw_is_intr_pending(ah))))
2194 2195
		return IRQ_NONE;

N
Nick Kossifidis 已提交
2196
	/** Main loop **/
2197
	do {
N
Nick Kossifidis 已提交
2198 2199
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */

2200 2201
		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, ah->imask);
N
Nick Kossifidis 已提交
2202 2203 2204 2205 2206 2207 2208 2209

		/*
		 * Fatal hw error -> Log and reset
		 *
		 * Fatal errors are unrecoverable so we have to
		 * reset the card. These errors include bus and
		 * dma errors.
		 */
2210
		if (unlikely(status & AR5K_INT_FATAL)) {
N
Nick Kossifidis 已提交
2211

2212
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2213
				  "fatal int, resetting\n");
2214
			ieee80211_queue_work(ah->hw, &ah->reset_work);
N
Nick Kossifidis 已提交
2215 2216 2217 2218 2219 2220 2221 2222

		/*
		 * RX Overrun -> Count and reset if needed
		 *
		 * Receive buffers are full. Either the bus is busy or
		 * the CPU is not fast enough to process all received
		 * frames.
		 */
2223
		} else if (unlikely(status & AR5K_INT_RXORN)) {
N
Nick Kossifidis 已提交
2224

B
Bruno Randolf 已提交
2225 2226 2227
			/*
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
N
Nick Kossifidis 已提交
2228
			 * We don't know exactly which versions need a reset
B
Bruno Randolf 已提交
2229 2230
			 * this guess is copied from the HAL.
			 */
2231
			ah->stats.rxorn_intr++;
N
Nick Kossifidis 已提交
2232

2233
			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
2234
				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2235
					  "rx overrun, resetting\n");
2236
				ieee80211_queue_work(ah->hw, &ah->reset_work);
2237
			} else
2238
				ath5k_schedule_rx(ah);
N
Nick Kossifidis 已提交
2239

2240
		} else {
N
Nick Kossifidis 已提交
2241 2242

			/* Software Beacon Alert -> Schedule beacon tasklet */
2243
			if (status & AR5K_INT_SWBA)
2244
				tasklet_hi_schedule(&ah->beacontq);
2245

N
Nick Kossifidis 已提交
2246 2247 2248 2249 2250 2251 2252 2253
			/*
			 * No more RX descriptors -> Just count
			 *
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			if (status & AR5K_INT_RXEOL)
2254
				ah->stats.rxeol_intr++;
N
Nick Kossifidis 已提交
2255 2256 2257 2258


			/* TX Underrun -> Bump tx trigger level */
			if (status & AR5K_INT_TXURN)
2259
				ath5k_hw_update_tx_triglevel(ah, true);
N
Nick Kossifidis 已提交
2260 2261

			/* RX -> Schedule rx tasklet */
2262
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
2263
				ath5k_schedule_rx(ah);
N
Nick Kossifidis 已提交
2264 2265 2266 2267 2268 2269

			/* TX -> Schedule tx tasklet */
			if (status & (AR5K_INT_TXOK
					| AR5K_INT_TXDESC
					| AR5K_INT_TXERR
					| AR5K_INT_TXEOL))
2270
				ath5k_schedule_tx(ah);
N
Nick Kossifidis 已提交
2271 2272 2273 2274 2275 2276

			/* Missed beacon -> TODO
			if (status & AR5K_INT_BMISS)
			*/

			/* MIB event -> Update counters and notify ANI */
2277
			if (status & AR5K_INT_MIB) {
2278
				ah->stats.mib_intr++;
B
Bruno Randolf 已提交
2279
				ath5k_hw_update_mib_counters(ah);
2280
				ath5k_ani_mib_intr(ah);
2281
			}
N
Nick Kossifidis 已提交
2282 2283

			/* GPIO -> Notify RFKill layer */
2284
			if (status & AR5K_INT_GPIO)
2285
				tasklet_schedule(&ah->rf_kill.toggleq);
B
Bob Copeland 已提交
2286

2287
		}
2288 2289 2290 2291

		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

2292
	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2293

N
Nick Kossifidis 已提交
2294 2295 2296 2297 2298 2299
	/*
	 * Until we handle rx/tx interrupts mask them on IMR
	 *
	 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
	 * and unset after we 've handled the interrupts.
	 */
2300 2301
	if (ah->rx_pending || ah->tx_pending)
		ath5k_set_current_imask(ah);
2302

2303
	if (unlikely(!counter))
2304
		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
2305

N
Nick Kossifidis 已提交
2306
	/* Fire up calibration poll */
2307
	ath5k_intr_calibration_poll(ah);
2308

2309 2310 2311 2312 2313 2314 2315 2316
	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
N
Nick Kossifidis 已提交
2317
ath5k_calibrate_work(struct work_struct *work)
2318
{
N
Nick Kossifidis 已提交
2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		calib_work);

	/* Should we run a full calibration ? */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {

		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

		ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
				"running full calibration\n");

		if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					"got new rfgain, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
	} else
		ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
2343

2344

2345 2346 2347
	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(ah->curchan->center_freq),
		ah->curchan->hw_value);
2348

2349 2350
	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
		ATH5K_ERR(ah, "calibration of channel %u failed\n",
2351
			ieee80211_frequency_to_channel(
2352
				ah->curchan->center_freq));
2353

N
Nick Kossifidis 已提交
2354
	/* Clear calibration flags */
2355
	if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
N
Nick Kossifidis 已提交
2356
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2357
	else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
N
Nick Kossifidis 已提交
2358
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
2359 2360 2361
}


2362 2363 2364
static void
ath5k_tasklet_ani(unsigned long data)
{
2365
	struct ath5k_hw *ah = (void *)data;
2366 2367 2368 2369

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
2370 2371 2372
}


2373 2374 2375
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
2376
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
2377 2378 2379 2380 2381
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

2382
	mutex_lock(&ah->lock);
2383

2384 2385 2386
	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
2387
			spin_lock_bh(&txq->lock);
2388
			if (txq->txq_len > 1) {
2389
				if (txq->txq_poll_mark) {
2390
					ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
2391 2392 2393
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
2394
					txq->txq_stuck++;
2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
2406
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2407
			  "TX queues stuck, resetting\n");
2408
		ath5k_reset(ah, NULL, true);
2409 2410
	}

2411
	mutex_unlock(&ah->lock);
2412

2413
	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
2414 2415 2416 2417
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


2418 2419 2420
/*************************\
* Initialization routines *
\*************************/
2421

2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437
/* Interface combinations advertised to mac80211/cfg80211. */
static const struct ieee80211_iface_limit if_limits[] = {
	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 4,	.types =
#ifdef CONFIG_MAC80211_MESH
				 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
				 BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination if_comb = {
	.limits = if_limits,
	.n_limits = ARRAY_SIZE(if_limits),
	.max_interfaces = 2048,
	.num_different_channels = 1,
};

B
Bill Pemberton 已提交
2438
int
2439
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2440
{
2441
	struct ieee80211_hw *hw = ah->hw;
2442 2443 2444 2445 2446
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
2447
	SET_IEEE80211_DEV(hw, ah->dev);
2448
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2449 2450
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
2451
			IEEE80211_HW_MFP_CAPABLE |
2452
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2453 2454 2455 2456 2457 2458 2459

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

2460 2461 2462
	hw->wiphy->iface_combinations = &if_comb;
	hw->wiphy->n_iface_combinations = 1;

2463 2464 2465
	/* SW support for IBSS_RSN is provided by mac80211 */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

2466 2467 2468 2469
	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

2470 2471 2472 2473 2474 2475 2476
	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
2477
	__set_bit(ATH_STAT_INVALID, ah->status);
2478

2479 2480 2481 2482 2483 2484 2485
	ah->opmode = NL80211_IFTYPE_STATION;
	ah->bintval = 1000;
	mutex_init(&ah->lock);
	spin_lock_init(&ah->rxbuflock);
	spin_lock_init(&ah->txbuflock);
	spin_lock_init(&ah->block);
	spin_lock_init(&ah->irqlock);
2486 2487

	/* Setup interrupt handler */
2488
	ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
2489
	if (ret) {
2490
		ATH5K_ERR(ah, "request_irq failed\n");
2491 2492 2493
		goto err;
	}

2494
	common = ath5k_hw_common(ah);
2495 2496
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
2497
	common->ah = ah;
2498
	common->hw = hw;
2499
	common->priv = ah;
2500
	common->clockrate = 40;
2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
2512
	ret = ath5k_hw_init(ah);
2513
	if (ret)
2514
		goto err_irq;
2515

2516 2517
	/* Set up multi-rate retry capabilities */
	if (ah->ah_capabilities.cap_has_mrr_support) {
2518
		hw->max_rates = 4;
2519 2520
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
2521 2522 2523 2524 2525 2526 2527 2528 2529
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

2530 2531 2532 2533
	ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
					ah->ah_mac_srev,
					ah->ah_phy_revision);
2534

2535
	if (!ah->ah_single_chip) {
2536
		/* Single chip radio (!RF5111) */
2537 2538
		if (ah->ah_radio_5ghz_revision &&
			!ah->ah_radio_2ghz_revision) {
2539 2540
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
2541 2542
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
2543
					ath5k_chip_name(AR5K_VERSION_RAD,
2544 2545
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
2546
			/* No 2GHz support (5110 and some
2547
			 * 5GHz only cards) -> report 5GHz radio */
2548
			} else if (!test_bit(AR5K_MODE_11B,
2549 2550
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
2551
					ath5k_chip_name(AR5K_VERSION_RAD,
2552 2553
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
2554 2555
			/* Multiband radio */
			} else {
2556
				ATH5K_INFO(ah, "RF%s multiband radio found"
2557 2558
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
2559 2560
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
2561 2562 2563 2564
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
2565 2566 2567
		else if (ah->ah_radio_5ghz_revision &&
				ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
2568
				ath5k_chip_name(AR5K_VERSION_RAD,
2569 2570 2571
					ah->ah_radio_5ghz_revision),
					ah->ah_radio_5ghz_revision);
			ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
2572
				ath5k_chip_name(AR5K_VERSION_RAD,
2573 2574
					ah->ah_radio_2ghz_revision),
					ah->ah_radio_2ghz_revision);
2575 2576 2577
		}
	}

2578
	ath5k_debug_init_device(ah);
2579 2580

	/* ready to process interrupts */
2581
	__clear_bit(ATH_STAT_INVALID, ah->status);
2582 2583 2584

	return 0;
err_ah:
2585
	ath5k_hw_deinit(ah);
2586
err_irq:
2587
	free_irq(ah->irq, ah);
2588 2589 2590 2591
err:
	return ret;
}

2592
static int
2593
ath5k_stop_locked(struct ath5k_hw *ah)
2594 2595
{

2596 2597
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, ah->status));
2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
2614
	ieee80211_stop_queues(ah->hw);
2615

2616 2617
	if (!test_bit(ATH_STAT_INVALID, ah->status)) {
		ath5k_led_off(ah);
2618
		ath5k_hw_set_imr(ah, 0);
2619 2620
		synchronize_irq(ah->irq);
		ath5k_rx_stop(ah);
2621
		ath5k_hw_dma_stop(ah);
2622
		ath5k_drain_tx_buffs(ah);
2623 2624 2625 2626
		ath5k_hw_phy_disable(ah);
	}

	return 0;
2627 2628
}

2629
int ath5k_start(struct ieee80211_hw *hw)
2630
{
2631
	struct ath5k_hw *ah = hw->priv;
2632 2633
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;
2634

2635
	mutex_lock(&ah->lock);
2636

2637
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);
2638 2639

	/*
2640 2641
	 * Stop anything previously setup.  This is safe
	 * no matter this is the first time through or not.
2642
	 */
2643
	ath5k_stop_locked(ah);
2644

2645 2646 2647 2648 2649 2650 2651
	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
2652
	ah->curchan = ah->hw->conf.channel;
N
Nick Kossifidis 已提交
2653 2654 2655 2656 2657 2658 2659 2660 2661
	ah->imask = AR5K_INT_RXOK
		| AR5K_INT_RXERR
		| AR5K_INT_RXEOL
		| AR5K_INT_RXORN
		| AR5K_INT_TXDESC
		| AR5K_INT_TXEOL
		| AR5K_INT_FATAL
		| AR5K_INT_GLOBAL
		| AR5K_INT_MIB;
2662

2663
	ret = ath5k_reset(ah, NULL, false);
2664 2665
	if (ret)
		goto done;
2666

2667 2668
	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_start(ah);
2669 2670 2671 2672 2673 2674 2675 2676

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

N
Nick Kossifidis 已提交
2677 2678 2679
	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;
2680

2681 2682
	for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
		ah->bslot[i] = NULL;
2683

2684 2685 2686
	ret = 0;
done:
	mmiowb();
2687
	mutex_unlock(&ah->lock);
2688

2689
	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
2690 2691
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

2692 2693 2694
	return ret;
}

2695
static void ath5k_stop_tasklets(struct ath5k_hw *ah)
2696
{
2697 2698 2699 2700 2701 2702
	ah->rx_pending = false;
	ah->tx_pending = false;
	tasklet_kill(&ah->rxtq);
	tasklet_kill(&ah->txtq);
	tasklet_kill(&ah->beacontq);
	tasklet_kill(&ah->ani_tasklet);
2703 2704 2705 2706 2707 2708 2709 2710
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
2711
void ath5k_stop(struct ieee80211_hw *hw)
2712
{
2713
	struct ath5k_hw *ah = hw->priv;
2714 2715
	int ret;

2716 2717 2718
	mutex_lock(&ah->lock);
	ret = ath5k_stop_locked(ah);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results a weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead Perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
2739
		ret = ath5k_hw_on_hold(ah);
2740

2741
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
2742
				"putting device to sleep\n");
2743 2744
	}

2745
	mmiowb();
2746
	mutex_unlock(&ah->lock);
2747

2748
	ath5k_stop_tasklets(ah);
2749

2750
	cancel_delayed_work_sync(&ah->tx_complete_work);
2751

2752 2753
	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_stop(ah);
2754 2755
}

2756 2757 2758
/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
2759
 *
2760
 * This should be called with ah->lock.
2761
 */
2762
static int
2763
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
2764
							bool skip_pcu)
2765
{
B
Bruno Randolf 已提交
2766
	struct ath_common *common = ath5k_hw_common(ah);
N
Nick Kossifidis 已提交
2767
	int ret, ani_mode;
2768
	bool fast;
2769

2770
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
2771

2772
	ath5k_hw_set_imr(ah, 0);
2773 2774
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);
2775

L
Lucas De Marchi 已提交
2776
	/* Save ani mode and disable ANI during
N
Nick Kossifidis 已提交
2777 2778
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
2779
	ani_mode = ah->ani_state.ani_mode;
N
Nick Kossifidis 已提交
2780 2781
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

2782 2783 2784
	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
2785
	ath5k_drain_tx_buffs(ah);
2786
	if (chan)
2787
		ah->curchan = chan;
2788 2789 2790

	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

2791
	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
J
Jiri Slaby 已提交
2792
	if (ret) {
2793
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
2794 2795
		goto err;
	}
J
Jiri Slaby 已提交
2796

2797
	ret = ath5k_rx_start(ah);
J
Jiri Slaby 已提交
2798
	if (ret) {
2799
		ATH5K_ERR(ah, "can't start recv logic\n");
2800 2801
		goto err;
	}
J
Jiri Slaby 已提交
2802

N
Nick Kossifidis 已提交
2803
	ath5k_ani_init(ah, ani_mode);
2804

N
Nick Kossifidis 已提交
2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822
	/*
	 * Set calibration intervals
	 *
	 * Note: We don't need to run calibration imediately
	 * since some initial calibration is done on reset
	 * even for fast channel switching. Also on scanning
	 * this will get set again and again and it won't get
	 * executed unless we connect somewhere and spend some
	 * time on the channel (that's what calibration needs
	 * anyway to be accurate).
	 */
	ah->ah_cal_next_full = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
	ah->ah_cal_next_ani = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
	ah->ah_cal_next_short = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);

2823
	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
2824

B
Bruno Randolf 已提交
2825
	/* clear survey data and cycle counters */
2826
	memset(&ah->survey, 0, sizeof(ah->survey));
2827
	spin_lock_bh(&common->cc_lock);
B
Bruno Randolf 已提交
2828 2829 2830
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
2831
	spin_unlock_bh(&common->cc_lock);
B
Bruno Randolf 已提交
2832

2833
	/*
J
Jiri Slaby 已提交
2834 2835 2836 2837 2838
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
2839 2840 2841
	 *
	 * XXX needed?
	 */
2842
/*	ath5k_chan_change(ah, c); */
2843

2844
	ath5k_beacon_config(ah);
J
Jiri Slaby 已提交
2845
	/* intrs are enabled by ath5k_beacon_config */
2846

2847
	ieee80211_wake_queues(ah->hw);
B
Bruno Randolf 已提交
2848

2849 2850 2851 2852 2853
	return 0;
err:
	return ret;
}

2854 2855
static void ath5k_reset_work(struct work_struct *work)
{
2856
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
2857 2858
		reset_work);

2859 2860 2861
	mutex_lock(&ah->lock);
	ath5k_reset(ah, NULL, true);
	mutex_unlock(&ah->lock);
2862 2863
}

B
Bill Pemberton 已提交
2864
static int
2865
ath5k_init(struct ieee80211_hw *hw)
2866
{
2867

2868
	struct ath5k_hw *ah = hw->priv;
2869
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
B
Bruno Randolf 已提交
2870
	struct ath5k_txq *txq;
2871
	u8 mac[ETH_ALEN] = {};
2872 2873 2874
	int ret;


2875 2876
	/*
	 * Collect the channel list.  The 802.11 layer
2877
	 * is responsible for filtering this list based
2878 2879 2880 2881 2882
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
2883
		ATH5K_ERR(ah, "can't get channels\n");
2884 2885
		goto err;
	}
J
Jiri Slaby 已提交
2886

2887 2888 2889
	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
2890
	ret = ath5k_desc_alloc(ah);
2891
	if (ret) {
2892
		ATH5K_ERR(ah, "can't allocate descriptors\n");
2893 2894
		goto err;
	}
2895

2896 2897 2898 2899 2900 2901 2902 2903
	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
2904
		ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
2905 2906
		goto err_desc;
	}
2907 2908 2909 2910 2911
	ah->bhalq = ret;
	ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(ah->cabq)) {
		ATH5K_ERR(ah, "can't setup cab queue\n");
		ret = PTR_ERR(ah->cabq);
2912 2913
		goto err_bhal;
	}
2914

2915 2916 2917 2918 2919
	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		* directly use the mac80211 queue number without any mapping */
2920
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
2921
		if (IS_ERR(txq)) {
2922
			ATH5K_ERR(ah, "can't setup xmit queue\n");
2923 2924 2925
			ret = PTR_ERR(txq);
			goto err_queues;
		}
2926
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
2927
		if (IS_ERR(txq)) {
2928
			ATH5K_ERR(ah, "can't setup xmit queue\n");
2929 2930 2931
			ret = PTR_ERR(txq);
			goto err_queues;
		}
2932
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2933
		if (IS_ERR(txq)) {
2934
			ATH5K_ERR(ah, "can't setup xmit queue\n");
2935 2936 2937
			ret = PTR_ERR(txq);
			goto err_queues;
		}
2938
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
2939
		if (IS_ERR(txq)) {
2940
			ATH5K_ERR(ah, "can't setup xmit queue\n");
2941 2942 2943 2944 2945 2946
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
2947
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2948
		if (IS_ERR(txq)) {
2949
			ATH5K_ERR(ah, "can't setup xmit queue\n");
2950 2951 2952 2953 2954
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}
2955

2956 2957 2958 2959
	tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
	tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
	tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
	tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
2960

2961
	INIT_WORK(&ah->reset_work, ath5k_reset_work);
N
Nick Kossifidis 已提交
2962
	INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
2963
	INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
2964

2965
	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
2966
	if (ret) {
2967
		ATH5K_ERR(ah, "unable to read address from EEPROM\n");
2968
		goto err_queues;
2969
	}
2970

2971 2972
	SET_IEEE80211_PERM_ADDR(hw, mac);
	/* All MAC address bits matter for ACKs */
2973
	ath5k_update_bssid_mask_and_opmode(ah, NULL);
2974 2975 2976 2977

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
2978
		ATH5K_ERR(ah, "can't initialize regulatory system\n");
2979 2980 2981 2982 2983
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
2984
		ATH5K_ERR(ah, "can't register ieee80211 hw\n");
2985 2986 2987 2988 2989 2990
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

2991
	ath5k_init_leds(ah);
2992

2993
	ath5k_sysfs_register(ah);
2994 2995 2996

	return 0;
err_queues:
2997
	ath5k_txq_release(ah);
2998
err_bhal:
2999
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
3000
err_desc:
3001
	ath5k_desc_free(ah);
3002 3003 3004 3005
err:
	return ret;
}

void
3007
ath5k_deinit_ah(struct ath5k_hw *ah)
3008
{
3009
	struct ieee80211_hw *hw = ah->hw;
3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
3025 3026 3027 3028
	ath5k_desc_free(ah);
	ath5k_txq_release(ah);
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
	ath5k_unregister_leds(ah);
3029

3030
	ath5k_sysfs_unregister(ah);
3031 3032 3033 3034 3035
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
3036 3037
	ath5k_hw_deinit(ah);
	free_irq(ah->irq, ah);
3038 3039
}

bool
3041
ath5k_any_vif_assoc(struct ath5k_hw *ah)
3042
{
3043
	struct ath5k_vif_iter_data iter_data;
3044 3045 3046 3047 3048
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

3049 3050 3051
	ieee80211_iterate_active_interfaces_atomic(
		ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		ath5k_vif_iter, &iter_data);
3052 3053 3054
	return iter_data.any_assoc;
}

void
P
Pavel Roskin 已提交
3056
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3057
{
3058
	struct ath5k_hw *ah = hw->priv;
3059 3060 3061 3062 3063 3064 3065
	u32 rfilt;
	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
3066
	ah->filter_flags = rfilt;
3067
}

void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
		   const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (ah && ah->hw)
		printk("%s" pr_fmt("%s: %pV"),
		       level, wiphy_name(ah->hw->wiphy), &vaf);
	else
		printk("%s" pr_fmt("%pV"), level, &vaf);

	va_end(args);
}