/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/nl80211.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
#include "ath5k.h"
#include "../regd.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

static int ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
								bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
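/* These parameters can be set at module load time, e.g. (illustrative
 * invocation, all of them default to off):
 *	modprobe ath5k nohwcrypt=1 all_channels=1
 */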

/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
								bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
};

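/*
 * The hardware reports only 15 bits of the rx timestamp (rstamp).
 * Extend it to a full 64-bit TSF: if the low bits of the current TSF
 * have already wrapped past rstamp, the frame was received in the
 * previous 0x8000 usec window, so step back one window before splicing
 * rstamp in.
 */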
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}

static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}

/********************\
* Channel/mode setup *
\********************/

/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

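	/* Note: (chan & 3) selects channel numbers spaced 4 apart within
	 * each range below, e.g. 36, 40, 44, ... 64 for UNII-1/2. */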
	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}

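/*
 * Walk all candidate channel numbers for @mode, convert each to a
 * center frequency and copy those the chipset supports (and, unless
 * the all_channels modparam is set, that pass the standard-channel
 * filter above). Returns the number of entries written to @channels.
 */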
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		count++;
	}

	return count;
}

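/*
 * Build the reverse mapping from hardware rate code to the index in
 * the band's bitrate table; codes without a mapping stay at -1.
 */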
static void
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		ah->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff the same way ath5k_init does.
 *
 * Called with ah->lock.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chan, true);
}

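/*
 * Interface iterator used to compute the combined BSSID mask and
 * operating mode. A mask bit survives only where every interface
 * address agrees with the hardware MAC: e.g. if the hardware address
 * ends in 0x55 and a vif address ends in 0x66, 0x55 ^ 0x66 = 0x33 and
 * the mask's last octet becomes ~0x33 = 0xcc.
 */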
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = ah->rate_idx[ah->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}

static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(ah->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(ah->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	/* Set up MRR descriptor */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		memset(mrr_rate, 0, sizeof(mrr_rate));
		memset(mrr_tries, 0, sizeof(mrr_tries));
		for (i = 0; i < 3; i++) {
			rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
			if (!rate)
				break;

			mrr_rate[i] = rate->hw_value;
			mrr_tries[i] = info->control.rates[i + 1].count;
		}

		ath5k_hw_setup_mrr_tx_desc(ah, ds,
			mrr_rate[0], mrr_tries[0],
			mrr_rate[1], mrr_tries[1],
			mrr_rate[2], mrr_tries[2]);
	}

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

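/*
 * All tx, rx and beacon descriptors live in a single coherent DMA
 * allocation; ath5k_desc_alloc() below carves it up into ath5k_buf
 * entries and threads them onto the rx, tx and beacon free lists.
 */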
static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}

void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_hw *ah)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;

	kfree(ah->bufptr);
	ah->bufptr = NULL;
}

/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
		int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	txq = &ah->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret)
		goto err;

	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (ah->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @ah: The &struct ath5k_hw
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(ah, bf);

				ath5k_txbuf_free_skb(ah, bf);

				spin_lock_bh(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&ah->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq = ah->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

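	/* rx buffers are sized to a multiple of the cache line size
	 * (cachelsz) so DMA buffers stay cache-line aligned */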
	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(ah);
}

static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}


static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 byte (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}

static void
ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}

/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

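/*
 * Worked example: a QoS data frame has a 26 byte header (24 + 2 bytes
 * of QoS control), so padpos = 26 and padsize = 26 & 3 = 2; a plain
 * 24 byte header needs no padding (24 & 3 == 0).
 */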
/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}

/*
 * The MAC header is padded to have 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit.  We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}

static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but I'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply to that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_MPDU;

	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;

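	/* rs_rssi is reported relative to the noise floor, so adding the
	 * current noise floor yields an absolute signal level */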
	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		ah->stats.antenna_rx[rs->rs_antenna]++;
	else
		ah->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(ah, skb);

	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (ah->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(ah, skb, rxs);

	ieee80211_rx(ah->hw, skb);
}

/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			ah->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			ah->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			ah->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			ah->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

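/*
 * Recompute the interrupt mask: while the rx/tx tasklets still have
 * work pending, keep the corresponding interrupts masked so they are
 * not re-raised before the tasklet has drained.
 */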
static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}

static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
}

/*************\
* TX Handling *
\*************/

void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	dev_kfree_skb_any(skb);
}

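/*
 * Report a completed frame back to mac80211. The rate table inside the
 * tx info is reused for status reporting, so the per-rate try counts
 * are saved before ieee80211_tx_info_clear_status() wipes them.
 */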
static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	* Remove MAC header padding before giving the frame
	* back to mac80211.
	*/
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}

static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time. */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = ah->bsent & 4 ? 2 : 1;


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}
/*
 * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
 * this is called only once at config_bss time; for AP we do it on
 * every SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA.  Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;
	int err;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out.  If
	 * not, don't try to post another: skip this
	 * period and wait for the next.  Missed beacons
	 * indicate a problem and should not occur.  If we
	 * miss too many consecutive beacons reset the device.
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
		return;
	}
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			ah->bmisscount);
		ah->bmisscount = 0;
	}

	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) ||
			ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
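		/*
		 * Staggered beacons: the beacon interval is split into
		 * ATH_BCBUF evenly spaced slots and the current TU offset
		 * selects one; the interface in the following slot beacons
		 * next. E.g. (assuming ATH_BCBUF == 4) with intval 100 TU,
		 * a tsftu offset of 60 gives slot 2, so bslot[3] is due.
		 */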
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

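	/* Drain frames mac80211 buffered for delivery after the beacon
	 * (CAB, "content after beacon") onto the cab queue, stopping
	 * when the queue limit is reached. */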
	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @ah: struct ath5k_hw pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
 */
void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
	u32 nexttbtt, intval, hw_tu, bc_tu;
	u64 hw_tsf;

	intval = ah->bintval & AR5K_BEACON_PERIOD;
	if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs > 1) {
		intval /= ATH_BCBUF;	/* staggered multi-bss beacons */
		if (intval < 15)
			ATH5K_WARN(ah, "intval %u is too low, min 15\n",
				   intval);
	}
	if (WARN_ON(!intval))
		return;

	/* beacon TSF converted to TU */
	bc_tu = TSF_TO_TU(bc_tsf);

	/* current TSF converted to TU */
	hw_tsf = ath5k_hw_get_tsf64(ah);
	hw_tu = TSF_TO_TU(hw_tsf);

#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
	/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
	 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
	 * configuration we need to make sure it is bigger than that. */

	if (bc_tsf == -1) {
		/*
		 * no beacons received, called internally.
		 * just need to refresh timers based on HW TSF.
		 */
		nexttbtt = roundup(hw_tu + FUDGE, intval);
	} else if (bc_tsf == 0) {
		/*
		 * no beacon received, probably called by ath5k_reset_tsf().
		 * reset TSF to start with 0.
		 */
		nexttbtt = intval;
		intval |= AR5K_BEACON_RESET_TSF;
	} else if (bc_tsf > hw_tsf) {
		/*
		 * beacon received, SW merge happened but HW TSF not yet updated.
		 * not possible to reconfigure timers yet, but next time we
		 * receive a beacon with the same BSSID, the hardware will
		 * automatically update the TSF and then we need to reconfigure
		 * the timers.
		 */
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"need to wait for HW TSF sync\n");
		return;
	} else {
		/*
		 * most important case for beacon synchronization between STAs.
		 *
		 * beacon received and HW TSF has been already updated by HW.
		 * update next TBTT based on the TSF of the beacon, but make
		 * sure it is ahead of our local TSF timer.
		 */
		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
	}
#undef FUDGE
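
	/*
	 * Worked example for the bc_tsf == -1 case: with hw_tu = 1230,
	 * intval = 100 and FUDGE = 13 (AR5K_TUNE_SW_BEACON_RESP 10 + 3),
	 * nexttbtt = roundup(1243, 100) = 1300, i.e. the first interval
	 * boundary at least FUDGE TUs ahead of the current TU.
	 */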

	ah->nexttbtt = nexttbtt;

	intval |= AR5K_BEACON_ENA;
	ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);

	/*
	 * debugging output last in order to preserve the time critical aspect
	 * of this function
	 */
	if (bc_tsf == -1)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reconfigured timers based on HW TSF\n");
	else if (bc_tsf == 0)
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"reset HW TSF and timers\n");
	else
		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"updated timers based on beacon TSF\n");

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
			  (unsigned long long) bc_tsf,
			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
		intval & AR5K_BEACON_PERIOD,
		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}

/**
 * ath5k_beacon_config - Configure the beacon queues and interrupts
 *
 * @ah: struct ath5k_hw pointer we are operating on
 *
 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
 * interrupts to detect TSF updates only.
 */
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&ah->block, flags);
	ah->bmisscount = 0;
	ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

	if (ah->enable_beacon) {
		/*
		 * In IBSS mode we use a self-linked tx descriptor and let the
		 * hardware send the beacons automatically. We have to load it
		 * only once here.
		 * We use the SWBA interrupt only to keep track of the beacon
		 * timers in order to detect automatic TSF updates.
		 */
		ath5k_beaconq_config(ah);

		ah->imask |= AR5K_INT_SWBA;

		if (ah->opmode == NL80211_IFTYPE_ADHOC) {
			if (ath5k_hw_hasveol(ah))
				ath5k_beacon_send(ah);
		} else
			ath5k_beacon_update_timers(ah, -1);
	} else {
		ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
	}

	ath5k_hw_set_imr(ah, ah->imask);
	mmiowb();
	spin_unlock_irqrestore(&ah->block, flags);
}

static void ath5k_tasklet_beacon(unsigned long data)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) data;

	/*
	 * Software beacon alert--time to send a beacon.
	 *
	 * In IBSS mode we use this interrupt just to
	 * keep track of the next TBTT (target beacon
	 * transmission time) in order to detect whether
	 * automatic TSF updates happened.
	 */
	if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/* XXX: only if VEOL supported */
		u64 tsf = ath5k_hw_get_tsf64(ah);
		ah->nexttbtt += ah->bintval;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"SWBA nexttbtt: %x hw_tu: %x "
				"TSF: %llx\n",
				ah->nexttbtt,
				TSF_TO_TU(tsf),
				(unsigned long long) tsf);
	} else {
		spin_lock(&ah->block);
		ath5k_beacon_send(ah);
		spin_unlock(&ah->block);
	}
}


/********************\
* Interrupt handling *
\********************/

static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
	   !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run ANI only when calibration is not active */

		ah->ah_cal_next_ani = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
		tasklet_schedule(&ah->ani_tasklet);

	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
		!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {

		/* Run calibration only when another calibration
		 * is not running.
		 *
		 * Note: This is for both full/short calibration;
		 * if it's time for a full one, ath5k_calibrate_work will deal
		 * with it. */

		ah->ah_cal_next_short = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
		ieee80211_queue_work(ah->hw, &ah->calib_work);
	}
	/* we could use SWI to generate enough interrupts to meet our
	 * calibration interval requirements, if necessary:
	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}

static void
ath5k_schedule_rx(struct ath5k_hw *ah)
{
	ah->rx_pending = true;
	tasklet_schedule(&ah->rxtq);
}

static void
ath5k_schedule_tx(struct ath5k_hw *ah)
{
	ah->tx_pending = true;
	tasklet_schedule(&ah->txtq);
}

static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
	struct ath5k_hw *ah = dev_id;
	enum ath5k_int status;
	unsigned int counter = 1000;

	/*
	 * If hw is not ready (or detached) and we get an
	 * interrupt, or if we have no interrupts pending
	 * (that means it's not for us) skip it.
	 *
	 * NOTE: Group 0/1 PCI interface registers are not
	 * supported on WiSOCs, so we can't check for pending
	 * interrupts (ISR belongs to another register group
	 * so we are ok).
	 */
	if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
			((ath5k_get_bus_type(ah) != ATH_AHB) &&
			!ath5k_hw_is_intr_pending(ah))))
		return IRQ_NONE;

	/** Main loop **/
	do {
		ath5k_hw_get_isr(ah, &status);	/* NB: clears IRQ too */

		ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
				status, ah->imask);

		/*
		 * Fatal hw error -> Log and reset
		 *
		 * Fatal errors are unrecoverable so we have to
		 * reset the card. These errors include bus and
		 * dma errors.
		 */
		if (unlikely(status & AR5K_INT_FATAL)) {

			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "fatal int, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);

		/*
		 * RX Overrun -> Count and reset if needed
		 *
		 * Receive buffers are full. Either the bus is busy or
		 * the CPU is not fast enough to process all received
		 * frames.
		 */
		} else if (unlikely(status & AR5K_INT_RXORN)) {

			/*
			 * Older chipsets need a reset to come out of this
			 * condition, but we treat it as RX for newer chips.
			 * We don't know exactly which versions need a reset -
			 * this guess is copied from the HAL.
			 */
			ah->stats.rxorn_intr++;

			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
				ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					  "rx overrun, resetting\n");
				ieee80211_queue_work(ah->hw, &ah->reset_work);
			} else
				ath5k_schedule_rx(ah);

		} else {

			/* Software Beacon Alert -> Schedule beacon tasklet */
			if (status & AR5K_INT_SWBA)
				tasklet_hi_schedule(&ah->beacontq);

			/*
			 * No more RX descriptors -> Just count
			 *
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			if (status & AR5K_INT_RXEOL)
				ah->stats.rxeol_intr++;


			/* TX Underrun -> Bump tx trigger level */
			if (status & AR5K_INT_TXURN)
				ath5k_hw_update_tx_triglevel(ah, true);

			/* RX -> Schedule rx tasklet */
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_schedule_rx(ah);

			/* TX -> Schedule tx tasklet */
			if (status & (AR5K_INT_TXOK
					| AR5K_INT_TXDESC
					| AR5K_INT_TXERR
					| AR5K_INT_TXEOL))
				ath5k_schedule_tx(ah);

			/* Missed beacon -> TODO
			if (status & AR5K_INT_BMISS)
			*/

			/* MIB event -> Update counters and notify ANI */
			if (status & AR5K_INT_MIB) {
				ah->stats.mib_intr++;
				ath5k_hw_update_mib_counters(ah);
				ath5k_ani_mib_intr(ah);
			}

			/* GPIO -> Notify RFKill layer */
			if (status & AR5K_INT_GPIO)
				tasklet_schedule(&ah->rf_kill.toggleq);

		}

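		/* On AHB WiSoCs we can't poll the ISR for pending
		 * interrupts (see the NOTE above), so run the loop body
		 * only once there. */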
		if (ath5k_get_bus_type(ah) == ATH_AHB)
			break;

	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);

	/*
	 * Until we handle rx/tx interrupts mask them on IMR
	 *
	 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
	 * and unset after we've handled the interrupts.
	 */
	if (ah->rx_pending || ah->tx_pending)
		ath5k_set_current_imask(ah);

	if (unlikely(!counter))
		ATH5K_WARN(ah, "too many interrupts, giving up for now\n");

	/* Fire up calibration poll */
	ath5k_intr_calibration_poll(ah);

	return IRQ_HANDLED;
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
static void
ath5k_calibrate_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		calib_work);

	/* Should we run a full calibration ? */
	if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {

		ah->ah_cal_next_full = jiffies +
			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
		ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

		ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
				"running full calibration\n");

		if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
			/*
			 * Rfgain is out of bounds, reset the chip
			 * to load new gain values.
			 */
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
					"got new rfgain, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}

		/* TODO: On full calibration we should stop TX here,
		 * so that it doesn't interfere (mostly due to gain_f
		 * calibration that messes with tx packets - see phy.c).
		 *
		 * NOTE: Stopping the queues from above is not enough
		 * to stop TX but saves us from disconnecting (at least
		 * we don't lose packets). */
		ieee80211_stop_queues(ah->hw);
	} else
		ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;


	ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
		ieee80211_frequency_to_channel(ah->curchan->center_freq),
		ah->curchan->hw_value);

	if (ath5k_hw_phy_calibrate(ah, ah->curchan))
		ATH5K_ERR(ah, "calibration of channel %u failed\n",
			ieee80211_frequency_to_channel(
				ah->curchan->center_freq));

	/* Clear calibration flags */
	if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
		ieee80211_wake_queues(ah->hw);
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
	} else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
		ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}


static void
ath5k_tasklet_ani(unsigned long data)
{
	struct ath5k_hw *ah = (void *)data;

	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
	ath5k_ani_calibration(ah);
	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}


static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
			tx_complete_work.work);
	struct ath5k_txq *txq;
	int i;
	bool needreset = false;

	mutex_lock(&ah->lock);

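	/* Two-pass stuck detection: txq_poll_mark is set here and cleared
	 * whenever the queue makes progress; if a non-empty queue still
	 * carries the mark on the next pass, it moved no frames for a
	 * whole poll interval and we treat it as stuck. */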
	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			if (txq->txq_len > 1) {
				if (txq->txq_poll_mark) {
					ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
						  "TX queue stuck %d\n",
						  txq->qnum);
					needreset = true;
					txq->txq_stuck++;
					spin_unlock_bh(&txq->lock);
					break;
				} else {
					txq->txq_poll_mark = true;
				}
			}
			spin_unlock_bh(&txq->lock);
		}
	}

	if (needreset) {
		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
			  "TX queues stuck, resetting\n");
		ath5k_reset(ah, NULL, true);
	}

	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
		msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}


/*************************\
* Initialization routines *
\*************************/

int __devinit
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = ah->hw;
	struct ath_common *common;
	int ret;
	int csz;

	/* Initialize driver private data */
	SET_IEEE80211_DEV(hw, ah->dev);
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
			IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
			IEEE80211_HW_SIGNAL_DBM |
			IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* both antennas can be configured as RX or TX */
	hw->wiphy->available_antennas_tx = 0x3;
	hw->wiphy->available_antennas_rx = 0x3;

	hw->extra_tx_headroom = 2;
	hw->channel_change_time = 5000;

	/*
	 * Mark the device as detached to avoid processing
	 * interrupts until setup is complete.
	 */
	__set_bit(ATH_STAT_INVALID, ah->status);

	ah->opmode = NL80211_IFTYPE_STATION;
	ah->bintval = 1000;
	mutex_init(&ah->lock);
	spin_lock_init(&ah->rxbuflock);
	spin_lock_init(&ah->txbuflock);
	spin_lock_init(&ah->block);
	spin_lock_init(&ah->irqlock);

	/* Setup interrupt handler */
	ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
	if (ret) {
		ATH5K_ERR(ah, "request_irq failed\n");
		goto err;
	}

	common = ath5k_hw_common(ah);
	common->ops = &ath5k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = hw;
	common->priv = ah;
	common->clockrate = 40;

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath5k_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	spin_lock_init(&common->cc_lock);

	/* Initialize device */
	ret = ath5k_hw_init(ah);
	if (ret)
		goto err_irq;

	/* Set up multi-rate retry capabilities */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		hw->max_rates = 4;
		hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
					 AR5K_INIT_RETRY_LONG);
	}

	hw->vif_data_size = sizeof(struct ath5k_vif);

	/* Finish private driver data initialization */
	ret = ath5k_init(hw);
	if (ret)
		goto err_ah;

	ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
			ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
					ah->ah_mac_srev,
					ah->ah_phy_revision);

	if (!ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (ah->ah_radio_5ghz_revision &&
			!ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!test_bit(AR5K_MODE_11A,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!test_bit(AR5K_MODE_11B,
				ah->ah_capabilities.cap_mode)) {
				ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				ATH5K_INFO(ah, "RF%s multiband radio found"
					" (0x%x)\n",
					ath5k_chip_name(AR5K_VERSION_RAD,
						ah->ah_radio_5ghz_revision),
						ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (ah->ah_radio_5ghz_revision &&
				ah->ah_radio_2ghz_revision) {
			ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_5ghz_revision),
					ah->ah_radio_5ghz_revision);
			ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
				ath5k_chip_name(AR5K_VERSION_RAD,
					ah->ah_radio_2ghz_revision),
					ah->ah_radio_2ghz_revision);
		}
	}

	ath5k_debug_init_device(ah);

	/* ready to process interrupts */
	__clear_bit(ATH_STAT_INVALID, ah->status);

	return 0;
err_ah:
	ath5k_hw_deinit(ah);
err_irq:
	free_irq(ah->irq, ah);
err:
	return ret;
}

static int
ath5k_stop_locked(struct ath5k_hw *ah)
{

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
			test_bit(ATH_STAT_INVALID, ah->status));

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	ieee80211_stop_queues(ah->hw);

	if (!test_bit(ATH_STAT_INVALID, ah->status)) {
		ath5k_led_off(ah);
		ath5k_hw_set_imr(ah, 0);
		synchronize_irq(ah->irq);
		ath5k_rx_stop(ah);
		ath5k_hw_dma_stop(ah);
		ath5k_drain_tx_buffs(ah);
		ath5k_hw_phy_disable(ah);
	}

	return 0;
}

int ath5k_start(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, i;

	mutex_lock(&ah->lock);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath5k_stop_locked(ah);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ah->curchan = ah->hw->conf.channel;
	ah->imask = AR5K_INT_RXOK
		| AR5K_INT_RXERR
		| AR5K_INT_RXEOL
		| AR5K_INT_RXORN
		| AR5K_INT_TXDESC
		| AR5K_INT_TXEOL
		| AR5K_INT_FATAL
		| AR5K_INT_GLOBAL
		| AR5K_INT_MIB;

	ret = ath5k_reset(ah, NULL, false);
	if (ret)
		goto done;

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/* Use higher rates for acks instead of base
	 * rate */
	ah->ah_ack_bitrate_high = true;

	for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
		ah->bslot[i] = NULL;

	ret = 0;
done:
	mmiowb();
	mutex_unlock(&ah->lock);

	ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
			msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));

	return ret;
}

static void ath5k_stop_tasklets(struct ath5k_hw *ah)
{
	ah->rx_pending = false;
	ah->tx_pending = false;
	tasklet_kill(&ah->rxtq);
	tasklet_kill(&ah->txtq);
	tasklet_kill(&ah->beacontq);
	tasklet_kill(&ah->ani_tasklet);
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath5k_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
void ath5k_stop(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	int ret;

	mutex_lock(&ah->lock);
	ret = ath5k_stop_locked(ah);
	if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
		/*
		 * Don't set the card in full sleep mode!
		 *
		 * a) When the device is in this state it must be carefully
		 * woken up or references to registers in the PCI clock
		 * domain may freeze the bus (and system).  This varies
		 * by chip and is mostly an issue with newer parts
		 * (madwifi sources mentioned srev >= 0x78) that go to
		 * sleep more quickly.
		 *
		 * b) On older chips full sleep results in weird behaviour
		 * during wakeup. I tested various cards with srev < 0x78
		 * and they don't wake up after module reload, a second
		 * module reload is needed to bring the card up again.
		 *
		 * Until we figure out what's going on don't enable
		 * full chip reset on any chip (this is what Legacy HAL
		 * and Sam's HAL do anyway). Instead perform a full reset
		 * on the device (same as initial state after attach) and
		 * leave it idle (keep MAC/BB on warm reset) */
		ret = ath5k_hw_on_hold(ah);

		ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				"putting device to sleep\n");
	}

	mmiowb();
	mutex_unlock(&ah->lock);

	ath5k_stop_tasklets(ah);

	cancel_delayed_work_sync(&ah->tx_complete_work);

	if (!ath5k_modparam_no_hw_rfkill_switch)
		ath5k_rfkill_hw_stop(ah);
}

/*
 * Reset the hardware.  If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 *
 * This should be called with ah->lock held.
 */
static int
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
							bool skip_pcu)
{
	struct ath_common *common = ath5k_hw_common(ah);
	int ret, ani_mode;
	bool fast;

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");

	ath5k_hw_set_imr(ah, 0);
	synchronize_irq(ah->irq);
	ath5k_stop_tasklets(ah);

	/* Save ani mode and disable ANI during
	 * reset. If we don't we might get false
	 * PHY error interrupts. */
	ani_mode = ah->ani_state.ani_mode;
	ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);

	/* We are going to empty hw queues
	 * so we should also free any remaining
	 * tx buffers */
	ath5k_drain_tx_buffs(ah);
	if (chan)
		ah->curchan = chan;

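	/* Attempt the faster channel-change path only when we really are
	 * switching channels and the user enabled the fastchanswitch
	 * modparam. */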
	fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0;

	ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
	if (ret) {
		ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
		goto err;
	}

	ret = ath5k_rx_start(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't start recv logic\n");
		goto err;
	}

	ath5k_ani_init(ah, ani_mode);

	/*
	 * Set calibration intervals
	 *
	 * Note: We don't need to run calibration immediately
	 * since some initial calibration is done on reset
	 * even for fast channel switching. Also on scanning
	 * this will get set again and again and it won't get
	 * executed unless we connect somewhere and spend some
	 * time on the channel (that's what calibration needs
	 * anyway to be accurate).
	 */
	ah->ah_cal_next_full = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
	ah->ah_cal_next_ani = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
	ah->ah_cal_next_short = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);

	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);

	/* clear survey data and cycle counters */
	memset(&ah->survey, 0, sizeof(ah->survey));
	spin_lock_bh(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	memset(&common->cc_survey, 0, sizeof(common->cc_survey));
	memset(&common->cc_ani, 0, sizeof(common->cc_ani));
	spin_unlock_bh(&common->cc_lock);

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
/*	ath5k_chan_change(ah, c); */

	ath5k_beacon_config(ah);
	/* intrs are enabled by ath5k_beacon_config */

	ieee80211_wake_queues(ah->hw);

	return 0;
err:
	return ret;
}

static void ath5k_reset_work(struct work_struct *work)
{
	struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
		reset_work);

	mutex_lock(&ah->lock);
	ath5k_reset(ah, NULL, true);
	mutex_unlock(&ah->lock);
}

static int __devinit
ath5k_init(struct ieee80211_hw *hw)
{

	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
	struct ath5k_txq *txq;
	u8 mac[ETH_ALEN] = {};
	int ret;


	/*
	 * Collect the channel list.  The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't get channels\n");
		goto err;
	}

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(ah);
	if (ret) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that hw functions handle resetting
	 * these queues at the needed time.
	 */
	ret = ath5k_beaconq_setup(ah);
	if (ret < 0) {
		ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
		goto err_desc;
	}
	ah->bhalq = ret;
	ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
	if (IS_ERR(ah->cabq)) {
		ATH5K_ERR(ah, "can't setup cab queue\n");
		ret = PTR_ERR(ah->cabq);
		goto err_bhal;
	}

	/* 5211 and 5212 usually support 10 queues but we better rely on the
	 * capability information */
	if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
		/* This order matches mac80211's queue priority, so we can
		 * directly use the mac80211 queue number without any mapping */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 4;
	} else {
		/* older hardware (5210) can only support one data queue */
		txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
		if (IS_ERR(txq)) {
			ATH5K_ERR(ah, "can't setup xmit queue\n");
			ret = PTR_ERR(txq);
			goto err_queues;
		}
		hw->queues = 1;
	}

	tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
	tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
	tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
	tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);

	INIT_WORK(&ah->reset_work, ath5k_reset_work);
	INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
	INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);

	ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
	if (ret) {
		ATH5K_ERR(ah, "unable to read address from EEPROM\n");
		goto err_queues;
	}

	SET_IEEE80211_PERM_ADDR(hw, mac);
	/* All MAC address bits matter for ACKs */
	ath5k_update_bssid_mask_and_opmode(ah, NULL);

	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
	if (ret) {
		ATH5K_ERR(ah, "can't initialize regulatory system\n");
		goto err_queues;
	}

	ret = ieee80211_register_hw(hw);
	if (ret) {
		ATH5K_ERR(ah, "can't register ieee80211 hw\n");
		goto err_queues;
	}

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(hw->wiphy, regulatory->alpha2);

	ath5k_init_leds(ah);

	ath5k_sysfs_register(ah);

	return 0;
err_queues:
	ath5k_txq_release(ah);
err_bhal:
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
err_desc:
	ath5k_desc_free(ah);
err:
	return ret;
}

void
ath5k_deinit_ah(struct ath5k_hw *ah)
{
	struct ieee80211_hw *hw = ah->hw;

	/*
	 * NB: the order of these is important:
	 * o call the 802.11 layer before detaching ath5k_hw to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * XXX: ??? detach ath5k_hw ???
	 * Other than that, it's straightforward...
	 */
	ieee80211_unregister_hw(hw);
	ath5k_desc_free(ah);
	ath5k_txq_release(ah);
	ath5k_hw_release_tx_queue(ah, ah->bhalq);
	ath5k_unregister_leds(ah);

	ath5k_sysfs_unregister(ah);
	/*
	 * NB: can't reclaim these until after ieee80211_ifdetach
	 * returns because we'll get called back to reclaim node
	 * state and potentially want to use them.
	 */
	ath5k_hw_deinit(ah);
	free_irq(ah->irq, ah);
}

bool
ath5k_any_vif_assoc(struct ath5k_hw *ah)
{
	struct ath5k_vif_iter_data iter_data;
	iter_data.hw_macaddr = NULL;
	iter_data.any_assoc = false;
	iter_data.need_set_hw_addr = false;
	iter_data.found_active = true;

	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	return iter_data.any_assoc;
}

void
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
	struct ath5k_hw *ah = hw->priv;
	u32 rfilt;
	rfilt = ath5k_hw_get_rx_filter(ah);
	if (enable)
		rfilt |= AR5K_RX_FILTER_BEACON;
	else
		rfilt &= ~AR5K_RX_FILTER_BEACON;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ah->filter_flags = rfilt;
}