rc80211_minstrel.c 19.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
/*
 * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on minstrel.c:
 *   Copyright (C) 2005-2007 Derek Smithies <derek@indranet.co.nz>
 *   Sponsored by Indranet Technologies Ltd
 *
 * Based on sample.c:
 *   Copyright (c) 2005 John Bicket
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer,
 *      without modification.
 *   2. Redistributions in binary form must reproduce at minimum a disclaimer
 *      similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *      redistribution must be conditioned upon including a substantially
 *      similar Disclaimer requirement for further binary redistribution.
 *   3. Neither the names of the above-listed copyright holders nor the names
 *      of any contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *   Alternatively, this software may be distributed under the terms of the
 *   GNU General Public License ("GPL") version 2 as published by the Free
 *   Software Foundation.
 *
 *   NO WARRANTY
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 *   AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 *   THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 *   OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 *   IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *   THE POSSIBILITY OF SUCH DAMAGES.
 */
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ieee80211.h>
53
#include <linux/slab.h>
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
#include <net/mac80211.h>
#include "rate.h"
#include "rc80211_minstrel.h"

/* Look up the rate stored at (row _idx, column _col) of the per-station
 * random sampling table (SAMPLE_COLUMNS permutations of the rate set). */
#define SAMPLE_TBL(_mi, _idx, _col) \
		_mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col]

/* convert mac80211 rate index to local array index */
static inline int
rix_to_ndx(struct minstrel_sta_info *mi, int rix)
{
	int i;

	/* The local table is built in ascending rix order (see
	 * minstrel_rate_init), so the local index can never exceed rix;
	 * scan downwards from there.  Returns -1 if rix is not present. */
	for (i = rix; i >= 0; i--)
		if (mi->r[i].rix == rix)
			break;
	return i;
}

72 73 74 75 76 77 78 79 80 81 82 83 84 85
/* find & sort topmost throughput rates
 *
 * Insert rate index i into tp_list, which is kept sorted in descending
 * cur_tp order; the lowest-ranked entry falls off the end. */
static inline void
minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
	int j = MAX_THR_RATES;

	/* walk upwards while rate i beats the entry above */
	while (j > 0 && mi->r[i].cur_tp > mi->r[tp_list[j - 1]].cur_tp)
		j--;
	/* shift lower-ranked entries down one slot; tp_list holds u8
	 * elements, so the memmove byte count equals the element count */
	if (j < MAX_THR_RATES - 1)
		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
	if (j < MAX_THR_RATES)
		tp_list[j] = i;
}

86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
static void
minstrel_set_rate(struct minstrel_sta_info *mi, struct ieee80211_sta_rates *ratetbl,
		  int offset, int idx)
{
	struct minstrel_rate *r = &mi->r[idx];

	ratetbl->rate[offset].idx = r->rix;
	ratetbl->rate[offset].count = r->adjusted_retry_count;
	ratetbl->rate[offset].count_cts = r->retry_count_cts;
	ratetbl->rate[offset].count_rts = r->retry_count_rtscts;
}

/* Build a fresh ieee80211_sta_rates table from the current rate
 * selection and hand it to mac80211.  The table is consumed by
 * rate_control_set_rates(). */
static void
minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
{
	struct ieee80211_sta_rates *ratetbl;
	int n = 0;

	ratetbl = kzalloc(sizeof(*ratetbl), GFP_ATOMIC);
	if (!ratetbl)
		return;

	/* 1st MRR stage: best-throughput rate */
	minstrel_set_rate(mi, ratetbl, n++, mi->max_tp_rate[0]);

	/* with at least 3 tx rates, second-best throughput comes next */
	if (mp->hw->max_rates >= 3)
		minstrel_set_rate(mi, ratetbl, n++, mi->max_tp_rate[1]);

	/* with at least 2 tx rates, follow up with the most robust rate */
	if (mp->hw->max_rates >= 2)
		minstrel_set_rate(mi, ratetbl, n++, mi->max_prob_rate);

	/* final fallback: lowest supported rate with maximum retries */
	ratetbl->rate[n].idx = mi->lowest_rix;
	ratetbl->rate[n].count = mp->max_retry;
	ratetbl->rate[n].count_cts = mp->max_retry;
	ratetbl->rate[n].count_rts = mp->max_retry;

	rate_control_set_rates(mp->hw, mi->sta, ratetbl);
}

130 131 132
/* Recalculate per-rate statistics for one station and re-derive the
 * best-throughput and most-robust rate selection.  Called from the tx
 * status path once per update interval. */
static void
minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
{
	u8 tmp_tp_rate[MAX_THR_RATES];
	u8 tmp_prob_rate = 0;
	u32 usecs;
	int i;

	for (i = 0; i < MAX_THR_RATES; i++)
	    tmp_tp_rate[i] = 0;

	for (i = 0; i < mi->n_rates; i++) {
		struct minstrel_rate *mr = &mi->r[i];

		/* guard the throughput division below; a zero
		 * perfect_tx_time is treated as a huge (1s) airtime */
		usecs = mr->perfect_tx_time;
		if (!usecs)
			usecs = 1000000;

		/* fold this interval's success ratio into the EWMA
		 * probability estimate; rates with no attempts keep
		 * their estimate and accumulate sample_skipped */
		if (unlikely(mr->attempts > 0)) {
			mr->sample_skipped = 0;
			mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
			mr->succ_hist += mr->success;
			mr->att_hist += mr->attempts;
			mr->probability = minstrel_ewma(mr->probability,
							mr->cur_prob,
							EWMA_LEVEL);
		} else
			mr->sample_skipped++;

		/* keep last interval's counters (for debugfs), then reset */
		mr->last_success = mr->success;
		mr->last_attempts = mr->attempts;
		mr->success = 0;
		mr->attempts = 0;

		/* Update throughput per rate, reset thr. below 10% success */
		if (mr->probability < MINSTREL_FRAC(10, 100))
			mr->cur_tp = 0;
		else
			mr->cur_tp = mr->probability * (1000000 / usecs);

		/* Sample less often below the 10% chance of success.
		 * Sample less often above the 95% chance of success. */
		if (mr->probability > MINSTREL_FRAC(95, 100) ||
		    mr->probability < MINSTREL_FRAC(10, 100)) {
			/* halve the retry chain, capped at 2 tries */
			mr->adjusted_retry_count = mr->retry_count >> 1;
			if (mr->adjusted_retry_count > 2)
				mr->adjusted_retry_count = 2;
			mr->sample_limit = 4;
		} else {
			mr->sample_limit = -1;	/* unlimited sampling */
			mr->adjusted_retry_count = mr->retry_count;
		}
		/* always allow at least 2 tries */
		if (!mr->adjusted_retry_count)
			mr->adjusted_retry_count = 2;

		minstrel_sort_best_tp_rates(mi, i, tmp_tp_rate);

		/* To determine the most robust rate (max_prob_rate) used at
		 * 3rd mrr stage we distinguish between two cases:
		 * (1) if any success probability >= 95%, out of those rates
		 * choose the maximum throughput rate as max_prob_rate
		 * (2) if all success probabilities < 95%, the rate with
		 * highest success probability is chosen as max_prob_rate */
		if (mr->probability >= MINSTREL_FRAC(95, 100)) {
			if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
				tmp_prob_rate = i;
		} else {
			if (mr->probability >= mi->r[tmp_prob_rate].probability)
				tmp_prob_rate = i;
		}
	}

	/* Assign the new rate set */
	memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
	mi->max_prob_rate = tmp_prob_rate;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1) {
		mi->max_tp_rate[0] = mp->fixed_rate_idx;
		mi->max_tp_rate[1] = mp->fixed_rate_idx;
		mi->max_prob_rate = mp->fixed_rate_idx;
	}
#endif

	/* Reset update timer */
	mi->stats_update = jiffies;

	minstrel_update_rates(mp, mi);
}

/* Account the tx status of one frame: add attempts for every MRR stage
 * that was tried, credit the success to the final stage used, and kick
 * off a stats update when the interval has elapsed. */
static void
minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
		   struct ieee80211_sta *sta, void *priv_sta,
		   struct sk_buff *skb)
{
	struct minstrel_priv *mp = priv;
	struct minstrel_sta_info *mi = priv_sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *ar = info->status.rates;
	int i, ndx;
	int success;

	success = !!(info->flags & IEEE80211_TX_STAT_ACK);

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		/* idx < 0 terminates the used portion of the rate chain */
		if (ar[i].idx < 0)
			break;

		ndx = rix_to_ndx(mi, ar[i].idx);
		if (ndx < 0)
			continue;

		mi->r[ndx].attempts += ar[i].count;

		/* only the last stage actually used (next slot unused or
		 * chain exhausted) gets the success credit */
		if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))
			mi->r[ndx].success += success;
	}

	/* count completed probe frames; i >= 0 always holds here */
	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
		mi->sample_count++;

	if (mi->sample_deferred > 0)
		mi->sample_deferred--;

	/* update_interval is in milliseconds */
	if (time_after(jiffies, mi->stats_update +
				(mp->update_interval * HZ) / 1000))
		minstrel_update_stats(mp, mi);
}


static inline unsigned int
minstrel_get_retry_count(struct minstrel_rate *mr,
W
Weilong Chen 已提交
263
			 struct ieee80211_tx_info *info)
264 265 266
{
	unsigned int retry = mr->adjusted_retry_count;

267
	if (info->control.use_rts)
268
		retry = max(2U, min(mr->retry_count_rtscts, retry));
269
	else if (info->control.use_cts_prot)
270 271 272 273 274 275 276 277 278
		retry = max(2U, min(mr->retry_count_cts, retry));
	return retry;
}


static int
minstrel_get_next_sample(struct minstrel_sta_info *mi)
{
	unsigned int sample_ndx;
279 280
	sample_ndx = SAMPLE_TBL(mi, mi->sample_row, mi->sample_column);
	mi->sample_row++;
281
	if ((int) mi->sample_row >= mi->n_rates) {
282
		mi->sample_row = 0;
283 284 285 286 287 288 289
		mi->sample_column++;
		if (mi->sample_column >= SAMPLE_COLUMNS)
			mi->sample_column = 0;
	}
	return sample_ndx;
}

290
static void
291 292
minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
		  void *priv_sta, struct ieee80211_tx_rate_control *txrc)
293
{
294
	struct sk_buff *skb = txrc->skb;
295 296 297
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct minstrel_sta_info *mi = priv_sta;
	struct minstrel_priv *mp = priv;
298 299 300
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct minstrel_rate *msr, *mr;
	unsigned int ndx;
301
	bool mrr_capable;
302
	bool prev_sample;
303
	int delta;
304
	int sampling_ratio;
305

306
	/* management/no-ack frames do not use rate control */
307
	if (rate_control_send_low(sta, priv_sta, txrc))
308 309
		return;

310 311 312 313 314 315 316 317
	/* check multi-rate-retry capabilities & adjust lookaround_rate */
	mrr_capable = mp->has_mrr &&
		      !txrc->rts &&
		      !txrc->bss_conf->use_cts_prot;
	if (mrr_capable)
		sampling_ratio = mp->lookaround_rate_mrr;
	else
		sampling_ratio = mp->lookaround_rate;
318

319
	/* increase sum packet counter */
320
	mi->packet_count++;
321

322 323 324 325 326
#ifdef CONFIG_MAC80211_DEBUGFS
	if (mp->fixed_rate_idx != -1)
		return;
#endif

327
	delta = (mi->packet_count * sampling_ratio / 100) -
328 329
			(mi->sample_count + mi->sample_deferred / 2);

330
	/* delta < 0: no sampling required */
331
	prev_sample = mi->prev_sample;
332 333 334
	mi->prev_sample = false;
	if (delta < 0 || (!mrr_capable && prev_sample))
		return;
335

336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380
	if (mi->packet_count >= 10000) {
		mi->sample_deferred = 0;
		mi->sample_count = 0;
		mi->packet_count = 0;
	} else if (delta > mi->n_rates * 2) {
		/* With multi-rate retry, not every planned sample
		 * attempt actually gets used, due to the way the retry
		 * chain is set up - [max_tp,sample,prob,lowest] for
		 * sample_rate < max_tp.
		 *
		 * If there's too much sampling backlog and the link
		 * starts getting worse, minstrel would start bursting
		 * out lots of sampling frames, which would result
		 * in a large throughput loss. */
		mi->sample_count += (delta - mi->n_rates * 2);
	}

	/* get next random rate sample */
	ndx = minstrel_get_next_sample(mi);
	msr = &mi->r[ndx];
	mr = &mi->r[mi->max_tp_rate[0]];

	/* Decide if direct ( 1st mrr stage) or indirect (2nd mrr stage)
	 * rate sampling method should be used.
	 * Respect such rates that are not sampled for 20 interations.
	 */
	if (mrr_capable &&
	    msr->perfect_tx_time > mr->perfect_tx_time &&
	    msr->sample_skipped < 20) {
		/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
		 * packets that have the sampling rate deferred to the
		 * second MRR stage. Increase the sample counter only
		 * if the deferred sample rate was actually used.
		 * Use the sample_deferred counter to make sure that
		 * the sampling is not done in large bursts */
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
		rate++;
		mi->sample_deferred++;
	} else {
		if (!msr->sample_limit != 0)
			return;

		mi->sample_count++;
		if (msr->sample_limit > 0)
			msr->sample_limit--;
381
	}
382 383 384 385

	/* If we're not using MRR and the sampling rate already
	 * has a probability of >95%, we shouldn't be attempting
	 * to use it, as this only wastes precious airtime */
386
	if (!mrr_capable &&
387
	   (mi->r[ndx].probability > MINSTREL_FRAC(95, 100)))
388 389
		return;

390
	mi->prev_sample = true;
391

392 393
	rate->idx = mi->r[ndx].rix;
	rate->count = minstrel_get_retry_count(&mi->r[ndx], info);
394 395 396 397
}


static void
398 399
calc_rate_durations(enum ieee80211_band band,
		    struct minstrel_rate *d,
400 401
		    struct ieee80211_rate *rate,
		    struct cfg80211_chan_def *chandef)
402 403
{
	int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
404
	int shift = ieee80211_chandef_get_shift(chandef);
405

406
	d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
407 408
			DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
			shift);
409
	d->ack_time = ieee80211_frame_duration(band, 10,
410 411
			DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
			shift);
412 413 414 415 416 417 418 419 420
}

static void
init_sample_table(struct minstrel_sta_info *mi)
{
	unsigned int i, col, new_idx;
	u8 rnd[8];

	mi->sample_column = 0;
421
	mi->sample_row = 0;
422
	memset(mi->sample_table, 0xff, SAMPLE_COLUMNS * mi->n_rates);
423 424

	for (col = 0; col < SAMPLE_COLUMNS; col++) {
425
		prandom_bytes(rnd, sizeof(rnd));
426 427 428 429
		for (i = 0; i < mi->n_rates; i++) {
			new_idx = (i + rnd[i & 7]) % mi->n_rates;
			while (SAMPLE_TBL(mi, new_idx, col) != 0xff)
				new_idx = (new_idx + 1) % mi->n_rates;
430

431
			SAMPLE_TBL(mi, new_idx, col) = i;
432 433 434 435 436 437
		}
	}
}

/* Initialize the per-station rate table: collect the supported rates,
 * precompute their airtimes and the retry chain lengths, then build
 * the initial rate set. */
static void
minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
		   struct cfg80211_chan_def *chandef,
		   struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_sta_info *mi = priv_sta;
	struct minstrel_priv *mp = priv;
	struct ieee80211_rate *ctl_rate;
	unsigned int i, n = 0;
	unsigned int t_slot = 9; /* FIXME: get real slot time */
	u32 rate_flags;

	mi->sta = sta;
	mi->lowest_rix = rate_lowest_index(sband, sta);
	ctl_rate = &sband->bitrates[mi->lowest_rix];
	/* ACK duration at the lowest (control response) rate */
	mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
				ctl_rate->bitrate,
				!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1,
				ieee80211_chandef_get_shift(chandef));

	rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
	memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate));
	mi->max_prob_rate = 0;

	/* compact the supported rates into mi->r[0..n-1] */
	for (i = 0; i < sband->n_bitrates; i++) {
		struct minstrel_rate *mr = &mi->r[n];
		unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
		unsigned int tx_time_single;
		unsigned int cw = mp->cw_min;
		int shift;

		if (!rate_supported(sta, sband->band, i))
			continue;
		/* skip rates incompatible with the channel flags */
		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
			continue;

		n++;
		memset(mr, 0, sizeof(*mr));

		mr->rix = i;
		shift = ieee80211_chandef_get_shift(chandef);
		/* bitrate in Mbps units, scaled by the bandwidth shift */
		mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
					   (1 << shift) * 5);
		calc_rate_durations(sband->band, mr, &sband->bitrates[i],
				    chandef);

		/* calculate maximum number of retransmissions before
		 * fallback (based on maximum segment size) */
		mr->sample_limit = -1;
		mr->retry_count = 1;
		mr->retry_count_cts = 1;
		mr->retry_count_rtscts = 1;
		tx_time = mr->perfect_tx_time + mi->sp_ack_dur;
		do {
			/* add one retransmission */
			tx_time_single = mr->ack_time + mr->perfect_tx_time;

			/* contention window */
			tx_time_single += (t_slot * cw) >> 1;
			cw = min((cw << 1) | 1, mp->cw_max);

			/* grow each retry count while the accumulated
			 * airtime still fits in one MRR segment */
			tx_time += tx_time_single;
			tx_time_cts += tx_time_single + mi->sp_ack_dur;
			tx_time_rtscts += tx_time_single + 2 * mi->sp_ack_dur;
			if ((tx_time_cts < mp->segment_size) &&
				(mr->retry_count_cts < mp->max_retry))
				mr->retry_count_cts++;
			if ((tx_time_rtscts < mp->segment_size) &&
				(mr->retry_count_rtscts < mp->max_retry))
				mr->retry_count_rtscts++;
		} while ((tx_time < mp->segment_size) &&
				(++mr->retry_count < mp->max_retry));
		mr->adjusted_retry_count = mr->retry_count;
		/* non-ERP rates never use CTS protection */
		if (!(sband->bitrates[i].flags & IEEE80211_RATE_ERP_G))
			mr->retry_count_cts = mr->retry_count;
	}

	/* mark unused tail entries as invalid */
	for (i = n; i < sband->n_bitrates; i++) {
		struct minstrel_rate *mr = &mi->r[i];
		mr->rix = -1;
	}

	mi->n_rates = n;
	mi->stats_update = jiffies;

	init_sample_table(mi);
	minstrel_update_rates(mp, mi);
}

/* Allocate the per-station state, sized for the largest rate set of any
 * band the hardware supports.  Returns NULL on allocation failure. */
static void *
minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct ieee80211_supported_band *sband;
	struct minstrel_sta_info *mi;
	struct minstrel_priv *mp = priv;
	struct ieee80211_hw *hw = mp->hw;
	int max_rates = 0;
	int i;

	mi = kzalloc(sizeof(struct minstrel_sta_info), gfp);
	if (!mi)
		return NULL;

	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	/* kcalloc zero-initializes and checks the n * size multiplication
	 * for overflow (preferred over kzalloc(a * b)) */
	mi->r = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
	if (!mi->r)
		goto error;

	mi->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
	if (!mi->sample_table)
		goto error1;

	mi->stats_update = jiffies;
	return mi;

error1:
	kfree(mi->r);
error:
	kfree(mi);
	return NULL;
}

/* Release everything minstrel_alloc_sta() allocated for one station. */
static void
minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_sta_info *mi = priv_sta;

	/* free the tables first, then the info struct that owns them */
	kfree(mi->sample_table);
	kfree(mi->r);
	kfree(mi);
}

573 574 575 576 577
static void
minstrel_init_cck_rates(struct minstrel_priv *mp)
{
	static const int bitrates[4] = { 10, 20, 55, 110 };
	struct ieee80211_supported_band *sband;
578
	u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
579 580 581 582 583 584 585 586 587 588 589 590
	int i, j;

	sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
	if (!sband)
		return;

	for (i = 0, j = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *rate = &sband->bitrates[i];

		if (rate->flags & IEEE80211_RATE_ERP_G)
			continue;

591 592 593
		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
			continue;

594 595 596 597 598 599 600 601 602 603
		for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
			if (rate->bitrate != bitrates[j])
				continue;

			mp->cck_rates[j] = i;
			break;
		}
	}
}

604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627
static void *
minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	struct minstrel_priv *mp;

	mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
	if (!mp)
		return NULL;

	/* contention window settings
	 * Just an approximation. Using the per-queue values would complicate
	 * the calculations and is probably unnecessary */
	mp->cw_min = 15;
	mp->cw_max = 1023;

	/* number of packets (in %) to use for sampling other rates
	 * sample less often for non-mrr packets, because the overhead
	 * is much higher than with mrr */
	mp->lookaround_rate = 5;
	mp->lookaround_rate_mrr = 10;

	/* maximum time that the hw is allowed to stay in one MRR segment */
	mp->segment_size = 6000;

628 629
	if (hw->max_rate_tries > 0)
		mp->max_retry = hw->max_rate_tries;
630 631 632 633
	else
		/* safe default, does not necessarily have to match hw properties */
		mp->max_retry = 7;

634
	if (hw->max_rates >= 4)
635 636 637 638 639
		mp->has_mrr = true;

	mp->hw = hw;
	mp->update_interval = 100;

640 641 642 643 644 645
#ifdef CONFIG_MAC80211_DEBUGFS
	mp->fixed_rate_idx = (u32) -1;
	mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
			S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
#endif

646 647
	minstrel_init_cck_rates(mp);

648 649 650 651 652 653
	return mp;
}

/* Tear down the global minstrel state allocated by minstrel_alloc(). */
static void
minstrel_free(void *priv)
{
#ifdef CONFIG_MAC80211_DEBUGFS
	struct minstrel_priv *mp = priv;

	/* remove the fixed_rate_idx debugfs knob first */
	debugfs_remove(mp->dbg_fixed_rate);
#endif
	kfree(priv);
}

660
/* Rate control operations registered with the mac80211 core. */
const struct rate_control_ops mac80211_minstrel = {
	.name = "minstrel",
	.tx_status = minstrel_tx_status,
	.get_rate = minstrel_get_rate,
	.rate_init = minstrel_rate_init,
	.alloc = minstrel_alloc,
	.free = minstrel_free,
	.alloc_sta = minstrel_alloc_sta,
	.free_sta = minstrel_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = minstrel_add_sta_debugfs,
	.remove_sta_debugfs = minstrel_remove_sta_debugfs,
#endif
};

/* Register the minstrel algorithm with the mac80211 rate control core;
 * returns 0 on success or a negative errno. */
int __init
rc80211_minstrel_init(void)
{
	return ieee80211_rate_control_register(&mac80211_minstrel);
}

/* Unregister the minstrel algorithm on module exit. */
void
rc80211_minstrel_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_minstrel);
}