rc80211_minstrel.c 20.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
/*
 * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on minstrel.c:
 *   Copyright (C) 2005-2007 Derek Smithies <derek@indranet.co.nz>
 *   Sponsored by Indranet Technologies Ltd
 *
 * Based on sample.c:
 *   Copyright (c) 2005 John Bicket
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *   1. Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer,
 *      without modification.
 *   2. Redistributions in binary form must reproduce at minimum a disclaimer
 *      similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *      redistribution must be conditioned upon including a substantially
 *      similar Disclaimer requirement for further binary redistribution.
 *   3. Neither the names of the above-listed copyright holders nor the names
 *      of any contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *   Alternatively, this software may be distributed under the terms of the
 *   GNU General Public License ("GPL") version 2 as published by the Free
 *   Software Foundation.
 *
 *   NO WARRANTY
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 *   AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 *   THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 *   OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 *   IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *   THE POSSIBILITY OF SUCH DAMAGES.
 */
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ieee80211.h>
53
#include <linux/slab.h>
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
#include <net/mac80211.h>
#include "rate.h"
#include "rc80211_minstrel.h"

#define SAMPLE_TBL(_mi, _idx, _col) \
		_mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col]

/* convert mac80211 rate index to local array index */
static inline int
rix_to_ndx(struct minstrel_sta_info *mi, int rix)
{
	int i;

	/* mi->r[] is filled in ascending rix order (see minstrel_rate_init),
	 * so the matching entry can never sit above index rix; scan down. */
	for (i = rix; i >= 0; i--)
		if (mi->r[i].rix == rix)
			break;
	/* returns -1 when rix is not present in mi->r[]; callers must check */
	return i;
}

72
/* return current EMWA throughput */
73
int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
74 75 76 77 78 79 80 81 82 83
{
	int usecs;

	usecs = mr->perfect_tx_time;
	if (!usecs)
		usecs = 1000000;

	/* reset thr. below 10% success */
	if (mr->stats.prob_ewma < MINSTREL_FRAC(10, 100))
		return 0;
84 85 86

	if (prob_ewma > MINSTREL_FRAC(90, 100))
		return MINSTREL_TRUNC(100000 * (MINSTREL_FRAC(90, 100) / usecs));
87
	else
88
		return MINSTREL_TRUNC(100000 * (prob_ewma / usecs));
89 90
}

91 92 93 94
/* find & sort topmost throughput rates */
static inline void
minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
	int j;
	struct minstrel_rate_stats *tmp_mrs;
	struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
	/* hoisted: the candidate rate's throughput is loop-invariant */
	int cur_tp_avg = minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma);

	/* walk the ranking from worst to best and stop at the first entry
	 * that is at least as good as rate i */
	for (j = MAX_THR_RATES; j > 0; --j) {
		tmp_mrs = &mi->r[tp_list[j - 1]].stats;
		if (cur_tp_avg <=
		    minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
			break;
	}

	/* shift lower-ranked rates down one slot and insert rate i */
	if (j < MAX_THR_RATES - 1)
		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
	if (j < MAX_THR_RATES)
		tp_list[j] = i;
}

112 113 114 115 116 117 118 119 120
/* Copy minstrel rate entry @idx into slot @offset of the mac80211 rate
 * table, along with the per-protection-mode retry budgets. */
static void
minstrel_set_rate(struct minstrel_sta_info *mi, struct ieee80211_sta_rates *ratetbl,
		  int offset, int idx)
{
	struct minstrel_rate *r = &mi->r[idx];

	ratetbl->rate[offset].idx = r->rix;
	/* plain, CTS-to-self and RTS/CTS retry counts, respectively */
	ratetbl->rate[offset].count = r->adjusted_retry_count;
	ratetbl->rate[offset].count_cts = r->retry_count_cts;
	ratetbl->rate[offset].count_rts = r->stats.retry_count_rtscts;
}

/* Build a fresh mac80211 rate table from the current minstrel selection
 * (max_tp_rate[0], max_tp_rate[1], max_prob_rate, lowest) and hand it to
 * mac80211. */
static void
minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
{
	struct ieee80211_sta_rates *ratetbl;
	int i = 0;

	/* GFP_ATOMIC: this runs from the tx status path (see
	 * minstrel_tx_status -> minstrel_update_stats) */
	ratetbl = kzalloc(sizeof(*ratetbl), GFP_ATOMIC);
	if (!ratetbl)
		return;

	/* Start with max_tp_rate */
	minstrel_set_rate(mi, ratetbl, i++, mi->max_tp_rate[0]);

	if (mp->hw->max_rates >= 3) {
		/* At least 3 tx rates supported, use max_tp_rate2 next */
		minstrel_set_rate(mi, ratetbl, i++, mi->max_tp_rate[1]);
	}

	if (mp->hw->max_rates >= 2) {
		/* At least 2 tx rates supported, use max_prob_rate next */
		minstrel_set_rate(mi, ratetbl, i++, mi->max_prob_rate);
	}

	/* Use lowest rate last */
	ratetbl->rate[i].idx = mi->lowest_rix;
	ratetbl->rate[i].count = mp->max_retry;
	ratetbl->rate[i].count_cts = mp->max_retry;
	ratetbl->rate[i].count_rts = mp->max_retry;

	/* NOTE(review): ratetbl is not freed here, so rate_control_set_rates
	 * presumably takes ownership (RCU-managed table in mac80211) —
	 * verify against mac80211's implementation */
	rate_control_set_rates(mp->hw, mi->sta, ratetbl);
}

156
/*
 * Recalculate statistics and counters of a given rate
 *
 * Folds the success/attempt counters of the just-finished interval into the
 * EWMA/EWMV probability estimates, then archives and resets the interval
 * counters. Non-static: also called from the minstrel_ht code paths.
 */
void
minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
{
	unsigned int cur_prob;

	if (unlikely(mrs->attempts > 0)) {
		mrs->sample_skipped = 0;
		/* raw success probability of this interval, in MINSTREL_FRAC
		 * fixed point */
		cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
		if (unlikely(!mrs->att_hist)) {
			/* first ever data point: seed the EWMA directly */
			mrs->prob_ewma = cur_prob;
		} else {
			/* update exponential weighted moving variance */
			mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv,
							cur_prob,
							mrs->prob_ewma,
							EWMA_LEVEL);

			/* update exponential weighted moving average */
			mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
						       cur_prob,
						       EWMA_LEVEL);
		}
		/* lifetime totals */
		mrs->att_hist += mrs->attempts;
		mrs->succ_hist += mrs->success;
	} else {
		/* no traffic on this rate in this interval */
		mrs->sample_skipped++;
	}

	/* archive this interval's counters and reset for the next one */
	mrs->last_success = mrs->success;
	mrs->last_attempts = mrs->attempts;
	mrs->success = 0;
	mrs->attempts = 0;
}

193 194 195
/* Periodic statistics pass: refresh every rate's EWMA, adjust its retry
 * budget, and re-derive the max_tp_rate[]/max_prob_rate selection used to
 * build the MRR chain. */
static void
minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
{
	u8 tmp_tp_rate[MAX_THR_RATES];
	u8 tmp_prob_rate = 0;
	int i, tmp_cur_tp, tmp_prob_tp;

	for (i = 0; i < MAX_THR_RATES; i++)
	    tmp_tp_rate[i] = 0;

	for (i = 0; i < mi->n_rates; i++) {
		struct minstrel_rate *mr = &mi->r[i];
		struct minstrel_rate_stats *mrs = &mi->r[i].stats;
		/* stats of the current best-probability candidate */
		struct minstrel_rate_stats *tmp_mrs = &mi->r[tmp_prob_rate].stats;

		/* Update statistics of success probability per rate */
		minstrel_calc_rate_stats(mrs);

		/* Sample less often below the 10% chance of success.
		 * Sample less often above the 95% chance of success. */
		if (mrs->prob_ewma > MINSTREL_FRAC(95, 100) ||
		    mrs->prob_ewma < MINSTREL_FRAC(10, 100)) {
			/* halve the retry budget, capped at 2 attempts */
			mr->adjusted_retry_count = mrs->retry_count >> 1;
			if (mr->adjusted_retry_count > 2)
				mr->adjusted_retry_count = 2;
			mr->sample_limit = 4;
		} else {
			/* -1 means "no sampling limit" */
			mr->sample_limit = -1;
			mr->adjusted_retry_count = mrs->retry_count;
		}
		if (!mr->adjusted_retry_count)
			mr->adjusted_retry_count = 2;

		minstrel_sort_best_tp_rates(mi, i, tmp_tp_rate);

		/* To determine the most robust rate (max_prob_rate) used at
		 * 3rd mrr stage we distinguish between two cases:
		 * (1) if any success probability >= 95%, out of those rates
		 * choose the maximum throughput rate as max_prob_rate
		 * (2) if all success probabilities < 95%, the rate with
		 * highest success probability is chosen as max_prob_rate */
		if (mrs->prob_ewma >= MINSTREL_FRAC(95, 100)) {
			tmp_cur_tp = minstrel_get_tp_avg(mr, mrs->prob_ewma);
			tmp_prob_tp = minstrel_get_tp_avg(&mi->r[tmp_prob_rate],
							  tmp_mrs->prob_ewma);
			if (tmp_cur_tp >= tmp_prob_tp)
				tmp_prob_rate = i;
		} else {
			if (mrs->prob_ewma >= tmp_mrs->prob_ewma)
				tmp_prob_rate = i;
		}
	}

	/* Assign the new rate set */
	memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
	mi->max_prob_rate = tmp_prob_rate;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1) {
		mi->max_tp_rate[0] = mp->fixed_rate_idx;
		mi->max_tp_rate[1] = mp->fixed_rate_idx;
		mi->max_prob_rate = mp->fixed_rate_idx;
	}
#endif

	/* Reset update timer */
	mi->last_stats_update = jiffies;

	minstrel_update_rates(mp, mi);
}

/* tx status callback (registered as .tx_status_noskb): credit attempts to
 * every MRR stage that was tried and a success to the stage that delivered
 * the frame, then trigger a stats update when the interval has elapsed. */
static void
minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
		   struct ieee80211_sta *sta, void *priv_sta,
		   struct ieee80211_tx_info *info)
{
	struct minstrel_priv *mp = priv;
	struct minstrel_sta_info *mi = priv_sta;
	struct ieee80211_tx_rate *ar = info->status.rates;
	int i, ndx;
	int success;

	success = !!(info->flags & IEEE80211_TX_STAT_ACK);

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		/* idx < 0 terminates the used portion of the rate chain */
		if (ar[i].idx < 0)
			break;

		ndx = rix_to_ndx(mi, ar[i].idx);
		if (ndx < 0)
			continue;

		mi->r[ndx].stats.attempts += ar[i].count;

		/* credit the success to the last rate actually used, i.e.
		 * the stage after which the chain terminates */
		if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))
			mi->r[ndx].stats.success += success;
	}

	/* NOTE(review): (i >= 0) is always true here since i starts at 0;
	 * the condition is effectively just the PROBE flag check */
	if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
		mi->sample_packets++;

	if (mi->sample_deferred > 0)
		mi->sample_deferred--;

	/* update_interval is in milliseconds */
	if (time_after(jiffies, mi->last_stats_update +
				(mp->update_interval * HZ) / 1000))
		minstrel_update_stats(mp, mi);
}


static inline unsigned int
minstrel_get_retry_count(struct minstrel_rate *mr,
W
Weilong Chen 已提交
306
			 struct ieee80211_tx_info *info)
307
{
308
	u8 retry = mr->adjusted_retry_count;
309

310
	if (info->control.use_rts)
311
		retry = max_t(u8, 2, min(mr->stats.retry_count_rtscts, retry));
312
	else if (info->control.use_cts_prot)
313
		retry = max_t(u8, 2, min(mr->retry_count_cts, retry));
314 315 316 317 318 319 320 321
	return retry;
}


static int
minstrel_get_next_sample(struct minstrel_sta_info *mi)
{
	unsigned int sample_ndx;
322 323
	sample_ndx = SAMPLE_TBL(mi, mi->sample_row, mi->sample_column);
	mi->sample_row++;
324
	if ((int) mi->sample_row >= mi->n_rates) {
325
		mi->sample_row = 0;
326 327 328 329 330 331 332
		mi->sample_column++;
		if (mi->sample_column >= SAMPLE_COLUMNS)
			mi->sample_column = 0;
	}
	return sample_ndx;
}

333
/* Main rate selection hook: decide whether this frame is a normal
 * transmission (mac80211 already applied the table set by
 * minstrel_update_rates) or a sampling attempt, and if sampling, whether
 * the sample rate goes into the 1st or 2nd MRR stage. */
static void
minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
		  void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
	struct sk_buff *skb = txrc->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct minstrel_sta_info *mi = priv_sta;
	struct minstrel_priv *mp = priv;
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct minstrel_rate *msr, *mr;
	unsigned int ndx;
	bool mrr_capable;
	bool prev_sample;
	int delta;
	int sampling_ratio;

	/* management/no-ack frames do not use rate control */
	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	/* check multi-rate-retry capabilities & adjust lookaround_rate */
	mrr_capable = mp->has_mrr &&
		      !txrc->rts &&
		      !txrc->bss_conf->use_cts_prot;
	if (mrr_capable)
		sampling_ratio = mp->lookaround_rate_mrr;
	else
		sampling_ratio = mp->lookaround_rate;

	/* increase sum packet counter */
	mi->total_packets++;

#ifdef CONFIG_MAC80211_DEBUGFS
	if (mp->fixed_rate_idx != -1)
		return;
#endif

	/* how far behind the target sampling ratio we are; deferred samples
	 * are counted at half weight */
	delta = (mi->total_packets * sampling_ratio / 100) -
			(mi->sample_packets + mi->sample_deferred / 2);

	/* delta < 0: no sampling required */
	prev_sample = mi->prev_sample;
	mi->prev_sample = false;
	if (delta < 0 || (!mrr_capable && prev_sample))
		return;

	if (mi->total_packets >= 10000) {
		/* periodically reset the counters to keep them bounded */
		mi->sample_deferred = 0;
		mi->sample_packets = 0;
		mi->total_packets = 0;
	} else if (delta > mi->n_rates * 2) {
		/* With multi-rate retry, not every planned sample
		 * attempt actually gets used, due to the way the retry
		 * chain is set up - [max_tp,sample,prob,lowest] for
		 * sample_rate < max_tp.
		 *
		 * If there's too much sampling backlog and the link
		 * starts getting worse, minstrel would start bursting
		 * out lots of sampling frames, which would result
		 * in a large throughput loss. */
		mi->sample_packets += (delta - mi->n_rates * 2);
	}

	/* get next random rate sample */
	ndx = minstrel_get_next_sample(mi);
	msr = &mi->r[ndx];
	mr = &mi->r[mi->max_tp_rate[0]];

	/* Decide if direct ( 1st mrr stage) or indirect (2nd mrr stage)
	 * rate sampling method should be used.
	 * Respect such rates that are not sampled for 20 iterations.
	 */
	if (mrr_capable &&
	    msr->perfect_tx_time > mr->perfect_tx_time &&
	    msr->stats.sample_skipped < 20) {
		/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
		 * packets that have the sampling rate deferred to the
		 * second MRR stage. Increase the sample counter only
		 * if the deferred sample rate was actually used.
		 * Use the sample_deferred counter to make sure that
		 * the sampling is not done in large bursts */
		info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
		rate++;
		mi->sample_deferred++;
	} else {
		/* sample_limit == 0: this rate is exhausted for sampling */
		if (!msr->sample_limit)
			return;

		mi->sample_packets++;
		if (msr->sample_limit > 0)
			msr->sample_limit--;
	}

	/* If we're not using MRR and the sampling rate already
	 * has a probability of >95%, we shouldn't be attempting
	 * to use it, as this only wastes precious airtime */
	if (!mrr_capable &&
	   (mi->r[ndx].stats.prob_ewma > MINSTREL_FRAC(95, 100)))
		return;

	mi->prev_sample = true;

	rate->idx = mi->r[ndx].rix;
	rate->count = minstrel_get_retry_count(&mi->r[ndx], info);
}


/* Precompute per-rate airtime values: perfect_tx_time for an average-size
 * frame and ack_time for the corresponding ACK. */
static void
calc_rate_durations(enum nl80211_band band,
		    struct minstrel_rate *d,
		    struct ieee80211_rate *rate,
		    struct cfg80211_chan_def *chandef)
{
	int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
	int shift = ieee80211_chandef_get_shift(chandef);

	/* 1200 bytes is the average packet size this module uses for
	 * throughput computations (see minstrel_get_expected_throughput) */
	d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
			DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
			shift);
	/* 10-byte frame — presumably the ACK frame length; confirm against
	 * ieee80211_frame_duration usage elsewhere */
	d->ack_time = ieee80211_frame_duration(band, 10,
			DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
			shift);
}

static void
init_sample_table(struct minstrel_sta_info *mi)
{
	unsigned int i, col, new_idx;
	u8 rnd[8];

	mi->sample_column = 0;
464
	mi->sample_row = 0;
465
	memset(mi->sample_table, 0xff, SAMPLE_COLUMNS * mi->n_rates);
466 467

	for (col = 0; col < SAMPLE_COLUMNS; col++) {
468
		prandom_bytes(rnd, sizeof(rnd));
469 470 471 472
		for (i = 0; i < mi->n_rates; i++) {
			new_idx = (i + rnd[i & 7]) % mi->n_rates;
			while (SAMPLE_TBL(mi, new_idx, col) != 0xff)
				new_idx = (new_idx + 1) % mi->n_rates;
473

474
			SAMPLE_TBL(mi, new_idx, col) = i;
475 476 477 478 479 480
		}
	}
}

static void
minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
481 482
		   struct cfg80211_chan_def *chandef,
		   struct ieee80211_sta *sta, void *priv_sta)
483 484 485
{
	struct minstrel_sta_info *mi = priv_sta;
	struct minstrel_priv *mp = priv;
486
	struct ieee80211_rate *ctl_rate;
487 488
	unsigned int i, n = 0;
	unsigned int t_slot = 9; /* FIXME: get real slot time */
489
	u32 rate_flags;
490

491
	mi->sta = sta;
492
	mi->lowest_rix = rate_lowest_index(sband, sta);
493
	ctl_rate = &sband->bitrates[mi->lowest_rix];
494 495
	mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
				ctl_rate->bitrate,
496 497
				!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1,
				ieee80211_chandef_get_shift(chandef));
498

499
	rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
500 501 502
	memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate));
	mi->max_prob_rate = 0;

503 504
	for (i = 0; i < sband->n_bitrates; i++) {
		struct minstrel_rate *mr = &mi->r[n];
505
		struct minstrel_rate_stats *mrs = &mi->r[n].stats;
506 507 508
		unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
		unsigned int tx_time_single;
		unsigned int cw = mp->cw_min;
509
		int shift;
510 511 512

		if (!rate_supported(sta, sband->band, i))
			continue;
513 514 515
		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
			continue;

516 517
		n++;
		memset(mr, 0, sizeof(*mr));
518
		memset(mrs, 0, sizeof(*mrs));
519 520

		mr->rix = i;
521 522 523 524 525
		shift = ieee80211_chandef_get_shift(chandef);
		mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
					   (1 << shift) * 5);
		calc_rate_durations(sband->band, mr, &sband->bitrates[i],
				    chandef);
526 527 528

		/* calculate maximum number of retransmissions before
		 * fallback (based on maximum segment size) */
529
		mr->sample_limit = -1;
530
		mrs->retry_count = 1;
531
		mr->retry_count_cts = 1;
532
		mrs->retry_count_rtscts = 1;
533 534 535 536 537 538
		tx_time = mr->perfect_tx_time + mi->sp_ack_dur;
		do {
			/* add one retransmission */
			tx_time_single = mr->ack_time + mr->perfect_tx_time;

			/* contention window */
539 540
			tx_time_single += (t_slot * cw) >> 1;
			cw = min((cw << 1) | 1, mp->cw_max);
541 542 543 544 545 546 547 548

			tx_time += tx_time_single;
			tx_time_cts += tx_time_single + mi->sp_ack_dur;
			tx_time_rtscts += tx_time_single + 2 * mi->sp_ack_dur;
			if ((tx_time_cts < mp->segment_size) &&
				(mr->retry_count_cts < mp->max_retry))
				mr->retry_count_cts++;
			if ((tx_time_rtscts < mp->segment_size) &&
549 550
				(mrs->retry_count_rtscts < mp->max_retry))
				mrs->retry_count_rtscts++;
551
		} while ((tx_time < mp->segment_size) &&
552 553
				(++mr->stats.retry_count < mp->max_retry));
		mr->adjusted_retry_count = mrs->retry_count;
554
		if (!(sband->bitrates[i].flags & IEEE80211_RATE_ERP_G))
555
			mr->retry_count_cts = mrs->retry_count;
556 557 558 559 560 561 562 563
	}

	for (i = n; i < sband->n_bitrates; i++) {
		struct minstrel_rate *mr = &mi->r[i];
		mr->rix = -1;
	}

	mi->n_rates = n;
564
	mi->last_stats_update = jiffies;
565 566

	init_sample_table(mi);
567
	minstrel_update_rates(mp, mi);
568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583
}

/* Allocate per-station state, sized for the largest band this hardware
 * supports. Returns NULL on allocation failure. */
static void *
minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct ieee80211_supported_band *sband;
	struct minstrel_sta_info *mi;
	struct minstrel_priv *mp = priv;
	struct ieee80211_hw *hw = mp->hw;
	int max_rates = 0;
	int i;

	mi = kzalloc(sizeof(struct minstrel_sta_info), gfp);
	if (!mi)
		return NULL;

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	/* kcalloc/kmalloc_array check the size multiplication for overflow,
	 * unlike the open-coded sizeof(...) * max_rates form */
	mi->r = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
	if (!mi->r)
		goto error;

	mi->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
	if (!mi->sample_table)
		goto error1;

	mi->last_stats_update = jiffies;
	return mi;

error1:
	kfree(mi->r);
error:
	kfree(mi);
	return NULL;
}

/* Release everything minstrel_alloc_sta() allocated for this station. */
static void
minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_sta_info *mi = priv_sta;

	kfree(mi->r);
	kfree(mi->sample_table);
	kfree(mi);
}

618 619 620 621 622
static void
minstrel_init_cck_rates(struct minstrel_priv *mp)
{
	static const int bitrates[4] = { 10, 20, 55, 110 };
	struct ieee80211_supported_band *sband;
623
	u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
624 625
	int i, j;

626
	sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
627 628 629 630 631 632 633 634 635
	if (!sband)
		return;

	for (i = 0, j = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *rate = &sband->bitrates[i];

		if (rate->flags & IEEE80211_RATE_ERP_G)
			continue;

636 637 638
		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
			continue;

639 640 641 642 643 644 645 646 647 648
		for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
			if (rate->bitrate != bitrates[j])
				continue;

			mp->cck_rates[j] = i;
			break;
		}
	}
}

649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672
/* Allocate and initialize the per-hw minstrel state with the algorithm's
 * tuning defaults. Returns NULL on allocation failure. */
static void *
minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	struct minstrel_priv *mp;

	mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
	if (!mp)
		return NULL;

	/* contention window settings
	 * Just an approximation. Using the per-queue values would complicate
	 * the calculations and is probably unnecessary */
	mp->cw_min = 15;
	mp->cw_max = 1023;

	/* number of packets (in %) to use for sampling other rates
	 * sample less often for non-mrr packets, because the overhead
	 * is much higher than with mrr */
	mp->lookaround_rate = 5;
	mp->lookaround_rate_mrr = 10;

	/* maximum time that the hw is allowed to stay in one MRR segment */
	mp->segment_size = 6000;

	if (hw->max_rate_tries > 0)
		mp->max_retry = hw->max_rate_tries;
	else
		/* safe default, does not necessarily have to match hw properties */
		mp->max_retry = 7;

	/* 4 slots are needed for [max_tp, sample, prob, lowest] chains */
	if (hw->max_rates >= 4)
		mp->has_mrr = true;

	mp->hw = hw;
	mp->update_interval = 100;	/* milliseconds */

#ifdef CONFIG_MAC80211_DEBUGFS
	mp->fixed_rate_idx = (u32) -1;
	/* NOTE(review): S_IWUGO makes this debugfs file world-writable;
	 * consider S_IWUSR unless world access is intentional */
	mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
			S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
#endif

	minstrel_init_cck_rates(mp);

	return mp;
}

/* Tear down the per-hw state created by minstrel_alloc(). */
static void
minstrel_free(void *priv)
{
#ifdef CONFIG_MAC80211_DEBUGFS
	struct minstrel_priv *mp = priv;

	debugfs_remove(mp->dbg_fixed_rate);
#endif
	kfree(priv);
}

705 706 707
static u32 minstrel_get_expected_throughput(void *priv_sta)
{
	struct minstrel_sta_info *mi = priv_sta;
708
	struct minstrel_rate_stats *tmp_mrs;
709
	int idx = mi->max_tp_rate[0];
710
	int tmp_cur_tp;
711 712 713 714

	/* convert pkt per sec in kbps (1200 is the average pkt size used for
	 * computing cur_tp
	 */
715
	tmp_mrs = &mi->r[idx].stats;
716
	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
717 718 719
	tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;

	return tmp_cur_tp;
720 721
}

722
/* Rate control ops registered with mac80211 for legacy (non-HT) rates. */
const struct rate_control_ops mac80211_minstrel = {
	.name = "minstrel",
	/* skb-less tx status reporting (driver passes only tx_info) */
	.tx_status_noskb = minstrel_tx_status,
	.get_rate = minstrel_get_rate,
	.rate_init = minstrel_rate_init,
	.alloc = minstrel_alloc,
	.free = minstrel_free,
	.alloc_sta = minstrel_alloc_sta,
	.free_sta = minstrel_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = minstrel_add_sta_debugfs,
	.remove_sta_debugfs = minstrel_remove_sta_debugfs,
#endif
	.get_expected_throughput = minstrel_get_expected_throughput,
};

/* Module init: register this algorithm with the mac80211 rate control
 * framework. */
int __init
rc80211_minstrel_init(void)
{
	return ieee80211_rate_control_register(&mac80211_minstrel);
}

/* Module exit: unregister from the mac80211 rate control framework. */
void
rc80211_minstrel_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_minstrel);
}