/*
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ieee80211.h>
#include <net/mac80211.h>
#include "rate.h"
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"

#define AVG_PKT_SIZE	1200

/* Number of bits for an average sized packet */
#define MCS_NBITS (AVG_PKT_SIZE << 3)

/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))

/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms)					\
	(sgi ?								\
	  ((syms) * 18000 + 4000) / 5 :	/* syms * 3.6 us */		\
	  ((syms) * 1000) << 2		/* syms * 4 us */		\
	)

/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
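
/*
 * Worked example for the macros above: a single-stream HT20 MCS7 rate
 * carries 260 data bits per symbol, so an average sized packet needs
 * MCS_NSYMS(260) = DIV_ROUND_UP(9600, 260) = 37 symbols.  That gives
 * MCS_SYMBOL_TIME(0, 37) = 148000 ns with a long guard interval and
 * MCS_SYMBOL_TIME(1, 37) = 134000 ns with a short guard interval.
 */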

/*
 * Define group sort order: HT40 -> SGI -> #streams
 */
#define GROUP_IDX(_streams, _sgi, _ht40)	\
	MINSTREL_MAX_STREAMS * 2 * _ht40 +	\
	MINSTREL_MAX_STREAMS * _sgi +		\
	_streams - 1
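
/*
 * Example: with MINSTREL_MAX_STREAMS == 3, GROUP_IDX(2, 1, 0) (two
 * streams, SGI, HT20) evaluates to 3 * 2 * 0 + 3 * 1 + 2 - 1 = 4,
 * matching the position of MCS_GROUP(2, 1, 0) in minstrel_mcs_groups[].
 */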

/* MCS rate information for an MCS group */
#define MCS_GROUP(_streams, _sgi, _ht40)				\
	[GROUP_IDX(_streams, _sgi, _ht40)] = {				\
	.streams = _streams,						\
	.flags =							\
		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
		(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0),		\
	.duration = {							\
		MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260)		\
	}								\
}

#define CCK_DURATION(_bitrate, _short, _len)		\
	(1000 * (10 /* SIFS */ +			\
	 (_short ? 72 + 24 : 144 + 48) +		\
	 (8 * (_len + 4) * 10) / (_bitrate)))

#define CCK_ACK_DURATION(_bitrate, _short)			\
	(CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) +	\
	 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
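
/*
 * CCK_ACK_DURATION adds the estimated response overhead (computed at 1 or
 * 2 Mbit/s, depending on the data rate) to the airtime of an average sized
 * frame at the given CCK bitrate (in units of 100 kbit/s), including SIFS
 * and the long or short preamble.
 */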

#define CCK_DURATION_LIST(_short)			\
	CCK_ACK_DURATION(10, _short),			\
	CCK_ACK_DURATION(20, _short),			\
	CCK_ACK_DURATION(55, _short),			\
	CCK_ACK_DURATION(110, _short)

#define CCK_GROUP						\
	[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS] = {	\
		.streams = 0,					\
		.duration = {					\
			CCK_DURATION_LIST(false),		\
			CCK_DURATION_LIST(true)			\
		}						\
	}

/*
 * To enable sufficiently targeted rate sampling, MCS rates are divided into
 * groups, based on the number of streams and flags (HT40, SGI) that they
 * use.
 *
 * The sort order has to be fixed for the GROUP_IDX macro to be applicable:
 * HT40 -> SGI -> #streams
 */
const struct mcs_group minstrel_mcs_groups[] = {
	MCS_GROUP(1, 0, 0),
	MCS_GROUP(2, 0, 0),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 0, 0),
#endif

	MCS_GROUP(1, 1, 0),
	MCS_GROUP(2, 1, 0),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 1, 0),
#endif

	MCS_GROUP(1, 0, 1),
	MCS_GROUP(2, 0, 1),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 0, 1),
#endif

	MCS_GROUP(1, 1, 1),
	MCS_GROUP(2, 1, 1),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 1, 1),
#endif

	/* must be last */
	CCK_GROUP
};

#define MINSTREL_CCK_GROUP	(ARRAY_SIZE(minstrel_mcs_groups) - 1)

static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;

static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);

/*
 * Look up an MCS group index based on mac80211 rate information
 */
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
	return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
			 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
			 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}

static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
		      struct ieee80211_tx_rate *rate)
{
	int group, idx;

	if (rate->flags & IEEE80211_TX_RC_MCS) {
		group = minstrel_ht_get_group_idx(rate);
		idx = rate->idx % 8;
	} else {
		group = MINSTREL_CCK_GROUP;

		for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
			if (rate->idx == mp->cck_rates[idx])
				break;

		/* short preamble */
		if (!(mi->groups[group].supported & BIT(idx)))
			idx += 4;
	}
	return &mi->groups[group].rates[idx];
}

static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
{
	return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
}


/*
 * Recalculate success probabilities and counters for a rate using EWMA
 */
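/*
 * Note: on the first interval that saw any attempts (att_hist == 0), the
 * running probability is seeded directly with cur_prob; afterwards
 * minstrel_ewma() blends each interval's cur_prob into the running average,
 * with EWMA_LEVEL controlling how much weight the history keeps.
 */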
static void
minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
{
	if (unlikely(mr->attempts > 0)) {
		mr->sample_skipped = 0;
		mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
		if (!mr->att_hist)
			mr->probability = mr->cur_prob;
		else
			mr->probability = minstrel_ewma(mr->probability,
				mr->cur_prob, EWMA_LEVEL);
		mr->att_hist += mr->attempts;
		mr->succ_hist += mr->success;
	} else {
		mr->sample_skipped++;
	}
	mr->last_success = mr->success;
	mr->last_attempts = mr->attempts;
	mr->success = 0;
	mr->attempts = 0;
}

/*
 * Calculate throughput based on the average A-MPDU length, taking into account
 * the expected number of retransmissions and their expected length
 */
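/*
 * The result (mr->cur_tp) is the expected number of average sized packets
 * delivered per second: prob is a MINSTREL_FRAC scaled probability and
 * nsecs the estimated airtime per packet, so 10^9 * prob / nsecs (truncated
 * back to an integer) yields packets/s.  It is only converted to kbps when
 * reported, e.g. in minstrel_ht_get_expected_throughput().
 */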
static void
minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
{
	struct minstrel_rate_stats *mr;
	unsigned int nsecs = 0;
	unsigned int tp;
	unsigned int prob;

	mr = &mi->groups[group].rates[rate];
	prob = mr->probability;

	if (prob < MINSTREL_FRAC(1, 10)) {
		mr->cur_tp = 0;
		return;
	}

	/*
	 * For the throughput calculation, limit the probability value to 90% to
	 * account for collision related packet error rate fluctuation
	 */
	if (prob > MINSTREL_FRAC(9, 10))
		prob = MINSTREL_FRAC(9, 10);

	if (group != MINSTREL_CCK_GROUP)
		nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);

	nsecs += minstrel_mcs_groups[group].duration[rate];

	/* prob is scaled - see MINSTREL_FRAC above */
	tp = 1000000 * ((prob * 1000) / nsecs);
	mr->cur_tp = MINSTREL_TRUNC(tp);
}

/*
 * Find & sort topmost throughput rates
 *
 * If multiple rates provide equal throughput the sorting is based on their
 * current success probability. Higher success probability is preferred among
 * MCS groups; CCK rates do not provide aggregation and therefore come last.
 */
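/*
 * tp_list (length MAX_THR_RATES) is kept sorted by descending throughput:
 * the function walks from the tail towards the head until it finds the slot
 * for the new index, shifts the lower entries down by one with memmove()
 * and drops the last entry.
 */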
static void
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
			       u8 *tp_list)
{
	int cur_group, cur_idx, cur_thr, cur_prob;
	int tmp_group, tmp_idx, tmp_thr, tmp_prob;
	int j = MAX_THR_RATES;

	cur_group = index / MCS_GROUP_RATES;
	cur_idx = index  % MCS_GROUP_RATES;
	cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
	cur_prob = mi->groups[cur_group].rates[cur_idx].probability;

	tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
	tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
	tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;

	while (j > 0 && (cur_thr > tmp_thr ||
	      (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
		j--;
		tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
		tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
		tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
	}

	if (j < MAX_THR_RATES - 1) {
		memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
		       (MAX_THR_RATES - (j + 1))));
	}
	if (j < MAX_THR_RATES)
		tp_list[j] = index;
}

/*
 * Find and set the topmost probability rate per sta and per group
 */
static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group;

	mg = &mi->groups[index / MCS_GROUP_RATES];
	mr = &mg->rates[index % MCS_GROUP_RATES];

	tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
	tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
	tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;

	/* If max_tp_rate[0] is from an MCS group, select max_prob_rate from an
	 * MCS group as well, since CCK rates do not allow aggregation */
	max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
	if ((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) &&
	    (max_tp_group != MINSTREL_CCK_GROUP))
		return;

	if (mr->probability > MINSTREL_FRAC(75, 100)) {
		if (mr->cur_tp > tmp_tp)
			mi->max_prob_rate = index;
		if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp)
			mg->max_group_prob_rate = index;
	} else {
		if (mr->probability > tmp_prob)
			mi->max_prob_rate = index;
		if (mr->probability > mg->rates[mg->max_group_prob_rate].probability)
			mg->max_group_prob_rate = index;
	}
}


/*
 * Assign new rate set per sta and use CCK rates only if the fastest
 * rate (max_tp_rate[0]) is from the CCK group. This prevents sorted rate
 * sets in which MCS and CCK rates are mixed, because CCK rates cannot use
 * aggregation.
 */
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
				 u8 tmp_mcs_tp_rate[MAX_THR_RATES],
				 u8 tmp_cck_tp_rate[MAX_THR_RATES])
{
	unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp;
	int i;

	tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
	tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;

	tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
	tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;

	if (tmp_cck_tp > tmp_mcs_tp) {
		for (i = 0; i < MAX_THR_RATES; i++) {
			minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i],
						       tmp_mcs_tp_rate);
		}
	}

}

/*
 * Try to increase the robustness of the max_prob rate by decreasing the
 * number of streams if possible.
 */
static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int tmp_max_streams, group;
	int tmp_tp = 0;

	tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
			  MCS_GROUP_RATES].streams;
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		mg = &mi->groups[group];
		if (!mg->supported || group == MINSTREL_CCK_GROUP)
			continue;
		mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate);
		if (tmp_tp < mr->cur_tp &&
		   (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
				mi->max_prob_rate = mg->max_group_prob_rate;
				tmp_tp = mr->cur_tp;
		}
	}
}

/*
 * Update rate statistics and select new primary rates
 *
 * Rules for rate selection:
 *  - max_prob_rate must use only one stream, as a tradeoff between delivery
 *    probability and throughput during strong fluctuations
 *  - as long as the max prob rate has a probability of more than 75%, pick
 *    higher throughput rates, even if the probability is a bit lower
 */
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int group, i, j;
	u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
	u8 tmp_cck_tp_rate[MAX_THR_RATES], index;

	if (mi->ampdu_packets > 0) {
		mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
			MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
		mi->ampdu_len = 0;
		mi->ampdu_packets = 0;
	}

	mi->sample_slow = 0;
	mi->sample_count = 0;

	/* Initialize global rate indexes */
	for (j = 0; j < MAX_THR_RATES; j++) {
		tmp_mcs_tp_rate[j] = 0;
		tmp_cck_tp_rate[j] = 0;
	}

	/* Find best rate sets within all MCS groups */
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {

		mg = &mi->groups[group];
		if (!mg->supported)
			continue;

		mi->sample_count++;

		/* (re)Initialize group rate indexes */
		for (j = 0; j < MAX_THR_RATES; j++)
			tmp_group_tp_rate[j] = group;

		for (i = 0; i < MCS_GROUP_RATES; i++) {
			if (!(mg->supported & BIT(i)))
				continue;

			index = MCS_GROUP_RATES * group + i;

			mr = &mg->rates[i];
			mr->retry_updated = false;
			minstrel_calc_rate_ewma(mr);
			minstrel_ht_calc_tp(mi, group, i);

			if (!mr->cur_tp)
				continue;

			/* Find max throughput rate set */
			if (group != MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_mcs_tp_rate);
			} else if (group == MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_cck_tp_rate);
			}

			/* Find max throughput rate set within a group */
			minstrel_ht_sort_best_tp_rates(mi, index,
						       tmp_group_tp_rate);

			/* Find max probability rate per group and global */
			minstrel_ht_set_best_prob_rate(mi, index);
		}

		memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
		       sizeof(mg->max_group_tp_rate));
	}

	/* Assign new rate set per sta */
	minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate);
	memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));

	/* Try to increase robustness of max_prob_rate */
	minstrel_ht_prob_rate_reduce_streams(mi);

	/* try to sample all available rates during each interval */
	mi->sample_count *= 8;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1) {
		for (i = 0; i < 4; i++)
			mi->max_tp_rate[i] = mp->fixed_rate_idx;
		mi->max_prob_rate = mp->fixed_rate_idx;
	}
#endif

	/* Reset update timer */
	mi->stats_update = jiffies;
}

static bool
minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
{
	if (rate->idx < 0)
		return false;

	if (!rate->count)
		return false;

	if (rate->flags & IEEE80211_TX_RC_MCS)
		return true;

	return rate->idx == mp->cck_rates[0] ||
	       rate->idx == mp->cck_rates[1] ||
	       rate->idx == mp->cck_rates[2] ||
	       rate->idx == mp->cck_rates[3];
}
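
/*
 * Advance the sampling pointer: move to the next supported MCS group and
 * step through that group's position in the randomized sample table.
 */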

static void
minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;

	for (;;) {
		mi->sample_group++;
		mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
		mg = &mi->groups[mi->sample_group];

		if (!mg->supported)
			continue;

		if (++mg->index >= MCS_GROUP_RATES) {
			mg->index = 0;
			if (++mg->column >= ARRAY_SIZE(sample_table))
				mg->column = 0;
		}
		break;
	}
}
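
/*
 * Replace the given rate index with the best rate of a group that uses no
 * more spatial streams, used when a spatial multiplexing rate suddenly
 * stops working.
 */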

static void
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary)
{
	int group, orig_group;

	orig_group = group = *idx / MCS_GROUP_RATES;
	while (group > 0) {
		group--;

		if (!mi->groups[group].supported)
			continue;

		if (minstrel_mcs_groups[group].streams >
		    minstrel_mcs_groups[orig_group].streams)
			continue;

		if (primary)
			*idx = mi->groups[group].max_group_tp_rate[0];
		else
			*idx = mi->groups[group].max_group_tp_rate[1];
		break;
	}
}

static void
minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	u16 tid;

	if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
		return;

	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	if (likely(sta->ampdu_mlme.tid_tx[tid]))
		return;

	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
}

static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
                      struct ieee80211_sta *sta, void *priv_sta,
                      struct sk_buff *skb)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *ar = info->status.rates;
	struct minstrel_rate_stats *rate, *rate2;
	struct minstrel_priv *mp = priv;
	bool last, update = false;
	int i;

	if (!msp->is_ht)
		return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
		info->status.ampdu_ack_len =
			(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
		info->status.ampdu_len = 1;
	}

	mi->ampdu_packets++;
	mi->ampdu_len += info->status.ampdu_len;

	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
		mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
		mi->sample_tries = 1;
		mi->sample_count--;
	}

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		mi->sample_packets += info->status.ampdu_len;

	last = !minstrel_ht_txstat_valid(mp, &ar[0]);
	for (i = 0; !last; i++) {
		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
		       !minstrel_ht_txstat_valid(mp, &ar[i + 1]);

		rate = minstrel_ht_get_stats(mp, mi, &ar[i]);

		if (last)
			rate->success += info->status.ampdu_ack_len;

		rate->attempts += ar[i].count * info->status.ampdu_len;
	}

	/*
	 * check for sudden death of spatial multiplexing,
	 * downgrade to a lower number of streams if necessary.
	 */
	rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
	if (rate->attempts > 30 &&
	    MINSTREL_FRAC(rate->success, rate->attempts) <
	    MINSTREL_FRAC(20, 100)) {
		minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
		update = true;
	}

	rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
	if (rate2->attempts > 30 &&
	    MINSTREL_FRAC(rate2->success, rate2->attempts) <
	    MINSTREL_FRAC(20, 100)) {
		minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
		update = true;
	}

	if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
		update = true;
		minstrel_ht_update_stats(mp, mi);
		if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
		    mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
			minstrel_aggr_check(sta, skb);
	}

	if (update)
		minstrel_ht_update_rates(mp, mi);
}
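
/*
 * Calculate the retry budget for a rate: keep adding tries (each with a
 * growing contention window backoff) as long as the estimated total
 * transmission time still fits within mp->segment_size.
 */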

static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                         int index)
{
	struct minstrel_rate_stats *mr;
	const struct mcs_group *group;
	unsigned int tx_time, tx_time_rtscts, tx_time_data;
	unsigned int cw = mp->cw_min;
	unsigned int ctime = 0;
	unsigned int t_slot = 9; /* FIXME */
	unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
	unsigned int overhead = 0, overhead_rtscts = 0;

	mr = minstrel_get_ratestats(mi, index);
	if (mr->probability < MINSTREL_FRAC(1, 10)) {
		mr->retry_count = 1;
		mr->retry_count_rtscts = 1;
		return;
	}

	mr->retry_count = 2;
	mr->retry_count_rtscts = 2;
	mr->retry_updated = true;

	group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;

	/* Contention time for first 2 tries */
	ctime = (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);
	ctime += (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);

	if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
		overhead = mi->overhead;
		overhead_rtscts = mi->overhead_rtscts;
	}

	/* Total TX time for data and Contention after first 2 tries */
	tx_time = ctime + 2 * (overhead + tx_time_data);
	tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);

	/* See how many more tries we can fit inside segment size */
	do {
		/* Contention time for this try */
		ctime = (t_slot * cw) >> 1;
		cw = min((cw << 1) | 1, mp->cw_max);

		/* Total TX time after this try */
		tx_time += ctime + overhead + tx_time_data;
		tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;

		if (tx_time_rtscts < mp->segment_size)
			mr->retry_count_rtscts++;
	} while ((tx_time < mp->segment_size) &&
	         (++mr->retry_count < mp->max_retry));
}


static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                     struct ieee80211_sta_rates *ratetbl, int offset, int index)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	struct minstrel_rate_stats *mr;
	u8 idx;
	u16 flags;

	mr = minstrel_get_ratestats(mi, index);
	if (!mr->retry_updated)
		minstrel_calc_retransmit(mp, mi, index);

	if (mr->probability < MINSTREL_FRAC(20, 100) || !mr->retry_count) {
		ratetbl->rate[offset].count = 2;
		ratetbl->rate[offset].count_rts = 2;
		ratetbl->rate[offset].count_cts = 2;
	} else {
		ratetbl->rate[offset].count = mr->retry_count;
		ratetbl->rate[offset].count_cts = mr->retry_count;
		ratetbl->rate[offset].count_rts = mr->retry_count_rtscts;
	}

	if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
		idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
		flags = 0;
	} else {
		idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
		flags = IEEE80211_TX_RC_MCS | group->flags;
	}

	if (offset > 0) {
		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
		flags |= IEEE80211_TX_RC_USE_RTS_CTS;
	}

	ratetbl->rate[offset].idx = idx;
	ratetbl->rate[offset].flags = flags;
}
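
/*
 * Build the ieee80211_sta_rates table handed to mac80211: the best
 * throughput rate first, then (if the hardware supports enough rate slots)
 * the second best throughput rate and the most robust max_prob_rate as
 * fallbacks, terminated with idx = -1.
 */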

static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct ieee80211_sta_rates *rates;
	int i = 0;

	rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
	if (!rates)
		return;

	/* Start with max_tp_rate[0] */
	minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);

	if (mp->hw->max_rates >= 3) {
		/* At least 3 tx rates supported, use max_tp_rate[1] next */
		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
	}

	if (mp->hw->max_rates >= 2) {
		/* At least 2 tx rates supported, use max_prob_rate next */
		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
	}

	rates->rate[i].idx = -1;
	rate_control_set_rates(mp->hw, mi->sta, rates);
}

static inline int
minstrel_get_duration(int index)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	return group->duration[index % MCS_GROUP_RATES];
}
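
/*
 * Pick the next rate index to probe, or return -1 if this frame should not
 * be used for sampling (sampling is rate limited via sample_wait and
 * sample_tries, and skipped for rates that are already in active use or
 * already known to work well).
 */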

static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_rate_stats *mr;
	struct minstrel_mcs_group_data *mg;
	unsigned int sample_dur, sample_group, cur_max_tp_streams;
	int sample_idx = 0;

	if (mi->sample_wait > 0) {
		mi->sample_wait--;
		return -1;
	}

	if (!mi->sample_tries)
		return -1;

	sample_group = mi->sample_group;
	mg = &mi->groups[sample_group];
	sample_idx = sample_table[mg->column][mg->index];
	minstrel_next_sample_idx(mi);

	if (!(mg->supported & BIT(sample_idx)))
		return -1;

	mr = &mg->rates[sample_idx];
	sample_idx += sample_group * MCS_GROUP_RATES;

	/*
	 * Sampling might add some overhead (RTS, no aggregation)
	 * to the frame. Hence, don't use sampling for the currently
	 * used rates.
	 */
	if (sample_idx == mi->max_tp_rate[0] ||
	    sample_idx == mi->max_tp_rate[1] ||
	    sample_idx == mi->max_prob_rate)
		return -1;

	/*
	 * Do not sample if the probability is already higher than 95%
	 * to avoid wasting airtime.
	 */
	if (mr->probability > MINSTREL_FRAC(95, 100))
		return -1;

	/*
	 * Make sure that lower rates get sampled only occasionally,
	 * if the link is working perfectly.
	 */

	cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
		MCS_GROUP_RATES].streams;
	sample_dur = minstrel_get_duration(sample_idx);
	if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) &&
	    (cur_max_tp_streams - 1 <
	     minstrel_mcs_groups[sample_group].streams ||
	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
		if (mr->sample_skipped < 20)
			return -1;

		if (mi->sample_slow++ > 2)
			return -1;
	}
	mi->sample_tries--;

	return sample_idx;
}

static void
minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
				    struct minstrel_ht_sta *mi, bool val)
{
	u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;

	if (!supported || !mi->cck_supported_short)
		return;

	if (supported & (mi->cck_supported_short << (val * 4)))
		return;

	supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
	mi->groups[MINSTREL_CCK_GROUP].supported = supported;
}

static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                     struct ieee80211_tx_rate_control *txrc)
{
	const struct mcs_group *sample_group;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
	struct ieee80211_tx_rate *rate = &info->status.rates[0];
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct minstrel_priv *mp = priv;
	int sample_idx;

	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	if (!msp->is_ht)
		return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);

	info->flags |= mi->tx_flags;
	minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);

#ifdef CONFIG_MAC80211_DEBUGFS
	if (mp->fixed_rate_idx != -1)
		return;
#endif

	/* Don't use EAPOL frames for sampling on non-mrr hw */
	if (mp->hw->max_rates == 1 &&
	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
		sample_idx = -1;
	else
		sample_idx = minstrel_get_sample_rate(mp, mi);

	mi->total_packets++;

	/* wraparound */
	if (mi->total_packets == ~0) {
		mi->total_packets = 0;
		mi->sample_packets = 0;
	}

	if (sample_idx < 0)
		return;

	sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
	rate->count = 1;

	if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
		int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
		rate->idx = mp->cck_rates[idx];
		rate->flags = 0;
		return;
	}

	rate->idx = sample_idx % MCS_GROUP_RATES +
		    (sample_group->streams - 1) * 8;
	rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
}

static void
minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
		       struct ieee80211_supported_band *sband,
		       struct ieee80211_sta *sta)
{
	int i;

	if (sband->band != IEEE80211_BAND_2GHZ)
		return;

	if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
		return;

	mi->cck_supported = 0;
	mi->cck_supported_short = 0;
	for (i = 0; i < 4; i++) {
		if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
			continue;

		mi->cck_supported |= BIT(i);
		if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
			mi->cck_supported_short |= BIT(i);
	}

	mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
}

static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
			struct cfg80211_chan_def *chandef,
                        struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_priv *mp = priv;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
	u16 sta_cap = sta->ht_cap.cap;
	int n_supported = 0;
	int ack_dur;
	int stbc;
	int i;

	/* fall back to the old minstrel for legacy stations */
	if (!sta->ht_cap.ht_supported)
		goto use_legacy;

	BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
		MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS + 1);

	msp->is_ht = true;
	memset(mi, 0, sizeof(*mi));

	mi->sta = sta;
	mi->stats_update = jiffies;

	ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
	mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
	mi->overhead += ack_dur;
	mi->overhead_rtscts = mi->overhead + 2 * ack_dur;

	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);

	/* When using MRR, sample more on the first attempt, without delay */
	if (mp->has_mrr) {
		mi->sample_count = 16;
		mi->sample_wait = 0;
	} else {
		mi->sample_count = 8;
		mi->sample_wait = 8;
	}
	mi->sample_tries = 4;

	stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
		IEEE80211_HT_CAP_RX_STBC_SHIFT;
	mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;

	if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
		mi->tx_flags |= IEEE80211_TX_CTL_LDPC;

	for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
		mi->groups[i].supported = 0;
		if (i == MINSTREL_CCK_GROUP) {
			minstrel_ht_update_cck(mp, mi, sband, sta);
			continue;
		}

		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
			if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
				if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
					continue;
			} else {
				if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
					continue;
			}
		}

		if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
		    sta->bandwidth < IEEE80211_STA_RX_BW_40)
			continue;

		/* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
		if (sta->smps_mode == IEEE80211_SMPS_STATIC &&
		    minstrel_mcs_groups[i].streams > 1)
			continue;

		mi->groups[i].supported =
			mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];

		if (mi->groups[i].supported)
			n_supported++;
	}

	if (!n_supported)
		goto use_legacy;

	/* create an initial rate table with the lowest supported rates */
	minstrel_ht_update_stats(mp, mi);
	minstrel_ht_update_rates(mp, mi);

	return;

use_legacy:
	msp->is_ht = false;
	memset(&msp->legacy, 0, sizeof(msp->legacy));
	msp->legacy.r = msp->ratelist;
	msp->legacy.sample_table = msp->sample_table;
	return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
					   &msp->legacy);
}

static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
		      struct cfg80211_chan_def *chandef,
                      struct ieee80211_sta *sta, void *priv_sta)
{
	minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}

static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
			struct cfg80211_chan_def *chandef,
                        struct ieee80211_sta *sta, void *priv_sta,
                        u32 changed)
{
	minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}

static void *
minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct ieee80211_supported_band *sband;
	struct minstrel_ht_sta_priv *msp;
	struct minstrel_priv *mp = priv;
	struct ieee80211_hw *hw = mp->hw;
	int max_rates = 0;
	int i;

	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	msp = kzalloc(sizeof(*msp), gfp);
	if (!msp)
		return NULL;

	msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
	if (!msp->ratelist)
		goto error;

	msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
	if (!msp->sample_table)
		goto error1;

	return msp;

error1:
	kfree(msp->ratelist);
error:
	kfree(msp);
	return NULL;
}

static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;

	kfree(msp->sample_table);
	kfree(msp->ratelist);
	kfree(msp);
}

static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return mac80211_minstrel.alloc(hw, debugfsdir);
}

static void
minstrel_ht_free(void *priv)
{
	mac80211_minstrel.free(priv);
}

static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	int i, j;

	if (!msp->is_ht)
		return mac80211_minstrel.get_expected_throughput(priv_sta);

	i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
	j = mi->max_tp_rate[0] % MCS_GROUP_RATES;

	/* convert cur_tp from packets per second to kbps */
	return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
}

static const struct rate_control_ops mac80211_minstrel_ht = {
	.name = "minstrel_ht",
	.tx_status = minstrel_ht_tx_status,
	.get_rate = minstrel_ht_get_rate,
	.rate_init = minstrel_ht_rate_init,
	.rate_update = minstrel_ht_rate_update,
	.alloc_sta = minstrel_ht_alloc_sta,
	.free_sta = minstrel_ht_free_sta,
	.alloc = minstrel_ht_alloc,
	.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
	.remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
#endif
	.get_expected_throughput = minstrel_ht_get_expected_throughput,
};
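
/*
 * Fill sample_table with a random permutation of the MCS_GROUP_RATES rate
 * indexes in each column, so that sampling walks the rates of a group in a
 * different pseudo-random order per column.
 */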


static void __init init_sample_table(void)
{
	int col, i, new_idx;
	u8 rnd[MCS_GROUP_RATES];

	memset(sample_table, 0xff, sizeof(sample_table));
	for (col = 0; col < SAMPLE_COLUMNS; col++) {
		prandom_bytes(rnd, sizeof(rnd));
		for (i = 0; i < MCS_GROUP_RATES; i++) {
			new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
			while (sample_table[col][new_idx] != 0xff)
				new_idx = (new_idx + 1) % MCS_GROUP_RATES;

			sample_table[col][new_idx] = i;
		}
	}
}

int __init
rc80211_minstrel_ht_init(void)
{
	init_sample_table();
	return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}

void
rc80211_minstrel_ht_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}