/*
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/ieee80211.h>
#include <net/mac80211.h>
#include "rate.h"
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"

#define AVG_PKT_SIZE	1200

/* Number of bits for an average sized packet */
#define MCS_NBITS (AVG_PKT_SIZE << 3)

/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))

/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms)					\
	(sgi ?								\
	  ((syms) * 18000 + 4000) / 5 :	/* syms * 3.6 us */		\
	  ((syms) * 1000) << 2		/* syms * 4 us */		\
	)

/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
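
/*
 * Example (illustrative): one stream, long GI, MCS 0 at 20 MHz carries 26
 * data bits per symbol, so MCS_NSYMS(26) = DIV_ROUND_UP(9600, 26) = 370
 * symbols and MCS_DURATION(1, 0, 26) = 370 * 4 us = 1480000 ns.
 */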

#define BW_20			0
#define BW_40			1

/*
 * Define group sort order: HT40 -> SGI -> #streams
 */
#define GROUP_IDX(_streams, _sgi, _ht40)	\
	MINSTREL_HT_GROUP_0 +			\
	MINSTREL_MAX_STREAMS * 2 * _ht40 +	\
	MINSTREL_MAX_STREAMS * _sgi +	\
	_streams - 1
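
/*
 * Example (assuming MINSTREL_MAX_STREAMS == 3): GROUP_IDX(2, 1, 1), i.e. two
 * streams with short GI at HT40, resolves to MINSTREL_HT_GROUP_0 + 6 + 3 + 1.
 */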

/* MCS rate information for an MCS group */
#define MCS_GROUP(_streams, _sgi, _ht40)				\
	[GROUP_IDX(_streams, _sgi, _ht40)] = {				\
	.streams = _streams,						\
	.flags =							\
		IEEE80211_TX_RC_MCS |					\
		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
		(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0),		\
	.duration = {							\
		MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260)		\
	}								\
}
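
/*
 * The eight duration[] entries above correspond to MCS 0-7 of the group; the
 * values passed to MCS_DURATION() are the data bits carried per OFDM symbol
 * and per stream for each MCS (26..260 at 20 MHz, 54..540 at 40 MHz).
 */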

#define CCK_DURATION(_bitrate, _short, _len)		\
	(1000 * (10 /* SIFS */ +			\
	 (_short ? 72 + 24 : 144 + 48) +		\
	 (8 * (_len + 4) * 10) / (_bitrate)))

#define CCK_ACK_DURATION(_bitrate, _short)			\
	(CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) +	\
	 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))
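
/*
 * In the CCK helpers above, _bitrate is given in units of 100 kbps (10 = 1
 * Mbit/s, 110 = 11 Mbit/s) and the result is in nanoseconds, covering SIFS,
 * the long or short preamble plus PLCP header and the payload with FCS;
 * CCK_ACK_DURATION additionally accounts for the ACK response time.
 */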

#define CCK_DURATION_LIST(_short)			\
	CCK_ACK_DURATION(10, _short),			\
	CCK_ACK_DURATION(20, _short),			\
	CCK_ACK_DURATION(55, _short),			\
	CCK_ACK_DURATION(110, _short)

#define CCK_GROUP					\
	[MINSTREL_CCK_GROUP] = {			\
		.streams = 0,				\
		.flags = 0,				\
		.duration = {				\
			CCK_DURATION_LIST(false),	\
			CCK_DURATION_LIST(true)		\
		}					\
	}

/*
 * To enable sufficiently targeted rate sampling, MCS rates are divided into
 * groups, based on the number of streams and flags (HT40, SGI) that they
 * use.
 *
 * The sort order has to be fixed for the GROUP_IDX macro to be applicable:
 * BW -> SGI -> #streams
 */
const struct mcs_group minstrel_mcs_groups[] = {
	MCS_GROUP(1, 0, BW_20),
	MCS_GROUP(2, 0, BW_20),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 0, BW_20),
#endif

	MCS_GROUP(1, 1, BW_20),
	MCS_GROUP(2, 1, BW_20),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 1, BW_20),
#endif

	MCS_GROUP(1, 0, BW_40),
	MCS_GROUP(2, 0, BW_40),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 0, BW_40),
#endif

	MCS_GROUP(1, 1, BW_40),
	MCS_GROUP(2, 1, BW_40),
#if MINSTREL_MAX_STREAMS >= 3
	MCS_GROUP(3, 1, BW_40),
#endif

	CCK_GROUP
};

static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;

static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);

/*
 * Look up an MCS group index based on mac80211 rate information
 */
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
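	/*
	 * HT MCS indexes come in blocks of 8 per stream count (MCS 0-7: one
	 * stream, 8-15: two streams, ...), so idx / 8 + 1 is the stream count.
	 */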
	return GROUP_IDX((rate->idx / 8) + 1,
			 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
			 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}

static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
		      struct ieee80211_tx_rate *rate)
{
	int group, idx;

	if (rate->flags & IEEE80211_TX_RC_MCS) {
		group = minstrel_ht_get_group_idx(rate);
		idx = rate->idx % 8;
	} else {
		group = MINSTREL_CCK_GROUP;

		for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
			if (rate->idx == mp->cck_rates[idx])
				break;

		/* short preamble */
		if (!(mi->groups[group].supported & BIT(idx)))
			idx += 4;
	}
	return &mi->groups[group].rates[idx];
}

static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
{
	return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
}


/*
 * Recalculate success probabilities and counters for a rate using EWMA
 */
static void
minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
{
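	/*
	 * cur_prob is the raw success ratio of the last interval, while
	 * probability is the EWMA-smoothed value used for rate decisions.
	 */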
	if (unlikely(mr->attempts > 0)) {
		mr->sample_skipped = 0;
		mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
		if (!mr->att_hist)
			mr->probability = mr->cur_prob;
		else
			mr->probability = minstrel_ewma(mr->probability,
				mr->cur_prob, EWMA_LEVEL);
		mr->att_hist += mr->attempts;
		mr->succ_hist += mr->success;
	} else {
		mr->sample_skipped++;
	}
	mr->last_success = mr->success;
	mr->last_attempts = mr->attempts;
	mr->success = 0;
	mr->attempts = 0;
}

/*
 * Calculate throughput based on the average A-MPDU length, taking into account
 * the expected number of retransmissions and their expected length
 */
static void
minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
{
	struct minstrel_rate_stats *mr;
	unsigned int nsecs = 0;
	unsigned int tp;
	unsigned int prob;

	mr = &mi->groups[group].rates[rate];
	prob = mr->probability;

	if (prob < MINSTREL_FRAC(1, 10)) {
		mr->cur_tp = 0;
		return;
	}

	/*
	 * For the throughput calculation, limit the probability value to 90% to
	 * account for collision related packet error rate fluctuation
	 */
	if (prob > MINSTREL_FRAC(9, 10))
		prob = MINSTREL_FRAC(9, 10);

	if (group != MINSTREL_CCK_GROUP)
		nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);

	nsecs += minstrel_mcs_groups[group].duration[rate];

	/* prob is scaled - see MINSTREL_FRAC above */
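	/*
	 * nsecs is the expected airtime per packet, so prob * 10^9 / nsecs
	 * approximates the number of delivered packets per second, still
	 * scaled by MINSTREL_FRAC until the MINSTREL_TRUNC() below.
	 */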
	tp = 1000000 * ((prob * 1000) / nsecs);
	mr->cur_tp = MINSTREL_TRUNC(tp);
}

/*
 * Find & sort topmost throughput rates
 *
 * If multiple rates provide equal throughput, the sorting is based on their
 * current success probability. Higher success probability is preferred among
 * MCS groups; CCK rates do not allow aggregation and are therefore sorted last.
 */
static void
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
			       u16 *tp_list)
{
	int cur_group, cur_idx, cur_thr, cur_prob;
	int tmp_group, tmp_idx, tmp_thr, tmp_prob;
	int j = MAX_THR_RATES;
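
	/*
	 * Insertion sort: walk tp_list upwards from its lowest entry while the
	 * new rate beats the entry (by throughput, then probability), then
	 * shift the lower entries down and insert the new rate.
	 */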

	cur_group = index / MCS_GROUP_RATES;
	cur_idx = index  % MCS_GROUP_RATES;
	cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
	cur_prob = mi->groups[cur_group].rates[cur_idx].probability;

	tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
	tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
	tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;

	while (j > 0 && (cur_thr > tmp_thr ||
	      (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
		j--;
		tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
		tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
		tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
	}

	if (j < MAX_THR_RATES - 1) {
		memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
		       (MAX_THR_RATES - (j + 1))));
	}
	if (j < MAX_THR_RATES)
		tp_list[j] = index;
}

/*
 * Find and set the topmost probability rate per sta and per group
 */
static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group;

	mg = &mi->groups[index / MCS_GROUP_RATES];
	mr = &mg->rates[index % MCS_GROUP_RATES];

	tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
	tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
	tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;

	/* If max_tp_rate[0] is from an MCS group, select max_prob_rate from an
	 * MCS group as well, since CCK rates do not allow aggregation. */
	max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
	if((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) &&
	    (max_tp_group != MINSTREL_CCK_GROUP))
		return;

	if (mr->probability > MINSTREL_FRAC(75, 100)) {
		if (mr->cur_tp > tmp_tp)
			mi->max_prob_rate = index;
		if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp)
			mg->max_group_prob_rate = index;
	} else {
		if (mr->probability > tmp_prob)
			mi->max_prob_rate = index;
		if (mr->probability > mg->rates[mg->max_group_prob_rate].probability)
			mg->max_group_prob_rate = index;
	}
}


/*
 * Assign the new rate set per sta, using CCK rates only if the fastest rate
 * (max_tp_rate[0]) is from the CCK group. This prevents sorted rate sets that
 * mix MCS and CCK rates, because CCK rates cannot use aggregation.
 */
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
				 u16 tmp_mcs_tp_rate[MAX_THR_RATES],
				 u16 tmp_cck_tp_rate[MAX_THR_RATES])
{
	unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp;
	int i;

	tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
	tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;

	tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
	tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;

	if (tmp_cck_tp > tmp_mcs_tp) {
		for(i = 0; i < MAX_THR_RATES; i++) {
			minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i],
						       tmp_mcs_tp_rate);
		}
	}

}

/*
 * Try to increase the robustness of the max_prob rate by decreasing the
 * number of streams if possible.
 */
static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int tmp_max_streams, group;
	int tmp_tp = 0;

	tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
			  MCS_GROUP_RATES].streams;
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		mg = &mi->groups[group];
		if (!mg->supported || group == MINSTREL_CCK_GROUP)
			continue;
		mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate);
		if (tmp_tp < mr->cur_tp &&
		   (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
				mi->max_prob_rate = mg->max_group_prob_rate;
				tmp_tp = mr->cur_tp;
		}
	}
}

/*
 * Update rate statistics and select new primary rates
 *
 * Rules for rate selection:
 *  - max_prob_rate must use only one stream, as a tradeoff between delivery
 *    probability and throughput during strong fluctuations
 *  - as long as the max prob rate has a probability of more than 75%, pick
 *    higher throughput rates, even if the probability is a bit lower
 */
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mr;
	int group, i, j;
	u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
	u16 tmp_cck_tp_rate[MAX_THR_RATES], index;

	if (mi->ampdu_packets > 0) {
		mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
			MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
		mi->ampdu_len = 0;
		mi->ampdu_packets = 0;
	}

	mi->sample_slow = 0;
	mi->sample_count = 0;

	/* Initialize global rate indexes */
	for(j = 0; j < MAX_THR_RATES; j++){
		tmp_mcs_tp_rate[j] = 0;
		tmp_cck_tp_rate[j] = 0;
	}

	/* Find best rate sets within all MCS groups*/
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {

		mg = &mi->groups[group];
		if (!mg->supported)
			continue;

		mi->sample_count++;

		/* (re)Initialize group rate indexes */
		for(j = 0; j < MAX_THR_RATES; j++)
			tmp_group_tp_rate[j] = group;

		for (i = 0; i < MCS_GROUP_RATES; i++) {
			if (!(mg->supported & BIT(i)))
				continue;

			index = MCS_GROUP_RATES * group + i;

			mr = &mg->rates[i];
			mr->retry_updated = false;
			minstrel_calc_rate_ewma(mr);
			minstrel_ht_calc_tp(mi, group, i);

			if (!mr->cur_tp)
				continue;

			/* Find max throughput rate set */
			if (group != MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_mcs_tp_rate);
			} else if (group == MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_cck_tp_rate);
			}

			/* Find max throughput rate set within a group */
			minstrel_ht_sort_best_tp_rates(mi, index,
						       tmp_group_tp_rate);

			/* Find max probability rate per group and global */
			minstrel_ht_set_best_prob_rate(mi, index);
		}

		memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
		       sizeof(mg->max_group_tp_rate));
	}

	/* Assign new rate set per sta */
	minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate);
	memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));

	/* Try to increase robustness of max_prob_rate*/
	minstrel_ht_prob_rate_reduce_streams(mi);

	/* try to sample all available rates during each interval */
	mi->sample_count *= 8;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1) {
		for (i = 0; i < 4; i++)
			mi->max_tp_rate[i] = mp->fixed_rate_idx;
		mi->max_prob_rate = mp->fixed_rate_idx;
	}
#endif

	/* Reset update timer */
	mi->stats_update = jiffies;
}

static bool
minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
{
	if (rate->idx < 0)
		return false;

	if (!rate->count)
		return false;

	if (rate->flags & IEEE80211_TX_RC_MCS)
		return true;

	return rate->idx == mp->cck_rates[0] ||
	       rate->idx == mp->cck_rates[1] ||
	       rate->idx == mp->cck_rates[2] ||
	       rate->idx == mp->cck_rates[3];
}

static void
minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;

	for (;;) {
		mi->sample_group++;
		mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
		mg = &mi->groups[mi->sample_group];

		if (!mg->supported)
			continue;

		if (++mg->index >= MCS_GROUP_RATES) {
			mg->index = 0;
			if (++mg->column >= ARRAY_SIZE(sample_table))
				mg->column = 0;
		}
		break;
	}
}

static void
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
{
	int group, orig_group;

	orig_group = group = *idx / MCS_GROUP_RATES;
	while (group > 0) {
		group--;

		if (!mi->groups[group].supported)
			continue;

		if (minstrel_mcs_groups[group].streams >
		    minstrel_mcs_groups[orig_group].streams)
			continue;

		if (primary)
			*idx = mi->groups[group].max_group_tp_rate[0];
		else
			*idx = mi->groups[group].max_group_tp_rate[1];
		break;
	}
}

static void
minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	u16 tid;

	if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
		return;

	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
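	/* a BA session for this TID already exists or is being set up */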
	if (likely(sta->ampdu_mlme.tid_tx[tid]))
		return;

	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
}

static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
                      struct ieee80211_sta *sta, void *priv_sta,
                      struct sk_buff *skb)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *ar = info->status.rates;
	struct minstrel_rate_stats *rate, *rate2;
	struct minstrel_priv *mp = priv;
	bool last, update = false;
	int i;

	if (!msp->is_ht)
		return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
		info->status.ampdu_ack_len =
			(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
		info->status.ampdu_len = 1;
	}

	mi->ampdu_packets++;
	mi->ampdu_len += info->status.ampdu_len;

	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
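		/* schedule the next sampling attempt, waiting longer for
		 * larger average A-MPDU lengths */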
		mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
		mi->sample_tries = 1;
		mi->sample_count--;
	}

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		mi->sample_packets += info->status.ampdu_len;

	last = !minstrel_ht_txstat_valid(mp, &ar[0]);
	for (i = 0; !last; i++) {
		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
		       !minstrel_ht_txstat_valid(mp, &ar[i + 1]);

		rate = minstrel_ht_get_stats(mp, mi, &ar[i]);

		if (last)
			rate->success += info->status.ampdu_ack_len;

		rate->attempts += ar[i].count * info->status.ampdu_len;
	}

	/*
	 * check for sudden death of spatial multiplexing,
	 * downgrade to a lower number of streams if necessary.
	 */
	rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
	if (rate->attempts > 30 &&
	    MINSTREL_FRAC(rate->success, rate->attempts) <
	    MINSTREL_FRAC(20, 100)) {
		minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
		update = true;
	}

	rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
	if (rate2->attempts > 30 &&
	    MINSTREL_FRAC(rate2->success, rate2->attempts) <
	    MINSTREL_FRAC(20, 100)) {
		minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
		update = true;
	}

	if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
		update = true;
		minstrel_ht_update_stats(mp, mi);
		if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
		    mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
			minstrel_aggr_check(sta, skb);
	}

	if (update)
		minstrel_ht_update_rates(mp, mi);
}

static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                         int index)
{
	struct minstrel_rate_stats *mr;
	const struct mcs_group *group;
	unsigned int tx_time, tx_time_rtscts, tx_time_data;
	unsigned int cw = mp->cw_min;
	unsigned int ctime = 0;
	unsigned int t_slot = 9; /* FIXME */
	unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
	unsigned int overhead = 0, overhead_rtscts = 0;

	mr = minstrel_get_ratestats(mi, index);
	if (mr->probability < MINSTREL_FRAC(1, 10)) {
		mr->retry_count = 1;
		mr->retry_count_rtscts = 1;
		return;
	}

	mr->retry_count = 2;
	mr->retry_count_rtscts = 2;
	mr->retry_updated = true;

	group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;

	/* Contention time for first 2 tries */
	ctime = (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);
	ctime += (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);

	if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
		overhead = mi->overhead;
		overhead_rtscts = mi->overhead_rtscts;
	}

	/* Total TX time for data and Contention after first 2 tries */
	tx_time = ctime + 2 * (overhead + tx_time_data);
	tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);

	/* See how many more tries we can fit inside segment size */
	do {
		/* Contention time for this try */
		ctime = (t_slot * cw) >> 1;
		cw = min((cw << 1) | 1, mp->cw_max);

		/* Total TX time after this try */
		tx_time += ctime + overhead + tx_time_data;
		tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;

		if (tx_time_rtscts < mp->segment_size)
			mr->retry_count_rtscts++;
	} while ((tx_time < mp->segment_size) &&
	         (++mr->retry_count < mp->max_retry));
}


static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                     struct ieee80211_sta_rates *ratetbl, int offset, int index)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	struct minstrel_rate_stats *mr;
	u8 idx;
	u16 flags = group->flags;

	mr = minstrel_get_ratestats(mi, index);
	if (!mr->retry_updated)
		minstrel_calc_retransmit(mp, mi, index);

	if (mr->probability < MINSTREL_FRAC(20, 100) || !mr->retry_count) {
		ratetbl->rate[offset].count = 2;
		ratetbl->rate[offset].count_rts = 2;
		ratetbl->rate[offset].count_cts = 2;
	} else {
		ratetbl->rate[offset].count = mr->retry_count;
		ratetbl->rate[offset].count_cts = mr->retry_count;
		ratetbl->rate[offset].count_rts = mr->retry_count_rtscts;
	}

	if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP)
		idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
	else
		idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;

	if (offset > 0) {
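		/* all rates after the first entry are set up as RTS/CTS
		 * fallbacks */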
		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
		flags |= IEEE80211_TX_RC_USE_RTS_CTS;
	}

	ratetbl->rate[offset].idx = idx;
	ratetbl->rate[offset].flags = flags;
}

static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct ieee80211_sta_rates *rates;
	int i = 0;

	rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
	if (!rates)
		return;

	/* Start with max_tp_rate[0] */
	minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);

	if (mp->hw->max_rates >= 3) {
		/* At least 3 tx rates supported, use max_tp_rate[1] next */
		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
	}

	if (mp->hw->max_rates >= 2) {
		/* At least 2 tx rates supported, use max_prob_rate next */
		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
	}

	rates->rate[i].idx = -1;
	rate_control_set_rates(mp->hw, mi->sta, rates);
}

static inline int
minstrel_get_duration(int index)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	return group->duration[index % MCS_GROUP_RATES];
}

static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_rate_stats *mr;
	struct minstrel_mcs_group_data *mg;
	unsigned int sample_dur, sample_group, cur_max_tp_streams;
	int sample_idx = 0;

	if (mi->sample_wait > 0) {
		mi->sample_wait--;
		return -1;
	}

	if (!mi->sample_tries)
		return -1;

	sample_group = mi->sample_group;
	mg = &mi->groups[sample_group];
	sample_idx = sample_table[mg->column][mg->index];
	minstrel_next_sample_idx(mi);

	if (!(mg->supported & BIT(sample_idx)))
		return -1;

	mr = &mg->rates[sample_idx];
	sample_idx += sample_group * MCS_GROUP_RATES;

	/*
	 * Sampling might add some overhead (RTS, no aggregation)
	 * to the frame. Hence, don't use sampling for the currently
	 * used rates.
	 */
	if (sample_idx == mi->max_tp_rate[0] ||
	    sample_idx == mi->max_tp_rate[1] ||
	    sample_idx == mi->max_prob_rate)
		return -1;

	/*
	 * Do not sample if the probability is already higher than 95%
	 * to avoid wasting airtime.
	 */
	if (mr->probability > MINSTREL_FRAC(95, 100))
		return -1;

	/*
	 * Make sure that lower rates get sampled only occasionally,
	 * if the link is working perfectly.
	 */

	cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
		MCS_GROUP_RATES].streams;
	sample_dur = minstrel_get_duration(sample_idx);
	if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) &&
	    (cur_max_tp_streams - 1 <
	     minstrel_mcs_groups[sample_group].streams ||
	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
		if (mr->sample_skipped < 20)
			return -1;

		if (mi->sample_slow++ > 2)
			return -1;
	}
	mi->sample_tries--;

	return sample_idx;
}

static void
minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
				    struct minstrel_ht_sta *mi, bool val)
{
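	/*
	 * Rates 0-3 of the CCK group are the long preamble variants and rates
	 * 4-7 the short preamble ones; switch the supported mask between the
	 * two halves when the short preamble setting (val) changes.
	 */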
	u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;

	if (!supported || !mi->cck_supported_short)
		return;

	if (supported & (mi->cck_supported_short << (val * 4)))
		return;

	supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
	mi->groups[MINSTREL_CCK_GROUP].supported = supported;
}

static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                     struct ieee80211_tx_rate_control *txrc)
{
	const struct mcs_group *sample_group;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
	struct ieee80211_tx_rate *rate = &info->status.rates[0];
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct minstrel_priv *mp = priv;
	int sample_idx;

	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	if (!msp->is_ht)
		return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);

	info->flags |= mi->tx_flags;
	minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);

#ifdef CONFIG_MAC80211_DEBUGFS
	if (mp->fixed_rate_idx != -1)
		return;
#endif

	/* Don't use EAPOL frames for sampling on non-mrr hw */
	if (mp->hw->max_rates == 1 &&
	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
		sample_idx = -1;
	else
		sample_idx = minstrel_get_sample_rate(mp, mi);

	mi->total_packets++;

	/* wraparound */
	if (mi->total_packets == ~0) {
		mi->total_packets = 0;
		mi->sample_packets = 0;
	}

	if (sample_idx < 0)
		return;

	sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
	rate->count = 1;

	if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
		int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
		rate->idx = mp->cck_rates[idx];
	} else {
		rate->idx = sample_idx % MCS_GROUP_RATES +
			    (sample_group->streams - 1) * 8;
	}

	rate->flags = sample_group->flags;
}

static void
minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
		       struct ieee80211_supported_band *sband,
		       struct ieee80211_sta *sta)
{
	int i;

	if (sband->band != IEEE80211_BAND_2GHZ)
		return;

	if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
		return;

	mi->cck_supported = 0;
	mi->cck_supported_short = 0;
	for (i = 0; i < 4; i++) {
		if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
			continue;

		mi->cck_supported |= BIT(i);
		if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
			mi->cck_supported_short |= BIT(i);
	}

	mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
}

static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
			struct cfg80211_chan_def *chandef,
                        struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_priv *mp = priv;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
	u16 sta_cap = sta->ht_cap.cap;
	int n_supported = 0;
	int ack_dur;
	int stbc;
	int i;

	/* fall back to the old minstrel for legacy stations */
	if (!sta->ht_cap.ht_supported)
		goto use_legacy;

	BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);

	msp->is_ht = true;
	memset(mi, 0, sizeof(*mi));

	mi->sta = sta;
	mi->stats_update = jiffies;

	ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
	mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
	mi->overhead += ack_dur;
	mi->overhead_rtscts = mi->overhead + 2 * ack_dur;

	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);

	/* When using MRR, sample more on the first attempt, without delay */
	if (mp->has_mrr) {
		mi->sample_count = 16;
		mi->sample_wait = 0;
	} else {
		mi->sample_count = 8;
		mi->sample_wait = 8;
	}
	mi->sample_tries = 4;

	stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
		IEEE80211_HT_CAP_RX_STBC_SHIFT;
	mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;

	if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
		mi->tx_flags |= IEEE80211_TX_CTL_LDPC;

	for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
		u32 gflags = minstrel_mcs_groups[i].flags;

		mi->groups[i].supported = 0;
		if (i == MINSTREL_CCK_GROUP) {
			minstrel_ht_update_cck(mp, mi, sband, sta);
			continue;
		}

		if (gflags & IEEE80211_TX_RC_SHORT_GI) {
			if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
				if (!(sta_cap & IEEE80211_HT_CAP_SGI_40))
					continue;
			} else {
				if (!(sta_cap & IEEE80211_HT_CAP_SGI_20))
					continue;
			}
		}

		if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
		    sta->bandwidth < IEEE80211_STA_RX_BW_40)
			continue;

		/* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
		if (sta->smps_mode == IEEE80211_SMPS_STATIC &&
		    minstrel_mcs_groups[i].streams > 1)
			continue;

		mi->groups[i].supported =
			mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];

		if (mi->groups[i].supported)
			n_supported++;
	}

	if (!n_supported)
		goto use_legacy;

	/* create an initial rate table with the lowest supported rates */
	minstrel_ht_update_stats(mp, mi);
	minstrel_ht_update_rates(mp, mi);

	return;

use_legacy:
	msp->is_ht = false;
	memset(&msp->legacy, 0, sizeof(msp->legacy));
	msp->legacy.r = msp->ratelist;
	msp->legacy.sample_table = msp->sample_table;
	return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
					   &msp->legacy);
}

static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
		      struct cfg80211_chan_def *chandef,
                      struct ieee80211_sta *sta, void *priv_sta)
{
	minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}

static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
			struct cfg80211_chan_def *chandef,
                        struct ieee80211_sta *sta, void *priv_sta,
                        u32 changed)
{
	minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}

static void *
minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct ieee80211_supported_band *sband;
	struct minstrel_ht_sta_priv *msp;
	struct minstrel_priv *mp = priv;
	struct ieee80211_hw *hw = mp->hw;
	int max_rates = 0;
	int i;

	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	msp = kzalloc(sizeof(*msp), gfp);
	if (!msp)
		return NULL;

	msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
	if (!msp->ratelist)
		goto error;

	msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
	if (!msp->sample_table)
		goto error1;

	return msp;

error1:
	kfree(msp->ratelist);
error:
	kfree(msp);
	return NULL;
}

static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;

	kfree(msp->sample_table);
	kfree(msp->ratelist);
	kfree(msp);
}

static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
	return mac80211_minstrel.alloc(hw, debugfsdir);
}

static void
minstrel_ht_free(void *priv)
{
	mac80211_minstrel.free(priv);
}

static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	int i, j;

	if (!msp->is_ht)
		return mac80211_minstrel.get_expected_throughput(priv_sta);

	i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
	j = mi->max_tp_rate[0] % MCS_GROUP_RATES;

	/* convert cur_tp from packets per second to kbps */
	return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
}

static const struct rate_control_ops mac80211_minstrel_ht = {
	.name = "minstrel_ht",
	.tx_status = minstrel_ht_tx_status,
	.get_rate = minstrel_ht_get_rate,
	.rate_init = minstrel_ht_rate_init,
	.rate_update = minstrel_ht_rate_update,
	.alloc_sta = minstrel_ht_alloc_sta,
	.free_sta = minstrel_ht_free_sta,
	.alloc = minstrel_ht_alloc,
	.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
	.remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
#endif
	.get_expected_throughput = minstrel_ht_get_expected_throughput,
};


static void __init init_sample_table(void)
{
	int col, i, new_idx;
	u8 rnd[MCS_GROUP_RATES];

	memset(sample_table, 0xff, sizeof(sample_table));
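	/* fill each column with a random permutation of the rate indexes;
	 * 0xff marks slots that have not been assigned yet */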
	for (col = 0; col < SAMPLE_COLUMNS; col++) {
		prandom_bytes(rnd, sizeof(rnd));
		for (i = 0; i < MCS_GROUP_RATES; i++) {
			new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
			while (sample_table[col][new_idx] != 0xff)
				new_idx = (new_idx + 1) % MCS_GROUP_RATES;

			sample_table[col][new_idx] = i;
		}
	}
}

int __init
rc80211_minstrel_ht_init(void)
{
	init_sample_table();
	return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}

void
rc80211_minstrel_ht_exit(void)
{
	ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}