rc80211_minstrel_ht.c 40.2 KB
Newer Older
1
/*
2
 * Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
3 4 5 6 7 8 9 10 11 12
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
13
#include <linux/moduleparam.h>
14 15 16
#include <linux/ieee80211.h>
#include <net/mac80211.h>
#include "rate.h"
17
#include "sta_info.h"
18 19 20
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"

21
/* Assumed average A-MPDU aggregation length used to amortize overhead */
#define AVG_AMPDU_SIZE	16
/* Assumed average packet size in bytes for duration estimates */
#define AVG_PKT_SIZE	1200

/* Number of bits for an average sized packet */
#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)

/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))

/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms)					\
	(sgi ?								\
	  ((syms) * 18000 + 4000) / 5 :	/* syms * 3.6 us */		\
	  ((syms) * 1000) << 2		/* syms * 4 us */		\
	)

/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) \
	(MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE)

/* Channel bandwidth identifiers used for group indexing */
#define BW_20			0
#define BW_40			1
#define BW_80			2

/*
 * Define group sort order: HT40 -> SGI -> #streams
 */
#define GROUP_IDX(_streams, _sgi, _ht40)	\
	MINSTREL_HT_GROUP_0 +			\
	MINSTREL_MAX_STREAMS * 2 * _ht40 +	\
	MINSTREL_MAX_STREAMS * _sgi +	\
	_streams - 1

/* MCS rate information for an MCS group */
#define MCS_GROUP(_streams, _sgi, _ht40)				\
	[GROUP_IDX(_streams, _sgi, _ht40)] = {				\
	.streams = _streams,						\
	.flags =							\
		IEEE80211_TX_RC_MCS |					\
		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
		(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0),		\
	.duration = {							\
		MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78),		\
		MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234),	\
		MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260)		\
	}								\
}

/* VHT group index; same sort order as GROUP_IDX: BW -> SGI -> #streams */
#define VHT_GROUP_IDX(_streams, _sgi, _bw)				\
	(MINSTREL_VHT_GROUP_0 +						\
	 MINSTREL_MAX_STREAMS * 2 * (_bw) +				\
	 MINSTREL_MAX_STREAMS * (_sgi) +				\
	 (_streams) - 1)

/* Pick the bits-per-symbol value matching the bandwidth (r3/r2/r1 for 80/40/20) */
#define BW2VBPS(_bw, r3, r2, r1)					\
	(_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)

/* VHT MCS rate information for an MCS group */
#define VHT_GROUP(_streams, _sgi, _bw)					\
	[VHT_GROUP_IDX(_streams, _sgi, _bw)] = {			\
	.streams = _streams,						\
	.flags =							\
		IEEE80211_TX_RC_VHT_MCS |				\
		(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) |			\
		(_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH :		\
		 _bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0),	\
	.duration = {							\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  117,  54,  26)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  234, 108,  52)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  351, 162,  78)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  468, 216, 104)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  702, 324, 156)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw,  936, 432, 208)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1053, 486, 234)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1170, 540, 260)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1404, 648, 312)),		\
		MCS_DURATION(_streams, _sgi,				\
			     BW2VBPS(_bw, 1560, 720, 346))		\
	}								\
}

/* TX duration for a (_len)-byte CCK frame; _bitrate is in 100 kbps units,
 * _short selects the short preamble timing */
#define CCK_DURATION(_bitrate, _short, _len)		\
	(1000 * (10 /* SIFS */ +			\
	 (_short ? 72 + 24 : 144 + 48) +		\
	 (8 * (_len + 4) * 10) / (_bitrate)))

/* Frame duration plus a 60-byte response frame at 1 or 2 Mbps */
#define CCK_ACK_DURATION(_bitrate, _short)			\
	(CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) +	\
	 CCK_DURATION(_bitrate, _short, AVG_PKT_SIZE))

/* Durations for 1, 2, 5.5 and 11 Mbps (bitrates in 100 kbps units) */
#define CCK_DURATION_LIST(_short)			\
	CCK_ACK_DURATION(10, _short),			\
	CCK_ACK_DURATION(20, _short),			\
	CCK_ACK_DURATION(55, _short),			\
	CCK_ACK_DURATION(110, _short)

/* CCK rate group: indexes 0-3 long preamble, 4-7 short preamble */
#define CCK_GROUP					\
	[MINSTREL_CCK_GROUP] = {			\
		.streams = 0,				\
		.flags = 0,				\
		.duration = {				\
			CCK_DURATION_LIST(false),	\
			CCK_DURATION_LIST(true)		\
		}					\
	}

/* Module parameter: when true, prefer VHT-only rate sets for VHT stations */
static bool minstrel_vht_only = true;
module_param(minstrel_vht_only, bool, 0644);
MODULE_PARM_DESC(minstrel_vht_only,
		 "Use only VHT rates when VHT is supported by sta.");

145 146 147 148
/*
 * To enable sufficiently targeted rate sampling, MCS rates are divided into
 * groups, based on the number of streams and flags (HT40, SGI) that they
 * use.
 *
 * Sortorder has to be fixed for GROUP_IDX macro to be applicable:
 * BW -> SGI -> #streams
 */
const struct mcs_group minstrel_mcs_groups[] = {
	/* HT groups: 20 MHz long GI */
	MCS_GROUP(1, 0, BW_20),
	MCS_GROUP(2, 0, BW_20),
	MCS_GROUP(3, 0, BW_20),

	/* HT groups: 20 MHz short GI */
	MCS_GROUP(1, 1, BW_20),
	MCS_GROUP(2, 1, BW_20),
	MCS_GROUP(3, 1, BW_20),

	/* HT groups: 40 MHz long GI */
	MCS_GROUP(1, 0, BW_40),
	MCS_GROUP(2, 0, BW_40),
	MCS_GROUP(3, 0, BW_40),

	/* HT groups: 40 MHz short GI */
	MCS_GROUP(1, 1, BW_40),
	MCS_GROUP(2, 1, BW_40),
	MCS_GROUP(3, 1, BW_40),

	/* legacy CCK rates sit between the HT and VHT groups */
	CCK_GROUP,

	VHT_GROUP(1, 0, BW_20),
	VHT_GROUP(2, 0, BW_20),
	VHT_GROUP(3, 0, BW_20),

	VHT_GROUP(1, 1, BW_20),
	VHT_GROUP(2, 1, BW_20),
	VHT_GROUP(3, 1, BW_20),

	VHT_GROUP(1, 0, BW_40),
	VHT_GROUP(2, 0, BW_40),
	VHT_GROUP(3, 0, BW_40),

	VHT_GROUP(1, 1, BW_40),
	VHT_GROUP(2, 1, BW_40),
	VHT_GROUP(3, 1, BW_40),

	VHT_GROUP(1, 0, BW_80),
	VHT_GROUP(2, 0, BW_80),
	VHT_GROUP(3, 0, BW_80),

	VHT_GROUP(1, 1, BW_80),
	VHT_GROUP(2, 1, BW_80),
	VHT_GROUP(3, 1, BW_80),
};
196

197
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
198

199 200 201
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);

202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240
/*
 * Some VHT MCSes are invalid (when Ndbps / Nes is not an integer)
 * e.g for MCS9@20MHzx1Nss: Ndbps=8x52*(5/6) Nes=1
 *
 * Returns the valid mcs map for struct minstrel_mcs_group_data.supported
 */
static u16
minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map)
{
	u16 invalid = 0;

	/* First mask out the MCS indexes that are invalid for this
	 * bandwidth / stream-count combination */
	switch (bw) {
	case BW_20:
		if (nss != 3 && nss != 6)
			invalid = BIT(9);
		break;
	case BW_80:
		if (nss == 3 || nss == 7)
			invalid = BIT(6);
		else if (nss == 6)
			invalid = BIT(9);
		break;
	default:
		WARN_ON(bw != BW_40);
		break;
	}

	/* Then mask out MCS indexes beyond what the peer advertises
	 * for this stream count in its VHT MCS map */
	switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) {
	case IEEE80211_VHT_MCS_SUPPORT_0_7:
		invalid |= 0x300;
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_8:
		invalid |= 0x200;
		break;
	case IEEE80211_VHT_MCS_SUPPORT_0_9:
		break;
	default:
		/* not supported at all for this stream count */
		invalid = 0x3ff;
	}

	return 0x3ff & ~invalid;
}

241 242 243 244 245 246
/*
 * Look up an MCS group index based on mac80211 rate information
 */
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
247
	return GROUP_IDX((rate->idx / 8) + 1,
248 249
			 !!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
			 !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
250 251
}

252 253 254 255 256 257 258 259 260
/* Look up a VHT MCS group index from mac80211 rate information */
static int
minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate)
{
	/* encode bandwidth as 0/1/2 for 20/40/80 MHz */
	int bw = !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) +
		 2 * !!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH);
	int sgi = !!(rate->flags & IEEE80211_TX_RC_SHORT_GI);

	return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate), sgi, bw);
}

261 262 263 264 265 266 267 268
/*
 * Map a mac80211 TX rate entry to its minstrel statistics slot.
 *
 * HT rates are resolved via their HT group and idx % 8, VHT rates via
 * the VHT group and decoded MCS index; anything else is treated as a
 * legacy CCK rate and matched against mp->cck_rates.
 */
static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
		      struct ieee80211_tx_rate *rate)
{
	int group, idx;

	if (rate->flags & IEEE80211_TX_RC_MCS) {
		group = minstrel_ht_get_group_idx(rate);
		/* 8 MCS indexes per stream count within a group */
		idx = rate->idx % 8;
	} else if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		group = minstrel_vht_get_group_idx(rate);
		idx = ieee80211_rate_get_vht_mcs(rate);
	} else {
		group = MINSTREL_CCK_GROUP;

		/* find the matching CCK rate index (0-3) */
		for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++)
			if (rate->idx == mp->cck_rates[idx])
				break;

		/* short preamble variants occupy slots 4-7 (see CCK_GROUP) */
		if (!(mi->supported[group] & BIT(idx)))
			idx += 4;
	}
	return &mi->groups[group].rates[idx];
}

287 288 289 290 291 292 293
/* Resolve a global rate index into its per-group statistics entry */
static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
{
	int group = index / MCS_GROUP_RATES;
	int offset = index % MCS_GROUP_RATES;

	return &mi->groups[group].rates[offset];
}

/*
 * Return current throughput based on the average A-MPDU length, taking into
 * account the expected number of retransmissions and their expected length
 *
 * @group: MCS group index, @rate: rate index within the group
 * @prob_ewma: success probability, scaled via MINSTREL_FRAC
 */
int
minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
		       int prob_ewma)
{
	unsigned int nsecs = 0;

	/* do not account throughput if success prob is below 10% */
	if (prob_ewma < MINSTREL_FRAC(10, 100))
		return 0;

	/* amortize the per-frame overhead over the average A-MPDU length;
	 * CCK rates do not aggregate, so they carry no such overhead here */
	if (group != MINSTREL_CCK_GROUP)
		nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);

	nsecs += minstrel_mcs_groups[group].duration[rate];

	/*
	 * For the throughput calculation, limit the probability value to 90% to
	 * account for collision related packet error rate fluctuation
	 * (prob is scaled - see MINSTREL_FRAC above)
	 */
	if (prob_ewma > MINSTREL_FRAC(90, 100))
		return MINSTREL_TRUNC(100000 * ((MINSTREL_FRAC(90, 100) * 1000)
								      / nsecs));
	else
		return MINSTREL_TRUNC(100000 * ((prob_ewma * 1000) / nsecs));
}

324 325 326 327 328 329 330 331
/*
 * Find & sort topmost throughput rates
 *
 * If multiple rates provide equal throughput the sorting is based on their
 * current success probability. Higher success probability is preferred among
 * MCS groups, CCK rates do not provide aggregation and are therefore at last.
 */
static void
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
			       u16 *tp_list)
{
	int cur_group, cur_idx, cur_tp_avg, cur_prob;
	int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
	/* insertion position, scanned upwards from the tail of tp_list */
	int j = MAX_THR_RATES;

	cur_group = index / MCS_GROUP_RATES;
	cur_idx = index  % MCS_GROUP_RATES;
	cur_prob = mi->groups[cur_group].rates[cur_idx].prob_ewma;
	cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);

	/* walk towards the head while the candidate beats the list entry
	 * (throughput first, success probability as tie breaker) */
	do {
		tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
		tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
		tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
						    tmp_prob);
		if (cur_tp_avg < tmp_tp_avg ||
		    (cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob))
			break;
		j--;
	} while (j > 0);

	/* shift weaker entries down to make room at position j */
	if (j < MAX_THR_RATES - 1) {
		memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
		       (MAX_THR_RATES - (j + 1))));
	}
	if (j < MAX_THR_RATES)
		tp_list[j] = index;
}

/*
 * Find and set the topmost probability rate per sta and per group
 */
static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mrs;
	int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
	int max_tp_group, cur_tp_avg, cur_group, cur_idx;
	int max_gpr_group, max_gpr_idx;
	int max_gpr_tp_avg, max_gpr_prob;

	cur_group = index / MCS_GROUP_RATES;
	cur_idx = index % MCS_GROUP_RATES;
	mg = &mi->groups[index / MCS_GROUP_RATES];
	mrs = &mg->rates[index % MCS_GROUP_RATES];

	/* stats of the current global max_prob_rate, for comparison */
	tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
	tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
	tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);

	/* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
	 * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */
	max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
	if((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) &&
	    (max_tp_group != MINSTREL_CCK_GROUP))
		return;

	/* stats of this group's current best probability rate */
	max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
	max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
	max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;

	if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
		/* above 75% success: prefer the higher-throughput candidate */
		cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
						    mrs->prob_ewma);
		if (cur_tp_avg > tmp_tp_avg)
			mi->max_prob_rate = index;

		max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
							max_gpr_idx,
							max_gpr_prob);
		if (cur_tp_avg > max_gpr_tp_avg)
			mg->max_group_prob_rate = index;
	} else {
		/* otherwise prefer the higher success probability */
		if (mrs->prob_ewma > tmp_prob)
			mi->max_prob_rate = index;
		if (mrs->prob_ewma > max_gpr_prob)
			mg->max_group_prob_rate = index;
	}
}


/*
 * Assign new rate set per sta and use CCK rates only if the fastest
 * rate (max_tp_rate[0]) is from CCK group. This prohibits such sorted
 * rate sets where MCS and CCK rates are mixed, because CCK rates can
 * not use aggregation.
 */
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
				 u16 tmp_mcs_tp_rate[MAX_THR_RATES],
				 u16 tmp_cck_tp_rate[MAX_THR_RATES])
{
	unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob;
	int i;

	/* throughput of the best CCK rate */
	tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
	tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);

	/* throughput of the best MCS rate */
	tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
	tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_ewma;
	tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);

	/* only if CCK beats MCS, merge the CCK rates into the MCS list */
	if (tmp_cck_tp > tmp_mcs_tp) {
		for(i = 0; i < MAX_THR_RATES; i++) {
			minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i],
						       tmp_mcs_tp_rate);
		}
	}

}

/*
 * Try to increase robustness of max_prob rate by decrease number of
 * streams if possible.
 */
static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	int tmp_max_streams, group, tmp_idx, tmp_prob;
	int tmp_tp = 0;

	/* stream count of the current best throughput rate is the cap */
	tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
			  MCS_GROUP_RATES].streams;
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
		mg = &mi->groups[group];
		/* skip unsupported groups; CCK rates cannot aggregate */
		if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
			continue;

		tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
		tmp_prob = mi->groups[group].rates[tmp_idx].prob_ewma;

		/* keep the best-throughput group probability rate among
		 * groups with fewer streams than the max throughput rate */
		if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
		   (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
				mi->max_prob_rate = mg->max_group_prob_rate;
				tmp_tp = minstrel_ht_get_tp_avg(mi, group,
								tmp_idx,
								tmp_prob);
		}
	}
}

482 483 484 485 486 487
/*
 * Update rate statistics and select new primary rates
 *
 * Rules for rate selection:
 *  - max_prob_rate must use only one stream, as a tradeoff between delivery
 *    probability and throughput during strong fluctuations
 *  - as long as the max prob rate has a probability of more than 75%, pick
 *    higher throughput rates, even if the probability is a bit lower
 */
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;
	struct minstrel_rate_stats *mrs;
	int group, i, j, cur_prob;
	u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
	u16 tmp_cck_tp_rate[MAX_THR_RATES], index;

	/* fold the sampled A-MPDU length into the running EWMA average */
	if (mi->ampdu_packets > 0) {
		mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
			MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
		mi->ampdu_len = 0;
		mi->ampdu_packets = 0;
	}

	mi->sample_slow = 0;
	mi->sample_count = 0;

	/* Initialize global rate indexes */
	for(j = 0; j < MAX_THR_RATES; j++){
		tmp_mcs_tp_rate[j] = 0;
		tmp_cck_tp_rate[j] = 0;
	}

	/* Find best rate sets within all MCS groups*/
	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {

		mg = &mi->groups[group];
		if (!mi->supported[group])
			continue;

		mi->sample_count++;

		/* (re)Initialize group rate indexes */
		for(j = 0; j < MAX_THR_RATES; j++)
			tmp_group_tp_rate[j] = group;

		for (i = 0; i < MCS_GROUP_RATES; i++) {
			if (!(mi->supported[group] & BIT(i)))
				continue;

			index = MCS_GROUP_RATES * group + i;

			/* refresh the per-rate statistics and force retry
			 * chain recalculation on next use */
			mrs = &mg->rates[i];
			mrs->retry_updated = false;
			minstrel_calc_rate_stats(mrs);
			cur_prob = mrs->prob_ewma;

			/* rates with zero estimated throughput are ignored */
			if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
				continue;

			/* Find max throughput rate set */
			if (group != MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_mcs_tp_rate);
			} else if (group == MINSTREL_CCK_GROUP) {
				minstrel_ht_sort_best_tp_rates(mi, index,
							       tmp_cck_tp_rate);
			}

			/* Find max throughput rate set within a group */
			minstrel_ht_sort_best_tp_rates(mi, index,
						       tmp_group_tp_rate);

			/* Find max probability rate per group and global */
			minstrel_ht_set_best_prob_rate(mi, index);
		}

		memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
		       sizeof(mg->max_group_tp_rate));
	}

	/* Assign new rate set per sta */
	minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate);
	memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));

	/* Try to increase robustness of max_prob_rate*/
	minstrel_ht_prob_rate_reduce_streams(mi);

	/* try to sample all available rates during each interval */
	mi->sample_count *= 8;

#ifdef CONFIG_MAC80211_DEBUGFS
	/* use fixed index if set */
	if (mp->fixed_rate_idx != -1) {
		for (i = 0; i < 4; i++)
			mi->max_tp_rate[i] = mp->fixed_rate_idx;
		mi->max_prob_rate = mp->fixed_rate_idx;
	}
#endif

	/* Reset update timer */
	mi->last_stats_update = jiffies;
}

static bool
588
minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct ieee80211_tx_rate *rate)
589
{
590
	if (rate->idx < 0)
591 592
		return false;

593
	if (!rate->count)
594 595
		return false;

596 597
	if (rate->flags & IEEE80211_TX_RC_MCS ||
	    rate->flags & IEEE80211_TX_RC_VHT_MCS)
598 599 600 601 602 603
		return true;

	return rate->idx == mp->cck_rates[0] ||
	       rate->idx == mp->cck_rates[1] ||
	       rate->idx == mp->cck_rates[2] ||
	       rate->idx == mp->cck_rates[3];
604 605 606
}

/*
 * Advance the sampling cursor to the next supported group, stepping the
 * per-group rate index (and sample table column when a group wraps).
 */
static void
minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi)
{
	struct minstrel_mcs_group_data *mg;

	for (;;) {
		/* round-robin over all MCS groups */
		mi->sample_group++;
		mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
		mg = &mi->groups[mi->sample_group];

		if (!mi->supported[mi->sample_group])
			continue;

		/* wrap rate index within the group; move to the next
		 * sample table column after a full pass */
		if (++mg->index >= MCS_GROUP_RATES) {
			mg->index = 0;
			if (++mg->column >= ARRAY_SIZE(sample_table))
				mg->column = 0;
		}
		break;
	}
}

/*
 * Replace *idx with the best throughput rate of a lower (or equal)
 * stream-count supported group, used when the current rate suffers
 * sudden heavy loss. @primary selects the first or second group
 * throughput rate.
 */
static void
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
{
	int group, orig_group;

	orig_group = group = *idx / MCS_GROUP_RATES;
	/* scan downwards through lower-indexed groups */
	while (group > 0) {
		group--;

		if (!mi->supported[group])
			continue;

		/* never move to a group with more spatial streams */
		if (minstrel_mcs_groups[group].streams >
		    minstrel_mcs_groups[orig_group].streams)
			continue;

		if (primary)
			*idx = mi->groups[group].max_group_tp_rate[0];
		else
			*idx = mi->groups[group].max_group_tp_rate[1];
		break;
	}
}

/*
 * Kick off a block-ack (A-MPDU) session for this frame's TID if one is
 * not already running. Voice traffic, non-QoS data and EAPOL frames
 * are left alone.
 */
static void
minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	u16 tid;

	/* skip voice queue traffic */
	if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
		return;

	/* aggregation requires QoS data frames */
	if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
		return;

	/* never aggregate EAPOL (authentication) frames */
	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
		return;

	tid = ieee80211_get_tid(hdr);
	/* session already established for this TID */
	if (likely(sta->ampdu_mlme.tid_tx[tid]))
		return;

	ieee80211_start_tx_ba_session(pubsta, tid, 0);
}

/*
 * TX status callback: account attempts/successes for each rate in the
 * status chain, trigger spatial-multiplexing downgrade on sudden heavy
 * loss, and periodically refresh statistics and the rate table.
 */
static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
                      void *priv_sta, struct ieee80211_tx_status *st)
{
	struct ieee80211_tx_info *info = st->info;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_tx_rate *ar = info->status.rates;
	struct minstrel_rate_stats *rate, *rate2;
	struct minstrel_priv *mp = priv;
	bool last, update = false;
	int i;

	/* non-HT stations are handled by the legacy minstrel algorithm */
	if (!msp->is_ht)
		return mac80211_minstrel.tx_status_ext(priv, sband,
						       &msp->legacy, st);

	/* This packet was aggregated but doesn't carry status info */
	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
		return;

	/* treat a non-aggregated frame as an A-MPDU of length 1 */
	if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
		info->status.ampdu_ack_len =
			(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
		info->status.ampdu_len = 1;
	}

	mi->ampdu_packets++;
	mi->ampdu_len += info->status.ampdu_len;

	/* replenish the sampling budget once the previous one is used up */
	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
		mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
		mi->sample_tries = 1;
		mi->sample_count--;
	}

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
		mi->sample_packets += info->status.ampdu_len;

	/* walk the retry chain; successes are credited to the last
	 * (i.e. finally used) rate, attempts to every rate tried */
	last = !minstrel_ht_txstat_valid(mp, &ar[0]);
	for (i = 0; !last; i++) {
		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
		       !minstrel_ht_txstat_valid(mp, &ar[i + 1]);

		rate = minstrel_ht_get_stats(mp, mi, &ar[i]);

		if (last)
			rate->success += info->status.ampdu_ack_len;

		rate->attempts += ar[i].count * info->status.ampdu_len;
	}

	/*
	 * check for sudden death of spatial multiplexing,
	 * downgrade to a lower number of streams if necessary.
	 */
	rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
	if (rate->attempts > 30 &&
	    MINSTREL_FRAC(rate->success, rate->attempts) <
	    MINSTREL_FRAC(20, 100)) {
		minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
		update = true;
	}

	rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
	if (rate2->attempts > 30 &&
	    MINSTREL_FRAC(rate2->success, rate2->attempts) <
	    MINSTREL_FRAC(20, 100)) {
		minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
		update = true;
	}

	/* refresh statistics after half an update interval has passed */
	if (time_after(jiffies, mi->last_stats_update +
				(mp->update_interval / 2 * HZ) / 1000)) {
		update = true;
		minstrel_ht_update_stats(mp, mi);
	}

	if (update)
		minstrel_ht_update_rates(mp, mi);
}

/*
 * Compute the retry counts (with and without RTS/CTS) for a rate, by
 * fitting as many tries as possible into mp->segment_size worth of
 * airtime, including contention window backoff.
 */
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                         int index)
{
	struct minstrel_rate_stats *mrs;
	const struct mcs_group *group;
	unsigned int tx_time, tx_time_rtscts, tx_time_data;
	unsigned int cw = mp->cw_min;
	unsigned int ctime = 0;
	unsigned int t_slot = 9; /* FIXME */
	unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
	unsigned int overhead = 0, overhead_rtscts = 0;

	/* rates below 10% success get a single try only */
	mrs = minstrel_get_ratestats(mi, index);
	if (mrs->prob_ewma < MINSTREL_FRAC(1, 10)) {
		mrs->retry_count = 1;
		mrs->retry_count_rtscts = 1;
		return;
	}

	mrs->retry_count = 2;
	mrs->retry_count_rtscts = 2;
	mrs->retry_updated = true;

	group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;

	/* Contention time for first 2 tries */
	ctime = (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);
	ctime += (t_slot * cw) >> 1;
	cw = min((cw << 1) | 1, mp->cw_max);

	/* CCK rates carry no per-A-MPDU overhead here */
	if (index / MCS_GROUP_RATES != MINSTREL_CCK_GROUP) {
		overhead = mi->overhead;
		overhead_rtscts = mi->overhead_rtscts;
	}

	/* Total TX time for data and Contention after first 2 tries */
	tx_time = ctime + 2 * (overhead + tx_time_data);
	tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);

	/* See how many more tries we can fit inside segment size */
	do {
		/* Contention time for this try */
		ctime = (t_slot * cw) >> 1;
		cw = min((cw << 1) | 1, mp->cw_max);

		/* Total TX time after this try */
		tx_time += ctime + overhead + tx_time_data;
		tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;

		if (tx_time_rtscts < mp->segment_size)
			mrs->retry_count_rtscts++;
	} while ((tx_time < mp->segment_size) &&
	         (++mrs->retry_count < mp->max_retry));
}


/*
 * Fill one slot of the mac80211 rate table from a minstrel rate index:
 * translate the group/rate into a hardware rate index, set retry counts
 * and enable RTS/CTS where appropriate.
 */
static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                     struct ieee80211_sta_rates *ratetbl, int offset, int index)
{
	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
	struct minstrel_rate_stats *mrs;
	u8 idx;
	u16 flags = group->flags;

	mrs = minstrel_get_ratestats(mi, index);
	if (!mrs->retry_updated)
		minstrel_calc_retransmit(mp, mi, index);

	/* use a conservative fixed retry count for unreliable rates */
	if (mrs->prob_ewma < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
		ratetbl->rate[offset].count = 2;
		ratetbl->rate[offset].count_rts = 2;
		ratetbl->rate[offset].count_cts = 2;
	} else {
		ratetbl->rate[offset].count = mrs->retry_count;
		ratetbl->rate[offset].count_cts = mrs->retry_count;
		ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts;
	}

	/* translate the minstrel index into a mac80211 rate index:
	 * CCK uses the band rate index, VHT packs nss and MCS,
	 * HT packs stream count * 8 + MCS */
	if (index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP)
		idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
	else if (flags & IEEE80211_TX_RC_VHT_MCS)
		idx = ((group->streams - 1) << 4) |
		      ((index % MCS_GROUP_RATES) & 0xF);
	else
		idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;

	/* enable RTS/CTS if needed:
	 *  - if station is in dynamic SMPS (and streams > 1)
	 *  - for fallback rates, to increase chances of getting through
	 */
	if (offset > 0 ||
	    (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
	     group->streams > 1)) {
		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
		flags |= IEEE80211_TX_RC_USE_RTS_CTS;
	}

	ratetbl->rate[offset].idx = idx;
	ratetbl->rate[offset].flags = flags;
}

863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915
/* Return the EWMA success probability for a global rate index */
static inline int
minstrel_ht_get_prob_ewma(struct minstrel_ht_sta *mi, int rate)
{
	int group = rate / MCS_GROUP_RATES;
	int idx = rate % MCS_GROUP_RATES;

	return mi->groups[group].rates[idx].prob_ewma;
}

/*
 * Choose an A-MSDU length limit for max_rc_amsdu_len, based on the
 * speed and reliability of max_prob_rate. Returns 0 for "unlimited",
 * 1 to effectively disable A-MSDU, otherwise a byte limit.
 */
static int
minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
{
	int group = mi->max_prob_rate / MCS_GROUP_RATES;
	const struct mcs_group *g = &minstrel_mcs_groups[group];
	int rate = mi->max_prob_rate % MCS_GROUP_RATES;

	/* Disable A-MSDU if max_prob_rate is bad */
	if (mi->groups[group].rates[rate].prob_ewma < MINSTREL_FRAC(50, 100))
		return 1;

	/* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
	if (g->duration[rate] > MCS_DURATION(1, 0, 52))
		return 500;

	/*
	 * If the rate is slower than single-stream MCS4, limit A-MSDU to usual
	 * data packet size
	 */
	if (g->duration[rate] > MCS_DURATION(1, 0, 104))
		return 1600;

	/*
	 * If the rate is slower than single-stream MCS7, or if the max throughput
	 * rate success probability is less than 75%, limit A-MSDU to twice the usual
	 * data packet size
	 */
	if (g->duration[rate] > MCS_DURATION(1, 0, 260) ||
	    (minstrel_ht_get_prob_ewma(mi, mi->max_tp_rate[0]) <
	     MINSTREL_FRAC(75, 100)))
		return 3200;

	/*
	 * HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes.
	 * Since aggregation sessions are started/stopped without txq flush, use
	 * the limit here to avoid the complexity of having to de-aggregate
	 * packets in the queue.
	 */
	if (!mi->sta->vht_cap.vht_supported)
		return IEEE80211_MAX_MPDU_LEN_HT_BA;

	/* unlimited */
	return 0;
}

916 917 918 919 920 921 922 923
/*
 * Build and publish a fresh rate table for the station. Depending on
 * hw->max_rates the table is [tp0, tp1, prob], [tp0, prob] or [tp0],
 * terminated by an idx of -1.
 */
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
	struct ieee80211_sta_rates *rates;
	int i = 0;

	/* GFP_ATOMIC: may be called from the TX status path */
	rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
	if (!rates)
		return;

	/* Start with max_tp_rate[0] */
	minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);

	if (mp->hw->max_rates >= 3) {
		/* At least 3 tx rates supported, use max_tp_rate[1] next */
		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
	}

	if (mp->hw->max_rates >= 2) {
		/* At least 2 tx rates supported, use max_prob_rate next */
		minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
	}

	mi->sta->max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi);
	rates->rate[i].idx = -1;
	rate_control_set_rates(mp->hw, mi->sta, rates);
}

/* Look up the estimated TX duration for a global rate index */
static inline int
minstrel_get_duration(int index)
{
	int grp = index / MCS_GROUP_RATES;
	int offset = index % MCS_GROUP_RATES;

	return minstrel_mcs_groups[grp].duration[offset];
}

static int
minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
955
	struct minstrel_rate_stats *mrs;
956
	struct minstrel_mcs_group_data *mg;
957
	unsigned int sample_dur, sample_group, cur_max_tp_streams;
958
	int tp_rate1, tp_rate2;
959 960 961 962 963 964 965 966 967 968
	int sample_idx = 0;

	if (mi->sample_wait > 0) {
		mi->sample_wait--;
		return -1;
	}

	if (!mi->sample_tries)
		return -1;

969 970
	sample_group = mi->sample_group;
	mg = &mi->groups[sample_group];
971
	sample_idx = sample_table[mg->column][mg->index];
972
	minstrel_set_next_sample_idx(mi);
973

974
	if (!(mi->supported[sample_group] & BIT(sample_idx)))
975 976
		return -1;

977
	mrs = &mg->rates[sample_idx];
978
	sample_idx += sample_group * MCS_GROUP_RATES;
979

980 981 982 983 984 985 986 987 988 989
	/* Set tp_rate1, tp_rate2 to the highest / second highest max_tp_rate */
	if (minstrel_get_duration(mi->max_tp_rate[0]) >
	    minstrel_get_duration(mi->max_tp_rate[1])) {
		tp_rate1 = mi->max_tp_rate[1];
		tp_rate2 = mi->max_tp_rate[0];
	} else {
		tp_rate1 = mi->max_tp_rate[0];
		tp_rate2 = mi->max_tp_rate[1];
	}

990 991
	/*
	 * Sampling might add some overhead (RTS, no aggregation)
992 993
	 * to the frame. Hence, don't use sampling for the highest currently
	 * used highest throughput or probability rate.
994
	 */
995
	if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate)
996
		return -1;
997

998
	/*
999 1000
	 * Do not sample if the probability is already higher than 95%
	 * to avoid wasting airtime.
1001
	 */
1002
	if (mrs->prob_ewma > MINSTREL_FRAC(95, 100))
1003
		return -1;
1004 1005 1006 1007 1008

	/*
	 * Make sure that lower rates get sampled only occasionally,
	 * if the link is working perfectly.
	 */
1009

1010
	cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 /
1011
		MCS_GROUP_RATES].streams;
1012
	sample_dur = minstrel_get_duration(sample_idx);
1013
	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
1014
	    (cur_max_tp_streams - 1 <
1015 1016
	     minstrel_mcs_groups[sample_group].streams ||
	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
1017
		if (mrs->sample_skipped < 20)
1018
			return -1;
1019 1020

		if (mi->sample_slow++ > 2)
1021
			return -1;
1022
	}
1023
	mi->sample_tries--;
1024 1025 1026 1027 1028 1029 1030 1031

	return sample_idx;
}

static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                     struct ieee80211_tx_rate_control *txrc)
{
1032
	const struct mcs_group *sample_group;
1033
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
1034
	struct ieee80211_tx_rate *rate = &info->status.rates[0];
1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct minstrel_priv *mp = priv;
	int sample_idx;

	if (rate_control_send_low(sta, priv_sta, txrc))
		return;

	if (!msp->is_ht)
		return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);

1046 1047 1048 1049
	if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
	    mi->max_prob_rate / MCS_GROUP_RATES != MINSTREL_CCK_GROUP)
		minstrel_aggr_check(sta, txrc->skb);

1050
	info->flags |= mi->tx_flags;
1051

1052 1053 1054 1055 1056
#ifdef CONFIG_MAC80211_DEBUGFS
	if (mp->fixed_rate_idx != -1)
		return;
#endif

1057 1058
	/* Don't use EAPOL frames for sampling on non-mrr hw */
	if (mp->hw->max_rates == 1 &&
1059
	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
1060 1061 1062
		sample_idx = -1;
	else
		sample_idx = minstrel_get_sample_rate(mp, mi);
1063

1064 1065 1066 1067 1068 1069 1070
	mi->total_packets++;

	/* wraparound */
	if (mi->total_packets == ~0) {
		mi->total_packets = 0;
		mi->sample_packets = 0;
	}
1071 1072 1073 1074 1075 1076

	if (sample_idx < 0)
		return;

	sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
	info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1077 1078 1079 1080 1081
	rate->count = 1;

	if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
		int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
		rate->idx = mp->cck_rates[idx];
1082 1083 1084
	} else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
		ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES,
				       sample_group->streams);
1085 1086 1087
	} else {
		rate->idx = sample_idx % MCS_GROUP_RATES +
			    (sample_group->streams - 1) * 8;
1088 1089
	}

1090
	rate->flags = sample_group->flags;
1091 1092
}

1093 1094 1095 1096 1097 1098 1099
static void
minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
		       struct ieee80211_supported_band *sband,
		       struct ieee80211_sta *sta)
{
	int i;

1100
	if (sband->band != NL80211_BAND_2GHZ)
1101 1102
		return;

1103
	if (!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
1104 1105
		return;

1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116
	mi->cck_supported = 0;
	mi->cck_supported_short = 0;
	for (i = 0; i < 4; i++) {
		if (!rate_supported(sta, sband->band, mp->cck_rates[i]))
			continue;

		mi->cck_supported |= BIT(i);
		if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
			mi->cck_supported_short |= BIT(i);
	}

1117
	mi->supported[MINSTREL_CCK_GROUP] = mi->cck_supported;
1118 1119
}

1120 1121
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
1122
			struct cfg80211_chan_def *chandef,
1123
                        struct ieee80211_sta *sta, void *priv_sta)
1124 1125 1126 1127 1128
{
	struct minstrel_priv *mp = priv;
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
	struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
1129
	u16 ht_cap = sta->ht_cap.cap;
1130
	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
1131
	struct sta_info *sinfo = container_of(sta, struct sta_info, sta);
1132
	int use_vht;
1133
	int n_supported = 0;
1134 1135 1136
	int ack_dur;
	int stbc;
	int i;
1137
	bool ldpc;
1138 1139

	/* fall back to the old minstrel for legacy stations */
1140 1141
	if (!sta->ht_cap.ht_supported)
		goto use_legacy;
1142

1143
	BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);
1144

1145 1146 1147
	if (vht_cap->vht_supported)
		use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0);
	else
1148
		use_vht = 0;
1149

1150 1151
	msp->is_ht = true;
	memset(mi, 0, sizeof(*mi));
1152 1153

	mi->sta = sta;
1154
	mi->last_stats_update = jiffies;
1155

1156 1157 1158
	ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
	mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
	mi->overhead += ack_dur;
1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172
	mi->overhead_rtscts = mi->overhead + 2 * ack_dur;

	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);

	/* When using MRR, sample more on the first attempt, without delay */
	if (mp->has_mrr) {
		mi->sample_count = 16;
		mi->sample_wait = 0;
	} else {
		mi->sample_count = 8;
		mi->sample_wait = 8;
	}
	mi->sample_tries = 4;

1173
	if (!use_vht) {
1174
		stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
1175
			IEEE80211_HT_CAP_RX_STBC_SHIFT;
1176

1177 1178 1179 1180 1181 1182
		ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING;
	} else {
		stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >>
			IEEE80211_VHT_CAP_RXSTBC_SHIFT;

		ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC;
1183
	}
1184

1185 1186 1187 1188
	mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
	if (ldpc)
		mi->tx_flags |= IEEE80211_TX_CTL_LDPC;

1189
	for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
1190
		u32 gflags = minstrel_mcs_groups[i].flags;
1191
		int bw, nss;
1192

1193
		mi->supported[i] = 0;
1194 1195 1196 1197 1198
		if (i == MINSTREL_CCK_GROUP) {
			minstrel_ht_update_cck(mp, mi, sband, sta);
			continue;
		}

1199 1200
		if (gflags & IEEE80211_TX_RC_SHORT_GI) {
			if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
1201
				if (!(ht_cap & IEEE80211_HT_CAP_SGI_40))
1202 1203
					continue;
			} else {
1204
				if (!(ht_cap & IEEE80211_HT_CAP_SGI_20))
1205 1206
					continue;
			}
1207 1208
		}

1209
		if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
1210
		    sta->bandwidth < IEEE80211_STA_RX_BW_40)
1211 1212
			continue;

1213 1214
		nss = minstrel_mcs_groups[i].streams;

1215
		/* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
1216 1217 1218 1219 1220
		if (sta->smps_mode == IEEE80211_SMPS_STATIC && nss > 1)
			continue;

		/* HT rate */
		if (gflags & IEEE80211_TX_RC_MCS) {
1221
			if (use_vht && minstrel_vht_only)
1222
				continue;
1223

1224 1225
			mi->supported[i] = mcs->rx_mask[nss - 1];
			if (mi->supported[i])
1226 1227 1228 1229 1230 1231 1232 1233
				n_supported++;
			continue;
		}

		/* VHT rate */
		if (!vht_cap->vht_supported ||
		    WARN_ON(!(gflags & IEEE80211_TX_RC_VHT_MCS)) ||
		    WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH))
1234 1235
			continue;

1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250
		if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) {
			if (sta->bandwidth < IEEE80211_STA_RX_BW_80 ||
			    ((gflags & IEEE80211_TX_RC_SHORT_GI) &&
			     !(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) {
				continue;
			}
		}

		if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			bw = BW_40;
		else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			bw = BW_80;
		else
			bw = BW_20;

1251
		mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss,
1252
				vht_cap->vht_mcs.tx_mcs_map);
1253

1254
		if (mi->supported[i])
1255
			n_supported++;
1256
	}
1257 1258 1259 1260

	if (!n_supported)
		goto use_legacy;

1261 1262 1263
	if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE))
		mi->cck_supported_short |= mi->cck_supported_short << 4;

1264
	/* create an initial rate table with the lowest supported rates */
1265
	minstrel_ht_update_stats(mp, mi);
1266
	minstrel_ht_update_rates(mp, mi);
1267

1268 1269 1270 1271 1272 1273 1274
	return;

use_legacy:
	msp->is_ht = false;
	memset(&msp->legacy, 0, sizeof(msp->legacy));
	msp->legacy.r = msp->ratelist;
	msp->legacy.sample_table = msp->sample_table;
1275 1276
	return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
					   &msp->legacy);
1277 1278 1279 1280
}

/* rate_control_ops::rate_init - initial capability setup for a station */
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
		      struct cfg80211_chan_def *chandef,
		      struct ieee80211_sta *sta, void *priv_sta)
{
	minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}

static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
1289
			struct cfg80211_chan_def *chandef,
1290
                        struct ieee80211_sta *sta, void *priv_sta,
1291
                        u32 changed)
1292
{
1293
	minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305
}

/*
 * Allocate per-station rate control state, including the legacy rate
 * list and sample table used when falling back to non-HT minstrel.
 * Returns NULL on allocation failure.
 */
static void *
minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
	struct ieee80211_supported_band *sband;
	struct minstrel_ht_sta_priv *msp;
	struct minstrel_priv *mp = priv;
	struct ieee80211_hw *hw = mp->hw;
	int max_rates = 0;
	int i;

	/* size the legacy tables for the band with the most bitrates */
	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		sband = hw->wiphy->bands[i];
		if (sband && sband->n_bitrates > max_rates)
			max_rates = sband->n_bitrates;
	}

	msp = kzalloc(sizeof(*msp), gfp);
	if (!msp)
		return NULL;

	msp->ratelist = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
	if (!msp->ratelist)
		goto error;

	msp->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
	if (!msp->sample_table)
		goto error1;

	return msp;

error1:
	kfree(msp->ratelist);
error:
	kfree(msp);
	return NULL;
}

static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
	/* Release everything allocated in minstrel_ht_alloc_sta() */
	struct minstrel_ht_sta_priv *msp = priv_sta;

	kfree(msp->ratelist);
	kfree(msp->sample_table);
	kfree(msp);
}

1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373
static void
minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
{
	static const int bitrates[4] = { 10, 20, 55, 110 };
	struct ieee80211_supported_band *sband;
	u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
	int i, j;

	sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
	if (!sband)
		return;

	for (i = 0; i < sband->n_bitrates; i++) {
		struct ieee80211_rate *rate = &sband->bitrates[i];

		if (rate->flags & IEEE80211_RATE_ERP_G)
			continue;

		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
			continue;

		for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
			if (rate->bitrate != bitrates[j])
				continue;

			mp->cck_rates[j] = i;
			break;
		}
	}
}

1374 1375 1376
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418
	struct minstrel_priv *mp;

	mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
	if (!mp)
		return NULL;

	/* contention window settings
	 * Just an approximation. Using the per-queue values would complicate
	 * the calculations and is probably unnecessary */
	mp->cw_min = 15;
	mp->cw_max = 1023;

	/* number of packets (in %) to use for sampling other rates
	 * sample less often for non-mrr packets, because the overhead
	 * is much higher than with mrr */
	mp->lookaround_rate = 5;
	mp->lookaround_rate_mrr = 10;

	/* maximum time that the hw is allowed to stay in one MRR segment */
	mp->segment_size = 6000;

	if (hw->max_rate_tries > 0)
		mp->max_retry = hw->max_rate_tries;
	else
		/* safe default, does not necessarily have to match hw properties */
		mp->max_retry = 7;

	if (hw->max_rates >= 4)
		mp->has_mrr = true;

	mp->hw = hw;
	mp->update_interval = 100;

#ifdef CONFIG_MAC80211_DEBUGFS
	mp->fixed_rate_idx = (u32) -1;
	debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
			   &mp->fixed_rate_idx);
#endif

	minstrel_ht_init_cck_rates(mp);

	return mp;
1419 1420 1421 1422 1423
}

/* rate_control_ops::free - release the per-hw private data */
static void
minstrel_ht_free(void *priv)
{
	kfree(priv);
}

1427 1428 1429 1430
static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
{
	struct minstrel_ht_sta_priv *msp = priv_sta;
	struct minstrel_ht_sta *mi = &msp->ht;
1431
	int i, j, prob, tp_avg;
1432 1433 1434 1435

	if (!msp->is_ht)
		return mac80211_minstrel.get_expected_throughput(priv_sta);

1436 1437
	i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
	j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
1438
	prob = mi->groups[i].rates[j].prob_ewma;
1439

1440
	/* convert tp_avg from pkt per second in kbps */
1441 1442
	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
	tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
1443 1444

	return tp_avg;
1445 1446
}

1447
static const struct rate_control_ops mac80211_minstrel_ht = {
1448
	.name = "minstrel_ht",
1449
	.tx_status_ext = minstrel_ht_tx_status,
1450 1451 1452 1453 1454 1455 1456 1457 1458 1459
	.get_rate = minstrel_ht_get_rate,
	.rate_init = minstrel_ht_rate_init,
	.rate_update = minstrel_ht_rate_update,
	.alloc_sta = minstrel_ht_alloc_sta,
	.free_sta = minstrel_ht_free_sta,
	.alloc = minstrel_ht_alloc,
	.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
	.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
#endif
1460
	.get_expected_throughput = minstrel_ht_get_expected_throughput,
1461 1462 1463
};


1464
static void __init init_sample_table(void)
1465 1466 1467 1468 1469 1470
{
	int col, i, new_idx;
	u8 rnd[MCS_GROUP_RATES];

	memset(sample_table, 0xff, sizeof(sample_table));
	for (col = 0; col < SAMPLE_COLUMNS; col++) {
1471
		prandom_bytes(rnd, sizeof(rnd));
1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482
		for (i = 0; i < MCS_GROUP_RATES; i++) {
			new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
			while (sample_table[col][new_idx] != 0xff)
				new_idx = (new_idx + 1) % MCS_GROUP_RATES;

			sample_table[col][new_idx] = i;
		}
	}
}

int __init
1483
rc80211_minstrel_init(void)
1484 1485 1486 1487 1488 1489
{
	init_sample_table();
	return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}

void
1490
rc80211_minstrel_exit(void)
1491 1492 1493
{
	ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}