commit 7bc04215 authored by Felix Fietkau, committed by Kalle Valo

mt76: add driver code for MT76x2e

MT76x2e is a 2x2 PCIe 802.11ac chipset by MediaTek. This driver has full
support for AP, station, ad-hoc, mesh and monitor mode.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent 17f1de56
@@ -11,4 +11,5 @@ config WLAN_VENDOR_MEDIATEK
if WLAN_VENDOR_MEDIATEK
source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
source "drivers/net/wireless/mediatek/mt76/Kconfig"
endif # WLAN_VENDOR_MEDIATEK
obj-$(CONFIG_MT7601U) += mt7601u/
obj-$(CONFIG_MT76_CORE) += mt76/
config MT76_CORE
tristate
config MT76x2E
tristate "MediaTek MT76x2E (PCIe) support"
select MT76_CORE
depends on MAC80211
depends on PCI
---help---
This adds support for MT7612/MT7602/MT7662-based wireless PCIe devices.
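With this entry in place, the driver builds as a module by enabling the new symbol in the kernel configuration; the core library is pulled in automatically through the select on MT76_CORE:
CONFIG_MT76x2E=m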
obj-$(CONFIG_MT76_CORE) += mt76.o
obj-$(CONFIG_MT76x2E) += mt76x2e.o
mt76-y := \
mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o
CFLAGS_trace.o := -I$(src)
mt76x2e-y := \
mt76x2_pci.o mt76x2_dma.o \
mt76x2_main.o mt76x2_init.o mt76x2_debugfs.o mt76x2_tx.o \
mt76x2_core.o mt76x2_mac.o mt76x2_eeprom.o mt76x2_mcu.o mt76x2_phy.o \
mt76x2_dfs.o mt76x2_trace.o
CFLAGS_mt76x2_trace.o := -I$(src)
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __MT76x2_H
#define __MT76x2_H
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/bitops.h>
#include <linux/kfifo.h>
#define MT7662_FIRMWARE "mt7662.bin"
#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
#define MT7662_EEPROM_SIZE 512
#define MT76x2_RX_RING_SIZE 256
#define MT_RX_HEADROOM 32
#define MT_MAX_CHAINS 2
#define MT_CALIBRATE_INTERVAL HZ
#include "mt76.h"
#include "mt76x2_regs.h"
#include "mt76x2_mac.h"
#include "mt76x2_dfs.h"
struct mt76x2_mcu {
struct mutex mutex;
wait_queue_head_t wait;
struct sk_buff_head res_q;
u32 msg_seq;
};
struct mt76x2_rx_freq_cal {
s8 high_gain[MT_MAX_CHAINS];
s8 rssi_offset[MT_MAX_CHAINS];
s8 lna_gain;
u32 mcu_gain;
};
struct mt76x2_calibration {
struct mt76x2_rx_freq_cal rx;
u8 agc_gain_init[MT_MAX_CHAINS];
u8 agc_gain_cur[MT_MAX_CHAINS];
int avg_rssi[MT_MAX_CHAINS];
int avg_rssi_all;
s8 agc_gain_adjust;
s8 low_gain;
u8 temp;
bool init_cal_done;
bool tssi_cal_done;
bool tssi_comp_pending;
bool dpd_cal_done;
bool channel_cal_done;
};
struct mt76x2_dev {
struct mt76_dev mt76; /* must be first */
struct mac_address macaddr_list[8];
struct mutex mutex;
const u16 *beacon_offsets;
unsigned long wcid_mask[128 / BITS_PER_LONG];
int txpower_conf;
int txpower_cur;
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x2_tx_status);
struct mt76x2_mcu mcu;
struct sk_buff *rx_head;
struct tasklet_struct tx_tasklet;
struct tasklet_struct pre_tbtt_tasklet;
struct delayed_work cal_work;
struct delayed_work mac_work;
u32 aggr_stats[32];
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[128];
spinlock_t irq_lock;
u32 irqmask;
struct sk_buff *beacons[8];
u8 beacon_mask;
u8 beacon_data_mask;
u32 rev;
u32 rxfilter;
u16 chainmask;
struct mt76x2_calibration cal;
s8 target_power;
s8 target_power_delta[2];
struct mt76_rate_power rate_power;
bool enable_tpc;
u8 coverage_class;
u8 slottime;
struct mt76x2_dfs_pattern_detector dfs_pd;
};
struct mt76x2_vif {
u8 idx;
struct mt76_wcid group_wcid;
};
struct mt76x2_sta {
struct mt76_wcid wcid; /* must be first */
struct mt76x2_tx_status status;
int n_frames;
};
static inline bool is_mt7612(struct mt76x2_dev *dev)
{
return (dev->rev >> 16) == 0x7612;
}
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set);
static inline void mt76x2_irq_enable(struct mt76x2_dev *dev, u32 mask)
{
mt76x2_set_irq_mask(dev, 0, mask);
}
static inline void mt76x2_irq_disable(struct mt76x2_dev *dev, u32 mask)
{
mt76x2_set_irq_mask(dev, mask, 0);
}
extern const struct ieee80211_ops mt76x2_ops;
struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x2_dev *dev);
void mt76x2_init_debugfs(struct mt76x2_dev *dev);
irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance);
void mt76x2_phy_power_on(struct mt76x2_dev *dev);
int mt76x2_init_hardware(struct mt76x2_dev *dev);
void mt76x2_stop_hardware(struct mt76x2_dev *dev);
int mt76x2_eeprom_init(struct mt76x2_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x2_dev *dev, int channel);
void mt76x2_set_tx_ackto(struct mt76x2_dev *dev);
int mt76x2_phy_start(struct mt76x2_dev *dev);
int mt76x2_phy_set_channel(struct mt76x2_dev *dev,
struct cfg80211_chan_def *chandef);
int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain);
void mt76x2_phy_calibrate(struct work_struct *work);
void mt76x2_phy_set_txpower(struct mt76x2_dev *dev);
int mt76x2_mcu_init(struct mt76x2_dev *dev);
int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
u8 bw_index, bool scan);
int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on);
int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel);
int mt76x2_mcu_cleanup(struct mt76x2_dev *dev);
int mt76x2_dma_init(struct mt76x2_dev *dev);
void mt76x2_dma_cleanup(struct mt76x2_dev *dev);
void mt76x2_cleanup(struct mt76x2_dev *dev);
int mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq);
void mt76x2_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
void mt76x2_tx_complete(struct mt76x2_dev *dev, struct sk_buff *skb);
int mt76x2_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
void mt76x2_pre_tbtt_tasklet(unsigned long arg);
void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt76x2_update_channel(struct mt76_dev *mdev);
s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
const struct ieee80211_tx_rate *rate);
s8 mt76x2_tx_get_txpwr_adj(struct mt76x2_dev *dev, s8 txpwr, s8 max_txpwr_adj);
void mt76x2_tx_set_txpwr_auto(struct mt76x2_dev *dev, s8 txpwr);
#endif
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_trace.h"
void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set)
{
unsigned long flags;
spin_lock_irqsave(&dev->irq_lock, flags);
dev->irqmask &= ~clear;
dev->irqmask |= set;
mt76_wr(dev, MT_INT_MASK_CSR, dev->irqmask);
spin_unlock_irqrestore(&dev->irq_lock, flags);
}
void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
mt76x2_irq_enable(dev, MT_INT_RX_DONE(q));
}
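/*
 * Top half of the PCIe interrupt: acknowledge all pending sources first,
 * then dispatch. TX-done and GP-timer events are masked and deferred to
 * tasklets, RX-done is masked and deferred to NAPI; each bottom half
 * re-enables its interrupt once it has drained the corresponding queue.
 */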
irqreturn_t mt76x2_irq_handler(int irq, void *dev_instance)
{
struct mt76x2_dev *dev = dev_instance;
u32 intr;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
return IRQ_NONE;
trace_dev_irq(dev, intr, dev->irqmask);
intr &= dev->irqmask;
if (intr & MT_INT_TX_DONE_ALL) {
mt76x2_irq_disable(dev, MT_INT_TX_DONE_ALL);
tasklet_schedule(&dev->tx_tasklet);
}
if (intr & MT_INT_RX_DONE(0)) {
mt76x2_irq_disable(dev, MT_INT_RX_DONE(0));
napi_schedule(&dev->mt76.napi[0]);
}
if (intr & MT_INT_RX_DONE(1)) {
mt76x2_irq_disable(dev, MT_INT_RX_DONE(1));
napi_schedule(&dev->mt76.napi[1]);
}
if (intr & MT_INT_PRE_TBTT)
tasklet_schedule(&dev->pre_tbtt_tasklet);
/* send buffered multicast frames now */
if (intr & MT_INT_TBTT)
mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
if (intr & MT_INT_TX_STAT) {
mt76x2_mac_poll_tx_status(dev, true);
tasklet_schedule(&dev->tx_tasklet);
}
if (intr & MT_INT_GPTIMER) {
mt76x2_irq_disable(dev, MT_INT_GPTIMER);
tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
}
return IRQ_HANDLED;
}
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/debugfs.h>
#include "mt76x2.h"
static int
mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt76x2_dev *dev = file->private;
int i, j;
for (i = 0; i < 4; i++) {
seq_puts(file, "Length: ");
for (j = 0; j < 8; j++)
seq_printf(file, "%8d | ", i * 8 + j + 1);
seq_puts(file, "\n");
seq_puts(file, "Count: ");
for (j = 0; j < 8; j++)
seq_printf(file, "%8d | ", dev->aggr_stats[i * 8 + j]);
seq_puts(file, "\n");
seq_puts(file, "--------");
for (j = 0; j < 8; j++)
seq_puts(file, "-----------");
seq_puts(file, "\n");
}
return 0;
}
static int
mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
{
return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
}
static void
seq_puts_array(struct seq_file *file, const char *str, s8 *val, int len)
{
int i;
seq_printf(file, "%10s:", str);
for (i = 0; i < len; i++)
seq_printf(file, " %2d", val[i]);
seq_puts(file, "\n");
}
static int read_txpower(struct seq_file *file, void *data)
{
struct mt76x2_dev *dev = dev_get_drvdata(file->private);
seq_printf(file, "Target power: %d\n", dev->target_power);
seq_puts_array(file, "Delta", dev->target_power_delta,
ARRAY_SIZE(dev->target_power_delta));
seq_puts_array(file, "CCK", dev->rate_power.cck,
ARRAY_SIZE(dev->rate_power.cck));
seq_puts_array(file, "OFDM", dev->rate_power.ofdm,
ARRAY_SIZE(dev->rate_power.ofdm));
seq_puts_array(file, "HT", dev->rate_power.ht,
ARRAY_SIZE(dev->rate_power.ht));
seq_puts_array(file, "VHT", dev->rate_power.vht,
ARRAY_SIZE(dev->rate_power.vht));
return 0;
}
static const struct file_operations fops_ampdu_stat = {
.open = mt76x2_ampdu_stat_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int
mt76x2_dfs_stat_read(struct seq_file *file, void *data)
{
int i;
struct mt76x2_dev *dev = file->private;
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
seq_printf(file, "engine: %d\n", i);
seq_printf(file, " hw pattern detected:\t%d\n",
dfs_pd->stats[i].hw_pattern);
seq_printf(file, " hw pulse discarded:\t%d\n",
dfs_pd->stats[i].hw_pulse_discarded);
}
return 0;
}
static int
mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
{
return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
}
static const struct file_operations fops_dfs_stat = {
.open = mt76x2_dfs_stat_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void mt76x2_init_debugfs(struct mt76x2_dev *dev)
{
struct dentry *dir;
dir = mt76_register_debugfs(&dev->mt76);
if (!dir)
return;
debugfs_create_u8("temperature", S_IRUSR, dir, &dev->cal.temp);
debugfs_create_bool("tpc", S_IRUSR | S_IWUSR, dir, &dev->enable_tpc);
debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
debugfs_create_file("dfs_stats", S_IRUSR, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
}
/*
* Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mt76x2.h"
#define RADAR_SPEC(m, len, el, eh, wl, wh, \
w_tolerance, tl, th, t_tolerance, \
bl, bh, event_exp, power_jmp) \
{ \
.mode = m, \
.avg_len = len, \
.e_low = el, \
.e_high = eh, \
.w_low = wl, \
.w_high = wh, \
.w_margin = w_tolerance, \
.t_low = tl, \
.t_high = th, \
.t_margin = t_tolerance, \
.b_low = bl, \
.b_high = bh, \
.event_expiration = event_exp, \
.pwr_jmp = power_jmp \
}
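/*
 * Each table below holds 3 * MT_DFS_NUM_ENGINES entries: one block of
 * four detection-engine configurations per channel width (20/40/80 MHz).
 * mt76x2_dfs_set_bbp_params() selects the right block via its 'shift'
 * offset.
 */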
static const struct mt76x2_radar_specs etsi_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
0x7fffffff, 0x155cc0, 0x19cc),
RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
0x7fffffff, 0x155cc0, 0x19cc),
RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
0x7fffffff, 0x155cc0, 0x19dd),
RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
0x7fffffff, 0x2191c0, 0x15cc),
/* 40MHz */
RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
0x7fffffff, 0x155cc0, 0x19cc),
RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
0x7fffffff, 0x155cc0, 0x19cc),
RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
0x7fffffff, 0x155cc0, 0x19dd),
RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
0x7fffffff, 0x2191c0, 0x15cc),
/* 80MHz */
RADAR_SPEC(0, 8, 2, 15, 106, 150, 10, 4900, 100096, 10, 0,
0x7fffffff, 0x155cc0, 0x19cc),
RADAR_SPEC(0, 40, 4, 59, 96, 380, 150, 4900, 100096, 40, 0,
0x7fffffff, 0x155cc0, 0x19cc),
RADAR_SPEC(3, 60, 20, 46, 300, 640, 80, 4900, 10100, 80, 0,
0x7fffffff, 0x155cc0, 0x19dd),
RADAR_SPEC(8, 8, 2, 9, 106, 150, 32, 4900, 296704, 32, 0,
0x7fffffff, 0x2191c0, 0x15cc)
};
static const struct mt76x2_radar_specs fcc_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0xfe808, 0x13dc),
RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
0x7fffffff, 0xfe808, 0x19dd),
RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
0x7fffffff, 0xfe808, 0x12cc),
RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
0x3938700, 0x57bcf00, 0x1289),
/* 40MHz */
RADAR_SPEC(0, 8, 2, 12, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0xfe808, 0x13dc),
RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
0x7fffffff, 0xfe808, 0x19dd),
RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
0x7fffffff, 0xfe808, 0x12cc),
RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
0x3938700, 0x57bcf00, 0x1289),
/* 80MHz */
RADAR_SPEC(0, 8, 2, 14, 106, 150, 15, 2900, 80100, 15, 0,
0x7fffffff, 0xfe808, 0x16cc),
RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
0x7fffffff, 0xfe808, 0x19dd),
RADAR_SPEC(0, 40, 4, 54, 96, 480, 150, 2900, 80100, 40, 0,
0x7fffffff, 0xfe808, 0x12cc),
RADAR_SPEC(2, 60, 15, 63, 640, 2080, 32, 19600, 40200, 32, 0,
0x3938700, 0x57bcf00, 0x1289)
};
static const struct mt76x2_radar_specs jp_w56_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0x14c080, 0x13dc),
RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
0x7fffffff, 0x14c080, 0x19dd),
RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
0x7fffffff, 0x14c080, 0x12cc),
RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
0x3938700, 0x57bcf00, 0x1289),
/* 40MHz */
RADAR_SPEC(0, 8, 2, 7, 106, 150, 5, 2900, 80100, 5, 0,
0x7fffffff, 0x14c080, 0x13dc),
RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
0x7fffffff, 0x14c080, 0x19dd),
RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
0x7fffffff, 0x14c080, 0x12cc),
RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
0x3938700, 0x57bcf00, 0x1289),
/* 80MHz */
RADAR_SPEC(0, 8, 2, 9, 106, 150, 15, 2900, 80100, 15, 0,
0x7fffffff, 0x14c080, 0x16cc),
RADAR_SPEC(0, 8, 2, 7, 106, 140, 5, 27600, 27900, 5, 0,
0x7fffffff, 0x14c080, 0x19dd),
RADAR_SPEC(0, 40, 4, 44, 96, 480, 150, 2900, 80100, 40, 0,
0x7fffffff, 0x14c080, 0x12cc),
RADAR_SPEC(2, 60, 15, 48, 940, 2080, 32, 19600, 40200, 32, 0,
0x3938700, 0x57bcf00, 0x1289)
};
static const struct mt76x2_radar_specs jp_w53_radar_specs[] = {
/* 20MHz */
RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
0x7fffffff, 0x14c080, 0x16cc),
{ 0 },
RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
0x7fffffff, 0x14c080, 0x16cc),
{ 0 },
/* 40MHz */
RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
0x7fffffff, 0x14c080, 0x16cc),
{ 0 },
RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
0x7fffffff, 0x14c080, 0x16cc),
{ 0 },
/* 80MHz */
RADAR_SPEC(0, 8, 2, 9, 106, 150, 20, 28400, 77000, 20, 0,
0x7fffffff, 0x14c080, 0x16cc),
{ 0 },
RADAR_SPEC(0, 40, 4, 44, 96, 200, 150, 28400, 77000, 60, 0,
0x7fffffff, 0x14c080, 0x16cc),
{ 0 }
};
static void mt76x2_dfs_set_capture_mode_ctrl(struct mt76x2_dev *dev,
u8 enable)
{
u32 data;
data = (1 << 1) | enable;
mt76_wr(dev, MT_BBP(DFS, 36), data);
}
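/*
 * Chirp detection: MT_PBF_LIFE_TIMER appears to tick at roughly 1 MHz,
 * so 12 * (1 << 20) units correspond to the ~12 s window noted below.
 * Pulses arriving within the window are counted, and more than 8 of
 * them are treated as a chirp radar pattern.
 */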
static bool mt76x2_dfs_check_chirp(struct mt76x2_dev *dev)
{
bool ret = false;
u32 current_ts, delta_ts;
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
current_ts = mt76_rr(dev, MT_PBF_LIFE_TIMER);
delta_ts = current_ts - dfs_pd->chirp_pulse_ts;
dfs_pd->chirp_pulse_ts = current_ts;
/* 12 sec */
if (delta_ts <= (12 * (1 << 20))) {
if (++dfs_pd->chirp_pulse_cnt > 8)
ret = true;
} else {
dfs_pd->chirp_pulse_cnt = 1;
}
return ret;
}
static void mt76x2_dfs_get_hw_pulse(struct mt76x2_dev *dev,
struct mt76x2_dfs_hw_pulse *pulse)
{
u32 data;
/* select channel */
data = (MT_DFS_CH_EN << 16) | pulse->engine;
mt76_wr(dev, MT_BBP(DFS, 0), data);
/* reported period */
pulse->period = mt76_rr(dev, MT_BBP(DFS, 19));
/* reported width */
pulse->w1 = mt76_rr(dev, MT_BBP(DFS, 20));
pulse->w2 = mt76_rr(dev, MT_BBP(DFS, 23));
/* reported burst number */
pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
}
static bool mt76x2_dfs_check_hw_pulse(struct mt76x2_dev *dev,
struct mt76x2_dfs_hw_pulse *pulse)
{
bool ret = false;
if (!pulse->period || !pulse->w1)
return false;
switch (dev->dfs_pd.region) {
case NL80211_DFS_FCC:
if (pulse->engine > 3)
break;
if (pulse->engine == 3) {
ret = mt76x2_dfs_check_chirp(dev);
break;
}
/* check short pulse */
if (pulse->w1 < 120)
ret = (pulse->period >= 2900 &&
(pulse->period <= 4700 ||
pulse->period >= 6400) &&
(pulse->period <= 6800 ||
pulse->period >= 10200) &&
pulse->period <= 61600);
else if (pulse->w1 < 130) /* 120 - 130 */
ret = (pulse->period >= 2900 &&
pulse->period <= 61600);
else
ret = (pulse->period >= 3500 &&
pulse->period <= 10100);
break;
case NL80211_DFS_ETSI:
if (pulse->engine >= 3)
break;
ret = (pulse->period >= 4900 &&
(pulse->period <= 10200 ||
pulse->period >= 12400) &&
pulse->period <= 100100);
break;
case NL80211_DFS_JP:
if (dev->mt76.chandef.chan->center_freq >= 5250 &&
dev->mt76.chandef.chan->center_freq <= 5350) {
/* JPW53 */
if (pulse->w1 <= 130)
ret = (pulse->period >= 28360 &&
(pulse->period <= 28700 ||
pulse->period >= 76900) &&
pulse->period <= 76940);
break;
}
if (pulse->engine > 3)
break;
if (pulse->engine == 3) {
ret = mt76x2_dfs_check_chirp(dev);
break;
}
/* check short pulse */
if (pulse->w1 < 120)
ret = (pulse->period >= 2900 &&
(pulse->period <= 4700 ||
pulse->period >= 6400) &&
(pulse->period <= 6800 ||
pulse->period >= 27560) &&
(pulse->period <= 27960 ||
pulse->period >= 28360) &&
(pulse->period <= 28700 ||
pulse->period >= 79900) &&
pulse->period <= 80100);
else if (pulse->w1 < 130) /* 120 - 130 */
ret = (pulse->period >= 2900 &&
(pulse->period <= 10100 ||
pulse->period >= 27560) &&
(pulse->period <= 27960 ||
pulse->period >= 28360) &&
(pulse->period <= 28700 ||
pulse->period >= 79900) &&
pulse->period <= 80100);
else
ret = (pulse->period >= 3900 &&
pulse->period <= 10100);
break;
case NL80211_DFS_UNSET:
default:
return false;
}
return ret;
}
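/*
 * GP-timer tasklet: read the engine match bitmask from MT_BBP(DFS, 1),
 * fetch and sanity-check each reported pulse, and report radar to
 * mac80211 on the first valid match. The hardware detector is reset in
 * either case; the GP timer interrupt is only re-enabled when no radar
 * was found.
 */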
static void mt76x2_dfs_tasklet(unsigned long arg)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *)arg;
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
u32 engine_mask;
int i;
if (test_bit(MT76_SCANNING, &dev->mt76.state))
goto out;
engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
if (!(engine_mask & 0xf))
goto out;
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
struct mt76x2_dfs_hw_pulse pulse;
if (!(engine_mask & (1 << i)))
continue;
pulse.engine = i;
mt76x2_dfs_get_hw_pulse(dev, &pulse);
if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
dfs_pd->stats[i].hw_pulse_discarded++;
continue;
}
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
ieee80211_radar_detected(dev->mt76.hw);
/* reset hw detector */
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
return;
}
/* reset hw detector */
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
out:
mt76x2_irq_enable(dev, MT_INT_GPTIMER);
}
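/*
 * Program the baseband radar detector: pick the spec table for the
 * current DFS region, offset it by channel width, write the common
 * pulse/gain parameters and then configure each detection engine.
 */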
static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
{
u32 data;
u8 i, shift;
const struct mt76x2_radar_specs *radar_specs;
switch (dev->mt76.chandef.width) {
case NL80211_CHAN_WIDTH_40:
shift = MT_DFS_NUM_ENGINES;
break;
case NL80211_CHAN_WIDTH_80:
shift = 2 * MT_DFS_NUM_ENGINES;
break;
default:
shift = 0;
break;
}
switch (dev->dfs_pd.region) {
case NL80211_DFS_FCC:
radar_specs = &fcc_radar_specs[shift];
break;
case NL80211_DFS_ETSI:
radar_specs = &etsi_radar_specs[shift];
break;
case NL80211_DFS_JP:
if (dev->mt76.chandef.chan->center_freq >= 5250 &&
dev->mt76.chandef.chan->center_freq <= 5350)
radar_specs = &jp_w53_radar_specs[shift];
else
radar_specs = &jp_w56_radar_specs[shift];
break;
case NL80211_DFS_UNSET:
default:
return;
}
data = (MT_DFS_VGA_MASK << 16) |
(MT_DFS_PWR_GAIN_OFFSET << 12) |
(MT_DFS_PWR_DOWN_TIME << 8) |
(MT_DFS_SYM_ROUND << 4) |
(MT_DFS_DELTA_DELAY & 0xf);
mt76_wr(dev, MT_BBP(DFS, 2), data);
data = (MT_DFS_RX_PE_MASK << 16) | MT_DFS_PKT_END_MASK;
mt76_wr(dev, MT_BBP(DFS, 3), data);
for (i = 0; i < MT_DFS_NUM_ENGINES; i++) {
/* configure engine */
mt76_wr(dev, MT_BBP(DFS, 0), i);
/* detection mode + avg_len */
data = ((radar_specs[i].avg_len & 0x1ff) << 16) |
(radar_specs[i].mode & 0xf);
mt76_wr(dev, MT_BBP(DFS, 4), data);
/* dfs energy */
data = ((radar_specs[i].e_high & 0x0fff) << 16) |
(radar_specs[i].e_low & 0x0fff);
mt76_wr(dev, MT_BBP(DFS, 5), data);
/* dfs period */
mt76_wr(dev, MT_BBP(DFS, 7), radar_specs[i].t_low);
mt76_wr(dev, MT_BBP(DFS, 9), radar_specs[i].t_high);
/* dfs burst */
mt76_wr(dev, MT_BBP(DFS, 11), radar_specs[i].b_low);
mt76_wr(dev, MT_BBP(DFS, 13), radar_specs[i].b_high);
/* dfs width */
data = ((radar_specs[i].w_high & 0x0fff) << 16) |
(radar_specs[i].w_low & 0x0fff);
mt76_wr(dev, MT_BBP(DFS, 14), data);
/* dfs margins */
data = (radar_specs[i].w_margin << 16) |
radar_specs[i].t_margin;
mt76_wr(dev, MT_BBP(DFS, 15), data);
/* dfs event expiration */
mt76_wr(dev, MT_BBP(DFS, 17), radar_specs[i].event_expiration);
/* dfs pwr adj */
mt76_wr(dev, MT_BBP(DFS, 30), radar_specs[i].pwr_jmp);
}
/* reset status */
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
mt76_wr(dev, MT_BBP(DFS, 36), 0x3);
/* enable detection */
mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
mt76_wr(dev, 0x212c, 0x0c350001);
}
void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
{
u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
agc_r8 = mt76_rr(dev, MT_BBP(AGC, 8));
agc_r4 = mt76_rr(dev, MT_BBP(AGC, 4));
val_r8 = (agc_r8 & 0x00007e00) >> 9;
val_r4 = agc_r4 & ~0x1f000000;
val_r4 += (((val_r8 + 1) >> 1) << 24);
mt76_wr(dev, MT_BBP(AGC, 4), val_r4);
dfs_r31 = FIELD_GET(MT_BBP_AGC_LNA_HIGH_GAIN, val_r4);
dfs_r31 += val_r8;
dfs_r31 -= (agc_r8 & 0x00000038) >> 3;
dfs_r31 = (dfs_r31 << 16) | 0x00000307;
mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
}
void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
tasklet_kill(&dev->dfs_pd.dfs_tasklet);
if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
mt76x2_dfs_set_bbp_params(dev);
/* enable debug mode */
mt76x2_dfs_set_capture_mode_ctrl(dev, true);
mt76x2_irq_enable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 1);
} else {
/* disable hw detector */
mt76_wr(dev, MT_BBP(DFS, 0), 0);
/* clear detector status */
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
mt76_wr(dev, 0x212c, 0);
mt76x2_irq_disable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 0);
}
}
void mt76x2_dfs_init_detector(struct mt76x2_dev *dev)
{
struct mt76x2_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
dfs_pd->region = NL80211_DFS_UNSET;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
(unsigned long)dev);
}
/*
* Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __MT76x2_DFS_H
#define __MT76x2_DFS_H
#include <linux/types.h>
#include <linux/nl80211.h>
#define MT_DFS_GP_INTERVAL (10 << 4) /* 64 us unit */
#define MT_DFS_NUM_ENGINES 4
/* bbp params */
#define MT_DFS_SYM_ROUND 0
#define MT_DFS_DELTA_DELAY 2
#define MT_DFS_VGA_MASK 0
#define MT_DFS_PWR_GAIN_OFFSET 3
#define MT_DFS_PWR_DOWN_TIME 0xf
#define MT_DFS_RX_PE_MASK 0xff
#define MT_DFS_PKT_END_MASK 0
#define MT_DFS_CH_EN 0xf
struct mt76x2_radar_specs {
u8 mode;
u16 avg_len;
u16 e_low;
u16 e_high;
u16 w_low;
u16 w_high;
u16 w_margin;
u32 t_low;
u32 t_high;
u16 t_margin;
u32 b_low;
u32 b_high;
u32 event_expiration;
u16 pwr_jmp;
};
struct mt76x2_dfs_hw_pulse {
u8 engine;
u32 period;
u32 w1;
u32 w2;
u32 burst;
};
struct mt76x2_dfs_engine_stats {
u32 hw_pattern;
u32 hw_pulse_discarded;
};
struct mt76x2_dfs_pattern_detector {
enum nl80211_dfs_regions region;
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;
struct mt76x2_dfs_engine_stats stats[MT_DFS_NUM_ENGINES];
struct tasklet_struct dfs_tasklet;
};
void mt76x2_dfs_init_params(struct mt76x2_dev *dev);
void mt76x2_dfs_init_detector(struct mt76x2_dev *dev);
void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev);
#endif /* __MT76x2_DFS_H */
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mt76x2.h"
#include "mt76x2_dma.h"
int
mt76x2_tx_queue_mcu(struct mt76x2_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq)
{
struct mt76_queue *q = &dev->mt76.q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
u32 tx_info;
tx_info = MT_MCU_MSG_TYPE_CMD |
FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev->mt76.dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
static int
mt76x2_init_tx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc)
{
int ret;
q->regs = dev->mt76.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
q->ndesc = n_desc;
ret = mt76_queue_alloc(dev, q);
if (ret)
return ret;
mt76x2_irq_enable(dev, MT_INT_TX_DONE(idx));
return 0;
}
void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
void *rxwi = skb->data;
if (q == MT_RXQ_MCU) {
skb_queue_tail(&dev->mcu.res_q, skb);
wake_up(&dev->mcu.wait);
return;
}
skb_pull(skb, sizeof(struct mt76x2_rxwi));
if (mt76x2_mac_process_rx(dev, skb, rxwi)) {
dev_kfree_skb(skb);
return;
}
mt76_rx(&dev->mt76, q, skb);
}
static int
mt76x2_init_rx_queue(struct mt76x2_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
{
int ret;
q->regs = dev->mt76.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
q->ndesc = n_desc;
q->buf_size = bufsize;
ret = mt76_queue_alloc(dev, q);
if (ret)
return ret;
mt76x2_irq_enable(dev, MT_INT_RX_DONE(idx));
return 0;
}
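/*
 * Deferred TX completion: drain the TX status FIFO, clean up all TX
 * queues (including the MCU queue), poll for any remaining status
 * events and only then re-enable the TX-done interrupt.
 */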
static void
mt76x2_tx_tasklet(unsigned long data)
{
struct mt76x2_dev *dev = (struct mt76x2_dev *) data;
int i;
mt76x2_mac_process_tx_status_fifo(dev);
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
mt76x2_mac_poll_tx_status(dev, false);
mt76x2_irq_enable(dev, MT_INT_TX_DONE_ALL);
}
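/*
 * DMA bring-up: one TX queue per WMM access category (hardware queues
 * 0-3), plus dedicated queues for PS/management traffic and MCU
 * commands, and two RX queues (main and MCU response). The main RX
 * queue reserves buffer headroom so the mt76x2_rxwi descriptor fits
 * within MT_RX_HEADROOM.
 */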
int mt76x2_dma_init(struct mt76x2_dev *dev)
{
static const u8 wmm_queue_map[] = {
[IEEE80211_AC_BE] = 0,
[IEEE80211_AC_BK] = 1,
[IEEE80211_AC_VI] = 2,
[IEEE80211_AC_VO] = 3,
};
int ret;
int i;
struct mt76_txwi_cache __maybe_unused *t;
struct mt76_queue *q;
BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x2_txwi));
BUILD_BUG_ON(sizeof(struct mt76x2_rxwi) > MT_RX_HEADROOM);
mt76_dma_attach(&dev->mt76);
init_waitqueue_head(&dev->mcu.wait);
skb_queue_head_init(&dev->mcu.res_q);
tasklet_init(&dev->tx_tasklet, mt76x2_tx_tasklet, (unsigned long) dev);
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[i],
wmm_queue_map[i], MT_TX_RING_SIZE);
if (ret)
return ret;
}
ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
if (ret)
return ret;
ret = mt76x2_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
ret = mt76x2_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
q = &dev->mt76.q_rx[MT_RXQ_MAIN];
q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x2_rxwi);
ret = mt76x2_init_rx_queue(dev, q, 0, MT76x2_RX_RING_SIZE, MT_RX_BUF_SIZE);
if (ret)
return ret;
return mt76_init_queues(dev);
}
void mt76x2_dma_cleanup(struct mt76x2_dev *dev)
{
tasklet_kill(&dev->tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __MT76x2_DMA_H
#define __MT76x2_DMA_H
#include "dma.h"
#define MT_TXD_INFO_LEN GENMASK(13, 0)
#define MT_TXD_INFO_NEXT_VLD BIT(16)
#define MT_TXD_INFO_TX_BURST BIT(17)
#define MT_TXD_INFO_80211 BIT(19)
#define MT_TXD_INFO_TSO BIT(20)
#define MT_TXD_INFO_CSO BIT(21)
#define MT_TXD_INFO_WIV BIT(24)
#define MT_TXD_INFO_QSEL GENMASK(26, 25)
#define MT_TXD_INFO_TCO BIT(29)
#define MT_TXD_INFO_UCO BIT(30)
#define MT_TXD_INFO_ICO BIT(31)
#define MT_RX_FCE_INFO_LEN GENMASK(13, 0)
#define MT_RX_FCE_INFO_SELF_GEN BIT(15)
#define MT_RX_FCE_INFO_CMD_SEQ GENMASK(19, 16)
#define MT_RX_FCE_INFO_EVT_TYPE GENMASK(23, 20)
#define MT_RX_FCE_INFO_PCIE_INTR BIT(24)
#define MT_RX_FCE_INFO_QSEL GENMASK(26, 25)
#define MT_RX_FCE_INFO_D_PORT GENMASK(29, 27)
#define MT_RX_FCE_INFO_TYPE GENMASK(31, 30)
/* MCU request message header */
#define MT_MCU_MSG_LEN GENMASK(15, 0)
#define MT_MCU_MSG_CMD_SEQ GENMASK(19, 16)
#define MT_MCU_MSG_CMD_TYPE GENMASK(26, 20)
#define MT_MCU_MSG_PORT GENMASK(29, 27)
#define MT_MCU_MSG_TYPE GENMASK(31, 30)
#define MT_MCU_MSG_TYPE_CMD BIT(30)
enum mt76x2_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
MT_QSEL_EDCA,
MT_QSEL_EDCA_2,
};
enum dma_msg_port {
WLAN_PORT,
CPU_RX_PORT,
CPU_TX_PORT,
HOST_PORT,
VIRTUAL_CPU_RX_PORT,
VIRTUAL_CPU_TX_PORT,
DISCARD,
};
#endif
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <asm/unaligned.h>
#include "mt76x2.h"
#include "mt76x2_eeprom.h"
#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
static int
mt76x2_eeprom_copy(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field,
void *dest, int len)
{
if (field + len > dev->mt76.eeprom.size)
return -1;
memcpy(dest, dev->mt76.eeprom.data + field, len);
return 0;
}
static int
mt76x2_eeprom_get_macaddr(struct mt76x2_dev *dev)
{
void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
memcpy(dev->mt76.macaddr, src, ETH_ALEN);
return 0;
}
static void
mt76x2_eeprom_parse_hw_cap(struct mt76x2_dev *dev)
{
u16 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
case BOARD_TYPE_5GHZ:
dev->mt76.cap.has_5ghz = true;
break;
case BOARD_TYPE_2GHZ:
dev->mt76.cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
break;
}
}
static int
mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
{
u32 val;
int i;
val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
val |= MT_EFUSE_CTRL_KICK;
mt76_wr(dev, MT_EFUSE_CTRL, val);
if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
return -ETIMEDOUT;
udelay(2);
val = mt76_rr(dev, MT_EFUSE_CTRL);
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
memset(data, 0xff, 16);
return 0;
}
for (i = 0; i < 4; i++) {
val = mt76_rr(dev, MT_EFUSE_DATA(i));
put_unaligned_le32(val, data + 4 * i);
}
return 0;
}
static int
mt76x2_get_efuse_data(struct mt76x2_dev *dev, void *buf, int len)
{
int ret, i;
for (i = 0; i + 16 <= len; i += 16) {
ret = mt76x2_efuse_read(dev, i, buf + i);
if (ret)
return ret;
}
return 0;
}
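/*
 * "Cal-free" data: some boards ship selected per-device calibration
 * values in efuse rather than in the EEPROM image. When the checks
 * below indicate such data is present, the listed efuse bytes override
 * their EEPROM counterparts.
 */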
static bool
mt76x2_has_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
{
u16 *efuse_w = (u16 *) efuse;
if (efuse_w[MT_EE_NIC_CONF_0] != 0)
return false;
if (efuse_w[MT_EE_XTAL_TRIM_1] == 0xffff)
return false;
if (efuse_w[MT_EE_TX_POWER_DELTA_BW40] != 0)
return false;
if (efuse_w[MT_EE_TX_POWER_0_START_2G] == 0xffff)
return false;
if (efuse_w[MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA] != 0)
return false;
if (efuse_w[MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE] == 0xffff)
return false;
return true;
}
static void
mt76x2_apply_cal_free_data(struct mt76x2_dev *dev, u8 *efuse)
{
#define GROUP_5G(_id) \
MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
MT_EE_TX_POWER_0_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1, \
MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id), \
MT_EE_TX_POWER_1_START_5G + MT_TX_POWER_GROUP_SIZE_5G * (_id) + 1
static const u8 cal_free_bytes[] = {
MT_EE_XTAL_TRIM_1,
MT_EE_TX_POWER_EXT_PA_5G + 1,
MT_EE_TX_POWER_0_START_2G,
MT_EE_TX_POWER_0_START_2G + 1,
MT_EE_TX_POWER_1_START_2G,
MT_EE_TX_POWER_1_START_2G + 1,
GROUP_5G(0),
GROUP_5G(1),
GROUP_5G(2),
GROUP_5G(3),
GROUP_5G(4),
GROUP_5G(5),
MT_EE_RF_2G_TSSI_OFF_TXPOWER,
MT_EE_RF_2G_RX_HIGH_GAIN + 1,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN + 1,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN + 1,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN + 1,
};
u8 *eeprom = dev->mt76.eeprom.data;
u8 prev_grp0[4] = {
eeprom[MT_EE_TX_POWER_0_START_5G],
eeprom[MT_EE_TX_POWER_0_START_5G + 1],
eeprom[MT_EE_TX_POWER_1_START_5G],
eeprom[MT_EE_TX_POWER_1_START_5G + 1]
};
u16 val;
int i;
if (!mt76x2_has_cal_free_data(dev, efuse))
return;
for (i = 0; i < ARRAY_SIZE(cal_free_bytes); i++) {
int offset = cal_free_bytes[i];
eeprom[offset] = efuse[offset];
}
if (!(efuse[MT_EE_TX_POWER_0_START_5G] |
efuse[MT_EE_TX_POWER_0_START_5G + 1]))
memcpy(eeprom + MT_EE_TX_POWER_0_START_5G, prev_grp0, 2);
if (!(efuse[MT_EE_TX_POWER_1_START_5G] |
efuse[MT_EE_TX_POWER_1_START_5G + 1]))
memcpy(eeprom + MT_EE_TX_POWER_1_START_5G, prev_grp0 + 2, 2);
val = get_unaligned_le16(efuse + MT_EE_BT_RCAL_RESULT);
if (val != 0xffff)
eeprom[MT_EE_BT_RCAL_RESULT] = val & 0xff;
val = get_unaligned_le16(efuse + MT_EE_BT_VCDL_CALIBRATION);
if (val != 0xffff)
eeprom[MT_EE_BT_VCDL_CALIBRATION + 1] = val >> 8;
val = get_unaligned_le16(efuse + MT_EE_BT_PMUCFG);
if (val != 0xffff)
eeprom[MT_EE_BT_PMUCFG] = val & 0xff;
}
static int mt76x2_check_eeprom(struct mt76x2_dev *dev)
{
u16 val = get_unaligned_le16(dev->mt76.eeprom.data);
if (!val)
val = get_unaligned_le16(dev->mt76.eeprom.data + MT_EE_PCI_ID);
switch (val) {
case 0x7662:
case 0x7612:
return 0;
default:
dev_err(dev->mt76.dev, "EEPROM data check failed: %04x\n", val);
return -EINVAL;
}
}
static int
mt76x2_eeprom_load(struct mt76x2_dev *dev)
{
void *efuse;
int len = MT7662_EEPROM_SIZE;
bool found;
int ret;
ret = mt76_eeprom_init(&dev->mt76, len);
if (ret < 0)
return ret;
found = ret;
if (found)
found = !mt76x2_check_eeprom(dev);
dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
dev->mt76.otp.size = len;
if (!dev->mt76.otp.data)
return -ENOMEM;
efuse = dev->mt76.otp.data;
if (mt76x2_get_efuse_data(dev, efuse, len))
goto out;
if (found) {
mt76x2_apply_cal_free_data(dev, efuse);
} else {
/* FIXME: check if efuse data is complete */
found = true;
memcpy(dev->mt76.eeprom.data, efuse, len);
}
out:
if (!found)
return -ENOENT;
return 0;
}
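/*
 * The EEPROM stores small signed fields in a sign/magnitude-style
 * format in which bit (size - 1) set denotes a positive value: with
 * size = 4, 0xd (0b1101) decodes to 5 while 0x5 (0b0101) decodes to -5.
 * The "optional" variant below uses bit 'size' as an enable flag.
 */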
static inline int
mt76x2_sign_extend(u32 val, unsigned int size)
{
bool sign = val & BIT(size - 1);
val &= BIT(size - 1) - 1;
return sign ? val : -val;
}
static inline int
mt76x2_sign_extend_optional(u32 val, unsigned int size)
{
bool enable = val & BIT(size);
return enable ? mt76x2_sign_extend(val, size) : 0;
}
static bool
field_valid(u8 val)
{
return val != 0 && val != 0xff;
}
static void
mt76x2_set_rx_gain_group(struct mt76x2_dev *dev, u8 val)
{
s8 *dest = dev->cal.rx.high_gain;
if (!field_valid(val)) {
dest[0] = 0;
dest[1] = 0;
return;
}
dest[0] = mt76x2_sign_extend(val, 4);
dest[1] = mt76x2_sign_extend(val >> 4, 4);
}
static void
mt76x2_set_rssi_offset(struct mt76x2_dev *dev, int chain, u8 val)
{
s8 *dest = dev->cal.rx.rssi_offset;
if (!field_valid(val)) {
dest[chain] = 0;
return;
}
dest[chain] = mt76x2_sign_extend_optional(val, 7);
}
static enum mt76x2_cal_channel_group
mt76x2_get_cal_channel_group(int channel)
{
if (channel >= 184 && channel <= 196)
return MT_CH_5G_JAPAN;
if (channel <= 48)
return MT_CH_5G_UNII_1;
if (channel <= 64)
return MT_CH_5G_UNII_2;
if (channel <= 114)
return MT_CH_5G_UNII_2E_1;
if (channel <= 144)
return MT_CH_5G_UNII_2E_2;
return MT_CH_5G_UNII_3;
}
static u8
mt76x2_get_5g_rx_gain(struct mt76x2_dev *dev, u8 channel)
{
enum mt76x2_cal_channel_group group;
group = mt76x2_get_cal_channel_group(channel);
switch (group) {
case MT_CH_5G_JAPAN:
return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
case MT_CH_5G_UNII_1:
return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2:
return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
case MT_CH_5G_UNII_2E_1:
return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2E_2:
return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
default:
return mt76x2_eeprom_get(dev, MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
}
}
void mt76x2_read_rx_gain(struct mt76x2_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
u8 lna;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
else
val = mt76x2_get_5g_rx_gain(dev, channel);
mt76x2_set_rx_gain_group(dev, val);
if (chan->band == NL80211_BAND_2GHZ) {
val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_0);
mt76x2_set_rssi_offset(dev, 0, val);
mt76x2_set_rssi_offset(dev, 1, val >> 8);
} else {
val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_0);
mt76x2_set_rssi_offset(dev, 0, val);
mt76x2_set_rssi_offset(dev, 1, val >> 8);
}
val = mt76x2_eeprom_get(dev, MT_EE_LNA_GAIN);
lna_2g = val & 0xff;
lna_5g[0] = val >> 8;
val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_2G_1);
lna_5g[1] = val >> 8;
val = mt76x2_eeprom_get(dev, MT_EE_RSSI_OFFSET_5G_1);
lna_5g[2] = val >> 8;
if (!field_valid(lna_5g[1]))
lna_5g[1] = lna_5g[0];
if (!field_valid(lna_5g[2]))
lna_5g[2] = lna_5g[0];
dev->cal.rx.mcu_gain = (lna_2g & 0xff);
dev->cal.rx.mcu_gain |= (lna_5g[0] & 0xff) << 8;
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
lna_2g = 0;
if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
memset(lna_5g, 0, sizeof(lna_5g));
if (chan->band == NL80211_BAND_2GHZ)
lna = lna_2g;
else if (channel <= 64)
lna = lna_5g[0];
else if (channel <= 128)
lna = lna_5g[1];
else
lna = lna_5g[2];
if (lna == 0xff)
lna = 0;
dev->cal.rx.lna_gain = mt76x2_sign_extend(lna, 8);
}
static s8
mt76x2_rate_power_val(u8 val)
{
if (!field_valid(val))
return 0;
return mt76x2_sign_extend_optional(val, 7);
}
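/*
 * Per-rate power offsets: each 16-bit EEPROM word packs two values, one
 * in the low byte and one in the high byte, and each value applies to a
 * pair of adjacent rates within its rate group.
 */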
void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t)
{
bool is_5ghz;
u16 val;
is_5ghz = dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ;
memset(t, 0, sizeof(*t));
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_CCK);
t->cck[0] = t->cck[1] = mt76x2_rate_power_val(val);
t->cck[2] = t->cck[3] = mt76x2_rate_power_val(val >> 8);
if (is_5ghz)
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
else
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
t->ofdm[0] = t->ofdm[1] = mt76x2_rate_power_val(val);
t->ofdm[2] = t->ofdm[3] = mt76x2_rate_power_val(val >> 8);
if (is_5ghz)
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
else
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
t->ofdm[4] = t->ofdm[5] = mt76x2_rate_power_val(val);
t->ofdm[6] = t->ofdm[7] = mt76x2_rate_power_val(val >> 8);
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
t->ht[0] = t->ht[1] = mt76x2_rate_power_val(val);
t->ht[2] = t->ht[3] = mt76x2_rate_power_val(val >> 8);
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
t->ht[4] = t->ht[5] = mt76x2_rate_power_val(val);
t->ht[6] = t->ht[7] = mt76x2_rate_power_val(val >> 8);
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
t->ht[8] = t->ht[9] = mt76x2_rate_power_val(val);
t->ht[10] = t->ht[11] = mt76x2_rate_power_val(val >> 8);
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
t->ht[12] = t->ht[13] = mt76x2_rate_power_val(val);
t->ht[14] = t->ht[15] = mt76x2_rate_power_val(val >> 8);
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
t->vht[0] = t->vht[1] = mt76x2_rate_power_val(val);
t->vht[2] = t->vht[3] = mt76x2_rate_power_val(val >> 8);
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
t->vht[4] = t->vht[5] = mt76x2_rate_power_val(val);
t->vht[6] = t->vht[7] = mt76x2_rate_power_val(val >> 8);
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
if (!is_5ghz)
val >>= 8;
t->vht[8] = t->vht[9] = mt76x2_rate_power_val(val >> 8);
}
static void
mt76x2_get_power_info_2g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
int chain, int offset)
{
int channel = dev->mt76.chandef.chan->hw_value;
int delta_idx;
u8 data[6];
u16 val;
if (channel < 6)
delta_idx = 3;
else if (channel < 11)
delta_idx = 4;
else
delta_idx = 5;
mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
t->chain[chain].tssi_slope = data[0];
t->chain[chain].tssi_offset = data[1];
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
t->target_power = val >> 8;
}
static void
mt76x2_get_power_info_5g(struct mt76x2_dev *dev, struct mt76x2_tx_power_info *t,
int chain, int offset)
{
int channel = dev->mt76.chandef.chan->hw_value;
enum mt76x2_cal_channel_group group;
int delta_idx;
u16 val;
u8 data[5];
group = mt76x2_get_cal_channel_group(channel);
offset += group * MT_TX_POWER_GROUP_SIZE_5G;
if (channel >= 192)
delta_idx = 4;
else if (channel >= 184)
delta_idx = 3;
else if (channel < 44)
delta_idx = 3;
else if (channel < 52)
delta_idx = 4;
else if (channel < 58)
delta_idx = 3;
else if (channel < 98)
delta_idx = 4;
else if (channel < 106)
delta_idx = 3;
else if (channel < 116)
delta_idx = 4;
else if (channel < 130)
delta_idx = 3;
else if (channel < 149)
delta_idx = 4;
else if (channel < 157)
delta_idx = 3;
else
delta_idx = 4;
mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
t->chain[chain].tssi_slope = data[0];
t->chain[chain].tssi_offset = data[1];
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x2_sign_extend_optional(data[delta_idx], 7);
val = mt76x2_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
t->target_power = val & 0xff;
}
void mt76x2_get_power_info(struct mt76x2_dev *dev,
struct mt76x2_tx_power_info *t)
{
u16 bw40, bw80;
memset(t, 0, sizeof(*t));
bw40 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
bw80 = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) {
bw40 >>= 8;
mt76x2_get_power_info_5g(dev, t, 0, MT_EE_TX_POWER_0_START_5G);
mt76x2_get_power_info_5g(dev, t, 1, MT_EE_TX_POWER_1_START_5G);
} else {
mt76x2_get_power_info_2g(dev, t, 0, MT_EE_TX_POWER_0_START_2G);
mt76x2_get_power_info_2g(dev, t, 1, MT_EE_TX_POWER_1_START_2G);
}
if (mt76x2_tssi_enabled(dev) || !field_valid(t->target_power))
t->target_power = t->chain[0].target_power;
t->delta_bw40 = mt76x2_rate_power_val(bw40);
t->delta_bw80 = mt76x2_rate_power_val(bw80);
}
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t)
{
enum nl80211_band band = dev->mt76.chandef.chan->band;
u16 val, slope;
u8 bounds;
memset(t, 0, sizeof(*t));
val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC))
return -EINVAL;
if (!mt76x2_ext_pa_enabled(dev, band))
return -EINVAL;
val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
if (!(val & BIT(7)))
return -EINVAL;
t->temp_25_ref = val & 0x7f;
if (band == NL80211_BAND_5GHZ) {
slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
} else {
slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
bounds = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80) >> 8;
}
t->high_slope = slope & 0xff;
t->low_slope = slope >> 8;
t->lower_bound = 0 - (bounds & 0xf);
t->upper_bound = (bounds >> 4) & 0xf;
return 0;
}
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band)
{
u16 conf0 = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0);
if (band == NL80211_BAND_5GHZ)
return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_5G);
else
return !(conf0 & MT_EE_NIC_CONF_0_PA_INT_2G);
}
int mt76x2_eeprom_init(struct mt76x2_dev *dev)
{
int ret;
ret = mt76x2_eeprom_load(dev);
if (ret)
return ret;
mt76x2_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
mt76_eeprom_override(&dev->mt76);
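/* clear the locally administered bit so the address stays globally unique */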
dev->mt76.macaddr[0] &= ~BIT(1);
return 0;
}
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __MT76x2_EEPROM_H
#define __MT76x2_EEPROM_H
#include "mt76x2.h"
enum mt76x2_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_PCI_ID = 0x00A,
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_NIC_CONF_2 = 0x042,
MT_EE_XTAL_TRIM_1 = 0x03a,
MT_EE_XTAL_TRIM_2 = 0x09e,
MT_EE_LNA_GAIN = 0x044,
MT_EE_RSSI_OFFSET_2G_0 = 0x046,
MT_EE_RSSI_OFFSET_2G_1 = 0x048,
MT_EE_RSSI_OFFSET_5G_0 = 0x04a,
MT_EE_RSSI_OFFSET_5G_1 = 0x04c,
MT_EE_TX_POWER_DELTA_BW40 = 0x050,
MT_EE_TX_POWER_DELTA_BW80 = 0x052,
MT_EE_TX_POWER_EXT_PA_5G = 0x054,
MT_EE_TX_POWER_0_START_2G = 0x056,
MT_EE_TX_POWER_1_START_2G = 0x05c,
/* used as byte arrays */
#define MT_TX_POWER_GROUP_SIZE_5G 5
#define MT_TX_POWER_GROUPS_5G 6
MT_EE_TX_POWER_0_START_5G = 0x062,
MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
MT_EE_TX_POWER_1_START_5G = 0x080,
MT_EE_TX_POWER_CCK = 0x0a0,
MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2,
MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4,
MT_EE_TX_POWER_OFDM_5G_6M = 0x0b2,
MT_EE_TX_POWER_OFDM_5G_24M = 0x0b4,
MT_EE_TX_POWER_HT_MCS0 = 0x0a6,
MT_EE_TX_POWER_HT_MCS4 = 0x0a8,
MT_EE_TX_POWER_HT_MCS8 = 0x0aa,
MT_EE_TX_POWER_HT_MCS12 = 0x0ac,
MT_EE_TX_POWER_VHT_MCS0 = 0x0ba,
MT_EE_TX_POWER_VHT_MCS4 = 0x0bc,
MT_EE_TX_POWER_VHT_MCS8 = 0x0be,
MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2,
MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4,
MT_EE_RF_2G_TSSI_OFF_TXPOWER = 0x0f6,
MT_EE_RF_2G_RX_HIGH_GAIN = 0x0f8,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN = 0x0fa,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN = 0x0fc,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN = 0x0fe,
MT_EE_BT_RCAL_RESULT = 0x138,
MT_EE_BT_VCDL_CALIBRATION = 0x13c,
MT_EE_BT_PMUCFG = 0x13e,
__MT_EE_MAX
};
#define MT_EE_NIC_CONF_0_PA_INT_2G BIT(8)
#define MT_EE_NIC_CONF_0_PA_INT_5G BIT(9)
#define MT_EE_NIC_CONF_0_BOARD_TYPE GENMASK(13, 12)
#define MT_EE_NIC_CONF_1_TEMP_TX_ALC BIT(1)
#define MT_EE_NIC_CONF_1_LNA_EXT_2G BIT(2)
#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
enum mt76x2_board_type {
BOARD_TYPE_2GHZ = 1,
BOARD_TYPE_5GHZ = 2,
};
enum mt76x2_cal_channel_group {
MT_CH_5G_JAPAN,
MT_CH_5G_UNII_1,
MT_CH_5G_UNII_2,
MT_CH_5G_UNII_2E_1,
MT_CH_5G_UNII_2E_2,
MT_CH_5G_UNII_3,
__MT_CH_MAX
};
struct mt76x2_tx_power_info {
u8 target_power;
s8 delta_bw40;
s8 delta_bw80;
struct {
s8 tssi_slope;
s8 tssi_offset;
s8 target_power;
s8 delta;
} chain[MT_MAX_CHAINS];
};
struct mt76x2_temp_comp {
u8 temp_25_ref;
int lower_bound; /* J */
int upper_bound; /* J */
unsigned int high_slope; /* J / dB */
unsigned int low_slope; /* J / dB */
};
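/*
 * EEPROM fields are 16-bit little-endian words at even offsets; reads
 * at odd or out-of-range offsets fail with -1.
 */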
static inline int
mt76x2_eeprom_get(struct mt76x2_dev *dev, enum mt76x2_eeprom_field field)
{
if ((field & 1) || field >= __MT_EE_MAX)
return -1;
return get_unaligned_le16(dev->mt76.eeprom.data + field);
}
void mt76x2_get_rate_power(struct mt76x2_dev *dev, struct mt76_rate_power *t);
void mt76x2_get_power_info(struct mt76x2_dev *dev,
struct mt76x2_tx_power_info *t);
int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t);
bool mt76x2_ext_pa_enabled(struct mt76x2_dev *dev, enum nl80211_band band);
void mt76x2_read_rx_gain(struct mt76x2_dev *dev);
static inline bool
mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev)
{
return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TEMP_TX_ALC;
}
static inline bool
mt76x2_tssi_enabled(struct mt76x2_dev *dev)
{
return !mt76x2_temp_tx_alc_enabled(dev) &&
(mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TX_ALC_EN);
}
static inline bool
mt76x2_has_ext_lna(struct mt76x2_dev *dev)
{
u32 val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
else
return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
}
#endif
This diff is collapsed.
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_eeprom.h"
#include "mt76x2_trace.h"
void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
{
idx &= 7;
mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
get_unaligned_le16(addr + 4));
}
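/*
 * Translate the hardware RXWI rate word into mac80211 RX status fields:
 * legacy CCK/OFDM rates map to band rate indexes, HT/VHT rates carry
 * MCS/NSS, and the remaining bits encode guard interval, LDPC, STBC and
 * channel width.
 */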
static void
mt76x2_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
{
u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
case MT_PHY_TYPE_OFDM:
if (idx >= 8)
idx = 0;
if (status->band == NL80211_BAND_2GHZ)
idx += 4;
status->rate_idx = idx;
return;
case MT_PHY_TYPE_CCK:
if (idx >= 8) {
idx -= 8;
status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
}
if (idx >= 4)
idx = 0;
status->rate_idx = idx;
return;
case MT_PHY_TYPE_HT_GF:
status->enc_flags |= RX_ENC_FLAG_HT_GF;
/* fall through */
case MT_PHY_TYPE_HT:
status->encoding = RX_ENC_HT;
status->rate_idx = idx;
break;
case MT_PHY_TYPE_VHT:
status->encoding = RX_ENC_VHT;
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx);
status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1;
break;
default:
WARN_ON(1);
return;
}
if (rate & MT_RXWI_RATE_LDPC)
status->enc_flags |= RX_ENC_FLAG_LDPC;
if (rate & MT_RXWI_RATE_SGI)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate & MT_RXWI_RATE_STBC)
status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT;
switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
case MT_PHY_BW_20:
break;
case MT_PHY_BW_40:
status->bw = RATE_INFO_BW_40;
break;
case MT_PHY_BW_80:
status->bw = RATE_INFO_BW_80;
break;
default:
break;
}
}
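/*
 * Worked example (a sketch; assumes MT_PHY_TYPE_VHT == 4 as defined in
 * the mt76 core): for rate = 0x8319,
 *	MT_RXWI_RATE_PHY   (bits 15:13) = 4    -> MT_PHY_TYPE_VHT
 *	MT_RXWI_RATE_SGI   (bit 9)      = 1    -> short guard interval
 *	MT_RXWI_RATE_BW    (bits 8:7)   = 2    -> MT_PHY_BW_80
 *	MT_RXWI_RATE_INDEX (bits 5:0)   = 0x19 -> VHT MCS 9, NSS field 1
 * so the frame is reported as VHT MCS 9, 2 spatial streams, 80 MHz,
 * short GI.
 */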
static __le16
mt76x2_mac_tx_rate_val(struct mt76x2_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
u16 rateval;
u8 phy, rate_idx;
u8 nss = 1;
u8 bw = 0;
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
rate_idx = rate->idx;
nss = 1 + (rate->idx >> 4);
phy = MT_PHY_TYPE_VHT;
if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
bw = 2;
else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
bw = 1;
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
rate_idx = rate->idx;
nss = 1 + (rate->idx >> 3);
phy = MT_PHY_TYPE_HT;
if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
phy = MT_PHY_TYPE_HT_GF;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
bw = 1;
} else {
const struct ieee80211_rate *r;
int band = dev->mt76.chandef.chan->band;
u16 val;
r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
val = r->hw_value;
phy = val >> 8;
rate_idx = val & 0xff;
bw = 0;
}
rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx);
rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy);
rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw);
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
rateval |= MT_RXWI_RATE_SGI;
*nss_val = nss;
return cpu_to_le16(rateval);
}
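/*
 * Worked example (a sketch; assumes MT_PHY_TYPE_HT == 2 as defined in
 * the mt76 core): an HT rate with IEEE80211_TX_RC_MCS, idx = 15 and
 * IEEE80211_TX_RC_40_MHZ_WIDTH gives
 *	nss     = 1 + (15 >> 3) = 2
 *	rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, 15) |
 *		  FIELD_PREP(MT_RXWI_RATE_PHY, 2) |
 *		  FIELD_PREP(MT_RXWI_RATE_BW, 1) = 0x408f
 * i.e. MCS 15 on 40 MHz, two spatial streams, long GI.
 */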
void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop)
{
u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
u32 bit = MT_WCID_DROP_MASK(idx);
/* prevent unnecessary writes */
if ((val & bit) != (bit * drop))
mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate)
{
spin_lock_bh(&dev->mt76.lock);
wcid->tx_rate = mt76x2_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
wcid->tx_rate_set = true;
spin_unlock_bh(&dev->mt76.lock);
}
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
memset(txwi, 0, sizeof(*txwi));
if (wcid)
txwi->wcid = wcid->idx;
else
txwi->wcid = 0xff;
txwi->pktid = 1;
spin_lock_bh(&dev->mt76.lock);
if (wcid && (rate->idx < 0 || !rate->count)) {
txwi->rate = wcid->tx_rate;
max_txpwr_adj = wcid->max_txpwr_adj;
nss = wcid->tx_rate_nss;
} else {
txwi->rate = mt76x2_mac_tx_rate_val(dev, rate, &nss);
max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, rate);
}
spin_unlock_bh(&dev->mt76.lock);
txpwr_adj = mt76x2_tx_get_txpwr_adj(dev, dev->txpower_conf,
max_txpwr_adj);
txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
if (mt76xx_rev(dev) >= MT76XX_REV_E4)
txwi->txstream = 0x13;
else if (mt76xx_rev(dev) >= MT76XX_REV_E3 &&
!(txwi->rate & cpu_to_le16(rate_ht_mask)))
txwi->txstream = 0x93;
if (info->flags & IEEE80211_TX_CTL_LDPC)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
txwi_flags |= MT_TXWI_FLAGS_MMPS;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->pktid |= MT_TXWI_PKTID_PROBE;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
ba_size <<= sta->ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size - 1);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
txwi_flags |= MT_TXWI_FLAGS_AMPDU |
FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
sta->ht_cap.ampdu_density);
}
txwi->flags |= cpu_to_le16(txwi_flags);
txwi->len_ctl = cpu_to_le16(skb->len);
}
static void mt76x2_remove_hdr_pad(struct sk_buff *skb)
{
int len = ieee80211_get_hdrlen_from_skb(skb);
memmove(skb->data + 2, skb->data, len);
skb_pull(skb, 2);
}
int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
void *rxi)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct mt76x2_rxwi *rxwi = rxi;
u32 ctl = le32_to_cpu(rxwi->ctl);
u16 rate = le16_to_cpu(rxwi->rate);
int len;
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
mt76x2_remove_hdr_pad(skb);
if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
status->flag |= RX_FLAG_DECRYPTED;
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
}
len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
if (WARN_ON_ONCE(len > skb->len))
return -EINVAL;
pskb_trim(skb, len);
status->chains = BIT(0) | BIT(1);
status->chain_signal[0] = mt76x2_phy_get_rssi(dev, rxwi->rssi[0], 0);
status->chain_signal[1] = mt76x2_phy_get_rssi(dev, rxwi->rssi[1], 1);
status->signal = max(status->chain_signal[0], status->chain_signal[1]);
status->freq = dev->mt76.chandef.chan->center_freq;
status->band = dev->mt76.chandef.chan->band;
mt76x2_mac_process_rate(status, rate);
return 0;
}
static void
mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
enum nl80211_band band)
{
u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
txrate->idx = 0;
txrate->flags = 0;
txrate->count = 1;
switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) {
case MT_PHY_TYPE_OFDM:
if (band == NL80211_BAND_2GHZ)
idx += 4;
txrate->idx = idx;
return;
case MT_PHY_TYPE_CCK:
if (idx >= 8)
idx -= 8;
txrate->idx = idx;
return;
case MT_PHY_TYPE_HT_GF:
txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
/* fall through */
case MT_PHY_TYPE_HT:
txrate->flags |= IEEE80211_TX_RC_MCS;
txrate->idx = idx;
break;
case MT_PHY_TYPE_VHT:
txrate->flags |= IEEE80211_TX_RC_VHT_MCS;
txrate->idx = idx;
break;
default:
WARN_ON(1);
return;
}
switch (FIELD_GET(MT_RXWI_RATE_BW, rate)) {
case MT_PHY_BW_20:
break;
case MT_PHY_BW_40:
txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
break;
case MT_PHY_BW_80:
txrate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
break;
default:
WARN_ON(1);
break;
}
if (rate & MT_RXWI_RATE_SGI)
txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
}
static void
mt76x2_mac_fill_tx_status(struct mt76x2_dev *dev,
struct ieee80211_tx_info *info,
struct mt76x2_tx_status *st, int n_frames)
{
struct ieee80211_tx_rate *rate = info->status.rates;
int cur_idx, last_rate;
int i;
if (!n_frames)
return;
last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
mt76x2_mac_process_tx_rate(&rate[last_rate], st->rate,
dev->mt76.chandef.chan->band);
if (last_rate < IEEE80211_TX_MAX_RATES - 1)
rate[last_rate + 1].idx = -1;
cur_idx = rate[last_rate].idx + st->retry;
for (i = 0; i <= last_rate; i++) {
rate[i].flags = rate[last_rate].flags;
rate[i].idx = max_t(int, 0, cur_idx - i);
rate[i].count = 1;
}
if (last_rate > 0)
rate[last_rate - 1].count = st->retry + 1 - last_rate;
info->status.ampdu_len = n_frames;
info->status.ampdu_ack_len = st->success ? n_frames : 0;
if (st->pktid & MT_TXWI_PKTID_PROBE)
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
if (st->aggr)
info->flags |= IEEE80211_TX_CTL_AMPDU |
IEEE80211_TX_STAT_AMPDU;
if (!st->ack_req)
info->flags |= IEEE80211_TX_CTL_NO_ACK;
else if (st->success)
info->flags |= IEEE80211_TX_STAT_ACK;
}
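/*
 * Worked example (illustrative): the hardware reports only the final
 * rate and the total retry count, so the rate ladder handed to mac80211
 * is reconstructed. With st->retry = 5 and a final rate of MCS 2
 * (IEEE80211_TX_MAX_RATES == 4):
 *	last_rate = min(5, 3) = 3
 *	cur_idx   = 2 + 5     = 7
 *	rates     = MCS {7, 6, 5, 4}, counts = {1, 1, 3, 1}
 * which accounts for all retry + 1 = 6 transmission attempts.
 */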
static void
mt76x2_send_tx_status(struct mt76x2_dev *dev, struct mt76x2_tx_status *stat,
u8 *update)
{
struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL;
struct mt76_wcid *wcid = NULL;
struct mt76x2_sta *msta = NULL;
rcu_read_lock();
if (stat->wcid < ARRAY_SIZE(dev->wcid))
wcid = rcu_dereference(dev->wcid[stat->wcid]);
if (wcid) {
void *priv;
priv = msta = container_of(wcid, struct mt76x2_sta, wcid);
sta = container_of(priv, struct ieee80211_sta,
drv_priv);
}
if (msta && stat->aggr) {
u32 stat_val, stat_cache;
stat_val = stat->rate;
stat_val |= ((u32) stat->retry) << 16;
stat_cache = msta->status.rate;
stat_cache |= ((u32) msta->status.retry) << 16;
if (*update == 0 && stat_val == stat_cache &&
stat->wcid == msta->status.wcid && msta->n_frames < 32) {
msta->n_frames++;
goto out;
}
mt76x2_mac_fill_tx_status(dev, &info, &msta->status,
msta->n_frames);
msta->status = *stat;
msta->n_frames = 1;
*update = 0;
} else {
mt76x2_mac_fill_tx_status(dev, &info, stat, 1);
*update = 1;
}
ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
out:
rcu_read_unlock();
}
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq)
{
struct mt76x2_tx_status stat = {};
unsigned long flags;
u8 update = 1;
if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
return;
trace_mac_txstat_poll(dev);
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
u32 stat1, stat2;
spin_lock_irqsave(&dev->irq_lock, flags);
stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
if (!(stat1 & MT_TX_STAT_FIFO_VALID)) {
spin_unlock_irqrestore(&dev->irq_lock, flags);
break;
}
spin_unlock_irqrestore(&dev->irq_lock, flags);
stat.valid = 1;
stat.success = !!(stat1 & MT_TX_STAT_FIFO_SUCCESS);
stat.aggr = !!(stat1 & MT_TX_STAT_FIFO_AGGR);
stat.ack_req = !!(stat1 & MT_TX_STAT_FIFO_ACKREQ);
stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, stat1);
stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, stat1);
stat.retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
trace_mac_txstat_fetch(dev, &stat);
if (!irq) {
mt76x2_send_tx_status(dev, &stat, &update);
continue;
}
kfifo_put(&dev->txstatus_fifo, stat);
}
}
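/*
 * Note (derived from the loop above): in IRQ context the status words
 * are only pushed into txstatus_fifo and reported later by
 * mt76x2_mac_process_tx_status_fifo(); outside IRQ context they are
 * dispatched to mac80211 immediately. The loop also stops early when
 * the kfifo fills up, leaving the remaining entries in the hardware
 * FIFO for the next poll.
 */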
static void
mt76x2_mac_queue_txdone(struct mt76x2_dev *dev, struct sk_buff *skb,
void *txwi_ptr)
{
struct mt76x2_tx_info *txi = mt76x2_skb_tx_info(skb);
struct mt76x2_txwi *txwi = txwi_ptr;
mt76x2_mac_poll_tx_status(dev, false);
txi->tries = 0;
txi->jiffies = jiffies;
txi->wcid = txwi->wcid;
txi->pktid = txwi->pktid;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
mt76x2_tx_complete(dev, skb);
}
void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev)
{
struct mt76x2_tx_status stat;
u8 update = 1;
while (kfifo_get(&dev->txstatus_fifo, &stat))
mt76x2_send_tx_status(dev, &stat, &update);
}
void mt76x2_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush)
{
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
if (e->txwi)
mt76x2_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
else
dev_kfree_skb_any(e->skb);
}
static enum mt76x2_cipher_type
mt76x2_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
if (!key)
return MT_CIPHER_NONE;
if (key->keylen > 32)
return MT_CIPHER_NONE;
memcpy(key_data, key->key, key->keylen);
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
return MT_CIPHER_WEP40;
case WLAN_CIPHER_SUITE_WEP104:
return MT_CIPHER_WEP104;
case WLAN_CIPHER_SUITE_TKIP:
return MT_CIPHER_TKIP;
case WLAN_CIPHER_SUITE_CCMP:
return MT_CIPHER_AES_CCMP;
default:
return MT_CIPHER_NONE;
}
}
void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
{
struct mt76_wcid_addr addr = {};
u32 attr;
attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
mt76_wr(dev, MT_WCID_ATTR(idx), attr);
mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
if (idx >= 128)
return;
if (mac)
memcpy(addr.macaddr, mac, ETH_ALEN);
mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x2_cipher_type cipher;
u8 key_data[32];
u8 iv_data[8];
cipher = mt76x2_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
memset(iv_data, 0, sizeof(iv_data));
if (key) {
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP)
iv_data[3] |= 0x20;
}
mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
return 0;
}
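/*
 * Example of the IV template written above (a sketch): for CCMP with
 * key->keyidx == 1, iv_data[3] = (1 << 6) | 0x20 = 0x60, matching the
 * KeyID octet of the 802.11 IV: bits 7:6 carry the key index and
 * bit 5 is the Extended IV flag used by TKIP and CCMP.
 */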
int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
struct ieee80211_key_conf *key)
{
enum mt76x2_cipher_type cipher;
u8 key_data[32];
u32 val;
cipher = mt76x2_mac_get_key_info(key, key_data);
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
sizeof(key_data));
return 0;
}
static int
mt76_write_beacon(struct mt76x2_dev *dev, int offset, struct sk_buff *skb)
{
int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
struct mt76x2_txwi txwi;
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x2_txwi)))
return -ENOSPC;
mt76x2_mac_write_txwi(dev, &txwi, skb, NULL, NULL);
txwi.flags |= cpu_to_le16(MT_TXWI_FLAGS_TS);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);
mt76_wr_copy(dev, offset, skb->data, skb->len);
return 0;
}
static int
__mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 bcn_idx, struct sk_buff *skb)
{
int beacon_len = dev->beacon_offsets[1] - dev->beacon_offsets[0];
int beacon_addr = dev->beacon_offsets[bcn_idx];
int ret = 0;
int i;
/* Prevent corrupt transmissions during update */
mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
if (skb) {
ret = mt76_write_beacon(dev, beacon_addr, skb);
if (!ret)
dev->beacon_data_mask |= BIT(bcn_idx) &
dev->beacon_mask;
} else {
dev->beacon_data_mask &= ~BIT(bcn_idx);
for (i = 0; i < beacon_len; i += 4)
mt76_wr(dev, beacon_addr + i, 0);
}
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
return ret;
}
int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
struct sk_buff *skb)
{
bool force_update = false;
int bcn_idx = 0;
int i;
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
if (vif_idx == i) {
force_update = !!dev->beacons[i] ^ !!skb;
if (dev->beacons[i])
dev_kfree_skb(dev->beacons[i]);
dev->beacons[i] = skb;
__mt76x2_mac_set_beacon(dev, bcn_idx, skb);
} else if (force_update && dev->beacons[i]) {
__mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
}
bcn_idx += !!dev->beacons[i];
}
for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
if (!(dev->beacon_data_mask & BIT(i)))
break;
__mt76x2_mac_set_beacon(dev, i, NULL);
}
mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
bcn_idx - 1);
return 0;
}
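/*
 * Example (illustrative): beacon slots are kept contiguous. If only
 * vif 0 and vif 2 carry beacons, vif 0 maps to hardware slot 0 and
 * vif 2 to slot 1; the second loop clears slots no longer backed by a
 * beacon, and MT_MAC_BSSID_DW1_MBEACON_N is set to the last used slot
 * (bcn_idx - 1).
 */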
void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val)
{
u8 old_mask = dev->beacon_mask;
bool en;
u32 reg;
if (val) {
dev->beacon_mask |= BIT(vif_idx);
} else {
dev->beacon_mask &= ~BIT(vif_idx);
mt76x2_mac_set_beacon(dev, vif_idx, NULL);
}
if (!!old_mask == !!dev->beacon_mask)
return;
en = dev->beacon_mask;
mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
reg = MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN |
MT_BEACON_TIME_CFG_TIMER_EN;
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
if (en)
mt76x2_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
mt76x2_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
void mt76x2_update_channel(struct mt76_dev *mdev)
{
struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
struct mt76_channel_state *state;
u32 active, busy;
state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
busy = mt76_rr(dev, MT_CH_BUSY);
active = busy + mt76_rr(dev, MT_CH_IDLE);
spin_lock_bh(&dev->mt76.cc_lock);
state->cc_busy += busy;
state->cc_active += active;
spin_unlock_bh(&dev->mt76.cc_lock);
}
void mt76x2_mac_work(struct work_struct *work)
{
struct mt76x2_dev *dev = container_of(work, struct mt76x2_dev,
mac_work.work);
int i, idx;
mt76x2_update_channel(&dev->mt76);
for (i = 0, idx = 0; i < 16; i++) {
u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
dev->aggr_stats[idx++] += val & 0xffff;
dev->aggr_stats[idx++] += val >> 16;
}
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
}
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef __MT76x2_MAC_H
#define __MT76x2_MAC_H
#include "mt76.h"
struct mt76x2_dev;
struct mt76x2_sta;
struct mt76x2_vif;
struct mt76x2_txwi;
struct mt76x2_tx_status {
u8 valid:1;
u8 success:1;
u8 aggr:1;
u8 ack_req:1;
u8 wcid;
u8 pktid;
u8 retry;
u16 rate;
} __packed __aligned(2);
struct mt76x2_tx_info {
unsigned long jiffies;
u8 tries;
u8 wcid;
u8 pktid;
u8 retry;
};
struct mt76x2_rxwi {
__le32 rxinfo;
__le32 ctl;
__le16 tid_sn;
__le16 rate;
u8 rssi[4];
__le32 bbp_rxinfo[4];
};
#define MT_RXINFO_BA BIT(0)
#define MT_RXINFO_DATA BIT(1)
#define MT_RXINFO_NULL BIT(2)
#define MT_RXINFO_FRAG BIT(3)
#define MT_RXINFO_UNICAST BIT(4)
#define MT_RXINFO_MULTICAST BIT(5)
#define MT_RXINFO_BROADCAST BIT(6)
#define MT_RXINFO_MYBSS BIT(7)
#define MT_RXINFO_CRCERR BIT(8)
#define MT_RXINFO_ICVERR BIT(9)
#define MT_RXINFO_MICERR BIT(10)
#define MT_RXINFO_AMSDU BIT(11)
#define MT_RXINFO_HTC BIT(12)
#define MT_RXINFO_RSSI BIT(13)
#define MT_RXINFO_L2PAD BIT(14)
#define MT_RXINFO_AMPDU BIT(15)
#define MT_RXINFO_DECRYPT BIT(16)
#define MT_RXINFO_BSSIDX3 BIT(17)
#define MT_RXINFO_WAPI_KEY BIT(18)
#define MT_RXINFO_PN_LEN GENMASK(21, 19)
#define MT_RXINFO_SW_FTYPE0 BIT(22)
#define MT_RXINFO_SW_FTYPE1 BIT(23)
#define MT_RXINFO_PROBE_RESP BIT(24)
#define MT_RXINFO_BEACON BIT(25)
#define MT_RXINFO_DISASSOC BIT(26)
#define MT_RXINFO_DEAUTH BIT(27)
#define MT_RXINFO_ACTION BIT(28)
#define MT_RXINFO_TCP_SUM_ERR BIT(30)
#define MT_RXINFO_IP_SUM_ERR BIT(31)
#define MT_RXWI_CTL_WCID GENMASK(7, 0)
#define MT_RXWI_CTL_KEY_IDX GENMASK(9, 8)
#define MT_RXWI_CTL_BSS_IDX GENMASK(12, 10)
#define MT_RXWI_CTL_UDF GENMASK(15, 13)
#define MT_RXWI_CTL_MPDU_LEN GENMASK(29, 16)
#define MT_RXWI_CTL_EOF BIT(31)
#define MT_RXWI_TID GENMASK(3, 0)
#define MT_RXWI_SN GENMASK(15, 4)
#define MT_RXWI_RATE_INDEX GENMASK(5, 0)
#define MT_RXWI_RATE_LDPC BIT(6)
#define MT_RXWI_RATE_BW GENMASK(8, 7)
#define MT_RXWI_RATE_SGI BIT(9)
#define MT_RXWI_RATE_STBC BIT(10)
#define MT_RXWI_RATE_LDPC_EXSYM BIT(11)
#define MT_RXWI_RATE_PHY GENMASK(15, 13)
#define MT_RATE_INDEX_VHT_IDX GENMASK(3, 0)
#define MT_RATE_INDEX_VHT_NSS GENMASK(5, 4)
#define MT_TX_PWR_ADJ GENMASK(3, 0)
enum mt76x2_phy_bandwidth {
MT_PHY_BW_20,
MT_PHY_BW_40,
MT_PHY_BW_80,
};
#define MT_TXWI_FLAGS_FRAG BIT(0)
#define MT_TXWI_FLAGS_MMPS BIT(1)
#define MT_TXWI_FLAGS_CFACK BIT(2)
#define MT_TXWI_FLAGS_TS BIT(3)
#define MT_TXWI_FLAGS_AMPDU BIT(4)
#define MT_TXWI_FLAGS_MPDU_DENSITY GENMASK(7, 5)
#define MT_TXWI_FLAGS_TXOP GENMASK(9, 8)
#define MT_TXWI_FLAGS_NDPS BIT(10)
#define MT_TXWI_FLAGS_RTSBWSIG BIT(11)
#define MT_TXWI_FLAGS_NDP_BW GENMASK(13, 12)
#define MT_TXWI_FLAGS_SOUND BIT(14)
#define MT_TXWI_FLAGS_TX_RATE_LUT BIT(15)
#define MT_TXWI_ACK_CTL_REQ BIT(0)
#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
#define MT_TXWI_PKTID_PROBE BIT(7)
struct mt76x2_txwi {
__le16 flags;
__le16 rate;
u8 ack_ctl;
u8 wcid;
__le16 len_ctl;
__le32 iv;
__le32 eiv;
u8 aid;
u8 txstream;
u8 ctl2;
u8 pktid;
} __packed __aligned(4);
static inline struct mt76x2_tx_info *
mt76x2_skb_tx_info(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
return (void *) info->status.status_driver_data;
}
int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard);
int mt76x2_mac_start(struct mt76x2_dev *dev);
void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force);
void mt76x2_mac_resume(struct mt76x2_dev *dev);
void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr);
int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
void *rxi);
void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
void mt76x2_mac_wcid_setup(struct mt76x2_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
int mt76x2_mac_wcid_set_key(struct mt76x2_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76x2_mac_wcid_set_rate(struct mt76x2_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate);
void mt76x2_mac_wcid_set_drop(struct mt76x2_dev *dev, u8 idx, bool drop);
int mt76x2_mac_shared_key_setup(struct mt76x2_dev *dev, u8 vif_idx, u8 key_idx,
struct ieee80211_key_conf *key);
int mt76x2_mac_set_beacon(struct mt76x2_dev *dev, u8 vif_idx,
struct sk_buff *skb);
void mt76x2_mac_set_beacon_enable(struct mt76x2_dev *dev, u8 vif_idx, bool val);
void mt76x2_mac_poll_tx_status(struct mt76x2_dev *dev, bool irq);
void mt76x2_mac_process_tx_status_fifo(struct mt76x2_dev *dev);
void mt76x2_mac_work(struct work_struct *work);
#endif
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mt76x2.h"
static int
mt76x2_start(struct ieee80211_hw *hw)
{
struct mt76x2_dev *dev = hw->priv;
int ret;
mutex_lock(&dev->mutex);
ret = mt76x2_mac_start(dev);
if (ret)
goto out;
ret = mt76x2_phy_start(dev);
if (ret)
goto out;
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
mutex_unlock(&dev->mutex);
return ret;
}
static void
mt76x2_stop(struct ieee80211_hw *hw)
{
struct mt76x2_dev *dev = hw->priv;
mutex_lock(&dev->mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x2_stop_hardware(dev);
mutex_unlock(&dev->mutex);
}
static void
mt76x2_txq_init(struct mt76x2_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
if (!txq)
return;
mtxq = (struct mt76_txq *) txq->drv_priv;
if (txq->sta) {
struct mt76x2_sta *sta;
sta = (struct mt76x2_sta *) txq->sta->drv_priv;
mtxq->wcid = &sta->wcid;
} else {
struct mt76x2_vif *mvif;
mvif = (struct mt76x2_vif *) txq->vif->drv_priv;
mtxq->wcid = &mvif->group_wcid;
}
mt76_txq_init(&dev->mt76, txq);
}
static int
mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76x2_dev *dev = hw->priv;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
unsigned int idx = 0;
int ret = 0;
if (vif->addr[0] & BIT(1))
idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
/*
* Client mode typically only has one configurable BSSID register,
* which is used for bssidx=0. This is linked to the MAC address.
* Since mac80211 allows changing interface types, and we cannot
* force the use of the primary MAC address for a station mode
* interface, we need some other way of configuring a per-interface
* remote BSSID.
* The hardware provides an AP-Client feature, where bssidx 0-7 are
* used for AP mode and bssidx 8-15 for client mode.
* We shift the station interface bss index by 8 to force the
* hardware to recognize the BSSID.
* The resulting bssidx mismatch for unicast frames is ignored by hw.
*/
if (vif->type == NL80211_IFTYPE_STATION)
idx += 8;
mvif->idx = idx;
mvif->group_wcid.idx = 254 - idx;
mvif->group_wcid.hw_key_idx = -1;
mt76x2_txq_init(dev, vif->txq);
return ret;
}
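/*
 * Worked example (a sketch): with a primary MAC address starting in
 * 0x00 and a locally administered vif address starting in 0x0a,
 *	idx = 1 + (((0x00 ^ 0x0a) >> 2) & 7) = 3
 * A station interface then uses bssidx 3 + 8 = 11 and gets
 * group_wcid.idx = 254 - 11 = 243.
 */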
static void
mt76x2_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76x2_dev *dev = hw->priv;
mt76_txq_remove(&dev->mt76, vif->txq);
}
static int
mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef)
{
int ret;
mt76_set_channel(&dev->mt76);
tasklet_disable(&dev->pre_tbtt_tasklet);
cancel_delayed_work_sync(&dev->cal_work);
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
/* channel cycle counters read-and-clear */
mt76_rr(dev, MT_CH_IDLE);
mt76_rr(dev, MT_CH_BUSY);
mt76x2_dfs_init_params(dev);
mt76x2_mac_resume(dev);
tasklet_enable(&dev->pre_tbtt_tasklet);
return ret;
}
static int
mt76x2_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x2_dev *dev = hw->priv;
int ret = 0;
mutex_lock(&dev->mutex);
if (changed & IEEE80211_CONF_CHANGE_POWER) {
dev->txpower_conf = hw->conf.power_level * 2;
if (test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) {
mt76x2_phy_set_txpower(dev);
mt76x2_tx_set_txpwr_auto(dev, dev->txpower_conf);
}
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
ret = mt76x2_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
mutex_unlock(&dev->mutex);
return ret;
}
static void
mt76x2_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
{
struct mt76x2_dev *dev = hw->priv;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
dev->rxfilter &= ~(_hw); \
dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mutex_lock(&dev->mutex);
dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
MT_RX_FILTR_CFG_CTS |
MT_RX_FILTR_CFG_CFEND |
MT_RX_FILTR_CFG_CFACK |
MT_RX_FILTR_CFG_BA |
MT_RX_FILTR_CFG_CTRL_RSV);
MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
*total_flags = flags;
mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
mutex_unlock(&dev->mutex);
}
static void
mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed)
{
struct mt76x2_dev *dev = hw->priv;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
mutex_lock(&dev->mutex);
if (changed & BSS_CHANGED_BSSID)
mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
if (changed & BSS_CHANGED_BEACON_INT)
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
if (changed & BSS_CHANGED_BEACON_ENABLED) {
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_mac_set_beacon_enable(dev, mvif->idx,
info->enable_beacon);
tasklet_enable(&dev->pre_tbtt_tasklet);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
int slottime = info->use_short_slot ? 9 : 20;
dev->slottime = slottime;
mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
}
mutex_unlock(&dev->mutex);
}
static int
mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76x2_dev *dev = hw->priv;
struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
int ret = 0;
int idx = 0;
int i;
mutex_lock(&dev->mutex);
idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
if (idx < 0) {
ret = -ENOSPC;
goto out;
}
msta->wcid.idx = idx;
msta->wcid.hw_key_idx = -1;
mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
mt76x2_mac_wcid_set_drop(dev, idx, false);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76x2_txq_init(dev, sta->txq[i]);
rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
out:
mutex_unlock(&dev->mutex);
return ret;
}
static int
mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76x2_dev *dev = hw->priv;
struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
int idx = msta->wcid.idx;
int i;
mutex_lock(&dev->mutex);
rcu_assign_pointer(dev->wcid[idx], NULL);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(&dev->mt76, sta->txq[i]);
mt76x2_mac_wcid_set_drop(dev, idx, true);
mt76_wcid_free(dev->wcid_mask, idx);
mt76x2_mac_wcid_setup(dev, idx, 0, NULL);
mutex_unlock(&dev->mutex);
return 0;
}
static void
mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
{
struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
struct mt76x2_dev *dev = hw->priv;
int idx = msta->wcid.idx;
switch (cmd) {
case STA_NOTIFY_SLEEP:
mt76x2_mac_wcid_set_drop(dev, idx, true);
mt76_stop_tx_queues(&dev->mt76, sta, true);
break;
case STA_NOTIFY_AWAKE:
mt76x2_mac_wcid_set_drop(dev, idx, false);
break;
}
}
static int
mt76x2_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct mt76x2_dev *dev = hw->priv;
struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
struct mt76x2_sta *msta;
struct mt76_wcid *wcid;
int idx = key->keyidx;
int ret;
/*
* The hardware does not support per-STA RX GTK, fall back
* to software mode for these.
*/
if ((vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT) &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
msta = sta ? (struct mt76x2_sta *) sta->drv_priv : NULL;
wcid = msta ? &msta->wcid : &mvif->group_wcid;
if (cmd == SET_KEY) {
key->hw_key_idx = wcid->idx;
wcid->hw_key_idx = idx;
} else {
if (idx == wcid->hw_key_idx)
wcid->hw_key_idx = -1;
key = NULL;
}
if (!msta) {
if (key || wcid->hw_key_idx == idx) {
ret = mt76x2_mac_wcid_set_key(dev, wcid->idx, key);
if (ret)
return ret;
}
return mt76x2_mac_shared_key_setup(dev, mvif->idx, idx, key);
}
return mt76x2_mac_wcid_set_key(dev, msta->wcid.idx, key);
}
static int
mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct mt76x2_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10;
u32 val;
if (params->cw_min)
cw_min = fls(params->cw_min);
if (params->cw_max)
cw_max = fls(params->cw_max);
val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
mt76_wr(dev, MT_EDCA_CFG_AC(queue), val);
val = mt76_rr(dev, MT_WMM_TXOP(queue));
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
mt76_wr(dev, MT_WMM_TXOP(queue), val);
val = mt76_rr(dev, MT_WMM_AIFSN);
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
mt76_wr(dev, MT_WMM_AIFSN, val);
val = mt76_rr(dev, MT_WMM_CWMIN);
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
mt76_wr(dev, MT_WMM_CWMIN, val);
val = mt76_rr(dev, MT_WMM_CWMAX);
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
mt76_wr(dev, MT_WMM_CWMAX, val);
return 0;
}
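/*
 * Example (illustrative): the hardware stores contention windows as
 * exponents, so fls() converts the mac80211 values. For
 * params->cw_min = 15 and params->cw_max = 1023:
 *	cw_min = fls(15)   = 4    (window 2^4  - 1 = 15)
 *	cw_max = fls(1023) = 10   (window 2^10 - 1 = 1023)
 */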
static void
mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac)
{
struct mt76x2_dev *dev = hw->priv;
tasklet_disable(&dev->pre_tbtt_tasklet);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
static void
mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76x2_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
tasklet_enable(&dev->pre_tbtt_tasklet);
mt76_txq_schedule_all(&dev->mt76);
}
static void
mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
}
static int
mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
{
struct mt76x2_dev *dev = hw->priv;
*dbm = dev->txpower_cur / 2;
return 0;
}
static int
mt76x2_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
struct ieee80211_sta *sta = params->sta;
struct mt76x2_dev *dev = hw->priv;
struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
struct ieee80211_txq *txq = sta->txq[params->tid];
struct mt76_txq *mtxq;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
if (!txq)
return -EINVAL;
mtxq = (struct mt76_txq *) txq->drv_priv;
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
BIT(16 + tid));
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = *ssn << 4;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
return 0;
}
static void
mt76x2_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76x2_dev *dev = hw->priv;
struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
struct ieee80211_tx_rate rate = {};
if (!rates)
return;
rate.idx = rates->rate[0].idx;
rate.flags = rates->rate[0].flags;
mt76x2_mac_wcid_set_rate(dev, &msta->wcid, &rate);
msta->wcid.max_txpwr_adj = mt76x2_tx_get_max_txpwr_adj(dev, &rate);
}
static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class)
{
struct mt76x2_dev *dev = hw->priv;
mutex_lock(&dev->mutex);
dev->coverage_class = coverage_class;
mt76x2_set_tx_ackto(dev);
mutex_unlock(&dev->mutex);
}
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x2_tx,
.start = mt76x2_start,
.stop = mt76x2_stop,
.add_interface = mt76x2_add_interface,
.remove_interface = mt76x2_remove_interface,
.config = mt76x2_config,
.configure_filter = mt76x2_configure_filter,
.bss_info_changed = mt76x2_bss_info_changed,
.sta_add = mt76x2_sta_add,
.sta_remove = mt76x2_sta_remove,
.sta_notify = mt76x2_sta_notify,
.set_key = mt76x2_set_key,
.conf_tx = mt76x2_conf_tx,
.sw_scan_start = mt76x2_sw_scan,
.sw_scan_complete = mt76x2_sw_scan_complete,
.flush = mt76x2_flush,
.ampdu_action = mt76x2_ampdu_action,
.get_txpower = mt76x2_get_txpower,
.wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt76x2_sta_rate_tbl_update,
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x2_set_coverage_class,
.get_survey = mt76_get_survey,
};
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include "mt76x2.h"
#include "mt76x2_mcu.h"
#include "mt76x2_dma.h"
#include "mt76x2_eeprom.h"
struct mt76x2_fw_header {
__le32 ilm_len;
__le32 dlm_len;
__le16 build_ver;
__le16 fw_ver;
u8 pad[4];
char build_time[16];
};
struct mt76x2_patch_header {
char build_time[16];
char platform[4];
char hw_version[4];
char patch_version[4];
u8 pad[2];
};
static struct sk_buff *mt76x2_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return NULL;
memcpy(skb_put(skb, len), data, len);
return skb;
}
static struct sk_buff *
mt76x2_mcu_get_response(struct mt76x2_dev *dev, unsigned long expires)
{
unsigned long timeout;
if (!time_is_after_jiffies(expires))
return NULL;
timeout = expires - jiffies;
wait_event_timeout(dev->mcu.wait, !skb_queue_empty(&dev->mcu.res_q),
timeout);
return skb_dequeue(&dev->mcu.res_q);
}
static int
mt76x2_mcu_msg_send(struct mt76x2_dev *dev, struct sk_buff *skb,
enum mcu_cmd cmd)
{
unsigned long expires = jiffies + HZ;
int ret;
u8 seq;
if (!skb)
return -EINVAL;
mutex_lock(&dev->mcu.mutex);
seq = ++dev->mcu.msg_seq & 0xf;
if (!seq)
seq = ++dev->mcu.msg_seq & 0xf;
ret = mt76x2_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
if (ret)
goto out;
while (1) {
u32 *rxfce;
bool check_seq = false;
skb = mt76x2_mcu_get_response(dev, expires);
if (!skb) {
dev_err(dev->mt76.dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
break;
}
rxfce = (u32 *) skb->cb;
if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, *rxfce))
check_seq = true;
dev_kfree_skb(skb);
if (check_seq)
break;
}
out:
mutex_unlock(&dev->mcu.mutex);
return ret;
}
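/*
 * Note (derived from the code above): the 4-bit MCU sequence number
 * skips 0, so it cycles 1..15 and a zero MT_RX_FCE_INFO_CMD_SEQ in a
 * response can never match a pending request. Responses are matched
 * against the FCE info word stashed in skb->cb; mismatching responses
 * are dropped and the wait continues until the one-second timeout.
 */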
static int
mt76pci_load_rom_patch(struct mt76x2_dev *dev)
{
const struct firmware *fw = NULL;
struct mt76x2_patch_header *hdr;
bool rom_protect = !is_mt7612(dev);
int len, ret = 0;
__le32 *cur;
u32 patch_mask, patch_reg;
if (rom_protect && !mt76_poll(dev, MT_MCU_SEMAPHORE_03, 1, 1, 600)) {
dev_err(dev->mt76.dev,
"Could not get hardware semaphore for ROM PATCH\n");
return -ETIMEDOUT;
}
if (mt76xx_rev(dev) >= MT76XX_REV_E3) {
patch_mask = BIT(0);
patch_reg = MT_MCU_CLOCK_CTL;
} else {
patch_mask = BIT(1);
patch_reg = MT_MCU_COM_REG0;
}
if (rom_protect && (mt76_rr(dev, patch_reg) & patch_mask)) {
dev_info(dev->mt76.dev, "ROM patch already applied\n");
goto out;
}
ret = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
if (ret)
goto out;
if (!fw || !fw->data || fw->size <= sizeof(*hdr)) {
ret = -EIO;
dev_err(dev->mt76.dev, "Failed to load firmware\n");
goto out;
}
hdr = (struct mt76x2_patch_header *) fw->data;
dev_info(dev->mt76.dev, "ROM patch build: %.15s\n", hdr->build_time);
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ROM_PATCH_OFFSET);
cur = (__le32 *) (fw->data + sizeof(*hdr));
len = fw->size - sizeof(*hdr);
mt76_wr_copy(dev, MT_MCU_ROM_PATCH_ADDR, cur, len);
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
/* Trigger ROM */
mt76_wr(dev, MT_MCU_INT_LEVEL, 4);
if (!mt76_poll_msec(dev, patch_reg, patch_mask, patch_mask, 2000)) {
dev_err(dev->mt76.dev, "Failed to load ROM patch\n");
ret = -ETIMEDOUT;
}
out:
/* release semaphore */
if (rom_protect)
mt76_wr(dev, MT_MCU_SEMAPHORE_03, 1);
release_firmware(fw);
return ret;
}
static int
mt76pci_load_firmware(struct mt76x2_dev *dev)
{
const struct firmware *fw;
const struct mt76x2_fw_header *hdr;
int i, len, ret;
__le32 *cur;
u32 offset, val;
ret = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
if (ret)
return ret;
if (!fw || !fw->data || fw->size < sizeof(*hdr))
goto error;
hdr = (const struct mt76x2_fw_header *) fw->data;
len = sizeof(*hdr);
len += le32_to_cpu(hdr->ilm_len);
len += le32_to_cpu(hdr->dlm_len);
if (fw->size != len)
goto error;
val = le16_to_cpu(hdr->fw_ver);
dev_info(dev->mt76.dev, "Firmware Version: %d.%d.%02d\n",
(val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf);
val = le16_to_cpu(hdr->build_ver);
dev_info(dev->mt76.dev, "Build: %x\n", val);
dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
cur = (__le32 *) (fw->data + sizeof(*hdr));
len = le32_to_cpu(hdr->ilm_len);
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_ILM_OFFSET);
mt76_wr_copy(dev, MT_MCU_ILM_ADDR, cur, len);
cur += len / sizeof(*cur);
len = le32_to_cpu(hdr->dlm_len);
if (mt76xx_rev(dev) >= MT76XX_REV_E3)
offset = MT_MCU_DLM_ADDR_E3;
else
offset = MT_MCU_DLM_ADDR;
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, MT_MCU_DLM_OFFSET);
mt76_wr_copy(dev, offset, cur, len);
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_2);
if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
/* trigger firmware */
mt76_wr(dev, MT_MCU_INT_LEVEL, 2);
for (i = 200; i > 0; i--) {
val = mt76_rr(dev, MT_MCU_COM_REG0);
if (val & 1)
break;
msleep(10);
}
if (!i) {
dev_err(dev->mt76.dev, "Firmware failed to start\n");
release_firmware(fw);
return -ETIMEDOUT;
}
dev_info(dev->mt76.dev, "Firmware running!\n");
release_firmware(fw);
return ret;
error:
dev_err(dev->mt76.dev, "Invalid firmware\n");
release_firmware(fw);
return -ENOENT;
}
static int
mt76x2_mcu_function_select(struct mt76x2_dev *dev, enum mcu_function func,
u32 val)
{
struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(func),
.value = cpu_to_le32(val),
};
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
return mt76x2_mcu_msg_send(dev, skb, CMD_FUN_SET_OP);
}
int mt76x2_mcu_load_cr(struct mt76x2_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
struct sk_buff *skb;
struct {
u8 cr_mode;
u8 temp;
u8 ch;
u8 _pad0;
__le32 cfg;
} __packed __aligned(4) msg = {
.cr_mode = type,
.temp = temp_level,
.ch = channel,
};
u32 val;
val = BIT(31);
val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
val |= (mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
msg.cfg = cpu_to_le32(val);
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
return mt76x2_mcu_msg_send(dev, skb, CMD_LOAD_CR);
}
int mt76x2_mcu_set_channel(struct mt76x2_dev *dev, u8 channel, u8 bw,
u8 bw_index, bool scan)
{
struct sk_buff *skb;
struct {
u8 idx;
u8 scan;
u8 bw;
u8 _pad0;
__le16 chainmask;
u8 ext_chan;
u8 _pad1;
} __packed __aligned(4) msg = {
.idx = channel,
.scan = scan,
.bw = bw,
.chainmask = cpu_to_le16(dev->chainmask),
};
/* first set the channel without the extension channel info */
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
usleep_range(5000, 10000);
msg.ext_chan = 0xe0 + bw_index;
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
return mt76x2_mcu_msg_send(dev, skb, CMD_SWITCH_CHANNEL_OP);
}
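/*
 * Note (a sketch of the intent, based on the in-line comment): the
 * switch is issued twice: first without extension-channel information
 * so the MCU tunes the primary channel, then again after a short delay
 * with ext_chan = 0xe0 + bw_index to program the 40/80 MHz extension
 * layout.
 */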
int mt76x2_mcu_set_radio_state(struct mt76x2_dev *dev, bool on)
{
struct sk_buff *skb;
struct {
__le32 mode;
__le32 level;
} __packed __aligned(4) msg = {
.mode = cpu_to_le32(on ? RADIO_ON : RADIO_OFF),
.level = cpu_to_le32(0),
};
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
return mt76x2_mcu_msg_send(dev, skb, CMD_POWER_SAVING_OP);
}
int mt76x2_mcu_calibrate(struct mt76x2_dev *dev, enum mcu_calibration type,
u32 param)
{
struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(type),
.value = cpu_to_le32(param),
};
int ret;
mt76_clear(dev, MT_MCU_COM_REG0, BIT(31));
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
ret = mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
if (ret)
return ret;
if (WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
BIT(31), BIT(31), 100)))
return -ETIMEDOUT;
return 0;
}
int mt76x2_mcu_tssi_comp(struct mt76x2_dev *dev,
struct mt76x2_tssi_comp *tssi_data)
{
struct sk_buff *skb;
struct {
__le32 id;
struct mt76x2_tssi_comp data;
} __packed __aligned(4) msg = {
.id = cpu_to_le32(MCU_CAL_TSSI_COMP),
.data = *tssi_data,
};
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
return mt76x2_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP);
}
int mt76x2_mcu_init_gain(struct mt76x2_dev *dev, u8 channel, u32 gain,
bool force)
{
struct sk_buff *skb;
struct {
__le32 channel;
__le32 gain_val;
} __packed __aligned(4) msg = {
.channel = cpu_to_le32(channel),
.gain_val = cpu_to_le32(gain),
};
if (force)
msg.channel |= cpu_to_le32(BIT(31));
skb = mt76x2_mcu_msg_alloc(&msg, sizeof(msg));
return mt76x2_mcu_msg_send(dev, skb, CMD_INIT_GAIN_OP);
}
int mt76x2_mcu_init(struct mt76x2_dev *dev)
{
int ret;
mutex_init(&dev->mcu.mutex);
ret = mt76pci_load_rom_patch(dev);
if (ret)
return ret;
ret = mt76pci_load_firmware(dev);
if (ret)
return ret;
mt76x2_mcu_function_select(dev, Q_SELECT, 1);
return 0;
}
int mt76x2_mcu_cleanup(struct mt76x2_dev *dev)
{
struct sk_buff *skb;
mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
while ((skb = skb_dequeue(&dev->mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
}
This diff has been collapsed. (4 files)
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "mt76x2_trace.h"
#endif
This diff has been collapsed. (2 files)