Commit ab1ebc95 authored by David S. Miller

Merge branch 'for-davem' of...

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6
......@@ -290,7 +290,7 @@ struct mib_mac_mgmt {
u8 res;
u8 multi_domain_capability_implemented;
u8 multi_domain_capability_enabled;
u8 country_string[3];
u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
u8 reserved[3];
} __packed;
......
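For reference, IEEE80211_COUNTRY_STRING_LEN (also used in the ipw2x00 and libertas hunks further down) comes from include/linux/ieee80211.h; at the time of this merge it is simply
#define IEEE80211_COUNTRY_STRING_LEN	3
so the structure layout does not change, the array size is just spelled with the named constant instead of a bare 3.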
......@@ -93,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
mem = ioremap_nocache(res->start, res->end - res->start + 1);
mem = ioremap_nocache(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
......
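The two ioremap_nocache() lines above compute the same length; resource_size() is the generic helper from include/linux/ioport.h that encapsulates the inclusive-range arithmetic:
static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}
Using the helper avoids re-deriving the off-by-one-prone "end - start + 1" in every driver (the same cleanup is applied to ath9k's ahb.c further down in this merge).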
......@@ -513,7 +513,7 @@ enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
AR5K_TX_QUEUE_ID_DATA_MAX = 4, /*IEEE80211_TX_QUEUE_DATA4*/
AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
......
......@@ -442,19 +442,9 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
return ath5k_reset(sc, chan, true);
}
struct ath_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
u8 active_mac[ETH_ALEN]; /* first active MAC */
bool need_set_hw_addr;
bool found_active;
bool any_assoc;
enum nl80211_iftype opmode;
};
static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath_vif_iter_data *iter_data = data;
struct ath5k_vif_iter_data *iter_data = data;
int i;
struct ath5k_vif *avf = (void *)vif->drv_priv;
......@@ -484,9 +474,12 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
*/
if (avf->opmode == NL80211_IFTYPE_AP)
iter_data->opmode = NL80211_IFTYPE_AP;
else
else {
if (avf->opmode == NL80211_IFTYPE_STATION)
iter_data->n_stas++;
if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
iter_data->opmode = avf->opmode;
}
}
void
......@@ -494,7 +487,8 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath5k_hw_common(sc->ah);
struct ath_vif_iter_data iter_data;
struct ath5k_vif_iter_data iter_data;
u32 rfilt;
/*
* Use the hardware MAC address as reference, the hardware uses it
......@@ -505,12 +499,13 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
iter_data.found_active = false;
iter_data.need_set_hw_addr = true;
iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
iter_data.n_stas = 0;
if (vif)
ath_vif_iter(&iter_data, vif->addr, vif);
ath5k_vif_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
......@@ -528,20 +523,19 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
if (ath5k_hw_hasbssidmask(sc->ah))
ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
}
void
ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = sc->ah;
u32 rfilt;
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
* Enabling PROMISC appears to fix that problem.
*/
sc->filter_flags |= AR5K_RX_FILTER_PROM;
}
/* configure rx filter */
rfilt = sc->filter_flags;
ath5k_hw_set_rx_filter(ah, rfilt);
ath5k_hw_set_rx_filter(sc->ah, rfilt);
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
ath5k_update_bssid_mask_and_opmode(sc, vif);
}
static inline int
......@@ -1117,7 +1111,7 @@ ath5k_rx_start(struct ath5k_softc *sc)
spin_unlock_bh(&sc->rxbuflock);
ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
ath5k_mode_setup(sc, NULL); /* set filters, etc. */
ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
return 0;
......@@ -2923,13 +2917,13 @@ ath5k_deinit_softc(struct ath5k_softc *sc)
bool
ath_any_vif_assoc(struct ath5k_softc *sc)
{
struct ath_vif_iter_data iter_data;
struct ath5k_vif_iter_data iter_data;
iter_data.hw_macaddr = NULL;
iter_data.any_assoc = false;
iter_data.need_set_hw_addr = false;
iter_data.found_active = true;
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
return iter_data.any_assoc;
}
......
......@@ -259,6 +259,19 @@ struct ath5k_softc {
struct survey_info survey; /* collected survey info */
};
struct ath5k_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
u8 active_mac[ETH_ALEN]; /* first active MAC */
bool need_set_hw_addr;
bool found_active;
bool any_assoc;
enum nl80211_iftype opmode;
int n_stas;
};
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
#define ath5k_hw_hasbssidmask(_ah) \
(ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
#define ath5k_hw_hasveol(_ah) \
......
......@@ -158,8 +158,7 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
ath5k_mode_setup(sc, vif);
ath5k_update_bssid_mask_and_opmode(sc, vif);
ret = 0;
end:
mutex_unlock(&sc->lock);
......@@ -381,6 +380,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
struct ath5k_softc *sc = hw->priv;
struct ath5k_hw *ah = sc->ah;
u32 mfilt[2], rfilt;
struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
mutex_lock(&sc->lock);
......@@ -454,6 +454,21 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
break;
}
iter_data.hw_macaddr = NULL;
iter_data.n_stas = 0;
iter_data.need_set_hw_addr = false;
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
&iter_data);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
* Enabling PROMISC appears to fix that problem.
*/
rfilt |= AR5K_RX_FILTER_PROM;
}
/* Set filters */
ath5k_hw_set_rx_filter(ah, rfilt);
......
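A minimal, hypothetical sketch of the interface-iteration pattern the ath5k hunks above rely on: mac80211 invokes the callback once per active interface under its own locking, and the driver tallies station interfaces so it can decide whether the promiscuous RX filter is needed. Everything below except the mac80211 API is made up for illustration; the iterator signature is the three-argument form used by this kernel (later kernels add an iter_flags argument).
#include <net/mac80211.h>

struct sta_count {
	int n_stas;
};

static void count_sta_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct sta_count *c = data;

	if (vif->type == NL80211_IFTYPE_STATION)
		c->n_stas++;
}

static bool need_promisc_filter(struct ieee80211_hw *hw)
{
	struct sta_count c = { .n_stas = 0 };

	ieee80211_iterate_active_interfaces_atomic(hw, count_sta_iter, &c);
	return c.n_stas > 1;	/* more than one STA: enable PROM filter */
}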
......@@ -75,7 +75,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
goto err_out;
}
mem = ioremap_nocache(res->start, res->end - res->start + 1);
mem = ioremap_nocache(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
......
......@@ -1020,28 +1020,29 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
static void ar9003_hw_do_getnf(struct ath_hw *ah,
int16_t nfarray[NUM_NF_READINGS])
{
int16_t nf;
nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
nfarray[0] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
nfarray[1] = sign_extend32(nf, 8);
#define AR_PHY_CH_MINCCA_PWR 0x1FF00000
#define AR_PHY_CH_MINCCA_PWR_S 20
#define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_CH_EXT_MINCCA_PWR_S 16
nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
nfarray[2] = sign_extend32(nf, 8);
if (!IS_CHAN_HT40(ah->curchan))
return;
int16_t nf;
int i;
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
nfarray[3] = sign_extend32(nf, 8);
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->rxchainmask & BIT(i)) {
nf = MS(REG_READ(ah, ah->nf_regs[i]),
AR_PHY_CH_MINCCA_PWR);
nfarray[i] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
nfarray[4] = sign_extend32(nf, 8);
if (IS_CHAN_HT40(ah->curchan)) {
u8 ext_idx = AR9300_MAX_CHAINS + i;
nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
nfarray[5] = sign_extend32(nf, 8);
nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
AR_PHY_CH_EXT_MINCCA_PWR);
nfarray[ext_idx] = sign_extend32(nf, 8);
}
}
}
}
static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
......
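Because the removed per-register reads and the new loop are interleaved in the hunk above, here is a cleaned-up reading of the new shape (names taken from the hunk; simplified, not the verbatim driver code): each chain enabled in rxchainmask gets its control-channel noise floor from ah->nf_regs[i], and in HT40 the extension-channel value from ah->nf_regs[AR9300_MAX_CHAINS + i], each sign-extended from a 9-bit register field.
static void ar9003_getnf_sketch(struct ath_hw *ah,
				int16_t nfarray[NUM_NF_READINGS])
{
	int16_t nf;
	int i;

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (!(ah->rxchainmask & BIT(i)))
			continue;

		/* control channel minimum CCA power for chain i */
		nf = MS(REG_READ(ah, ah->nf_regs[i]), AR_PHY_CH_MINCCA_PWR);
		nfarray[i] = sign_extend32(nf, 8);

		/* extension channel is only meaningful in HT40 */
		if (IS_CHAN_HT40(ah->curchan)) {
			u8 ext_idx = AR9300_MAX_CHAINS + i;

			nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
				AR_PHY_CH_EXT_MINCCA_PWR);
			nfarray[ext_idx] = sign_extend32(nf, 8);
		}
	}
}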
......@@ -15,6 +15,7 @@
*/
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include "ath9k.h"
......@@ -30,6 +31,19 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file)
return 0;
}
static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
u8 *buf = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file)
{
vfree(file->private_data);
return 0;
}
#ifdef CONFIG_ATH_DEBUG
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
......@@ -548,10 +562,10 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("hw-tx-proc-desc: ", txprocdesc);
len += snprintf(buf + len, size - len,
"%s%11p%11p%10p%10p\n", "txq-memory-address:",
&(sc->tx.txq_map[WME_AC_BE]),
&(sc->tx.txq_map[WME_AC_BK]),
&(sc->tx.txq_map[WME_AC_VI]),
&(sc->tx.txq_map[WME_AC_VO]));
sc->tx.txq_map[WME_AC_BE],
sc->tx.txq_map[WME_AC_BK],
sc->tx.txq_map[WME_AC_VI],
sc->tx.txq_map[WME_AC_VO]);
if (len >= size)
goto done;
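The %p change above is subtle: tx.txq_map holds pointers to the TX queues, so the old &(sc->tx.txq_map[...]) printed the address of the array slot rather than the queue itself. A hypothetical illustration of the difference:
#include <linux/kernel.h>

struct ath_txq;				/* opaque for the example */

static void show_txq_pointer(struct ath_txq *txq_map[4])
{
	/* &txq_map[0] is where the pointer is stored;
	 * txq_map[0] is the queue the debugfs line means to report */
	pr_info("slot %p holds queue %p\n", &txq_map[0], txq_map[0]);
}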
......@@ -1027,6 +1041,42 @@ static const struct file_operations fops_regval = {
.llseek = default_llseek,
};
#define REGDUMP_LINE_SIZE 20
static int open_file_regdump(struct inode *inode, struct file *file)
{
struct ath_softc *sc = inode->i_private;
unsigned int len = 0;
u8 *buf;
int i;
unsigned long num_regs, regdump_len, max_reg_offset;
max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
num_regs = max_reg_offset / 4 + 1;
regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
buf = vmalloc(regdump_len);
if (!buf)
return -ENOMEM;
ath9k_ps_wakeup(sc);
for (i = 0; i < num_regs; i++)
len += scnprintf(buf + len, regdump_len - len,
"0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
ath9k_ps_restore(sc);
file->private_data = buf;
return 0;
}
static const struct file_operations fops_regdump = {
.open = open_file_regdump,
.read = ath9k_debugfs_read_buf,
.release = ath9k_debugfs_release_buf,
.owner = THIS_MODULE,
.llseek = default_llseek,/* read accesses f_pos */
};
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
......@@ -1091,6 +1141,10 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca))
goto err;
if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy,
sc, &fops_regdump))
goto err;
sc->debug.regidx = 0;
return 0;
err:
......
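The new regdump file follows the "snapshot at open, serve with simple_read_from_buffer(), free at release" debugfs pattern added earlier in this hunk. The sizing works out because each dumped line, "0x%06x 0x%08x\n", is 2+6+1+2+8+1 = 20 bytes, i.e. REGDUMP_LINE_SIZE, and the extra +1 leaves room for the NUL that scnprintf() appends. An illustrative compile-time check (not part of the commit):
#include <linux/kernel.h>

static inline void regdump_line_size_check(void)
{
	BUILD_BUG_ON(sizeof("0x000000 0x00000000\n") - 1 != REGDUMP_LINE_SIZE);
}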
......@@ -92,7 +92,7 @@ config B43_PHY_N
---help---
Support for the N-PHY.
This enables support for devices with N-PHY revision up to 2.
This enables support for devices with N-PHY.
Say N if you expect high stability and performance. Saying Y will not
affect other devices support and may provide support for basic needs.
......
......@@ -1168,23 +1168,98 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
/* PHY rev 0, 1, 2 */
u8 i, j;
u8 code;
u16 tmp;
u8 rfseq_events[3] = { 6, 8, 7 };
u8 rfseq_delays[3] = { 10, 30, 1 };
/* TODO: for PHY >= 3
s8 *lna1_gain, *lna2_gain;
u8 *gain_db, *gain_bits;
u16 *rfseq_init;
/* PHY rev >= 3 */
bool ghz5;
bool ext_lna;
u16 rssi_gain;
struct nphy_gain_ctl_workaround_entry *e;
u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
*/
u8 rfseq_events[3] = { 6, 8, 7 };
u8 rfseq_delays[3] = { 10, 30, 1 };
if (dev->phy.rev >= 3) {
/* TODO */
/* Prepare values */
ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
& B43_NPHY_BANDCTL_5GHZ;
ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
if (ghz5 && dev->phy.rev >= 5)
rssi_gain = 0x90;
else
rssi_gain = 0x50;
b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
/* Set Clip 2 detect */
b43_phy_set(dev, B43_NPHY_C1_CGAINI,
B43_NPHY_C1_CGAINI_CL2DETECT);
b43_phy_set(dev, B43_NPHY_C2_CGAINI,
B43_NPHY_C2_CGAINI_CL2DETECT);
b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
rssi_gain);
b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
rssi_gain);
b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
0x17);
b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
b43_phy_write(dev, 0x2A7, e->init_gain);
b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
e->rfseq_init);
b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
/* TODO: check defines; they do not match the variable names */
b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
b43_phy_write(dev, 0x2A9, e->cliphi_gain);
b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
b43_phy_write(dev, 0x2AB, e->clipmd_gain);
b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
b43_phy_write(dev, 0x2AD, e->cliplo_gain);
b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
} else {
/* Set Clip 2 detect */
b43_phy_set(dev, B43_NPHY_C1_CGAINI,
......@@ -1308,6 +1383,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
u16 tmp16;
u32 tmp32;
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
b43_nphy_classifier(dev, 1, 0);
else
......@@ -1320,7 +1398,82 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
if (dev->phy.rev >= 3) {
tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
tmp32 &= 0xffffff;
b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
b43_phy_write(dev, 0x2AE, 0x000C);
/* TODO */
tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
0x2 : 0x9C40;
b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
b43_nphy_gain_ctrl_workarounds(dev);
b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
/* TODO */
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
(bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
tmp32 = 0x00088888;
else
tmp32 = 0x88888888;
b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
if (dev->phy.rev == 4 &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
0x70);
b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
0x70);
}
b43_phy_write(dev, 0x224, 0x039C);
b43_phy_write(dev, 0x225, 0x0357);
b43_phy_write(dev, 0x226, 0x0317);
b43_phy_write(dev, 0x227, 0x02D7);
b43_phy_write(dev, 0x228, 0x039C);
b43_phy_write(dev, 0x229, 0x0357);
b43_phy_write(dev, 0x22A, 0x0317);
b43_phy_write(dev, 0x22B, 0x02D7);
b43_phy_write(dev, 0x22C, 0x039C);
b43_phy_write(dev, 0x22D, 0x0357);
b43_phy_write(dev, 0x22E, 0x0317);
b43_phy_write(dev, 0x22F, 0x02D7);
} else {
if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
nphy->band5g_pwrgain) {
......@@ -3878,10 +4031,14 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
{
b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
on ? 0 : 0x7FFF);
u16 val = on ? 0 : 0x7FFF;
if (dev->phy.rev >= 3)
b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val);
}
static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
......
......@@ -2709,6 +2709,79 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
};
struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
{ /* 2GHz */
{ /* PHY rev 3 */
{ 7, 11, 16, 23 },
{ -5, 6, 10, 14 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x627E,
{ 0x613F, 0x613F, 0x613F, 0x613F },
0x107E, 0x0066, 0x0074,
0x18, 0x18, 0x18,
0x020D, 0x5,
},
{ /* PHY rev 4 */
{ 8, 12, 17, 25 },
{ -5, 6, 10, 14 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x527E,
{ 0x513F, 0x513F, 0x513F, 0x513F },
0x007E, 0x0066, 0x0074,
0x18, 0x18, 0x18,
0x01A1, 0x5,
},
{ /* PHY rev 5+ */
{ 9, 13, 18, 26 },
{ -3, 7, 11, 16 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x427E, /* invalid for external LNA! */
{ 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
0x1076, 0x0066, 0x106A,
0xC, 0xC, 0xC,
0x01D0, 0x5,
},
},
{ /* 5GHz */
{ /* PHY rev 3 */
{ 7, 11, 17, 23 },
{ -6, 2, 6, 10 },
{ 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 },
{ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 },
0x52DE,
{ 0x516F, 0x516F, 0x516F, 0x516F },
0x00DE, 0x00CA, 0x00CC,
0x1E, 0x1E, 0x1E,
0x01A1, 25,
},
{ /* PHY rev 4 */
{ 8, 12, 18, 23 },
{ -5, 2, 6, 10 },
{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
0x629E,
{ 0x614F, 0x614F, 0x614F, 0x614F },
0x029E, 0x1084, 0x0086,
0x24, 0x24, 0x24,
0x0107, 25,
},
{ /* PHY rev 5+ */
{ 6, 10, 16, 21 },
{ -7, 0, 4, 8 },
{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
{ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
0x729E,
{ 0x714F, 0x714F, 0x714F, 0x714F },
0x029E, 0x2084, 0x2086,
0x24, 0x24, 0x24,
0x00A9, 25,
},
},
};
static inline void assert_ntab_array_sizes(void)
{
#undef check
......@@ -2957,3 +3030,33 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
/* Volatile tables */
/* TODO */
}
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
struct b43_wldev *dev, bool ghz5, bool ext_lna)
{
struct nphy_gain_ctl_workaround_entry *e;
u8 phy_idx;
B43_WARN_ON(dev->phy.rev < 3);
if (dev->phy.rev >= 5)
phy_idx = 2;
else if (dev->phy.rev == 4)
phy_idx = 1;
else
phy_idx = 0;
e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
/* Only one entry differs for external LNA, so instead of making the
* whole table twice as big, the hack lives here
*/
if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
e->rfseq_init[0] &= 0x0FFF;
e->rfseq_init[1] &= 0x0FFF;
e->rfseq_init[2] &= 0x0FFF;
e->rfseq_init[3] &= 0x0FFF;
e->init_gain &= 0x0FFF;
}
return e;
}
......@@ -35,6 +35,31 @@ struct nphy_rf_control_override_rev3 {
u8 val_addr1;
};
struct nphy_gain_ctl_workaround_entry {
s8 lna1_gain[4];
s8 lna2_gain[4];
u8 gain_db[10];
u8 gain_bits[10];
u16 init_gain;
u16 rfseq_init[4];
u16 cliphi_gain;
u16 clipmd_gain;
u16 cliplo_gain;
u16 crsmin;
u16 crsminl;
u16 crsminu;
u16 nbclip;
u16 wlclip;
};
/* Get entry with workaround values for gain ctl. Does not return NULL. */
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
struct b43_wldev *dev, bool ghz5, bool ext_lna);
/* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */
const struct b43_nphy_channeltab_entry_rev2 *
......
......@@ -961,7 +961,7 @@ struct ipw_country_channel_info {
struct ipw_country_info {
u8 id;
u8 length;
u8 country_str[3];
u8 country_str[IEEE80211_COUNTRY_STRING_LEN];
struct ipw_country_channel_info groups[7];
} __packed;
......
......@@ -533,9 +533,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
void iwlagn_temperature(struct iwl_priv *priv)
{
/* store temperature from statistics (in Celsius) */
priv->temperature =
le32_to_cpu(priv->_agn.statistics.general.common.temperature);
/* store temperature from correct statistics (in Celsius) */
priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ?
priv->_agn.statistics_bt.general.common.temperature :
priv->_agn.statistics.general.common.temperature);
iwl_tt_handler(priv);
}
......@@ -994,241 +995,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return -1;
}
/* Calc max signal level (dBm) among 3 possible receivers */
static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
{
return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
u32 decrypt_out = 0;
if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
RX_RES_STATUS_STATION_FOUND)
decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
/* packet was not encrypted */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_NONE)
return decrypt_out;
/* packet was encrypted with unknown alg */
if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
RX_RES_STATUS_SEC_TYPE_ERR)
return decrypt_out;
/* decryption was not done in HW */
if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
RX_MPDU_RES_STATUS_DEC_DONE_MSK)
return decrypt_out;
switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
case RX_RES_STATUS_SEC_TYPE_CCMP:
/* alg is CCM: check MIC only */
if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
/* Bad MIC */
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
case RX_RES_STATUS_SEC_TYPE_TKIP:
if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
/* Bad TTAK */
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
/* fall through if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
else
decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
break;
}
IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
decrypt_in, decrypt_out);
return decrypt_out;
}
static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u16 len,
u32 ampdu_status,
struct iwl_rx_mem_buffer *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
IWL_DEBUG_DROP_LIMIT(priv,
"Dropping packet while interface is not open.\n");
return;
}
/* In case of HW accelerated crypto and bad decryption, drop */
if (!priv->cfg->mod_params->sw_crypto &&
iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
return;
skb = dev_alloc_skb(128);
if (!skb) {
IWL_ERR(priv, "dev_alloc_skb failed\n");
return;
}
skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
iwl_update_stats(priv, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(priv->hw, skb);
priv->alloc_rxb_page--;
rxb->page = NULL;
}
/* Called for REPLY_RX (legacy ABG frames), or
* REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct ieee80211_hdr *header;
struct ieee80211_rx_status rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_phy_res *phy_res;
__le32 rx_pkt_status;
struct iwl_rx_mpdu_res_start *amsdu;
u32 len;
u32 ampdu_status;
u32 rate_n_flags;
/**
* REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
* REPLY_RX: physical layer info is in this buffer
* REPLY_RX_MPDU_CMD: physical layer info was sent in separate
* command and cached in priv->last_phy_res
*
* Here we set up local variables depending on which command is
* received.
*/
if (pkt->hdr.cmd == REPLY_RX) {
phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
+ phy_res->cfg_phy_cnt);
len = le16_to_cpu(phy_res->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
phy_res->cfg_phy_cnt + len);
ampdu_status = le32_to_cpu(rx_pkt_status);
} else {
if (!priv->_agn.last_phy_res_valid) {
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
return;
}
phy_res = &priv->_agn.last_phy_res;
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
len = le16_to_cpu(amsdu->byte_count);
rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
ampdu_status = iwlagn_translate_rx_status(priv,
le32_to_cpu(rx_pkt_status));
}
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
phy_res->cfg_phy_cnt);
return;
}
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
le32_to_cpu(rx_pkt_status));
return;
}
/* This will be used in several places later */
rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
rx_status.rate_idx =
iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
/* TSF isn't reliable. In order to allow smooth user experience,
* this W/A doesn't propagate it to the mac80211 */
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
/* Find max signal strength (dBm) among 3 antenna/receiver chains */
rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
iwl_dbg_log_rx_data_frame(priv, len, header);
IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
rx_status.signal, (unsigned long long)rx_status.mactime);
/*
* "antenna number"
*
* It seems that the antenna field in the phy flags value
* is actually a bit field. This is undefined by radiotap,
* it wants an actual antenna number but I always get "7"
* for most legacy frames I receive indicating that the
* same frame was received on all three RX chains.
*
* I think this field should be removed in favor of a
* new 802.11n radiotap field "RX chains" that is defined
* as a bitmask.
*/
rx_status.antenna =
(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
/* set the preamble flag if appropriate */
if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
rx_status.flag |= RX_FLAG_SHORTPRE;
/* Set up the HT phy flags */
if (rate_n_flags & RATE_MCS_HT_MSK)
rx_status.flag |= RX_FLAG_HT;
if (rate_n_flags & RATE_MCS_HT40_MSK)
rx_status.flag |= RX_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
}
/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
* This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
priv->_agn.last_phy_res_valid = true;
memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
sizeof(struct iwl_rx_phy_res));
}
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum ieee80211_band band,
......
......@@ -424,60 +424,6 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
return 0;
}
/******************************************************************************
*
* Generic RX handler implementations
*
******************************************************************************/
static void iwl_rx_reply_alive(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_alive_resp *palive;
struct delayed_work *pwork;
palive = &pkt->u.alive_frame;
IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
memcpy(&priv->card_alive_init,
&pkt->u.alive_frame,
sizeof(struct iwl_init_alive_resp));
pwork = &priv->init_alive_start;
} else {
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
memcpy(&priv->card_alive, &pkt->u.alive_frame,
sizeof(struct iwl_alive_resp));
pwork = &priv->alive_start;
}
/* We delay the ALIVE response by 5ms to
* give the HW RF Kill time to activate... */
if (palive->is_valid == UCODE_VALID_OK)
queue_delayed_work(priv->workqueue, pwork,
msecs_to_jiffies(5));
else {
IWL_WARN(priv, "%s uCode did not respond OK.\n",
(palive->ver_subtype == INITIALIZE_SUBTYPE) ?
"init" : "runtime");
/*
* If fail to load init uCode,
* let's try to load the init uCode again.
* We should not get into this situation, but if it
* does happen, we should not move on and loading "runtime"
* without proper calibrate the device.
*/
if (palive->ver_subtype == INITIALIZE_SUBTYPE)
priv->ucode_type = UCODE_NONE;
queue_work(priv->workqueue, &priv->restart);
}
}
static void iwl_bg_beacon_update(struct work_struct *work)
{
struct iwl_priv *priv =
......@@ -712,83 +658,6 @@ static void iwl_bg_ucode_trace(unsigned long data)
}
}
static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
"tsf:0x%.8x%.8x rate:%d\n",
status & TX_STATUS_MSK,
beacon->beacon_notify_hdr.failure_frame,
le32_to_cpu(beacon->ibss_mgr_status),
le32_to_cpu(beacon->high_tsf),
le32_to_cpu(beacon->low_tsf), rate);
#endif
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
queue_work(priv->workqueue, &priv->beacon_update);
}
/* Handle notification from uCode that card's power state is changing
* due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
(flags & SW_CARD_DISABLED) ? "Kill" : "On",
(flags & CT_CARD_DISABLED) ?
"Reached" : "Not reached");
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
CT_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
if (!(flags & RXON_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
if (flags & CT_CARD_DISABLED)
iwl_tt_enter_ct_kill(priv);
}
if (!(flags & CT_CARD_DISABLED))
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
set_bit(STATUS_RF_KILL_HW, &priv->status);
else
clear_bit(STATUS_RF_KILL_HW, &priv->status);
if (!(flags & RXON_CARD_DISABLED))
iwl_scan_cancel(priv);
if ((test_bit(STATUS_RF_KILL_HW, &status) !=
test_bit(STATUS_RF_KILL_HW, &priv->status)))
wiphy_rfkill_set_hw_state(priv->hw->wiphy,
test_bit(STATUS_RF_KILL_HW, &priv->status));
else
wake_up_interruptible(&priv->wait_command_queue);
}
static void iwl_bg_tx_flush(struct work_struct *work)
{
struct iwl_priv *priv =
......@@ -807,51 +676,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
}
}
/**
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
* to the host.
*
* This function chains into the hardware specific files for them to setup
* any hardware specific handlers as well.
*/
static void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic
* statistics notifications (after received beacons) from the uCode.
*/
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
iwl_setup_rx_scan_handlers(priv);
/* status change handler */
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
iwl_rx_missed_beacon_notif;
/* Rx handlers */
priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
/* block ack */
priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
/* Set up hardware specific Rx handlers */
priv->cfg->ops->lib->rx_handler_setup(priv);
}
/**
* iwl_rx_handle - Main entry function for receiving responses from uCode
*
......@@ -3913,6 +3737,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->force_reset[IWL_FW_RESET].reset_duration =
IWL_DELAY_NEXT_FORCE_FW_RELOAD;
priv->rx_statistics_jiffies = jiffies;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
priv->cfg->ops->hcmd->set_rxon_chain(priv,
......
......@@ -190,10 +190,7 @@ void iwlagn_rx_replenish_now(struct iwl_priv *priv);
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
int iwlagn_rxq_stop(struct iwl_priv *priv);
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_setup_rx_handlers(struct iwl_priv *priv);
/* tx */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
......@@ -243,14 +240,6 @@ static inline bool iwl_is_tx_success(u32 status)
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* rx */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/* scan */
int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
void iwlagn_post_scan(struct iwl_priv *priv);
......
......@@ -869,33 +869,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
}
}
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
/*
* MULTI-FIXME
* See iwl_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
if (priv->switch_rxon.switch_in_progress) {
if (!le32_to_cpu(csa->status) &&
(csa->channel == priv->switch_rxon.channel)) {
rxon->channel = csa->channel;
ctx->staging.channel = csa->channel;
IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, true);
} else {
IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
le16_to_cpu(csa->channel));
iwl_chswitch_done(priv, false);
}
}
}
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
......@@ -1245,42 +1218,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
"notification for %s:\n", len,
get_cmd_string(pkt->hdr.cmd));
iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
void iwl_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
"seq 0x%04X ser 0x%08X\n",
le32_to_cpu(pkt->u.err_resp.error_type),
get_cmd_string(pkt->u.err_resp.cmd_id),
pkt->u.err_resp.cmd_id,
le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
le32_to_cpu(pkt->u.err_resp.error_info));
}
void iwl_clear_isr_stats(struct iwl_priv *priv)
{
memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
......
......@@ -441,10 +441,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
u32 decrypt_res,
struct ieee80211_rx_status *stats);
void iwl_irq_handle_error(struct iwl_priv *priv);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
......@@ -493,15 +489,6 @@ static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
{
}
#endif
/*****************************************************
* RX handlers.
* **************************************************/
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_rx_reply_error(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
/*****************************************************
* RX
......@@ -513,11 +500,8 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* Handlers */
void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
/* TX helpers */
......
......@@ -1261,8 +1261,8 @@ struct iwl_priv {
/* track IBSS manager (last beacon) status */
u32 ibss_manager;
/* storing the jiffies when the plcp error rate is received */
unsigned long plcp_jiffies;
/* jiffies when last recovery from statistics was performed */
unsigned long rx_statistics_jiffies;
/* force reset */
struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
......
......@@ -387,7 +387,7 @@ struct lbs_offset_value {
struct mrvl_ie_domain_param_set {
struct mrvl_ie_header header;
u8 country_code[3];
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS];
} __packed;
......
......@@ -1056,13 +1056,12 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
}
memset(rxq->rxd, 0, size);
rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n");
pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
return -ENOMEM;
}
memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
for (i = 0; i < MWL8K_RX_DESCS; i++) {
int desc_size;
......@@ -1347,13 +1346,12 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
}
memset(txq->txd, 0, size);
txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n");
pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
return -ENOMEM;
}
memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
for (i = 0; i < MWL8K_TX_DESCS; i++) {
struct mwl8k_tx_desc *tx_desc;
......
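Both mwl8k hunks make the same substitution: kcalloc() returns zeroed memory and checks the n * size multiplication for overflow, so the separate memset() after kmalloc() can be dropped. A schematic example (not driver code):
#include <linux/slab.h>

static u32 *alloc_zeroed_counters(size_t n)
{
	/* old style: p = kmalloc(n * sizeof(*p), GFP_KERNEL); memset(p, 0, ...); */
	return kcalloc(n, sizeof(u32), GFP_KERNEL);	/* NULL on failure */
}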
......@@ -43,9 +43,8 @@ config P54_SPI
tristate "Prism54 SPI (stlc45xx) support"
depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS
---help---
This driver is for stlc4550 or stlc4560 based wireless chips.
This driver is experimental, untested and will probably only work on
Nokia's N800/N810 Portable Internet Tablet.
This driver is for stlc4550 or stlc4560 based wireless chips
such as Nokia's N800/N810 Portable Internet Tablet.
If you choose to build a module, it'll be called p54spi.
......
......@@ -779,7 +779,7 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
......@@ -795,13 +795,13 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
entry_priv = rt2x00dev->atim->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
entry_priv = rt2x00dev->bcn->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
......@@ -1131,19 +1131,21 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 4, &word);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
rt2x00_desc_write(txd, 4, word);
......@@ -1164,7 +1166,7 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_RTS,
test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_desc_write(txd, 0, word);
......@@ -1276,7 +1278,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
struct queue_entry_priv_pci *entry_priv;
struct queue_entry *entry;
struct txdone_entry_desc txdesc;
......@@ -1315,27 +1317,25 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2400pci_txstatus_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
u32 reg;
unsigned long flags;
/*
* Handle all tx queues.
......@@ -1347,7 +1347,7 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
/*
* Enable all TXDONE interrupts again.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
......@@ -1355,7 +1355,7 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2400pci_tbtt_tasklet(unsigned long data)
......@@ -1376,7 +1376,6 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg, mask;
unsigned long flags;
/*
* Get the interrupt sources & saved to local variable.
......@@ -1418,13 +1417,13 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
reg |= mask;
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
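The locking changes in the rt2400pci hunks above (repeated for rt2500pci below) follow one rule: spin_lock_irqsave()/irqrestore() is only needed when the caller does not know the IRQ state. The enable-interrupt helper and the tasklets always run with local interrupts enabled, so the cheaper spin_lock_irq()/spin_unlock_irq() pair is sufficient, and the hard interrupt handler already runs with local interrupts disabled, so a plain spin_lock() is enough there. A generic sketch (hypothetical functions, not driver code):
#include <linux/spinlock.h>

static void touch_shared_from_tasklet(spinlock_t *lock)
{
	spin_lock_irq(lock);	/* IRQs known to be on here; off until unlock */
	/* ... update state shared with the interrupt handler ... */
	spin_unlock_irq(lock);	/* unconditionally re-enables local IRQs */
}

static void touch_shared_from_hard_irq(spinlock_t *lock)
{
	spin_lock(lock);	/* local IRQs already off in the handler */
	/* ... */
	spin_unlock(lock);
}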
......@@ -1641,6 +1640,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.
......
......@@ -293,7 +293,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct data_queue *queue = rt2x00dev->bcn;
unsigned int bcn_preload;
u32 reg;
......@@ -865,7 +865,7 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit);
rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
......@@ -881,13 +881,13 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
entry_priv = rt2x00dev->bcn[1].entries[0].priv_data;
entry_priv = rt2x00dev->atim->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
entry_priv->desc_dma);
rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
entry_priv = rt2x00dev->bcn[0].entries[0].priv_data;
entry_priv = rt2x00dev->bcn->entries[0].priv_data;
rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
entry_priv->desc_dma);
......@@ -1287,10 +1287,12 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 2, word);
rt2x00_desc_read(txd, 3, &word);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 3, word);
rt2x00_desc_read(txd, 10, &word);
......@@ -1315,7 +1317,7 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
......@@ -1408,7 +1410,7 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
struct queue_entry_priv_pci *entry_priv;
struct queue_entry *entry;
struct txdone_entry_desc txdesc;
......@@ -1447,27 +1449,25 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2500pci_txstatus_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
u32 reg;
unsigned long flags;
/*
* Handle all tx queues.
......@@ -1479,7 +1479,7 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
/*
* Enable all TXDONE interrupts again.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
......@@ -1487,7 +1487,7 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2500pci_tbtt_tasklet(unsigned long data)
......@@ -1508,7 +1508,6 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg, mask;
unsigned long flags;
/*
* Get the interrupt sources & saved to local variable.
......@@ -1550,13 +1549,13 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
reg |= mask;
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
return IRQ_HANDLED;
}
......@@ -1959,6 +1958,7 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
__set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.
......
......@@ -1100,7 +1100,7 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
......@@ -1114,10 +1114,12 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
......@@ -1795,6 +1797,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags);
}
__set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags);
/*
* Set the rssi offset.
......
......@@ -66,7 +66,7 @@
#define RF3320 0x000b
#define RF3322 0x000c
#define RF3853 0x000d
#define RF5390 0x5390
#define RF5390 0x5390
/*
* Chipset revisions.
......@@ -79,7 +79,7 @@
#define REV_RT3071E 0x0211
#define REV_RT3090E 0x0211
#define REV_RT3390E 0x0211
#define REV_RT5390F 0x0502
#define REV_RT5390F 0x0502
/*
* Signal information.
......@@ -126,9 +126,9 @@
/*
* AUX_CTRL: Aux/PCI-E related configuration
*/
#define AUX_CTRL 0x10c
#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
#define AUX_CTRL 0x10c
#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002)
#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400)
/*
* OPT_14: Unknown register used by rt3xxx devices.
......@@ -464,7 +464,7 @@
*/
#define RF_CSR_CFG 0x0500
#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00)
#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
......@@ -1746,13 +1746,13 @@ struct mac_iveiv_entry {
*/
#define BBP4_TX_BF FIELD8(0x01)
#define BBP4_BANDWIDTH FIELD8(0x18)
#define BBP4_MAC_IF_CTRL FIELD8(0x40)
#define BBP4_MAC_IF_CTRL FIELD8(0x40)
/*
* BBP 109
*/
#define BBP109_TX0_POWER FIELD8(0x0f)
#define BBP109_TX1_POWER FIELD8(0xf0)
#define BBP109_TX0_POWER FIELD8(0x0f)
#define BBP109_TX1_POWER FIELD8(0xf0)
/*
* BBP 138: Unknown
......@@ -1765,7 +1765,7 @@ struct mac_iveiv_entry {
/*
* BBP 152: Rx Ant
*/
#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
/*
* RFCSR registers
......@@ -1776,7 +1776,7 @@ struct mac_iveiv_entry {
* RFCSR 1:
*/
#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
#define RFCSR1_PLL_PD FIELD8(0x02)
#define RFCSR1_PLL_PD FIELD8(0x02)
#define RFCSR1_RX0_PD FIELD8(0x04)
#define RFCSR1_TX0_PD FIELD8(0x08)
#define RFCSR1_RX1_PD FIELD8(0x10)
......@@ -1785,7 +1785,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 2:
*/
#define RFCSR2_RESCAL_EN FIELD8(0x80)
#define RFCSR2_RESCAL_EN FIELD8(0x80)
/*
* RFCSR 6:
......@@ -1801,7 +1801,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 11:
*/
#define RFCSR11_R FIELD8(0x03)
#define RFCSR11_R FIELD8(0x03)
/*
* RFCSR 12:
......@@ -1857,9 +1857,9 @@ struct mac_iveiv_entry {
/*
* RFCSR 30:
*/
#define RFCSR30_TX_H20M FIELD8(0x02)
#define RFCSR30_RX_H20M FIELD8(0x04)
#define RFCSR30_RX_VCM FIELD8(0x18)
#define RFCSR30_TX_H20M FIELD8(0x02)
#define RFCSR30_RX_H20M FIELD8(0x04)
#define RFCSR30_RX_VCM FIELD8(0x18)
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
/*
......@@ -1871,17 +1871,17 @@ struct mac_iveiv_entry {
/*
* RFCSR 38:
*/
#define RFCSR38_RX_LO1_EN FIELD8(0x20)
#define RFCSR38_RX_LO1_EN FIELD8(0x20)
/*
* RFCSR 39:
*/
#define RFCSR39_RX_LO2_EN FIELD8(0x80)
#define RFCSR39_RX_LO2_EN FIELD8(0x80)
/*
* RFCSR 49:
*/
#define RFCSR49_TX FIELD8(0x3f)
#define RFCSR49_TX FIELD8(0x3f)
/*
* RF registers
......@@ -1918,7 +1918,7 @@ struct mac_iveiv_entry {
/*
* Chip ID
*/
#define EEPROM_CHIP_ID 0x0000
#define EEPROM_CHIP_ID 0x0000
/*
* EEPROM Version
......
......@@ -493,12 +493,12 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
}
if (rt2x00_rt(rt2x00dev, RT5390)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
rt2800_register_write(rt2x00dev, AUX_CTRL, reg);
}
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
......@@ -726,7 +726,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
if (qid >= QID_RX) {
if (unlikely(qid >= QID_RX)) {
/*
* Unknown queue, this shouldn't happen. Just drop
* this tx status.
......@@ -736,7 +736,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
}
queue = rt2x00queue_get_queue(rt2x00dev, qid);
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(queue == NULL)) {
/*
* The queue is NULL, this shouldn't happen. Stop
......@@ -747,7 +747,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
}
if (rt2x00queue_empty(queue)) {
if (unlikely(rt2x00queue_empty(queue))) {
/*
* The queue is empty. Stop processing here
* and drop the tx status.
......@@ -765,18 +765,17 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, irq_field, 1);
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt2800pci_txstatus_tasklet(unsigned long data)
......@@ -836,7 +835,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
*
* Furthermore we don't disable the TX_FIFO_STATUS
* interrupt here but leave it enabled so that the TX_STA_FIFO
* can also be read while the interrupt thread gets executed.
* can also be read while the tx status tasklet gets executed.
*
* Since we have only one producer and one consumer we don't
* need to lock the kfifo.
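The single-producer/single-consumer property is what makes the lock-free kfifo usage safe: the interrupt handler is the only writer and the tx status tasklet the only reader. A minimal sketch of that split using the current kfifo API (the names and the 64-entry size are illustrative):

#include <linux/kfifo.h>
#include <linux/printk.h>

static DEFINE_KFIFO(txstatus_fifo, u32, 64);    /* size must be a power of two */

/* producer: hard IRQ context, no locking required */
static void demo_push_txstatus(u32 status)
{
        if (!kfifo_put(&txstatus_fifo, status))
                pr_warn("tx status fifo full, dropping entry\n");
}

/* consumer: tasklet context, no locking required */
static void demo_drain_txstatus(void)
{
        u32 status;

        while (kfifo_get(&txstatus_fifo, &status))
                pr_info("tx status 0x%08x\n", status);
}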
......@@ -862,7 +861,6 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg, mask;
unsigned long flags;
/* Read status and ACK all interrupts */
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
......@@ -905,11 +903,11 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
reg &= mask;
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
return IRQ_HANDLED;
}
......@@ -979,6 +977,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
if (!modparam_nohwcrypt)
__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
/*
* Set the rssi offset.
......@@ -1135,7 +1134,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
{ PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
{ 0, }
};
......
......@@ -565,6 +565,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
__set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags);
/*
* Set the rssi offset.
......
......@@ -467,7 +467,6 @@ struct rt2x00lib_crypto {
const u8 *address;
u32 bssidx;
u32 aid;
u8 key[16];
u8 tx_mic[8];
......@@ -662,6 +661,8 @@ enum rt2x00_flags {
DRIVER_REQUIRE_L2PAD,
DRIVER_REQUIRE_TXSTATUS_FIFO,
DRIVER_REQUIRE_TASKLET_CONTEXT,
DRIVER_REQUIRE_SW_SEQNO,
DRIVER_REQUIRE_HT_TX_DESC,
/*
* Driver features
......@@ -886,14 +887,13 @@ struct rt2x00_dev {
struct work_struct txdone_work;
/*
* Data queue arrays for RX, TX and Beacon.
* The Beacon array also contains the Atim queue
* if that is supported by the device.
* Data queue arrays for RX, TX, Beacon and ATIM.
*/
unsigned int data_queues;
struct data_queue *rx;
struct data_queue *tx;
struct data_queue *bcn;
struct data_queue *atim;
/*
* Firmware image.
......@@ -1063,12 +1063,24 @@ void rt2x00queue_map_txskb(struct queue_entry *entry);
void rt2x00queue_unmap_skb(struct queue_entry *entry);
/**
* rt2x00queue_get_queue - Convert queue index to queue pointer
* rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @queue: rt2x00 queue index (see &enum data_queue_qid).
*
* Returns NULL for non tx queues.
*/
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue);
static inline struct data_queue *
rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
{
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
if (queue == QID_ATIM)
return rt2x00dev->atim;
return NULL;
}
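A short usage sketch of the new inline helper: callers translate a queue index coming from a tx status report or from mac80211's conf_tx and bail out when the index does not name a tx queue (QID_RX, QID_BEACON and out-of-range values all return NULL):

struct data_queue *queue;

queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(!queue)) {
        /* not a tx queue: nothing to complete here */
        return;
}
/* ... walk the entries of this tx queue ... */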
/**
* rt2x00queue_get_entry - Get queue entry where the given index points to.
......
......@@ -38,12 +38,12 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
if (tx_info->control.sta)
txdesc->mpdu_density =
txdesc->u.ht.mpdu_density =
tx_info->control.sta->ht_cap.ampdu_density;
txdesc->ba_size = 7; /* FIXME: What value is needed? */
txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
txdesc->stbc =
txdesc->u.ht.stbc =
(tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT;
/*
......@@ -51,22 +51,22 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
* mcs rate to be used
*/
if (txrate->flags & IEEE80211_TX_RC_MCS) {
txdesc->mcs = txrate->idx;
txdesc->u.ht.mcs = txrate->idx;
/*
* MIMO PS should be set to 1 for STAs using dynamic SM PS
* when using more than one tx stream (>MCS7).
*/
if (tx_info->control.sta && txdesc->mcs > 7 &&
if (tx_info->control.sta && txdesc->u.ht.mcs > 7 &&
((tx_info->control.sta->ht_cap.cap &
IEEE80211_HT_CAP_SM_PS) >>
IEEE80211_HT_CAP_SM_PS_SHIFT) ==
WLAN_HT_CAP_SM_PS_DYNAMIC)
__set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags);
} else {
txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs);
txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs);
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
txdesc->mcs |= 0x08;
txdesc->u.ht.mcs |= 0x08;
}
/*
......@@ -77,14 +77,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
!(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
__set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
/*
* Determine HT Mix/Greenfield rate mode
*/
if (txrate->flags & IEEE80211_TX_RC_MCS)
txdesc->rate_mode = RATE_MODE_HT_MIX;
if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
/*
* Set 40Mhz mode if necessary (for legacy rates this will
* duplicate the frame to both channels).
......@@ -105,11 +97,11 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
* for frames not transmitted with TXOP_HTTXOP
*/
if (ieee80211_is_mgmt(hdr->frame_control))
txdesc->txop = TXOP_BACKOFF;
txdesc->u.ht.txop = TXOP_BACKOFF;
else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
txdesc->txop = TXOP_SIFS;
txdesc->u.ht.txop = TXOP_SIFS;
else
txdesc->txop = TXOP_HTTXOP;
txdesc->u.ht.txop = TXOP_HTTXOP;
}
u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
......
......@@ -116,13 +116,13 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto exit_fail;
/*
* Determine which queue to put packet on.
* Use the ATIM queue if appropriate and present.
*/
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);
else
queue = rt2x00queue_get_queue(rt2x00dev, qid);
qid = QID_ATIM;
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(!queue)) {
ERROR(rt2x00dev,
"Attempt to send packet over invalid queue %d.\n"
......@@ -149,7 +149,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto exit_fail;
}
if (rt2x00queue_write_tx_frame(queue, skb, false))
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
goto exit_fail;
if (rt2x00queue_threshold(queue))
......@@ -190,7 +190,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
{
struct rt2x00_dev *rt2x00dev = hw->priv;
struct rt2x00_intf *intf = vif_to_intf(vif);
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct data_queue *queue = rt2x00dev->bcn;
struct queue_entry *entry = NULL;
unsigned int i;
......@@ -518,11 +518,9 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
crypto.cmd = cmd;
if (sta) {
/* some drivers need the AID */
crypto.aid = sta->aid;
if (sta)
crypto.address = sta->addr;
} else
else
crypto.address = bcast_addr;
if (crypto.cipher == CIPHER_TKIP)
......@@ -692,7 +690,7 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
struct rt2x00_dev *rt2x00dev = hw->priv;
struct data_queue *queue;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
if (unlikely(!queue))
return -EINVAL;
......
......@@ -221,14 +221,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
unsigned long irqflags;
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
unlikely(!tx_info->control.vif))
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
return;
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags))
return;
/*
* Hardware should insert sequence counter.
* FIXME: We insert a software sequence counter first for
* hardware that doesn't support hardware sequence counting.
* The hardware is not able to insert a sequence number. Assign a
* software generated one here.
*
* This is wrong because beacons are not getting sequence
* numbers assigned properly.
......@@ -246,7 +249,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
spin_unlock_irqrestore(&intf->seqlock, irqflags);
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
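The body elided between the two hunks above takes the per-interface seqlock and stamps the frame from a software counter. A minimal sketch of what that assignment looks like, assuming the usual intf->seqno counter (incremented in steps of 0x10 because the low four bits of seq_ctrl carry the fragment number):

struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
unsigned long irqflags;

spin_lock_irqsave(&intf->seqlock, irqflags);

if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
        intf->seqno += 0x10;            /* all fragments of one MSDU share a seqno */
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

spin_unlock_irqrestore(&intf->seqlock, irqflags);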
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
......@@ -260,6 +262,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
unsigned int duration;
unsigned int residual;
/*
* Determine with which IFS priority this frame should be sent.
* Set ifs to IFS_SIFS when this is not the first fragment,
* or when this fragment came after RTS/CTS.
*/
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
txdesc->u.plcp.ifs = IFS_BACKOFF;
else
txdesc->u.plcp.ifs = IFS_SIFS;
/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
data_length = entry->skb->len + 4;
data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);
......@@ -268,12 +280,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
* PLCP setup
* Length calculation depends on OFDM/CCK rate.
*/
txdesc->signal = hwrate->plcp;
txdesc->service = 0x04;
txdesc->u.plcp.signal = hwrate->plcp;
txdesc->u.plcp.service = 0x04;
if (hwrate->flags & DEV_RATE_OFDM) {
txdesc->length_high = (data_length >> 6) & 0x3f;
txdesc->length_low = data_length & 0x3f;
txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f;
txdesc->u.plcp.length_low = data_length & 0x3f;
} else {
/*
* Convert length to microseconds.
......@@ -288,18 +300,18 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
* Check if we need to set the Length Extension
*/
if (hwrate->bitrate == 110 && residual <= 30)
txdesc->service |= 0x80;
txdesc->u.plcp.service |= 0x80;
}
txdesc->length_high = (duration >> 8) & 0xff;
txdesc->length_low = duration & 0xff;
txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
txdesc->u.plcp.length_low = duration & 0xff;
/*
* When preamble is enabled we should set the
* preamble bit for the signal.
*/
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
txdesc->signal |= 0x08;
txdesc->u.plcp.signal |= 0x08;
}
}
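The CCK branch elided above converts the PSDU length into an on-air duration in microseconds and rounds up; at 11 Mbps a length-extension bit resolves the rounding ambiguity. A worked sketch with the helper macros expanded inline (bitrate is in units of 100 kbit/s, so 110 means 11 Mbps):

/*
 * Example: data_length = 1504 bytes at 11 Mbps (hwrate->bitrate == 110):
 *
 *   duration = 1504 * 8 * 10 / 110 = 1093   (integer division)
 *   residual = 1504 * 8 * 10 % 110 = 90
 *
 * residual != 0, so duration is rounded up to 1094 us; because residual
 * is above 30 the 0x80 length-extension bit in SERVICE stays clear.
 */
duration = (data_length * 8 * 10) / hwrate->bitrate;
residual = (data_length * 8 * 10) % hwrate->bitrate;
if (residual != 0) {
        duration++;
        /* Check if we need to set the Length Extension */
        if (hwrate->bitrate == 110 && residual <= 30)
                txdesc->u.plcp.service |= 0x80;
}
txdesc->u.plcp.length_high = (duration >> 8) & 0xff;
txdesc->u.plcp.length_low = duration & 0xff;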
......@@ -309,9 +321,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
struct ieee80211_rate *rate =
ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
const struct rt2x00_rate *hwrate;
struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
struct ieee80211_rate *rate;
const struct rt2x00_rate *hwrate = NULL;
memset(txdesc, 0, sizeof(*txdesc));
......@@ -371,33 +383,36 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
ieee80211_is_probe_resp(hdr->frame_control))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
/*
* Determine with which IFS priority this frame should be sent.
* Set ifs to IFS_SIFS when this is not the first fragment,
* or when this fragment came after RTS/CTS.
*/
if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
!test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
!test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags))
__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
txdesc->ifs = IFS_BACKOFF;
} else
txdesc->ifs = IFS_SIFS;
/*
* Determine rate modulation.
*/
hwrate = rt2x00_get_rate(rate->hw_value);
txdesc->rate_mode = RATE_MODE_CCK;
if (hwrate->flags & DEV_RATE_OFDM)
txdesc->rate_mode = RATE_MODE_OFDM;
if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
txdesc->rate_mode = RATE_MODE_HT_GREENFIELD;
else if (txrate->flags & IEEE80211_TX_RC_MCS)
txdesc->rate_mode = RATE_MODE_HT_MIX;
else {
rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
hwrate = rt2x00_get_rate(rate->hw_value);
if (hwrate->flags & DEV_RATE_OFDM)
txdesc->rate_mode = RATE_MODE_OFDM;
else
txdesc->rate_mode = RATE_MODE_CCK;
}
/*
* Apply TX descriptor handling by components
*/
rt2x00crypto_create_tx_descriptor(entry, txdesc);
rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags))
rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
else
rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}
static int rt2x00queue_write_tx_data(struct queue_entry *entry,
......@@ -690,29 +705,6 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
{
int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
if (queue == QID_RX)
return rt2x00dev->rx;
if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
return &rt2x00dev->tx[queue];
if (!rt2x00dev->bcn)
return NULL;
if (queue == QID_BEACON)
return &rt2x00dev->bcn[0];
else if (queue == QID_ATIM && atim)
return &rt2x00dev->bcn[1];
return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
enum queue_index index)
{
......@@ -1088,7 +1080,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
goto exit;
if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
status = rt2x00queue_alloc_entries(rt2x00dev->atim,
rt2x00dev->ops->atim);
if (status)
goto exit;
......@@ -1162,6 +1154,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
rt2x00dev->rx = queue;
rt2x00dev->tx = &queue[1];
rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];
rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL;
/*
* Initialize queue parameters.
......@@ -1178,9 +1171,9 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_init(rt2x00dev, queue, qid++);
rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON);
if (req_atim)
rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);
rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM);
return 0;
}
......
......@@ -305,20 +305,27 @@ struct txentry_desc {
u16 length;
u16 header_length;
u16 length_high;
u16 length_low;
u16 signal;
u16 service;
u16 mcs;
u16 stbc;
u16 ba_size;
u16 rate_mode;
u16 mpdu_density;
union {
struct {
u16 length_high;
u16 length_low;
u16 signal;
u16 service;
enum ifs ifs;
} plcp;
struct {
u16 mcs;
u8 stbc;
u8 ba_size;
u8 mpdu_density;
enum txop txop;
} ht;
} u;
enum rate_modulation rate_mode;
short retry_limit;
short ifs;
short txop;
enum cipher cipher;
u16 key_idx;
......
......@@ -1898,10 +1898,12 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
......@@ -1946,7 +1948,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
......@@ -2190,7 +2192,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
* queue identification number.
*/
type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE);
queue = rt2x00queue_get_queue(rt2x00dev, type);
queue = rt2x00queue_get_tx_queue(rt2x00dev, type);
if (unlikely(!queue))
continue;
......@@ -2261,39 +2263,37 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
struct rt2x00_field32 irq_field)
{
unsigned long flags;
u32 reg;
/*
* Enable a single MCU interrupt. The interrupt mask register
* access needs locking.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock_irq(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, irq_field, 0);
rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
static void rt61pci_txstatus_tasklet(unsigned long data)
......@@ -2331,7 +2331,6 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
struct rt2x00_dev *rt2x00dev = dev_instance;
u32 reg_mcu, mask_mcu;
u32 reg, mask;
unsigned long flags;
/*
* Get the interrupt sources & save them to a local variable.
......@@ -2376,7 +2375,7 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
* Disable all interrupts for which a tasklet was scheduled right now,
* the tasklet will reenable the appropriate interrupts.
*/
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
spin_lock(&rt2x00dev->irqmask_lock);
rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
reg |= mask;
......@@ -2386,7 +2385,7 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
reg |= mask_mcu;
rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
spin_unlock(&rt2x00dev->irqmask_lock);
return IRQ_HANDLED;
}
......@@ -2917,7 +2916,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
......
......@@ -1474,7 +1474,7 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_OFDM,
(txdesc->rate_mode == RATE_MODE_OFDM));
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
......@@ -1499,10 +1499,12 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
rt2x00_desc_write(txd, 1, word);
rt2x00_desc_read(txd, 2, &word);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal);
rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW,
txdesc->u.plcp.length_low);
rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH,
txdesc->u.plcp.length_high);
rt2x00_desc_write(txd, 2, word);
if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
......@@ -2247,7 +2249,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
if (queue_idx >= 4)
return 0;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
/* Update WMM TXOP register */
offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
......
......@@ -869,23 +869,35 @@ static void rtl8187_work(struct work_struct *work)
/* The RTL8187 returns the retry count through register 0xFFFA. In
* addition, it appears to be a cumulative retry count, not the
* value for the current TX packet. When multiple TX entries are
* queued, the retry count will be valid for the last one in the queue.
* The "error" should not matter for purposes of rate setting. */
* waiting in the queue, the retry count will be the total for all.
* The "error" may matter for purposes of rate setting, but there is
* no other choice with this hardware.
*/
struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv,
work.work);
struct ieee80211_tx_info *info;
struct ieee80211_hw *dev = priv->dev;
static u16 retry;
u16 tmp;
u16 avg_retry;
int length;
mutex_lock(&priv->conf_mutex);
tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA);
length = skb_queue_len(&priv->b_tx_status.queue);
if (unlikely(!length))
length = 1;
if (unlikely(tmp < retry))
tmp = retry;
avg_retry = (tmp - retry) / length;
while (skb_queue_len(&priv->b_tx_status.queue) > 0) {
struct sk_buff *old_skb;
old_skb = skb_dequeue(&priv->b_tx_status.queue);
info = IEEE80211_SKB_CB(old_skb);
info->status.rates[0].count = tmp - retry + 1;
info->status.rates[0].count = avg_retry + 1;
if (info->status.rates[0].count > RETRY_COUNT)
info->flags &= ~IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(dev, old_skb);
}
retry = tmp;
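The averaging above spreads the cumulative hardware counter evenly over every frame still waiting for a status report:

/*
 * Worked example (numbers are hypothetical):
 *   previous snapshot:     retry  = 40
 *   register 0xFFFA now:   tmp    = 52
 *   frames in the queue:   length = 4
 *
 *   avg_retry = (52 - 40) / 4 = 3
 *   each frame reports rates[0].count = avg_retry + 1 = 4
 *   4 <= RETRY_COUNT (7), so IEEE80211_TX_STAT_ACK stays set
 */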
......@@ -931,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->TX_CONF,
RTL818X_TX_CONF_HW_SEQNUM |
RTL818X_TX_CONF_DISREQQSIZE |
(7 << 8 /* short retry limit */) |
(7 << 0 /* long retry limit */) |
(RETRY_COUNT << 8 /* short retry limit */) |
(RETRY_COUNT << 0 /* long retry limit */) |
(7 << 21 /* MAX TX DMA */));
rtl8187_init_urbs(dev);
rtl8187b_init_status_urb(dev);
......@@ -1376,6 +1388,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS;
/* Initialize rate-control variables */
dev->max_rates = 1;
dev->max_rate_tries = RETRY_COUNT;
eeprom.data = dev;
eeprom.register_read = rtl8187_eeprom_register_read;
......
......@@ -35,6 +35,8 @@
#define RFKILL_MASK_8187_89_97 0x2
#define RFKILL_MASK_8198 0x4
#define RETRY_COUNT 7
struct rtl8187_rx_info {
struct urb *urb;
struct ieee80211_hw *dev;
......
......@@ -7,15 +7,18 @@ rtlwifi-objs := \
efuse.o \
ps.o \
rc.o \
regd.o \
usb.o
regd.o
rtl8192c_common-objs += \
ifeq ($(CONFIG_PCI),y)
ifneq ($(CONFIG_PCI),)
rtlwifi-objs += pci.o
endif
ifneq ($(CONFIG_USB),)
rtlwifi-objs += usb.o
endif
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
obj-$(CONFIG_RTL8192CU) += rtl8192cu/
......
......@@ -54,7 +54,6 @@
/* This really should be 8, but not for our firmware */
#define MAX_SUPPORTED_RATES 32
#define COUNTRY_STRING_LEN 3
#define MAX_COUNTRY_TRIPLETS 32
/* Headers */
......@@ -98,7 +97,7 @@ struct country_triplet {
struct wl12xx_ie_country {
struct wl12xx_ie_header header;
u8 country_string[COUNTRY_STRING_LEN];
u8 country_string[IEEE80211_COUNTRY_STRING_LEN];
struct country_triplet triplets[MAX_COUNTRY_TRIPLETS];
} __packed;
......
......@@ -1361,7 +1361,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
acx->gf_protection = 0;
acx->gf_protection =
!!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
acx->ht_tx_burst_limit = 0;
acx->dual_cts_protection = 0;
......
......@@ -488,6 +488,9 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
wl->hw_pg_ver = (s8)fuse;
if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
}
/* uploads NVS and firmware */
......
......@@ -59,6 +59,11 @@ struct wl1271_static_data {
#define PG_VER_MASK 0x3c
#define PG_VER_OFFSET 2
#define PG_MAJOR_VER_MASK 0x3
#define PG_MAJOR_VER_OFFSET 0x0
#define PG_MINOR_VER_MASK 0xc
#define PG_MINOR_VER_OFFSET 0x2
#define CMD_MBOX_ADDRESS 0x407B4
#define POLARITY_LOW BIT(1)
......
......@@ -63,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
cmd->status = 0;
WARN_ON(len % 4 != 0);
WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
......
......@@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
mutex_lock(&wl->mutex);
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......
......@@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl);
int wl1271_init_ieee80211(struct wl1271 *wl);
struct ieee80211_hw *wl1271_alloc_hw(void);
int wl1271_free_hw(struct wl1271 *wl);
irqreturn_t wl1271_irq(int irq, void *data);
#endif
......@@ -304,7 +304,7 @@ static struct conf_drv_settings default_conf = {
.rx_block_num = 70,
.tx_min_block_num = 40,
.dynamic_memory = 0,
.min_req_tx_blocks = 104,
.min_req_tx_blocks = 100,
.min_req_rx_blocks = 22,
.tx_min = 27,
}
......@@ -374,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -635,16 +635,44 @@ static void wl1271_fw_status(struct wl1271 *wl,
(s64)le32_to_cpu(status->fw_localtime);
}
#define WL1271_IRQ_MAX_LOOPS 10
static void wl1271_flush_deferred_work(struct wl1271 *wl)
{
struct sk_buff *skb;
/* Pass all received frames to the network stack */
while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
ieee80211_rx_ni(wl->hw, skb);
static void wl1271_irq_work(struct work_struct *work)
/* Return sent skbs to the network stack */
while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
ieee80211_tx_status(wl->hw, skb);
}
static void wl1271_netstack_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, netstack_work);
do {
wl1271_flush_deferred_work(wl);
} while (skb_queue_len(&wl->deferred_rx_queue));
}
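wl1271_flush_deferred_work() is the consumer half of the new deferred queues; the producer half, visible in the RX hunk further down, simply queues the skb from the IRQ thread and kicks netstack_work. A minimal sketch of that producer side (the wrapper name is illustrative):

/* called from the threaded IRQ path while wl->mutex is held */
static void demo_defer_rx_skb(struct wl1271 *wl, struct sk_buff *skb)
{
        skb_queue_tail(&wl->deferred_rx_queue, skb);
        ieee80211_queue_work(wl->hw, &wl->netstack_work);
}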
#define WL1271_IRQ_MAX_LOOPS 256
irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
struct wl1271 *wl = (struct wl1271 *)cookie;
bool done = false;
unsigned int defer_count;
unsigned long flags;
struct wl1271 *wl =
container_of(work, struct wl1271, irq_work);
/* TX might be handled here, avoid redundant work */
set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
......@@ -653,26 +681,27 @@ static void wl1271_irq_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, true);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
spin_lock_irqsave(&wl->wl_lock, flags);
while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
loopcount--;
while (!done && loopcount--) {
/*
* In order to avoid a race with the hardirq, clear the flag
* before acknowledging the chip. Since the mutex is held,
* wl1271_ps_elp_wakeup cannot be called concurrently.
*/
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
wl1271_fw_status(wl, wl->fw_status);
intr = le32_to_cpu(wl->fw_status->common.intr);
intr &= WL1271_INTR_MASK;
if (!intr) {
wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
spin_lock_irqsave(&wl->wl_lock, flags);
done = true;
continue;
}
intr &= WL1271_INTR_MASK;
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("watchdog interrupt received! "
"starting recovery.");
......@@ -682,25 +711,35 @@ static void wl1271_irq_work(struct work_struct *work)
goto out;
}
if (intr & WL1271_ACX_INTR_DATA) {
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
/* check for tx results */
if (wl->fw_status->common.tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
wl1271_rx(wl, &wl->fw_status->common);
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count) {
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
wl1271_tx_work_locked(wl);
} else {
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
wl1271_rx(wl, &wl->fw_status->common);
/* check for tx results */
if (wl->fw_status->common.tx_results_counter !=
(wl->tx_results_count & 0xff))
wl1271_tx_complete(wl);
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
skb_queue_len(&wl->deferred_rx_queue);
if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
......@@ -719,21 +758,24 @@ static void wl1271_irq_work(struct work_struct *work)
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
spin_lock_irqsave(&wl->wl_lock, flags);
}
if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->irq_work);
else
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_ps_elp_sleep(wl);
out:
spin_lock_irqsave(&wl->wl_lock, flags);
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
wl->tx_queue_count)
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
mutex_unlock(&wl->mutex);
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
......@@ -974,7 +1016,6 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
irq_disable:
wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
......@@ -983,7 +1024,9 @@ int wl1271_plt_start(struct wl1271 *wl)
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
cancel_work_sync(&wl->irq_work);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
......@@ -1010,14 +1053,15 @@ int __wl1271_plt_stop(struct wl1271 *wl)
goto out;
}
wl1271_disable_interrupts(wl);
wl1271_power_off(wl);
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->irq_work);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
mutex_lock(&wl->mutex);
out:
......@@ -1041,7 +1085,13 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int q;
u8 hlid = 0;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl->bss_type == BSS_TYPE_AP_BSS)
hlid = wl1271_tx_get_hlid(skb);
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count++;
/*
......@@ -1054,12 +1104,8 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* queue the packet */
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl->bss_type == BSS_TYPE_AP_BSS) {
hlid = wl1271_tx_get_hlid(skb);
wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
} else {
......@@ -1071,8 +1117,11 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* before that, the tx_work will not be initialized!
*/
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
!test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
static struct notifier_block wl1271_dev_notifier = {
......@@ -1169,7 +1218,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
break;
irq_disable:
wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
/* Unlocking the mutex in the middle of handling is
inherently unsafe. In this case we deem it safe to do,
......@@ -1178,7 +1226,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
work function will not do anything.) Also, any other
possible concurrent operations will fail due to the
current state, hence the wl1271 struct should be safe. */
cancel_work_sync(&wl->irq_work);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
......@@ -1244,12 +1294,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->state = WL1271_STATE_OFF;
wl1271_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
wl1271_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->pspoll_work);
cancel_delayed_work_sync(&wl->elp_work);
......@@ -1525,7 +1575,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -1681,7 +1731,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -1910,7 +1960,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto out_unlock;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
......@@ -2013,7 +2063,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -2039,7 +2089,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -2067,7 +2117,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -2546,7 +2596,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -2601,7 +2651,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
conf_tid->apsd_conf[0] = 0;
conf_tid->apsd_conf[1] = 0;
} else {
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -2647,7 +2697,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -2736,7 +2786,7 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
......@@ -2779,7 +2829,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -2812,7 +2862,7 @@ int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -3176,7 +3226,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
if (wl->state == WL1271_STATE_OFF)
goto out;
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
......@@ -3376,9 +3426,12 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
for (j = 0; j < AP_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
INIT_WORK(&wl->irq_work, wl1271_irq_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
......@@ -3404,6 +3457,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->last_tx_hlid = 0;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
......
......@@ -69,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
}
}
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
......@@ -87,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
* the completion variable in one entity.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
if (work_pending(&wl->irq_work) || chip_awake)
if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
pending = true;
else
wl->elp_compl = &compl;
......@@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
ret = wl1271_ps_elp_wakeup(wl, false);
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
return ret;
......
......@@ -30,7 +30,7 @@
int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
......
......@@ -129,7 +129,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
skb_trim(skb, skb->len - desc->pad_len);
ieee80211_rx_ni(wl->hw, skb);
skb_queue_tail(&wl->deferred_rx_queue, skb);
ieee80211_queue_work(wl->hw, &wl->netstack_work);
return 0;
}
......@@ -198,7 +199,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
pkt_offset += pkt_length;
}
}
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
/*
* Write the driver's packet counter to the FW. This is only required
* for older hardware revisions.
*/
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
void wl1271_set_default_filters(struct wl1271 *wl)
......
......@@ -27,6 +27,7 @@
#include "cmd.h"
#include "scan.h"
#include "acx.h"
#include "ps.h"
void wl1271_scan_complete_work(struct work_struct *work)
{
......@@ -40,10 +41,11 @@ void wl1271_scan_complete_work(struct work_struct *work)
mutex_lock(&wl->mutex);
if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
mutex_unlock(&wl->mutex);
return;
}
if (wl->state == WL1271_STATE_OFF)
goto out;
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
wl->scan.state = WL1271_SCAN_STATE_IDLE;
kfree(wl->scan.scanned_ch);
......@@ -52,13 +54,19 @@ void wl1271_scan_complete_work(struct work_struct *work)
ieee80211_scan_completed(wl->hw, false);
/* restore hardware connection monitoring template */
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
if (wl1271_ps_elp_wakeup(wl) == 0) {
wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
wl1271_ps_elp_sleep(wl);
}
}
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
out:
mutex_unlock(&wl->mutex);
}
......
......@@ -28,6 +28,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
......@@ -60,7 +61,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
return &(wl_to_func(wl)->dev);
}
static irqreturn_t wl1271_irq(int irq, void *cookie)
static irqreturn_t wl1271_hardirq(int irq, void *cookie)
{
struct wl1271 *wl = cookie;
unsigned long flags;
......@@ -69,17 +70,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
/* complete the ELP completion */
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
if (wl->elp_compl) {
complete(wl->elp_compl);
wl->elp_compl = NULL;
}
if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->irq_work);
set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
return IRQ_HANDLED;
return IRQ_WAKE_THREAD;
}
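This is the hard IRQ half of the new threaded-interrupt scheme: it only completes a pending ELP wakeup, marks the IRQ as running and returns IRQ_WAKE_THREAD so the IRQ core runs wl1271_irq() in a kernel thread. Because the line is level triggered, IRQF_ONESHOT keeps it masked until the thread finishes. A minimal sketch of the generic pattern (the demo_* names are illustrative; the real registration appears later in this patch):

static irqreturn_t demo_hardirq(int irq, void *cookie)
{
        /* do the bare minimum in hard IRQ context */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_fn(int irq, void *cookie)
{
        /* heavy lifting: bus I/O, mutexes, skb handling; may sleep */
        return IRQ_HANDLED;
}

ret = request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
                           IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "demo", cookie);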
static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
......@@ -106,8 +104,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
......@@ -123,8 +119,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
}
sdio_release_host(func);
if (ret)
wl1271_error("sdio read failed (%d)", ret);
}
......@@ -135,8 +129,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
int ret;
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
......@@ -152,8 +144,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
ret = sdio_memcpy_toio(func, addr, buf, len);
}
sdio_release_host(func);
if (ret)
wl1271_error("sdio write failed (%d)", ret);
}
......@@ -163,14 +153,18 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
struct sdio_func *func = wl_to_func(wl);
int ret;
/* Power up the card */
/* Make sure the card will not be powered off by runtime PM */
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0)
goto out;
/* Runtime PM might be disabled, so power up the card manually */
ret = mmc_power_restore_host(func->card->host);
if (ret < 0)
goto out;
sdio_claim_host(func);
sdio_enable_func(func);
sdio_release_host(func);
out:
return ret;
......@@ -179,12 +173,17 @@ static int wl1271_sdio_power_on(struct wl1271 *wl)
static int wl1271_sdio_power_off(struct wl1271 *wl)
{
struct sdio_func *func = wl_to_func(wl);
int ret;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
/* Power down the card */
/* Runtime PM might be disabled, so power off the card manually */
ret = mmc_power_save_host(func->card->host);
if (ret < 0)
return ret;
/* Let runtime PM know the card is powered off */
return pm_runtime_put_sync(&func->dev);
}
......@@ -241,14 +240,14 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wl->irq = wlan_data->irq;
wl->ref_clock = wlan_data->board_ref_clock;
ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
DRIVER_NAME, wl);
if (ret < 0) {
wl1271_error("request_irq() failed: %d", ret);
goto out_free;
}
set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
disable_irq(wl->irq);
ret = wl1271_init_ieee80211(wl);
......@@ -271,7 +270,6 @@ static int __devinit wl1271_probe(struct sdio_func *func,
out_irq:
free_irq(wl->irq, wl);
out_free:
wl1271_free_hw(wl);
......