Commit 9f6e20ce authored by John W. Linville

Merge branch 'master' of...

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next into for-davem
@@ -18,6 +18,9 @@ void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
#ifdef CONFIG_PM
int bcma_bus_resume(struct bcma_bus *bus);
#endif
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
......
@@ -234,6 +234,41 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
#ifdef CONFIG_PM
static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
/* Host specific */
pci_save_state(dev);
pci_disable_device(dev);
pci_set_power_state(dev, pci_choose_state(dev, state));
return 0;
}
static int bcma_host_pci_resume(struct pci_dev *dev)
{
struct bcma_bus *bus = pci_get_drvdata(dev);
int err;
/* Host specific */
pci_set_power_state(dev, 0);
err = pci_enable_device(dev);
if (err)
return err;
pci_restore_state(dev);
/* Bus specific */
err = bcma_bus_resume(bus);
if (err)
return err;
return 0;
}
#else /* CONFIG_PM */
# define bcma_host_pci_suspend NULL
# define bcma_host_pci_resume NULL
#endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
@@ -249,6 +284,8 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
.suspend = bcma_host_pci_suspend,
.resume = bcma_host_pci_resume,
};
int __init bcma_host_pci_init(void)
......
@@ -240,6 +240,22 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
return 0;
}
#ifdef CONFIG_PM
int bcma_bus_resume(struct bcma_bus *bus)
{
struct bcma_device *core;
/* Init CC core */
core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
if (core) {
bus->drv_cc.setup_done = false;
bcma_core_chipcommon_init(&bus->drv_cc);
}
return 0;
}
#endif
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
......
@@ -129,6 +129,9 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
u16 v;
int i;
bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
SSB_SPROM_REVISION_REV;
for (i = 0; i < 3; i++) {
v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -136,12 +139,70 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
}
int bcma_sprom_get(struct bcma_bus *bus)
......
@@ -152,6 +152,7 @@ struct ath_common {
struct ath_cycle_counters cc_survey;
struct ath_regulatory regulatory;
struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
@@ -214,6 +215,10 @@ do { \
* @ATH_DBG_HWTIMER: hardware timer handling
* @ATH_DBG_BTCOEX: bluetooth coexistance
* @ATH_DBG_BSTUCK: stuck beacons
* @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
* used exclusively for WLAN-BT coexistence starting from
* AR9462.
* @ATH_DBG_DFS: radar detection
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -240,6 +245,7 @@ enum ATH_DEBUG {
ATH_DBG_WMI = 0x00004000,
ATH_DBG_BSTUCK = 0x00008000,
ATH_DBG_MCI = 0x00010000,
ATH_DBG_DFS = 0x00020000,
ATH_DBG_ANY = 0xffffffff
};
......
@@ -2,6 +2,9 @@ config ATH9K_HW
tristate
config ATH9K_COMMON
tristate
config ATH9K_DFS_DEBUGFS
def_bool y
depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
config ATH9K
tristate "Atheros 802.11n wireless cards support"
@@ -51,6 +54,25 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
depends on ATH9K && EXPERT
default n
---help---
This option enables DFS support for initiating radiation on
ath9k. There is no way to dynamically detect if a card was DFS
certified and as such this is left as a build time option. This
option should only be enabled by system integrators that can
guarantee that all the platforms that their kernel will run on
have obtained appropriate regulatory body certification for a
respective Atheros card by using ath9k on the target shipping
platforms.
This is currently only a placeholder for future DFS support,
as DFS support requires more components that still need to be
developed. At this point enabling this option won't do anything
except increase code size.
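# Illustrative example (not part of this change): a certified integrator
# build might set CONFIG_EXPERT=y, CONFIG_ATH9K=m and
# CONFIG_ATH9K_DFS_CERTIFIED=y; together with CONFIG_ATH9K_DEBUGFS=y the
# ATH9K_DFS_DEBUGFS helper above then defaults to y and exposes the
# dfs_stats debugfs file added by this merge.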
config ATH9K_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
......
@@ -10,6 +10,8 @@ ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
obj-$(CONFIG_ATH9K) += ath9k.o
......
@@ -187,40 +187,12 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
isr = REG_READ(ah, AR_ISR);
}
if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
u32 raw_intr, rx_msg_intr;
rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
ath_dbg(common, ATH_DBG_MCI,
"MCI gets 0xdeadbeef during MCI int processing"
"new raw_intr=0x%08x, new rx_msg_raw=0x%08x, "
"raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
raw_intr, rx_msg_intr, mci->raw_intr,
mci->rx_msg_intr);
else {
mci->rx_msg_intr |= rx_msg_intr;
mci->raw_intr |= raw_intr;
*masked |= ATH9K_INT_MCI;
if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
mci->cont_status =
REG_READ(ah, AR_MCI_CONT_STATUS);
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
ath_dbg(common, ATH_DBG_MCI, "AR_INTR_SYNC_MCI\n");
}
}
sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
*masked = 0;
if (!isr && !sync_cause)
if (!isr && !sync_cause && !async_cause)
return false;
if (isr) {
@@ -326,6 +298,35 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_hw_bb_watchdog_read(ah);
}
if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
u32 raw_intr, rx_msg_intr;
rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
ath_dbg(common, ATH_DBG_MCI,
"MCI gets 0xdeadbeef during MCI int processing"
"new raw_intr=0x%08x, new rx_msg_raw=0x%08x, "
"raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
raw_intr, rx_msg_intr, mci->raw_intr,
mci->rx_msg_intr);
else {
mci->rx_msg_intr |= rx_msg_intr;
mci->raw_intr |= raw_intr;
*masked |= ATH9K_INT_MCI;
if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
mci->cont_status =
REG_READ(ah, AR_MCI_CONT_STATUS);
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
ath_dbg(common, ATH_DBG_MCI, "AR_INTR_SYNC_MCI\n");
}
}
if (sync_cause) {
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
......
@@ -159,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
/* return block-ack bitmap index given sequence and starting sequence */
#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
/* return the seqno for _start + _offset */
#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
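/*
 * Worked example (illustrative, not part of the patch): with
 * IEEE80211_SEQ_MAX = 4096, ATH_BA_INDEX(4090, 3) = (3 - 4090) & 4095 = 9,
 * and ATH_BA_INDEX2SEQ(4090, 9) = (4090 + 9) & 4095 = 3, i.e. the two
 * macros are inverses modulo the sequence-number space.
 */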
/* returns delimiter padding required given the packet length */
#define ATH_AGGR_GET_NDELIM(_len) \
(((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
@@ -238,6 +241,7 @@ struct ath_atx_tid {
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
@@ -252,9 +256,9 @@ struct ath_atx_tid {
struct ath_node {
#ifdef CONFIG_ATH9K_DEBUGFS
struct list_head list; /* for sc->nodes */
#endif
struct ieee80211_sta *sta; /* station struct we're part of */
struct ieee80211_vif *vif; /* interface with which we're associated */
#endif
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
int ps_key;
@@ -276,7 +280,6 @@ struct ath_tx_control {
};
#define ATH_TX_ERROR 0x01
#define ATH_TX_BAR 0x02
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -542,7 +545,7 @@ struct ath_ant_comb {
#define DEFAULT_CACHELINE 32
#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
#define ATH_MAX_SW_RETRIES 10
#define ATH_MAX_SW_RETRIES 30
#define ATH_CHAN_MAX 255
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
......
@@ -856,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
if (flags & ATH_TX_BAR)
if (flags & ATH_TX_ERROR)
TX_STAT_INC(qnum, a_xretries);
else
TX_STAT_INC(qnum, a_completed);
@@ -1630,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_debug);
#endif
ath9k_dfs_init_debug(sc);
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc, debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma); &fops_dma);
debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc, debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
......
@@ -19,6 +19,7 @@
#include "hw.h"
#include "rc.h"
#include "dfs_debug.h"
struct ath_txq;
struct ath_buf;
@@ -187,6 +188,7 @@ struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
struct ath_dfs_stats dfs_stats;
u32 reset[__RESET_TYPE_MAX];
};
......
/*
* Copyright (c) 2008-2011 Atheros Communications Inc.
* Copyright (c) 2011 Neratec Solutions AG
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hw.h"
#include "hw-ops.h"
#include "ath9k.h"
#include "dfs.h"
#include "dfs_debug.h"
/*
* TODO: move into or synchronize this with generic header
* as soon as IF is defined
*/
struct dfs_radar_pulse {
u16 freq;
u64 ts;
u32 width;
u8 rssi;
};
/* internal struct to pass radar data */
struct ath_radar_data {
u8 pulse_bw_info;
u8 rssi;
u8 ext_rssi;
u8 pulse_length_ext;
u8 pulse_length_pri;
};
/* convert pulse duration to usecs, considering clock mode */
static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
{
const u32 AR93X_NSECS_PER_DUR = 800;
const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
u32 nsecs;
if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
else
nsecs = dur * AR93X_NSECS_PER_DUR;
return (nsecs + 500) / 1000;
}
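/*
 * Worked example (illustrative, not part of the patch): in fast-clock
 * mode AR93X_NSECS_PER_DUR_FAST is 8000 / 11 = 727 (integer division),
 * so a reported duration of 10 becomes 10 * 727 = 7270 ns and
 * (7270 + 500) / 1000 rounds to 7 us; in normal clock mode the same
 * value gives 10 * 800 = 8000 ns, i.e. 8 us.
 */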
#define PRI_CH_RADAR_FOUND 0x01
#define EXT_CH_RADAR_FOUND 0x02
static bool
ath9k_postprocess_radar_event(struct ath_softc *sc,
struct ath_radar_data *are,
struct dfs_radar_pulse *drp)
{
u8 rssi;
u16 dur;
ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_DFS,
"pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
are->pulse_bw_info,
are->pulse_length_pri, are->rssi,
are->pulse_length_ext, are->ext_rssi);
/*
* Only the last 2 bits of the BW info are relevant, they indicate
* which channel the radar was detected in.
*/
are->pulse_bw_info &= 0x03;
switch (are->pulse_bw_info) {
case PRI_CH_RADAR_FOUND:
/* radar in ctrl channel */
dur = are->pulse_length_pri;
DFS_STAT_INC(sc, pri_phy_errors);
/*
* cannot use ctrl channel RSSI
* if extension channel is stronger
*/
rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
break;
case EXT_CH_RADAR_FOUND:
/* radar in extension channel */
dur = are->pulse_length_ext;
DFS_STAT_INC(sc, ext_phy_errors);
/*
* cannot use extension channel RSSI
* if control channel is stronger
*/
rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
break;
case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
/*
* Conducted testing, when pulse is on DC, both pri and ext
* durations are reported to be same
*
* Radiated testing, when pulse is on DC, different pri and
* ext durations are reported, so take the larger of the two
*/
if (are->pulse_length_ext >= are->pulse_length_pri)
dur = are->pulse_length_ext;
else
dur = are->pulse_length_pri;
DFS_STAT_INC(sc, dc_phy_errors);
/* when both are present use stronger one */
rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
break;
default:
/*
* Bogus bandwidth info was received in descriptor,
* so ignore this PHY error
*/
DFS_STAT_INC(sc, bwinfo_discards);
return false;
}
if (rssi == 0) {
DFS_STAT_INC(sc, rssi_discards);
return false;
}
/*
* TODO: check chirping pulses
* checks for chirping are dependent on the DFS regulatory domain
* used, which is yet TBD
*/
/* convert duration to usecs */
drp->width = dur_to_usecs(sc->sc_ah, dur);
drp->rssi = rssi;
DFS_STAT_INC(sc, pulses_detected);
return true;
}
#undef PRI_CH_RADAR_FOUND
#undef EXT_CH_RADAR_FOUND
/*
* DFS: check PHY-error for radar pulse and feed the detector
*/
void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
struct ath_rx_status *rs, u64 mactime)
{
struct ath_radar_data ard;
u16 datalen;
char *vdata_end;
struct dfs_radar_pulse drp;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
ath_dbg(common, ATH_DBG_DFS,
"Error: rs_phyer=0x%x not a radar error\n",
rs->rs_phyerr);
return;
}
datalen = rs->rs_datalen;
if (datalen == 0) {
DFS_STAT_INC(sc, datalen_discards);
return;
}
ard.rssi = rs->rs_rssi_ctl0;
ard.ext_rssi = rs->rs_rssi_ext0;
/*
* hardware stores this as 8 bit signed value.
* we will cap it at 0 if it is a negative number
*/
if (ard.rssi & 0x80)
ard.rssi = 0;
if (ard.ext_rssi & 0x80)
ard.ext_rssi = 0;
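/*
 * Illustrative example (not part of the patch): 0xF8 read from the
 * descriptor is -8 as a signed 8-bit value, so its top bit (0x80) is
 * set and it gets clamped to 0, while 0x28 (+40) is kept as-is.
 */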
vdata_end = (char *)data + datalen;
ard.pulse_bw_info = vdata_end[-1];
ard.pulse_length_ext = vdata_end[-2];
ard.pulse_length_pri = vdata_end[-3];
ath_dbg(common, ATH_DBG_DFS,
"bw_info=%d, length_pri=%d, length_ext=%d, "
"rssi_pri=%d, rssi_ext=%d\n",
ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
ard.rssi, ard.ext_rssi);
drp.freq = ah->curchan->channel;
drp.ts = mactime;
if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
static u64 last_ts;
ath_dbg(common, ATH_DBG_DFS,
"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
"width=%d, rssi=%d, delta_ts=%llu\n",
drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
last_ts = drp.ts;
/*
* TODO: forward pulse to pattern detector
*
* ieee80211_add_radar_pulse(drp.freq, drp.ts,
* drp.width, drp.rssi);
*/
}
}
/*
* Copyright (c) 2008-2011 Atheros Communications Inc.
* Copyright (c) 2011 Neratec Solutions AG
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef ATH9K_DFS_H
#define ATH9K_DFS_H
#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
/**
* ath9k_dfs_process_phyerr - process radar PHY error
* @sc: ath_softc
* @data: RX payload data
* @rs: RX status after processing descriptor
* @mactime: receive time
*
* This function is called whenever the HW DFS module detects a radar
* pulse and reports it as a PHY error.
*
* The radar information provided as raw payload data is validated and
* filtered for false pulses. Events passing all tests are forwarded to
* the upper layer for pattern detection.
*/
void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
struct ath_rx_status *rs, u64 mactime);
#else
static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
struct ath_rx_status *rs, u64 mactime) { }
#endif
#endif /* ATH9K_DFS_H */
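/*
 * Usage sketch (illustrative only; the surrounding RX-handler shape is an
 * assumption, not part of this commit): a driver RX path would forward
 * radar PHY errors only when the chipset advertises DFS support, e.g.
 *
 *	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS)
 *		ath9k_dfs_process_phyerr(sc, data, rs, mactime);
 *
 * With CONFIG_ATH9K_DFS_CERTIFIED disabled, the inline stub above turns
 * this call into a no-op.
 */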
/*
* Copyright (c) 2008-2011 Atheros Communications Inc.
* Copyright (c) 2011 Neratec Solutions AG
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/export.h>
#include "ath9k.h"
#include "dfs_debug.h"
#define ATH9K_DFS_STAT(s, p) \
len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
sc->debug.stats.dfs_stats.p);
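/*
 * Illustrative note: each ATH9K_DFS_STAT() invocation below appends one
 * "%28s : %10u" line to the buffer, producing right-aligned output such
 * as "DFS pulses detected  :          3" in the dfs_stats debugfs file.
 */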
static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
char *buf;
unsigned int len = 0, size = 8000;
ssize_t retval = 0;
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
len += snprintf(buf + len, size - len, "DFS support for "
"macVersion = 0x%x, macRev = 0x%x: %s\n",
hw_ver->macVersion, hw_ver->macRev,
(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
"enabled" : "disabled");
ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
ATH9K_DFS_STAT("BW info discards ", bwinfo_discards);
ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return retval;
}
static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static const struct file_operations fops_dfs_stats = {
.read = read_file_dfs,
.open = ath9k_dfs_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath9k_dfs_init_debug(struct ath_softc *sc)
{
debugfs_create_file("dfs_stats", S_IRUSR,
sc->debug.debugfs_phy, sc, &fops_dfs_stats);
}
/*
* Copyright (c) 2008-2011 Atheros Communications Inc.
* Copyright (c) 2011 Neratec Solutions AG
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef DFS_DEBUG_H
#define DFS_DEBUG_H
#include "hw.h"
/**
* struct ath_dfs_stats - DFS Statistics
*
* @pulses_detected: No. of pulses detected so far
* @datalen_discards: No. of pulses discarded due to invalid datalen
* @rssi_discards: No. of pulses discarded due to invalid RSSI
* @bwinfo_discards: No. of pulses discarded due to invalid BW info
* @pri_phy_errors: No. of pulses reported for primary channel
* @ext_phy_errors: No. of pulses reported for extension channel
* @dc_phy_errors: No. of pulses reported for primary + extension channel
*/
struct ath_dfs_stats {
u32 pulses_detected;
u32 datalen_discards;
u32 rssi_discards;
u32 bwinfo_discards;
u32 pri_phy_errors;
u32 ext_phy_errors;
u32 dc_phy_errors;
};
#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
void ath9k_dfs_init_debug(struct ath_softc *sc);
#else
#define DFS_STAT_INC(sc, c) do { } while (0)
static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
#endif /* DFS_DEBUG_H */
@@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
ini_reloaded);
}
static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
{
if (!ath9k_hw_private_ops(ah)->set_radar_params)
return;
ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
}
#endif /* ATH9K_HW_OPS_H */
@@ -2277,6 +2277,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
return chip_chainmask;
}
/**
* ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
* @ah: the atheros hardware data structure
*
* We enable DFS support upstream on chipsets which have passed a series
* of tests. The testing requirements are going to be documented. Desired
* test requirements are documented at:
*
* http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
*
* Once a new chipset gets properly tested an individual commit can be used
* to document the testing for DFS for that chipset.
*/
static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
{
switch (ah->hw_version.macVersion) {
/* AR9580 will likely be our first target to get testing on */
case AR_SREV_VERSION_9580:
default:
return false;
}
}
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2375,12 +2399,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->num_gpio_pins = AR_NUM_GPIO;
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
pCap->hw_caps |= ATH9K_HW_CAP_CST;
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
} else {
pCap->rts_aggr_limit = (8 * 1024);
}
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
else
pCap->rts_aggr_limit = (8 * 1024);
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2490,6 +2512,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->pcie_lcr_offset = 0x80;
}
if (ath9k_hw_dfs_tested(ah))
pCap->hw_caps |= ATH9K_HW_CAP_DFS;
tx_chainmask = pCap->tx_chainmask;
rx_chainmask = pCap->rx_chainmask;
while (tx_chainmask || rx_chainmask) {
......
@@ -196,21 +196,21 @@ enum ath_ini_subsys {
enum ath9k_hw_caps {
ATH9K_HW_CAP_HT = BIT(0),
ATH9K_HW_CAP_RFSILENT = BIT(1),
ATH9K_HW_CAP_CST = BIT(2),
ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
ATH9K_HW_CAP_EDMA = BIT(6),
ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7),
ATH9K_HW_CAP_LDPC = BIT(8),
ATH9K_HW_CAP_FASTCLOCK = BIT(9),
ATH9K_HW_CAP_SGI_20 = BIT(10),
ATH9K_HW_CAP_PAPRD = BIT(11),
ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
ATH9K_HW_CAP_2GHZ = BIT(13),
ATH9K_HW_CAP_5GHZ = BIT(14),
ATH9K_HW_CAP_APM = BIT(15),
ATH9K_HW_CAP_RTT = BIT(16),
ATH9K_HW_CAP_MCI = BIT(17),
ATH9K_HW_CAP_AUTOSLEEP = BIT(2),
ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3),
ATH9K_HW_CAP_EDMA = BIT(4),
ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5),
ATH9K_HW_CAP_LDPC = BIT(6),
ATH9K_HW_CAP_FASTCLOCK = BIT(7),
ATH9K_HW_CAP_SGI_20 = BIT(8),
ATH9K_HW_CAP_PAPRD = BIT(9),
ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10),
ATH9K_HW_CAP_2GHZ = BIT(11),
ATH9K_HW_CAP_5GHZ = BIT(12),
ATH9K_HW_CAP_APM = BIT(13),
ATH9K_HW_CAP_RTT = BIT(14),
ATH9K_HW_CAP_MCI = BIT(15),
ATH9K_HW_CAP_DFS = BIT(16),
};
struct ath9k_hw_capabilities {
......
@@ -297,9 +297,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath_softc *sc = hw->priv;
struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
struct ath_hw *ah = sc->sc_ah;
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
int ret;
ret = ath_reg_notifier_apply(wiphy, request, reg);
/* Set tx power */
if (ah->curchan) {
sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
ath9k_ps_wakeup(sc);
ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
ath9k_ps_restore(sc);
}
return ath_reg_notifier_apply(wiphy, request, reg);
return ret;
}
/*
......
@@ -644,9 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
spin_lock(&sc->nodes_lock);
list_add(&an->list, &sc->nodes);
spin_unlock(&sc->nodes_lock);
#endif
an->sta = sta;
an->vif = vif;
#endif
if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -1873,7 +1873,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
if (vif->type == NL80211_IFTYPE_ADHOC &&
if ((vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT) &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
......
@@ -234,8 +234,8 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
}
}
void ath_mci_process_profile(struct ath_softc *sc,
static void ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
@@ -261,8 +261,8 @@ void ath_mci_process_profile(struct ath_softc *sc,
ath_mci_update_scheme(sc);
}
void ath_mci_process_status(struct ath_softc *sc,
static void ath_mci_process_status(struct ath_softc *sc,
struct ath_mci_profile_status *status)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
......
@@ -128,10 +128,6 @@ struct ath_mci_coex {
};
void ath_mci_flush_profile(struct ath_mci_profile *mci);
void ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info);
void ath_mci_process_status(struct ath_softc *sc,
struct ath_mci_profile_status *status);
int ath_mci_setup(struct ath_softc *sc);
void ath_mci_cleanup(struct ath_softc *sc);
void ath_mci_intr(struct ath_softc *sc);
......
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->max_valid_rate = k;
ath_rc_sort_validrates(rate_table, ath_rc_priv);
ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
ath_rc_priv->rate_max_phy = (k > 4) ?
ath_rc_priv->valid_rate_index[k-4] :
ath_rc_priv->valid_rate_index[k-1];
ath_rc_priv->rate_table = rate_table;
ath_dbg(common, ATH_DBG_CONFIG,
......
@@ -1823,6 +1823,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
if (ieee80211_is_beacon(hdr->frame_control) &&
!is_zero_ether_addr(common->curbssid) &&
!compare_ether_addr(hdr->addr3, common->curbssid))
rs.is_mybeacon = true;
else
......
@@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar);
struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -150,6 +150,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -158,25 +164,24 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
bool sendbar = false;
INIT_LIST_HEAD(&bf_head);
memset(&ts, 0, sizeof(ts));
spin_lock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&tid->buf_q))) {
fi = get_frame_info(skb);
bf = fi->bf;
spin_unlock_bh(&txq->axq_lock);
if (bf && fi->retries) {
list_add_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
sendbar = true;
} else {
ath_tx_send_normal(sc, txq, NULL, skb);
}
spin_lock_bh(&txq->axq_lock);
}
if (tid->baw_head == tid->baw_tail) {
@@ -184,7 +189,8 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
tid->state &= ~AGGR_CLEANUP;
}
spin_unlock_bh(&txq->axq_lock);
if (sendbar)
ath_send_bar(tid, tid->seq_start);
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -200,6 +206,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
if (tid->bar_index >= 0)
tid->bar_index--;
}
}
@@ -243,9 +251,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
spin_unlock(&txq->axq_lock);
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
spin_lock(&txq->axq_lock);
continue;
}
@@ -254,24 +260,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
if (fi->retries)
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
spin_unlock(&txq->axq_lock);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
spin_lock(&txq->axq_lock);
}
tid->seq_next = tid->seq_start;
tid->baw_tail = tid->baw_head;
tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
struct sk_buff *skb)
struct sk_buff *skb, int count)
{
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_buf *bf = fi->bf;
struct ieee80211_hdr *hdr;
int prev = fi->retries;
TX_STAT_INC(txq->axq_qnum, a_retries);
if (fi->retries++ > 0)
fi->retries += count;
if (prev > 0)
return;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -370,7 +378,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
@@ -379,6 +387,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int nframes;
u8 tidno;
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
int i, retries;
int bar_index = -1;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -387,6 +397,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memcpy(rates, tx_info->control.rates, sizeof(rates));
retries = ts->ts_longretry + 1;
for (i = 0; i < ts->ts_rateindex; i++)
retries += rates[i].count;
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
@@ -400,8 +414,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
0, 0);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
bf = bf_next;
}
@@ -411,6 +424,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
an = (struct ath_node *)sta->drv_priv;
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(an, tidno);
seq_first = tid->seq_start;
/*
* The hardware occasionally sends a tx status for the wrong TID.
@@ -460,25 +474,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
} else if ((tid->state & AGGR_CLEANUP) || !retry) {
/*
* cleanup in progress, just fail
* the un-acked sub-frames
*/
txfail = 1;
} else if (flush) {
txpending = 1;
} else if (fi->retries < ATH_MAX_SW_RETRIES) {
if (txok || !an->sleeping)
ath_tx_set_retry(sc, txq, bf->bf_mpdu,
retries);
txpending = 1;
} else {
if ((tid->state & AGGR_CLEANUP) || !retry) {
/*
* cleanup in progress, just fail
* the un-acked sub-frames
*/
txfail = 1;
} else if (flush) {
txpending = 1;
} else if (fi->retries < ATH_MAX_SW_RETRIES) {
if (txok || !an->sleeping)
ath_tx_set_retry(sc, txq, bf->bf_mpdu);
txpending = 1;
} else {
txfail = 1;
sendbar = 1;
txfail_cnt++;
}
txfail = 1;
txfail_cnt++;
bar_index = max_t(int, bar_index,
ATH_BA_INDEX(seq_first, seqno));
}
/*
@@ -495,9 +509,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
spin_lock_bh(&txq->axq_lock);
ath_tx_update_baw(sc, tid, seqno);
spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -506,33 +518,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
!txfail, sendbar);
!txfail);
} else {
/* retry the un-acked ones */
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
if (bf->bf_next == NULL && bf_last->bf_stale) {
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
bf->bf_next == NULL && bf_last->bf_stale) {
struct ath_buf *tbf;
tbf = ath_clone_txbuf(sc, bf_last);
/*
* Update tx baw and complete the
* frame with failed status if we
* run out of tx buf.
*/
if (!tbf) {
spin_lock_bh(&txq->axq_lock);
ath_tx_update_baw(sc, tid, seqno);
spin_unlock_bh(&txq->axq_lock);
ath_tx_complete_buf(sc, bf, txq,
&bf_head,
ts, 0,
!flush);
break;
}
fi->bf = tbf;
}
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq,
&bf_head, ts, 0);
bar_index = max_t(int, bar_index,
ATH_BA_INDEX(seq_first, seqno));
break;
}
fi->bf = tbf;
}
/*
@@ -545,12 +554,18 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf = bf_next;
}
if (bar_index >= 0) {
u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
}
/* prepend un-acked frames to the beginning of the pending frame queue */
if (!skb_queue_empty(&bf_pending)) {
if (an->sleeping)
ieee80211_sta_set_buffered(sta, tid->tidno, true);
spin_lock_bh(&txq->axq_lock);
skb_queue_splice(&bf_pending, &tid->buf_q);
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
@@ -558,7 +573,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (ts->ts_status & ATH9K_TXERR_FILT)
tid->ac->clear_ps_filter = true;
}
spin_unlock_bh(&txq->axq_lock);
}
if (tid->state & AGGR_CLEANUP)
@@ -617,24 +631,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
for (i = 0; i < 4; i++) {
if (rates[i].count) {
int modeidx;
if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
legacy = 1;
break;
}
if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
modeidx = MCS_HT40;
else
modeidx = MCS_HT20;
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
modeidx++;
frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
}
int modeidx;
if (!rates[i].count)
continue;
if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
legacy = 1;
break;
}
if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
modeidx = MCS_HT40;
else
modeidx = MCS_HT20;
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
modeidx++;
frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
/*
@@ -770,8 +786,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
if (!bf_first)
bf_first = bf;
/* do not step over block-ack window */
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -779,6 +793,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
struct ath_tx_status ts = {};
struct list_head bf_head;
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
__skb_unlink(skb, &tid->buf_q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
continue;
}
if (!bf_first)
bf_first = bf;
if (!rl) {
aggr_limit = ath_lookup_rate(sc, bf, tid);
rl = 1;
@@ -1121,6 +1150,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
txtid->bar_index = -1;
memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
txtid->baw_head = txtid->baw_tail = 0;
@@ -1155,9 +1185,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
txtid->state |= AGGR_CLEANUP;
else
txtid->state &= ~AGGR_ADDBA_COMPLETE;
spin_unlock_bh(&txq->axq_lock);
ath_tx_flush_tid(sc, txtid);
spin_unlock_bh(&txq->axq_lock);
}
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1399,8 +1429,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *list, bool retry_tx)
__releases(txq->axq_lock)
__acquires(txq->axq_lock)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1427,13 +1455,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf)) if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
retry_tx); retry_tx);
else else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
spin_lock_bh(&txq->axq_lock);
} }
} }
...@@ -1560,11 +1586,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) ...@@ -1560,11 +1586,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
break; break;
} }
if (!list_empty(&ac->tid_q)) { if (!list_empty(&ac->tid_q) && !ac->sched) {
if (!ac->sched) { ac->sched = true;
ac->sched = true; list_add_tail(&ac->list, &txq->axq_acq);
list_add_tail(&ac->list, &txq->axq_acq);
}
} }
if (ac == last_ac || if (ac == last_ac ||
...@@ -1707,10 +1731,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, ...@@ -1707,10 +1731,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
list_add_tail(&bf->list, &bf_head); list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0; bf->bf_state.bf_type = 0;
/* update starting sequence number for subsequent ADDBA request */
if (tid)
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
bf->bf_lastbf = bf; bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen); ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false); ath_tx_txqaddbuf(sc, txq, &bf_head, false);
...@@ -1818,7 +1838,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, ...@@ -1818,7 +1838,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf; struct ath_buf *bf;
u8 tidno; u8 tidno;
spin_lock_bh(&txctl->txq->axq_lock);
if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
ieee80211_is_data_qos(hdr->frame_control)) { ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] & tidno = ieee80211_get_qos_ctl(hdr)[0] &
...@@ -1837,7 +1856,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, ...@@ -1837,7 +1856,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
} else { } else {
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
if (!bf) if (!bf)
goto out; return;
bf->bf_state.bfs_paprd = txctl->paprd; bf->bf_state.bfs_paprd = txctl->paprd;
...@@ -1846,9 +1865,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, ...@@ -1846,9 +1865,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
ath_tx_send_normal(sc, txctl->txq, tid, skb); ath_tx_send_normal(sc, txctl->txq, tid, skb);
} }
out:
spin_unlock_bh(&txctl->txq->axq_lock);
} }
/* Upon failure caller should free skb */ /* Upon failure caller should free skb */
...@@ -1915,9 +1931,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, ...@@ -1915,9 +1931,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
ieee80211_stop_queue(sc->hw, q); ieee80211_stop_queue(sc->hw, q);
txq->stopped = 1; txq->stopped = 1;
} }
spin_unlock_bh(&txq->axq_lock);
ath_tx_start_dma(sc, skb, txctl); ath_tx_start_dma(sc, skb, txctl);
spin_unlock_bh(&txq->axq_lock);
return 0; return 0;
} }
...@@ -1936,9 +1954,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, ...@@ -1936,9 +1954,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
if (tx_flags & ATH_TX_BAR)
tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
if (!(tx_flags & ATH_TX_ERROR)) if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */ /* Frame was ACKed */
tx_info->flags |= IEEE80211_TX_STAT_ACK; tx_info->flags |= IEEE80211_TX_STAT_ACK;
...@@ -1966,7 +1981,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, ...@@ -1966,7 +1981,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
q = skb_get_queue_mapping(skb); q = skb_get_queue_mapping(skb);
if (txq == sc->tx.txq_map[q]) { if (txq == sc->tx.txq_map[q]) {
spin_lock_bh(&txq->axq_lock);
if (WARN_ON(--txq->pending_frames < 0)) if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0; txq->pending_frames = 0;
...@@ -1974,7 +1988,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, ...@@ -1974,7 +1988,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ieee80211_wake_queue(sc->hw, q); ieee80211_wake_queue(sc->hw, q);
txq->stopped = 0; txq->stopped = 0;
} }
spin_unlock_bh(&txq->axq_lock);
} }
ieee80211_tx_status(hw, skb); ieee80211_tx_status(hw, skb);
...@@ -1982,16 +1995,13 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, ...@@ -1982,16 +1995,13 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q, struct ath_txq *txq, struct list_head *bf_q,
struct ath_tx_status *ts, int txok, int sendbar) struct ath_tx_status *ts, int txok)
{ {
struct sk_buff *skb = bf->bf_mpdu; struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
unsigned long flags; unsigned long flags;
int tx_flags = 0; int tx_flags = 0;
if (sendbar)
tx_flags = ATH_TX_BAR;
if (!txok) if (!txok)
tx_flags |= ATH_TX_ERROR; tx_flags |= ATH_TX_ERROR;
...@@ -2083,8 +2093,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, ...@@ -2083,8 +2093,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf, struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head) struct list_head *bf_head)
__releases(txq->axq_lock)
__acquires(txq->axq_lock)
{ {
int txok; int txok;
...@@ -2094,16 +2102,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, ...@@ -2094,16 +2102,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf)) if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--; txq->axq_ampdu_depth--;
spin_unlock_bh(&txq->axq_lock);
if (!bf_isampdu(bf)) { if (!bf_isampdu(bf)) {
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0); ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
} else } else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
spin_lock_bh(&txq->axq_lock);
if (sc->sc_flags & SC_OP_TXAGGR) if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq); ath_txq_schedule(sc, txq);
} }
......
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include "regd.h" #include "regd.h"
#include "regd_common.h" #include "regd_common.h"
static int __ath_regd_init(struct ath_regulatory *reg);
/* /*
* This is a set of common rules used by our world regulatory domains. * This is a set of common rules used by our world regulatory domains.
* We have 12 world regulatory domains. To save space we consolidate * We have 12 world regulatory domains. To save space we consolidate
...@@ -347,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy, ...@@ -347,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
} }
} }
static u16 ath_regd_find_country_by_name(char *alpha2)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
if (!memcmp(allCountries[i].isoName, alpha2, 2))
return allCountries[i].countryCode;
}
return -1;
}
int ath_reg_notifier_apply(struct wiphy *wiphy, int ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request, struct regulatory_request *request,
struct ath_regulatory *reg) struct ath_regulatory *reg)
{ {
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
u16 country_code;
/* We always apply this */ /* We always apply this */
ath_reg_apply_radar_flags(wiphy); ath_reg_apply_radar_flags(wiphy);
...@@ -363,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy, ...@@ -363,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
return 0; return 0;
switch (request->initiator) { switch (request->initiator) {
case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE: case NL80211_REGDOM_SET_BY_CORE:
/*
* If common->reg_world_copy is world roaming it means we *were*
* world roaming... so we now have to restore that data.
*/
if (!ath_is_world_regd(&common->reg_world_copy))
break;
memcpy(reg, &common->reg_world_copy,
sizeof(struct ath_regulatory));
break;
case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_USER: case NL80211_REGDOM_SET_BY_USER:
break; break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE: case NL80211_REGDOM_SET_BY_COUNTRY_IE:
if (ath_is_world_regd(reg)) if (!ath_is_world_regd(reg))
ath_reg_apply_world_flags(wiphy, request->initiator, break;
reg);
country_code = ath_regd_find_country_by_name(request->alpha2);
if (country_code == (u16) -1)
break;
reg->current_rd = COUNTRY_ERD_FLAG;
reg->current_rd |= country_code;
printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
reg->current_rd);
__ath_regd_init(reg);
ath_reg_apply_world_flags(wiphy, request->initiator, reg);
break; break;
} }
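
A self-contained sketch of the Country-IE branch above: resolve the alpha2 string against a country table, then fold the returned code into the regdomain word behind the country flag bit. The table entries and the 0x8000 flag value below are placeholders, not the driver's allCountries[]/COUNTRY_ERD_FLAG data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COUNTRY_FLAG 0x8000                 /* placeholder for the driver's flag bit */

struct country_ent {
        char iso[3];                        /* ISO 3166-1 alpha2 string */
        uint16_t code;
};

/* Tiny stand-in for the driver's country table. */
static const struct country_ent countries[] = {
        { "US", 840 },
        { "DE", 276 },
        { "JP", 392 },
};

static uint16_t find_country_by_name(const char *alpha2)
{
        size_t i;

        for (i = 0; i < sizeof(countries) / sizeof(countries[0]); i++)
                if (!memcmp(countries[i].iso, alpha2, 2))
                        return countries[i].code;
        return (uint16_t)-1;                /* "not found" sentinel, as above */
}

int main(void)
{
        uint16_t code = find_country_by_name("DE");
        uint16_t current_rd;

        if (code == (uint16_t)-1)
                return 1;
        current_rd = COUNTRY_FLAG | code;   /* mirrors the reg->current_rd setup */
        printf("regdomain 0x%04x\n", (unsigned)current_rd);
        return 0;
}
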
...@@ -508,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg) ...@@ -508,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg)
reg->current_rd = 0x64; reg->current_rd = 0x64;
} }
int static int __ath_regd_init(struct ath_regulatory *reg)
ath_regd_init(struct ath_regulatory *reg,
struct wiphy *wiphy,
int (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
{ {
struct country_code_to_enum_rd *country = NULL; struct country_code_to_enum_rd *country = NULL;
u16 regdmn; u16 regdmn;
...@@ -583,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg, ...@@ -583,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg,
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n", printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
reg->regpair->regDmnEnum); reg->regpair->regDmnEnum);
return 0;
}
int
ath_regd_init(struct ath_regulatory *reg,
struct wiphy *wiphy,
int (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request))
{
struct ath_common *common = container_of(reg, struct ath_common,
regulatory);
int r;
r = __ath_regd_init(reg);
if (r)
return r;
if (ath_is_world_regd(reg))
memcpy(&common->reg_world_copy, reg,
sizeof(struct ath_regulatory));
ath_regd_init_wiphy(reg, wiphy, reg_notifier); ath_regd_init_wiphy(reg, wiphy, reg_notifier);
return 0; return 0;
} }
EXPORT_SYMBOL(ath_regd_init); EXPORT_SYMBOL(ath_regd_init);
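
The ath_regd_init()/__ath_regd_init() split above boils down to a save/restore pattern: snapshot the regulatory state when the EEPROM gives a world-roaming domain, and hand the snapshot back when a later CORE/DRIVER hint resets things. A compact sketch under that reading, with illustrative field names:

#include <stdbool.h>
#include <string.h>

struct regulatory {
        unsigned int current_rd;
        bool world_roaming;
};

static struct regulatory active;            /* what the hardware uses now */
static struct regulatory world_copy;        /* snapshot taken at first init */

static void regd_init(const struct regulatory *from_eeprom)
{
        active = *from_eeprom;
        if (active.world_roaming)
                memcpy(&world_copy, &active, sizeof(world_copy));
}

/* Only restore if we really were world roaming; otherwise keep the
 * EEPROM-programmed data untouched. */
static void regd_restore_world(void)
{
        if (world_copy.world_roaming)
                memcpy(&active, &world_copy, sizeof(active));
}

int main(void)
{
        struct regulatory eeprom = { .current_rd = 0x64, .world_roaming = true };

        regd_init(&eeprom);
        active.current_rd = 0x8114;         /* e.g. overridden by a Country IE */
        regd_restore_world();               /* CORE hint: back to the world copy */
        return active.current_rd == 0x64 ? 0 : 1;
}
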
......
...@@ -228,10 +228,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, ...@@ -228,10 +228,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
static void b43_radio_2056_setup(struct b43_wldev *dev, static void b43_radio_2056_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e) const struct b43_nphy_channeltab_entry_rev3 *e)
{ {
struct ssb_sprom *sprom = dev->dev->bus_sprom;
enum ieee80211_band band = b43_current_band(dev->wl);
u16 offset;
u8 i;
u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
B43_WARN_ON(dev->phy.rev < 3); B43_WARN_ON(dev->phy.rev < 3);
b43_chantab_radio_2056_upload(dev, e); b43_chantab_radio_2056_upload(dev, e);
/* TODO */ b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
if (dev->dev->chip_id == 0x4716) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
} else {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
}
}
if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
}
if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
if (dev->phy.rev >= 5) {
b43_radio_write(dev,
offset | B2056_TX_PADG_IDAC, 0xcc);
if (dev->dev->chip_id == 0x4716) {
bias = 0x40;
cbias = 0x45;
pag_boost = 0x5;
pgag_boost = 0x33;
mixg_boost = 0x55;
} else {
bias = 0x25;
cbias = 0x20;
pag_boost = 0x4;
pgag_boost = 0x03;
mixg_boost = 0x65;
}
padg_boost = 0x77;
b43_radio_write(dev,
offset | B2056_TX_INTPAG_IMAIN_STAT,
bias);
b43_radio_write(dev,
offset | B2056_TX_INTPAG_IAUX_STAT,
bias);
b43_radio_write(dev,
offset | B2056_TX_INTPAG_CASCBIAS,
cbias);
b43_radio_write(dev,
offset | B2056_TX_INTPAG_BOOST_TUNE,
pag_boost);
b43_radio_write(dev,
offset | B2056_TX_PGAG_BOOST_TUNE,
pgag_boost);
b43_radio_write(dev,
offset | B2056_TX_PADG_BOOST_TUNE,
padg_boost);
b43_radio_write(dev,
offset | B2056_TX_MIXG_BOOST_TUNE,
mixg_boost);
} else {
bias = dev->phy.is_40mhz ? 0x40 : 0x20;
b43_radio_write(dev,
offset | B2056_TX_INTPAG_IMAIN_STAT,
bias);
b43_radio_write(dev,
offset | B2056_TX_INTPAG_IAUX_STAT,
bias);
b43_radio_write(dev,
offset | B2056_TX_INTPAG_CASCBIAS,
0x30);
}
b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
}
} else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
/* TODO */
}
udelay(50); udelay(50);
/* VCO calibration */ /* VCO calibration */
b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00); b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
...@@ -387,7 +475,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev) ...@@ -387,7 +475,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
if (nphy->hang_avoid) if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1); b43_nphy_stay_in_carrier_search(dev, 1);
if (dev->phy.rev >= 3) { if (dev->phy.rev >= 7) {
txpi[0] = txpi[1] = 30;
} else if (dev->phy.rev >= 3) {
txpi[0] = 40; txpi[0] = 40;
txpi[1] = 40; txpi[1] = 40;
} else if (sprom->revision < 4) { } else if (sprom->revision < 4) {
...@@ -411,6 +501,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev) ...@@ -411,6 +501,9 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
txpi[1] = 91; txpi[1] = 91;
} }
} }
if (dev->phy.rev < 7 &&
(txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
txpi[0] = txpi[1] = 91;
/* /*
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
...@@ -421,15 +514,31 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev) ...@@ -421,15 +514,31 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
if (dev->phy.rev >= 3) { if (dev->phy.rev >= 3) {
/* FIXME: support 5GHz */ if (b43_nphy_ipa(dev)) {
txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]]; txgain = *(b43_nphy_get_ipa_gain_table(dev) +
txpi[i]);
} else if (b43_current_band(dev->wl) ==
IEEE80211_BAND_5GHZ) {
/* FIXME: use 5GHz tables */
txgain =
b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
} else {
if (dev->phy.rev >= 5 &&
sprom->fem.ghz5.extpa_gain == 3)
; /* FIXME: 5GHz_txgain_HiPwrEPA */
txgain =
b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
}
radio_gain = (txgain >> 16) & 0x1FFFF; radio_gain = (txgain >> 16) & 0x1FFFF;
} else { } else {
txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]]; txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
radio_gain = (txgain >> 16) & 0x1FFF; radio_gain = (txgain >> 16) & 0x1FFF;
} }
dac_gain = (txgain >> 8) & 0x3F; if (dev->phy.rev >= 7)
dac_gain = (txgain >> 8) & 0x7;
else
dac_gain = (txgain >> 8) & 0x3F;
bbmult = txgain & 0xFF; bbmult = txgain & 0xFF;
if (dev->phy.rev >= 3) { if (dev->phy.rev >= 3) {
...@@ -459,7 +568,8 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev) ...@@ -459,7 +568,8 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
u32 tmp32; u32 tmp32;
u16 reg = (i == 0) ? u16 reg = (i == 0) ?
B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1; B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i])); tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
576 + txpi[i]));
b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4); b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
b43_phy_set(dev, reg, 0x4); b43_phy_set(dev, reg, 0x4);
} }
...@@ -1493,8 +1603,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ...@@ -1493,8 +1603,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom; struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* TX to RX */ /* TX to RX */
u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F }; u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 }; u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
/* RX to TX */ /* RX to TX */
u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3, u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
0x1F }; 0x1F };
...@@ -1505,6 +1615,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ...@@ -1505,6 +1615,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
u16 tmp16; u16 tmp16;
u32 tmp32; u32 tmp32;
b43_phy_write(dev, 0x23f, 0x1f8);
b43_phy_write(dev, 0x240, 0x1f8);
tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
tmp32 &= 0xffffff; tmp32 &= 0xffffff;
b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
...@@ -1520,12 +1633,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ...@@ -1520,12 +1633,13 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_phy_write(dev, 0x2AE, 0x000C); b43_phy_write(dev, 0x2AE, 0x000C);
/* TX to RX */ /* TX to RX */
b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9); b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
ARRAY_SIZE(tx2rx_events));
/* RX to TX */ /* RX to TX */
if (b43_nphy_ipa(dev)) if (b43_nphy_ipa(dev))
b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa, b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
rx2tx_delays_ipa, 9); rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
if (nphy->hw_phyrxchain != 3 && if (nphy->hw_phyrxchain != 3 &&
nphy->hw_phyrxchain != nphy->hw_phytxchain) { nphy->hw_phyrxchain != nphy->hw_phytxchain) {
if (b43_nphy_ipa(dev)) { if (b43_nphy_ipa(dev)) {
...@@ -1533,7 +1647,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ...@@ -1533,7 +1647,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
rx2tx_delays[6] = 1; rx2tx_delays[6] = 1;
rx2tx_events[7] = 0x1F; rx2tx_events[7] = 0x1F;
} }
b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9); b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
ARRAY_SIZE(rx2tx_events));
} }
tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
...@@ -1547,8 +1662,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ...@@ -1547,8 +1662,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_nphy_gain_ctrl_workarounds(dev); b43_nphy_gain_ctrl_workarounds(dev);
b43_ntab_write(dev, B43_NTAB32(8, 0), 2); b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
b43_ntab_write(dev, B43_NTAB32(8, 16), 2); b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
/* TODO */ /* TODO */
...@@ -1560,6 +1675,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ...@@ -1560,6 +1675,8 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07); b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88); b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88); b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00); b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00); b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
...@@ -1584,18 +1701,18 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev) ...@@ -1584,18 +1701,18 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
0x70); 0x70);
} }
b43_phy_write(dev, 0x224, 0x039C); b43_phy_write(dev, 0x224, 0x03eb);
b43_phy_write(dev, 0x225, 0x0357); b43_phy_write(dev, 0x225, 0x03eb);
b43_phy_write(dev, 0x226, 0x0317); b43_phy_write(dev, 0x226, 0x0341);
b43_phy_write(dev, 0x227, 0x02D7); b43_phy_write(dev, 0x227, 0x0341);
b43_phy_write(dev, 0x228, 0x039C); b43_phy_write(dev, 0x228, 0x042b);
b43_phy_write(dev, 0x229, 0x0357); b43_phy_write(dev, 0x229, 0x042b);
b43_phy_write(dev, 0x22A, 0x0317); b43_phy_write(dev, 0x22a, 0x0381);
b43_phy_write(dev, 0x22B, 0x02D7); b43_phy_write(dev, 0x22b, 0x0381);
b43_phy_write(dev, 0x22C, 0x039C); b43_phy_write(dev, 0x22c, 0x042b);
b43_phy_write(dev, 0x22D, 0x0357); b43_phy_write(dev, 0x22d, 0x042b);
b43_phy_write(dev, 0x22E, 0x0317); b43_phy_write(dev, 0x22e, 0x0381);
b43_phy_write(dev, 0x22F, 0x02D7); b43_phy_write(dev, 0x22f, 0x0381);
} }
static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
...@@ -3928,6 +4045,76 @@ int b43_phy_initn(struct b43_wldev *dev) ...@@ -3928,6 +4045,76 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0; return 0;
} }
/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
{
struct bcma_drv_cc *cc;
u32 pmu_ctl;
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
cc = &dev->dev->bdev->bus->drv_cc;
if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
if (avoid) {
bcma_chipco_pll_write(cc, 0x0, 0x11500010);
bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
bcma_chipco_pll_write(cc, 0x3, 0x00000000);
bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
bcma_chipco_pll_write(cc, 0x5, 0x88888815);
} else {
bcma_chipco_pll_write(cc, 0x0, 0x11100010);
bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
bcma_chipco_pll_write(cc, 0x3, 0x00000000);
bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
bcma_chipco_pll_write(cc, 0x5, 0x88888815);
}
pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
} else if (dev->dev->chip_id == 0x4716) {
if (avoid) {
bcma_chipco_pll_write(cc, 0x0, 0x11500060);
bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
bcma_chipco_pll_write(cc, 0x3, 0x00000000);
bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
bcma_chipco_pll_write(cc, 0x5, 0x88888815);
} else {
bcma_chipco_pll_write(cc, 0x0, 0x11100060);
bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
bcma_chipco_pll_write(cc, 0x2, 0x03000000);
bcma_chipco_pll_write(cc, 0x3, 0x00000000);
bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
bcma_chipco_pll_write(cc, 0x5, 0x88888815);
}
pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
BCMA_CC_PMU_CTL_NOILPONW;
} else if (dev->dev->chip_id == 0x4322 ||
dev->dev->chip_id == 0x4340 ||
dev->dev->chip_id == 0x4341) {
bcma_chipco_pll_write(cc, 0x0, 0x11100070);
bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
bcma_chipco_pll_write(cc, 0x5, 0x88888854);
if (avoid)
bcma_chipco_pll_write(cc, 0x2, 0x05201828);
else
bcma_chipco_pll_write(cc, 0x2, 0x05001828);
pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
} else {
return;
}
bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
break;
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
/* FIXME */
break;
#endif
}
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void b43_nphy_channel_setup(struct b43_wldev *dev, static void b43_nphy_channel_setup(struct b43_wldev *dev,
const struct b43_phy_n_sfo_cfg *e, const struct b43_phy_n_sfo_cfg *e,
...@@ -3935,6 +4122,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, ...@@ -3935,6 +4122,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
{ {
struct b43_phy *phy = &dev->phy; struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = dev->phy.n; struct b43_phy_n *nphy = dev->phy.n;
int ch = new_channel->hw_value;
u16 old_band_5ghz; u16 old_band_5ghz;
u32 tmp32; u32 tmp32;
...@@ -3974,8 +4162,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, ...@@ -3974,8 +4162,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_nphy_tx_lp_fbw(dev); b43_nphy_tx_lp_fbw(dev);
if (dev->phy.rev >= 3 && 0) { if (dev->phy.rev >= 3 &&
/* TODO */ dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
bool avoid = false;
if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
avoid = true;
} else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
avoid = true;
} else { /* 40MHz */
if (nphy->aband_spurwar_en &&
(ch == 38 || ch == 102 || ch == 118))
avoid = dev->dev->chip_id == 0x4716;
}
b43_nphy_pmu_spur_avoid(dev, avoid);
if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
dev->dev->chip_id == 43225) {
b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
avoid ? 0x5341 : 0x8889);
b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
}
if (dev->phy.rev == 3 || dev->phy.rev == 4)
; /* TODO: reset PLL */
if (avoid)
b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
else
b43_phy_mask(dev, B43_NPHY_BBCFG,
~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
b43_nphy_reset_cca(dev);
/* wl sets useless phy_isspuravoid here */
} }
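
The spur-avoidance choice above reduces to a predicate on the channel: forced on, or channels 5-8, 13 and 14 in 20 MHz operation, or (with the A-band spur workaround enabled) channels 38, 102 and 118 in 40 MHz operation on the BCM4716. A user-space restatement, with the chip check folded into a boolean parameter:

#include <stdbool.h>

enum spur_avoid_mode { SPUR_DISABLE, SPUR_AUTO, SPUR_FORCE };

static bool need_spur_avoid(enum spur_avoid_mode mode, int ch, bool is_40mhz,
                            bool aband_spurwar_en, bool is_bcm4716)
{
        if (mode == SPUR_DISABLE)
                return false;
        if (mode == SPUR_FORCE)
                return true;
        if (!is_40mhz)
                return (ch >= 5 && ch <= 8) || ch == 13 || ch == 14;
        return aband_spurwar_en &&
               (ch == 38 || ch == 102 || ch == 118) && is_bcm4716;
}

int main(void)
{
        /* Channel 6, 20 MHz: spur avoidance expected in AUTO mode. */
        return need_spur_avoid(SPUR_AUTO, 6, false, false, false) ? 0 : 1;
}
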
b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830); b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
...@@ -4055,10 +4276,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) ...@@ -4055,10 +4276,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
{ {
struct b43_phy *phy = &dev->phy; struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n; struct b43_phy_n *nphy = phy->n;
struct ssb_sprom *sprom = dev->dev->bus_sprom;
memset(nphy, 0, sizeof(*nphy)); memset(nphy, 0, sizeof(*nphy));
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4); nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
nphy->spur_avoid = (phy->rev >= 3) ?
B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */ nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */ nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */ nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
...@@ -4067,6 +4291,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev) ...@@ -4067,6 +4291,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
* 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */ * 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
nphy->tx_pwr_idx[0] = 128; nphy->tx_pwr_idx[0] = 128;
nphy->tx_pwr_idx[1] = 128; nphy->tx_pwr_idx[1] = 128;
/* Hardware TX power control and 5GHz power gain */
nphy->txpwrctrl = false;
nphy->pwg_gain_5ghz = false;
if (dev->phy.rev >= 3 ||
(dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
(dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
nphy->txpwrctrl = true;
nphy->pwg_gain_5ghz = true;
} else if (sprom->revision >= 4) {
if (dev->phy.rev >= 2 &&
(sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
nphy->txpwrctrl = true;
#ifdef CONFIG_B43_SSB
if (dev->dev->bus_type == B43_BUS_SSB &&
dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
struct pci_dev *pdev =
dev->dev->sdev->bus->host_pci;
if (pdev->device == 0x4328 ||
pdev->device == 0x432a)
nphy->pwg_gain_5ghz = true;
}
#endif
} else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
nphy->pwg_gain_5ghz = true;
}
}
if (dev->phy.rev >= 3) {
nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
}
} }
static void b43_nphy_op_free(struct b43_wldev *dev) static void b43_nphy_op_free(struct b43_wldev *dev)
......
...@@ -716,6 +716,12 @@ ...@@ -716,6 +716,12 @@
struct b43_wldev; struct b43_wldev;
enum b43_nphy_spur_avoid {
B43_SPUR_AVOID_DISABLE,
B43_SPUR_AVOID_AUTO,
B43_SPUR_AVOID_FORCE,
};
struct b43_chanspec { struct b43_chanspec {
u16 center_freq; u16 center_freq;
enum nl80211_channel_type channel_type; enum nl80211_channel_type channel_type;
...@@ -785,6 +791,7 @@ struct b43_phy_n { ...@@ -785,6 +791,7 @@ struct b43_phy_n {
u16 mphase_txcal_bestcoeffs[11]; u16 mphase_txcal_bestcoeffs[11];
bool txpwrctrl; bool txpwrctrl;
bool pwg_gain_5ghz;
u8 tx_pwr_idx[2]; u8 tx_pwr_idx[2];
u16 adj_pwr_tbl[84]; u16 adj_pwr_tbl[84];
u16 txcal_bbmult; u16 txcal_bbmult;
...@@ -803,6 +810,7 @@ struct b43_phy_n { ...@@ -803,6 +810,7 @@ struct b43_phy_n {
u16 classifier_state; u16 classifier_state;
u16 clip_state[2]; u16 clip_state[2];
enum b43_nphy_spur_avoid spur_avoid;
bool aband_spurwar_en; bool aband_spurwar_en;
bool gband_spurwar_en; bool gband_spurwar_en;
......
...@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = { ...@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, }, [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
[B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, }, [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
[B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, }, [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
[B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, }, [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
[B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, }, [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
[B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, }, [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, }, [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
[B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, }, [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, }, [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
[B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, }, [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
[B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, }, [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
...@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev, ...@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
B2056_RX1, pts->rx, pts->rx_length); B2056_RX1, pts->rx, pts->rx_length);
} }
void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
{
struct b2056_inittabs_pts *pts;
const struct b2056_inittab_entry *e;
if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
B43_WARN_ON(1);
return;
}
pts = &b2056_inittabs[dev->phy.rev];
e = &pts->syn[B2056_SYN_PLL_CP2];
b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
}
const struct b43_nphy_channeltab_entry_rev3 * const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq) b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{ {
......
...@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 { ...@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 {
void b2056_upload_inittabs(struct b43_wldev *dev, void b2056_upload_inittabs(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag); bool ghz5, bool ignore_uploadflag);
void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
/* Get the NPHY Channel Switch Table entry for a channel. /* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */ * Returns NULL on failure to find an entry. */
......
...@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = { ...@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = {
0x0000, 0x0000, 0x0000, 0x0000,
}; };
/* volatile tables, PHY revision >= 3 */
/* indexed by antswctl2g */
static const u16 b43_ntab_antswctl2g_r3[4][32] = {
{
0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
0x0000, 0x0000,
},
{
0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
0x0000, 0x0000,
},
{
0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
0x0000, 0x0000,
},
{
0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
0x0000, 0x03cc,
}
};
/* TX gain tables */ /* TX gain tables */
const u32 b43_ntab_tx_gain_rev0_1_2[] = { const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
...@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { ...@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
const s16 tbl_tx_filter_coef_rev4[7][15] = { const s16 tbl_tx_filter_coef_rev4[7][15] = {
{ -377, 137, -407, 208, -1527, { -377, 137, -407, 208, -1527,
956, 93, 186, 93, 230, 956, 93, 186, 93, 230,
-44, 230, 20, -191, 201 }, -44, 230, 201, -191, 201 },
{ -77, 20, -98, 49, -93, { -77, 20, -98, 49, -93,
60, 56, 111, 56, 26, 60, 56, 111, 56, 26,
-5, 26, 34, -32, 34 }, -5, 26, 34, -32, 34 },
...@@ -2838,9 +2880,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset) ...@@ -2838,9 +2880,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
break; break;
case B43_NTAB_32BIT: case B43_NTAB_32BIT:
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
value <<= 16; value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
break; break;
default: default:
B43_WARN_ON(1); B43_WARN_ON(1);
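
The 32-bit table read is reordered above to fetch DATALO before DATAHI; combining the halves is trivial, and the point of the hunk is apparently the access order (that is an inference from the change itself, not from documentation). A plain-C sketch of the combine with stand-in register reads:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two 16-bit PHY table data registers; the driver reads
 * them with b43_phy_read(dev, B43_NPHY_TABLE_DATALO/DATAHI). */
static uint16_t read_datalo(void) { return 0xbeef; }
static uint16_t read_datahi(void) { return 0xdead; }

int main(void)
{
        uint32_t value;

        /* Low half first, then the high half shifted into the top 16 bits,
         * matching the order the fixed driver code now uses. */
        value = read_datalo();
        value |= (uint32_t)read_datahi() << 16;

        printf("0x%08x\n", (unsigned)value); /* 0xdeadbeef */
        return 0;
}
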
...@@ -2864,6 +2905,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, ...@@ -2864,6 +2905,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) { for (i = 0; i < nr_elements; i++) {
/* Auto increment broken + caching issue on BCM43224? */
if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
}
switch (type) { switch (type) {
case B43_NTAB_8BIT: case B43_NTAB_8BIT:
*data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF; *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
...@@ -2874,9 +2921,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset, ...@@ -2874,9 +2921,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
data += 2; data += 2;
break; break;
case B43_NTAB_32BIT: case B43_NTAB_32BIT:
*((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI); *((u32 *)data) =
*((u32 *)data) <<= 16; b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
*((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO); *((u32 *)data) |=
b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
data += 4; data += 4;
break; break;
default: default:
...@@ -2932,6 +2980,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset, ...@@ -2932,6 +2980,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset); b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) { for (i = 0; i < nr_elements; i++) {
/* Auto increment broken + caching issue on BCM43224? */
if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
dev->dev->chip_rev == 1) {
b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
}
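
The BCM43224 rev1 workaround added to the bulk table paths stops relying on the table's address auto-increment and re-latches TABLE_ADDR before every element. A user-space sketch of that explicitly indexed access pattern, with stand-in register accessors:

#include <stdint.h>

/* Stand-in register accessors; the driver uses b43_phy_write()/b43_phy_read()
 * on B43_NPHY_TABLE_ADDR and B43_NPHY_TABLE_DATALO. */
static void write_table_addr(uint32_t offset) { (void)offset; }
static void write_table_data(uint16_t value) { (void)value; }

static void table_write_indexed(uint32_t offset, const uint16_t *data,
                                unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                /* Do not trust auto-increment: re-program the address for
                 * every element, as the rev1 workaround does. */
                write_table_addr(offset + i);
                write_table_data(data[i]);
        }
}

int main(void)
{
        static const uint16_t buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

        table_write_indexed(0x2400, buf, 8); /* arbitrary example offset */
        return 0;
}
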
switch (type) { switch (type) {
case B43_NTAB_8BIT: case B43_NTAB_8BIT:
value = *data; value = *data;
...@@ -2999,6 +3054,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) ...@@ -2999,6 +3054,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
} while (0) } while (0)
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{ {
struct ssb_sprom *sprom = dev->dev->bus_sprom;
/* Static tables */ /* Static tables */
ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3); ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3); ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
...@@ -3029,7 +3086,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) ...@@ -3029,7 +3086,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3); ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
/* Volatile tables */ /* Volatile tables */
/* TODO */ if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
else
B43_WARN_ON(1);
} }
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
......
...@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( ...@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ #define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128 #define B43_NTAB_C1_LOFEEDTH_SIZE 128
/* Volatile N-PHY tables, PHY revision >= 3 */
#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */
/* Static N-PHY tables, PHY revision >= 3 */ /* Static N-PHY tables, PHY revision >= 3 */
#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */ #define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */
#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */ #define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */
#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */ #define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */ #define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */ #define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */ #define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */ #define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */ #define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */ #define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */ #define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */ #define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */ #define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */ #define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */
#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */ #define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */
#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */ #define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */
#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */ #define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */
#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */ #define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */ #define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */
#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */ #define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */ #define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */ #define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */ #define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
......
...@@ -3,9 +3,8 @@ config BRCMUTIL ...@@ -3,9 +3,8 @@ config BRCMUTIL
config BRCMSMAC config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver" tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
depends on PCI
depends on MAC80211 depends on MAC80211
depends on BCMA=n depends on BCMA
select BRCMUTIL select BRCMUTIL
select FW_LOADER select FW_LOADER
select CRC_CCITT select CRC_CCITT
......
...@@ -40,8 +40,7 @@ ...@@ -40,8 +40,7 @@
static void brcmf_sdioh_irqhandler(struct sdio_func *func) static void brcmf_sdioh_irqhandler(struct sdio_func *func)
{ {
struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev); struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "***IRQHandler\n"); brcmf_dbg(TRACE, "***IRQHandler\n");
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#define DMA_ALIGN_MASK 0x03 #define DMA_ALIGN_MASK 0x03
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_FUNC1_BLOCKSIZE 64 #define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512 #define SDIO_FUNC2_BLOCKSIZE 512
...@@ -47,6 +48,7 @@ ...@@ -47,6 +48,7 @@
/* devices we support, null terminated */ /* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = { static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)}, {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{ /* end: all zeroes */ }, { /* end: all zeroes */ },
}; };
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
...@@ -481,12 +483,12 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, ...@@ -481,12 +483,12 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
kfree(bus_if); kfree(bus_if);
return -ENOMEM; return -ENOMEM;
} }
sdiodev->dev = &func->card->dev;
sdiodev->func[0] = func->card->sdio_func[0]; sdiodev->func[0] = func->card->sdio_func[0];
sdiodev->func[1] = func; sdiodev->func[1] = func;
sdiodev->bus_if = bus_if;
bus_if->bus_priv = sdiodev; bus_if->bus_priv = sdiodev;
bus_if->type = SDIO_BUS; bus_if->type = SDIO_BUS;
dev_set_drvdata(&func->card->dev, bus_if); dev_set_drvdata(&func->card->dev, sdiodev);
atomic_set(&sdiodev->suspend, false); atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait); init_waitqueue_head(&sdiodev->request_byte_wait);
...@@ -496,12 +498,15 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, ...@@ -496,12 +498,15 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
} }
if (func->num == 2) { if (func->num == 2) {
bus_if = dev_get_drvdata(&func->card->dev); sdiodev = dev_get_drvdata(&func->card->dev);
sdiodev = bus_if->bus_priv;
if ((!sdiodev) || (sdiodev->func[1]->card != func->card)) if ((!sdiodev) || (sdiodev->func[1]->card != func->card))
return -ENODEV; return -ENODEV;
sdiodev->func[2] = func; sdiodev->func[2] = func;
bus_if = sdiodev->bus_if;
sdiodev->dev = &func->dev;
dev_set_drvdata(&func->dev, bus_if);
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n"); brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
ret = brcmf_sdio_probe(sdiodev); ret = brcmf_sdio_probe(sdiodev);
} }
...@@ -520,11 +525,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) ...@@ -520,11 +525,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num); brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
if (func->num == 2) { if (func->num == 2) {
bus_if = dev_get_drvdata(&func->card->dev); bus_if = dev_get_drvdata(&func->dev);
sdiodev = bus_if->bus_priv; sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n"); brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev); brcmf_sdio_remove(sdiodev);
dev_set_drvdata(&func->card->dev, NULL); dev_set_drvdata(&func->card->dev, NULL);
dev_set_drvdata(&func->dev, NULL);
kfree(bus_if); kfree(bus_if);
kfree(sdiodev); kfree(sdiodev);
} }
...@@ -534,15 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) ...@@ -534,15 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
static int brcmf_sdio_suspend(struct device *dev) static int brcmf_sdio_suspend(struct device *dev)
{ {
mmc_pm_flag_t sdio_flags; mmc_pm_flag_t sdio_flags;
struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev); struct sdio_func *func = dev_to_sdio_func(dev);
struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev); struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
int ret = 0; int ret = 0;
brcmf_dbg(TRACE, "\n"); brcmf_dbg(TRACE, "\n");
sdiodev = bus_if->bus_priv;
atomic_set(&sdiodev->suspend, true); atomic_set(&sdiodev->suspend, true);
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]); sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
...@@ -564,11 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev) ...@@ -564,11 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev)
static int brcmf_sdio_resume(struct device *dev) static int brcmf_sdio_resume(struct device *dev)
{ {
struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev); struct sdio_func *func = dev_to_sdio_func(dev);
struct brcmf_bus *bus_if = dev_get_drvdata(&func->card->dev); struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
sdiodev = bus_if->bus_priv;
brcmf_sdio_wdtmr_enable(sdiodev, true); brcmf_sdio_wdtmr_enable(sdiodev, true);
atomic_set(&sdiodev->suspend, false); atomic_set(&sdiodev->suspend, false);
return 0; return 0;
......
...@@ -87,7 +87,7 @@ ...@@ -87,7 +87,7 @@
#define TOE_TX_CSUM_OL 0x00000001 #define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002 #define TOE_RX_CSUM_OL 0x00000002
#define BRCMF_BSS_INFO_VERSION 108 /* curr ver of brcmf_bss_info_le struct */ #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
/* size of brcmf_scan_params not including variable length array */ /* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64 #define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
......
...@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd { ...@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd {
* Used on data packets to convey priority across USB. * Used on data packets to convey priority across USB.
*/ */
#define BDC_HEADER_LEN 4 #define BDC_HEADER_LEN 4
#define BDC_PROTO_VER 1 /* Protocol version */ #define BDC_PROTO_VER 2 /* Protocol version */
#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ #define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ #define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */ #define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
...@@ -77,7 +77,7 @@ struct brcmf_proto_bdc_header { ...@@ -77,7 +77,7 @@ struct brcmf_proto_bdc_header {
u8 flags; u8 flags;
u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */ u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
u8 flags2; u8 flags2;
u8 rssi; u8 data_offset;
}; };
...@@ -372,7 +372,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, ...@@ -372,7 +372,7 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
h->priority = (pktbuf->priority & BDC_PRIORITY_MASK); h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0; h->flags2 = 0;
h->rssi = 0; h->data_offset = 0;
BDC_SET_IF_IDX(h, ifidx); BDC_SET_IF_IDX(h, ifidx);
} }
......
...@@ -3636,6 +3636,8 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid) ...@@ -3636,6 +3636,8 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{ {
if (chipid == BCM4329_CHIP_ID) if (chipid == BCM4329_CHIP_ID)
return true; return true;
if (chipid == BCM4330_CHIP_ID)
return true;
return false; return false;
} }
......
...@@ -59,37 +59,17 @@ struct sdiod_drive_str { ...@@ -59,37 +59,17 @@ struct sdiod_drive_str {
u8 strength; /* Pad Drive Strength in mA */ u8 strength; /* Pad Drive Strength in mA */
u8 sel; /* Chip-specific select value */ u8 sel; /* Chip-specific select value */
}; };
/* SDIO Drive Strength to sel value table for PMU Rev 1 */ /* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = { static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
{ {32, 0x6},
4, 0x2}, { {26, 0x7},
2, 0x3}, { {22, 0x4},
1, 0x0}, { {16, 0x5},
0, 0x0} {12, 0x2},
}; {8, 0x3},
/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */ {4, 0x0},
static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = { {0, 0x1}
{ };
12, 0x7}, {
10, 0x6}, {
8, 0x5}, {
6, 0x4}, {
4, 0x2}, {
2, 0x1}, {
0, 0x0}
};
/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
{
32, 0x7}, {
26, 0x6}, {
22, 0x5}, {
16, 0x4}, {
12, 0x3}, {
8, 0x2}, {
4, 0x1}, {
0, 0x0}
};
u8 u8
brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid) brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
...@@ -396,6 +376,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev, ...@@ -396,6 +376,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->c_inf[3].base = BCM4329_CORE_ARM_BASE; ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
ci->ramsize = BCM4329_RAMSIZE; ci->ramsize = BCM4329_RAMSIZE;
break; break;
case BCM4330_CHIP_ID:
ci->c_inf[0].wrapbase = 0x18100000;
ci->c_inf[0].cib = 0x27004211;
ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
ci->c_inf[1].base = 0x18002000;
ci->c_inf[1].wrapbase = 0x18102000;
ci->c_inf[1].cib = 0x07004211;
ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
ci->c_inf[2].base = 0x18004000;
ci->c_inf[2].wrapbase = 0x18104000;
ci->c_inf[2].cib = 0x0d080401;
ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
ci->c_inf[3].base = 0x18003000;
ci->c_inf[3].wrapbase = 0x18103000;
ci->c_inf[3].cib = 0x03004211;
ci->ramsize = 0x48000;
break;
default: default:
brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
return -ENODEV; return -ENODEV;
...@@ -569,19 +566,8 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, ...@@ -569,19 +566,8 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
return; return;
switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) { switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1): case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1; str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
str_mask = 0x30000000;
str_shift = 28;
break;
case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
str_mask = 0x00003800;
str_shift = 11;
break;
case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
str_mask = 0x00003800; str_mask = 0x00003800;
str_shift = 11; str_shift = 11;
break; break;
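
The new 1.8 V table for PMU rev 11 appears to keep the existing selection scheme: entries are listed in decreasing milliamp order, and the selection loop (not part of this hunk) takes the first entry the requested strength can meet, with the 0 mA row as the catch-all; that reading is an assumption based on the table layout. A sketch of such a lookup, reusing the values from the table above:

#include <stdint.h>
#include <stdio.h>

struct drive_str {
        uint8_t strength;                   /* pad drive strength in mA */
        uint8_t sel;                        /* chip-specific select value */
};

/* Values copied from sdiod_drvstr_tab1_1v8[] above. */
static const struct drive_str tab_1v8[] = {
        { 32, 0x6 }, { 26, 0x7 }, { 22, 0x4 }, { 16, 0x5 },
        { 12, 0x2 }, {  8, 0x3 }, {  4, 0x0 }, {  0, 0x1 },
};

static uint8_t pick_sel(const struct drive_str *tab, unsigned int requested_ma)
{
        unsigned int i;

        for (i = 0; tab[i].strength != 0; i++)
                if (requested_ma >= tab[i].strength)
                        return tab[i].sel;
        return tab[i].sel;                  /* 0 mA terminator: catch-all */
}

int main(void)
{
        printf("sel=0x%x\n", pick_sel(tab_1v8, 14)); /* picks the 12 mA row */
        return 0;
}
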
......
...@@ -135,6 +135,7 @@ struct brcmf_sdio_dev { ...@@ -135,6 +135,7 @@ struct brcmf_sdio_dev {
wait_queue_head_t request_chain_wait; wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait; wait_queue_head_t request_buffer_wait;
struct device *dev; struct device *dev;
struct brcmf_bus *bus_if;
}; };
/* Register/deregister device interrupt handler. */ /* Register/deregister device interrupt handler. */
......
...@@ -17,6 +17,8 @@ ...@@ -17,6 +17,8 @@
#ifndef _BRCM_AIUTILS_H_ #ifndef _BRCM_AIUTILS_H_
#define _BRCM_AIUTILS_H_ #define _BRCM_AIUTILS_H_
#include <linux/bcma/bcma.h>
#include "types.h" #include "types.h"
/* /*
...@@ -144,26 +146,15 @@ ...@@ -144,26 +146,15 @@
* public (read-only) portion of aiutils handle returned by si_attach() * public (read-only) portion of aiutils handle returned by si_attach()
*/ */
struct si_pub { struct si_pub {
uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
uint buscorerev; /* buscore rev */
uint buscoreidx; /* buscore index */
int ccrev; /* chip common core rev */ int ccrev; /* chip common core rev */
u32 cccaps; /* chip common capabilities */ u32 cccaps; /* chip common capabilities */
u32 cccaps_ext; /* chip common capabilities extension */
int pmurev; /* pmu core rev */ int pmurev; /* pmu core rev */
u32 pmucaps; /* pmu capabilities */ u32 pmucaps; /* pmu capabilities */
uint boardtype; /* board type */ uint boardtype; /* board type */
uint boardvendor; /* board vendor */ uint boardvendor; /* board vendor */
uint boardflags; /* board flags */
uint boardflags2; /* board flags2 */
uint chip; /* chip number */ uint chip; /* chip number */
uint chiprev; /* chip revision */ uint chiprev; /* chip revision */
uint chippkg; /* chip package option */ uint chippkg; /* chip package option */
u32 chipst; /* chip status */
bool issim; /* chip is in simulation or emulation */
uint socirev; /* SOC interconnect rev */
bool pci_pr32414;
}; };
struct pci_dev; struct pci_dev;
...@@ -179,38 +170,13 @@ struct gpioh_item { ...@@ -179,38 +170,13 @@ struct gpioh_item {
/* misc si info needed by some of the routines */ /* misc si info needed by some of the routines */
struct si_info { struct si_info {
struct si_pub pub; /* back plane public state (must be first) */ struct si_pub pub; /* back plane public state (must be first) */
struct pci_dev *pbus; /* handle to pci bus */ struct bcma_bus *icbus; /* handle to soc interconnect bus */
uint dev_coreid; /* the core provides driver functions */ struct pci_dev *pcibus; /* handle to pci bus */
void *intr_arg; /* interrupt callback function arg */
u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
/* restore chip interrupts */
void (*intrsrestore_fn) (void *intr_arg, u32 arg);
/* check if interrupts are enabled */
bool (*intrsenabled_fn) (void *intr_arg);
struct pcicore_info *pch; /* PCI/E core handle */ struct pcicore_info *pch; /* PCI/E core handle */
struct bcma_device *buscore;
struct list_head var_list; /* list of srom variables */ struct list_head var_list; /* list of srom variables */
void __iomem *curmap; /* current regs va */ u32 chipst; /* chip status */
void __iomem *regs[SI_MAXCORES]; /* other regs va */
uint curidx; /* current core index */
uint numcores; /* # discovered cores */
uint coreid[SI_MAXCORES]; /* id of each core */
u32 coresba[SI_MAXCORES]; /* backplane address of each core */
void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
u32 coresba2_size[SI_MAXCORES]; /* second address space size */
void *curwrap; /* current wrapper va */
void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
u32 cib[SI_MAXCORES]; /* erom cib entry for each core */
u32 oob_router; /* oob router registers for axi */
}; };
/* /*
...@@ -223,52 +189,15 @@ struct si_info { ...@@ -223,52 +189,15 @@ struct si_info {
/* AMBA Interconnect exported externs */ /* AMBA Interconnect exported externs */
extern uint ai_flag(struct si_pub *sih); extern struct bcma_device *ai_findcore(struct si_pub *sih,
extern void ai_setint(struct si_pub *sih, int siflag); u16 coreid, u16 coreunit);
extern uint ai_coreidx(struct si_pub *sih); extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
extern uint ai_corevendor(struct si_pub *sih);
extern uint ai_corerev(struct si_pub *sih);
extern bool ai_iscoreup(struct si_pub *sih);
extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
uint val);
extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
extern void ai_core_disable(struct si_pub *sih, u32 bits);
extern int ai_numaddrspaces(struct si_pub *sih);
extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
/* === exported functions === */ /* === exported functions === */
extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh); extern struct si_pub *ai_attach(struct bcma_bus *pbus);
extern void ai_detach(struct si_pub *sih); extern void ai_detach(struct si_pub *sih);
extern uint ai_coreid(struct si_pub *sih); extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
extern uint ai_corerev(struct si_pub *sih);
extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
uint val);
extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
extern bool ai_iscoreup(struct si_pub *sih);
extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
uint *origidx, uint *intr_val);
extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
extern void ai_core_disable(struct si_pub *sih, u32 bits);
extern u32 ai_alp_clock(struct si_pub *sih);
extern u32 ai_ilp_clock(struct si_pub *sih);
extern void ai_pci_setup(struct si_pub *sih, uint coremask); extern void ai_pci_setup(struct si_pub *sih, uint coremask);
extern void ai_setint(struct si_pub *sih, int siflag);
extern bool ai_backplane64(struct si_pub *sih);
extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
void *intrsrestore_fn,
void *intrsenabled_fn, void *intr_arg);
extern void ai_deregister_intr_callback(struct si_pub *sih);
extern void ai_clkctl_init(struct si_pub *sih); extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih); extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, uint mode); extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
...@@ -283,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih); ...@@ -283,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih);
/* SPROM availability */ /* SPROM availability */
extern bool ai_is_sprom_available(struct si_pub *sih); extern bool ai_is_sprom_available(struct si_pub *sih);
/*
* Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
* The returned path is NULL terminated and has trailing '/'.
* Return 0 on success, nonzero otherwise.
*/
extern int ai_devpath(struct si_pub *sih, char *path, int size);
extern void ai_pci_sleep(struct si_pub *sih); extern void ai_pci_sleep(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih); extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih); extern void ai_pci_up(struct si_pub *sih);
...@@ -299,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on); ...@@ -299,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */ /* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih); extern void ai_epa_4313war(struct si_pub *sih);
extern uint ai_get_buscoretype(struct si_pub *sih);
extern uint ai_get_buscorerev(struct si_pub *sih);
static inline int ai_get_ccrev(struct si_pub *sih)
{
return sih->ccrev;
}
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
return sih->cccaps;
}
static inline int ai_get_pmurev(struct si_pub *sih)
{
return sih->pmurev;
}
static inline u32 ai_get_pmucaps(struct si_pub *sih)
{
return sih->pmucaps;
}
static inline uint ai_get_boardtype(struct si_pub *sih)
{
return sih->boardtype;
}
static inline uint ai_get_boardvendor(struct si_pub *sih)
{
return sih->boardvendor;
}
static inline uint ai_get_chip_id(struct si_pub *sih)
{
return sih->chip;
}
static inline uint ai_get_chiprev(struct si_pub *sih)
{
return sih->chiprev;
}
static inline uint ai_get_chippkg(struct si_pub *sih)
{
return sih->chippkg;
}
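For readers tracking the aiutils API change: callers are now expected to go through these inline accessors rather than dereferencing struct si_pub members directly. A minimal, hypothetical call site (the helper name and the revision check are illustrative, not part of this patch) might look like:

/* Hypothetical caller: chip identification read via the new accessors
 * instead of direct struct si_pub field access.
 */
static bool example_is_bcm4313_rev0(struct si_pub *sih)
{
	return ai_get_chip_id(sih) == BCM4313_CHIP_ID &&
	       ai_get_chiprev(sih) == 0;
}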
#endif /* _BRCM_AIUTILS_H_ */ #endif /* _BRCM_AIUTILS_H_ */
...@@ -1118,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, ...@@ -1118,14 +1118,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
u8 status_delay = 0; u8 status_delay = 0;
/* wait till the next 8 bytes of txstatus is available */ /* wait till the next 8 bytes of txstatus is available */
while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) { s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
while ((s1 & TXS_V) == 0) {
udelay(1); udelay(1);
status_delay++; status_delay++;
if (status_delay > 10) if (status_delay > 10)
return; /* error condition */ return; /* error condition */
s1 = bcma_read32(wlc->hw->d11core,
D11REGOFFS(frmtxstatus));
} }
s2 = R_REG(&wlc->regs->frmtxstatus2); s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
} }
if (scb) { if (scb) {
......
...@@ -430,6 +430,9 @@ struct d11regs { ...@@ -430,6 +430,9 @@ struct d11regs {
u16 PAD[0x380]; /* 0x800 - 0xEFE */ u16 PAD[0x380]; /* 0x800 - 0xEFE */
}; };
/* d11 register field offset */
#define D11REGOFFS(field) offsetof(struct d11regs, field)
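The new D11REGOFFS() macro feeds the bcma accessors used elsewhere in this series (for example the frmtxstatus read in the ampdu change above). An illustrative helper, assuming the d11 bcma core handle introduced by this series:

/* Illustrative only: read a d11 register by byte offset through the
 * bcma core handle, as done for frmtxstatus in this series.
 */
static u32 example_read_frmtxstatus(struct bcma_device *d11core)
{
	return bcma_read32(d11core, D11REGOFFS(frmtxstatus));
}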
#define PIHR_BASE 0x0400 /* byte address of packed IHR region */ #define PIHR_BASE 0x0400 /* byte address of packed IHR region */
/* biststatus */ /* biststatus */
......
...@@ -26,6 +26,13 @@ ...@@ -26,6 +26,13 @@
#include "dma.h" #include "dma.h"
#include "soc.h" #include "soc.h"
/*
* dma register field offset calculation
*/
#define DMA64REGOFFS(field) offsetof(struct dma64regs, field)
#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field))
#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field))
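These wrappers add the per-engine register base kept in struct dma_info to the field offset within struct dma64regs. An illustrative read of the tx control register under the new scheme:

/* Illustrative only: DMA64TXREGOFFS() yields di->d64txregbase plus the
 * byte offset of 'control' within struct dma64regs.
 */
static u32 dma_example_read_txctrl(struct dma_info *di)
{
	return bcma_read32(di->core, DMA64TXREGOFFS(di, control));
}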
/* /*
* DMA hardware requires each descriptor ring to be 8kB aligned, and fit within * DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
* a contiguous 8kB physical address. * a contiguous 8kB physical address.
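A compact restatement of that constraint, mirroring the check performed in dma_ringalloc() further down (illustrative helper, not part of the patch):

/* Illustrative: a ring starting at desc_strtaddr and spanning 'size'
 * bytes is acceptable when its first and last byte fall within the
 * same window selected by the 'boundary' mask.
 */
static bool dma_example_ring_fits(u32 desc_strtaddr, uint size, u32 boundary)
{
	return ((desc_strtaddr + size - 1) & boundary) ==
	       (desc_strtaddr & boundary);
}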
...@@ -220,15 +227,16 @@ struct dma_info { ...@@ -220,15 +227,16 @@ struct dma_info {
uint *msg_level; /* message level pointer */ uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */ char name[MAXNAMEL]; /* callers name for diag msgs */
struct pci_dev *pbus; /* bus handle */ struct bcma_device *core;
struct device *dmadev;
bool dma64; /* this dma engine is operating in 64-bit mode */ bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
/* 64-bit dma tx engine registers */ /* 64-bit dma tx engine registers */
struct dma64regs __iomem *d64txregs; uint d64txregbase;
/* 64-bit dma rx engine registers */ /* 64-bit dma rx engine registers */
struct dma64regs __iomem *d64rxregs; uint d64rxregbase;
/* pointer to dma64 tx descriptor ring */ /* pointer to dma64 tx descriptor ring */
struct dma64desc *txd64; struct dma64desc *txd64;
/* pointer to dma64 rx descriptor ring */ /* pointer to dma64 rx descriptor ring */
...@@ -375,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) ...@@ -375,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
if (dmactrlflags & DMA_CTRL_PEN) { if (dmactrlflags & DMA_CTRL_PEN) {
u32 control; u32 control;
control = R_REG(&di->d64txregs->control); control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
W_REG(&di->d64txregs->control, bcma_write32(di->core, DMA64TXREGOFFS(di, control),
control | D64_XC_PD); control | D64_XC_PD);
if (R_REG(&di->d64txregs->control) & D64_XC_PD) if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
D64_XC_PD)
/* We *can* disable it so it is supported, /* We *can* disable it so it is supported,
* restore control register * restore control register
*/ */
W_REG(&di->d64txregs->control, bcma_write32(di->core, DMA64TXREGOFFS(di, control),
control); control);
else else
/* Not supported, don't allow it to be enabled */ /* Not supported, don't allow it to be enabled */
dmactrlflags &= ~DMA_CTRL_PEN; dmactrlflags &= ~DMA_CTRL_PEN;
...@@ -394,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags) ...@@ -394,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
return dmactrlflags; return dmactrlflags;
} }
static bool _dma64_addrext(struct dma64regs __iomem *dma64regs) static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{ {
u32 w; u32 w;
OR_REG(&dma64regs->control, D64_XC_AE); bcma_set32(di->core, ctrl_offset, D64_XC_AE);
w = R_REG(&dma64regs->control); w = bcma_read32(di->core, ctrl_offset);
AND_REG(&dma64regs->control, ~D64_XC_AE); bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
return (w & D64_XC_AE) == D64_XC_AE; return (w & D64_XC_AE) == D64_XC_AE;
} }
...@@ -412,13 +421,13 @@ static bool _dma_isaddrext(struct dma_info *di) ...@@ -412,13 +421,13 @@ static bool _dma_isaddrext(struct dma_info *di)
/* DMA64 supports full 32- or 64-bit operation. AE is always valid */ /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
/* not all tx or rx channel are available */ /* not all tx or rx channel are available */
if (di->d64txregs != NULL) { if (di->d64txregbase != 0) {
if (!_dma64_addrext(di->d64txregs)) if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
di->name); di->name);
return true; return true;
} else if (di->d64rxregs != NULL) { } else if (di->d64rxregbase != 0) {
if (!_dma64_addrext(di->d64rxregs)) if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
DMA_ERROR("%s: DMA64 rx doesn't have AE set\n", DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
di->name); di->name);
return true; return true;
...@@ -432,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di) ...@@ -432,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
u32 addrl; u32 addrl;
/* Check to see if the descriptors need to be aligned on 4K/8K or not */ /* Check to see if the descriptors need to be aligned on 4K/8K or not */
if (di->d64txregs != NULL) { if (di->d64txregbase != 0) {
W_REG(&di->d64txregs->addrlow, 0xff0); bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
addrl = R_REG(&di->d64txregs->addrlow); addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
if (addrl != 0) if (addrl != 0)
return false; return false;
} else if (di->d64rxregs != NULL) { } else if (di->d64rxregbase != 0) {
W_REG(&di->d64rxregs->addrlow, 0xff0); bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
addrl = R_REG(&di->d64rxregs->addrlow); addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
if (addrl != 0) if (addrl != 0)
return false; return false;
} }
...@@ -450,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di) ...@@ -450,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
* Descriptor table must start at the DMA hardware dictated alignment, so * Descriptor table must start at the DMA hardware dictated alignment, so
* allocated memory must be large enough to support this requirement. * allocated memory must be large enough to support this requirement.
*/ */
static void *dma_alloc_consistent(struct pci_dev *pdev, uint size, static void *dma_alloc_consistent(struct dma_info *di, uint size,
u16 align_bits, uint *alloced, u16 align_bits, uint *alloced,
dma_addr_t *pap) dma_addr_t *pap)
{ {
...@@ -460,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size, ...@@ -460,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
size += align; size += align;
*alloced = size; *alloced = size;
} }
return pci_alloc_consistent(pdev, size, pap); return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
} }
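The PCI-specific consistent-memory helper is replaced here by the generic DMA API against the device embedded in the bcma core. A minimal sketch of the alloc/free pairing, assuming a struct bcma_device *core as introduced by this patch:

/* Minimal sketch: coherent ring memory now comes from core->dma_dev and
 * is released with the matching size and DMA handle.
 */
static void dma_example_ring_roundtrip(struct bcma_device *core, uint size)
{
	dma_addr_t pa;
	void *va = dma_alloc_coherent(core->dma_dev, size, &pa, GFP_ATOMIC);

	if (va)
		dma_free_coherent(core->dma_dev, size, va, pa);
}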
static static
...@@ -486,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, ...@@ -486,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
u32 desc_strtaddr; u32 desc_strtaddr;
u32 alignbytes = 1 << *alignbits; u32 alignbytes = 1 << *alignbits;
va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa); va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
if (NULL == va) if (NULL == va)
return NULL; return NULL;
...@@ -495,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size, ...@@ -495,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
& boundary)) { & boundary)) {
*alignbits = dma_align_sizetobits(size); *alignbits = dma_align_sizetobits(size);
pci_free_consistent(di->pbus, size, va, *descpa); dma_free_coherent(di->dmadev, size, va, *descpa);
va = dma_alloc_consistent(di->pbus, size, *alignbits, va = dma_alloc_consistent(di, size, *alignbits,
alloced, descpa); alloced, descpa);
} }
return va; return va;
...@@ -556,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction) ...@@ -556,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
} }
struct dma_pub *dma_attach(char *name, struct si_pub *sih, struct dma_pub *dma_attach(char *name, struct si_pub *sih,
void __iomem *dmaregstx, void __iomem *dmaregsrx, struct bcma_device *core,
uint ntxd, uint nrxd, uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
uint rxbufsize, int rxextheadroom, uint rxbufsize, int rxextheadroom,
uint nrxpost, uint rxoffset, uint *msg_level) uint nrxpost, uint rxoffset, uint *msg_level)
{ {
struct dma_info *di; struct dma_info *di;
u8 rev = core->id.rev;
uint size; uint size;
/* allocate private info structure */ /* allocate private info structure */
...@@ -572,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, ...@@ -572,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->msg_level = msg_level ? msg_level : &dma_msg_level; di->msg_level = msg_level ? msg_level : &dma_msg_level;
di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); di->dma64 =
((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
/* init dma reg pointer */ /* init dma reg info */
di->d64txregs = (struct dma64regs __iomem *) dmaregstx; di->core = core;
di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx; di->d64txregbase = txregbase;
di->d64rxregbase = rxregbase;
/* /*
* Default flags (which can be changed by the driver calling * Default flags (which can be changed by the driver calling
...@@ -585,16 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, ...@@ -585,16 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/ */
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
name, "DMA64", "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
"txregbase %u rxregbase %u\n", name, "DMA64",
di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx); rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our callers name */ /* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL); strncpy(di->name, name, MAXNAMEL);
di->name[MAXNAMEL - 1] = '\0'; di->name[MAXNAMEL - 1] = '\0';
di->pbus = ((struct si_info *)sih)->pbus; di->dmadev = core->dma_dev;
/* save tunables */ /* save tunables */
di->ntxd = (u16) ntxd; di->ntxd = (u16) ntxd;
...@@ -626,11 +639,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih, ...@@ -626,11 +639,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dataoffsetlow = di->ddoffsetlow; di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh; di->dataoffsethigh = di->ddoffsethigh;
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */ /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
if ((ai_coreid(sih) == SDIOD_CORE_ID) if ((core->id.id == SDIOD_CORE_ID)
&& ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2))) && ((rev > 0) && (rev <= 2)))
di->addrext = 0; di->addrext = 0;
else if ((ai_coreid(sih) == I2S_CORE_ID) && else if ((core->id.id == I2S_CORE_ID) &&
((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1))) ((rev == 0) || (rev == 1)))
di->addrext = 0; di->addrext = 0;
else else
di->addrext = _dma_isaddrext(di); di->addrext = _dma_isaddrext(di);
...@@ -749,13 +762,13 @@ void dma_detach(struct dma_pub *pub) ...@@ -749,13 +762,13 @@ void dma_detach(struct dma_pub *pub)
/* free dma descriptor rings */ /* free dma descriptor rings */
if (di->txd64) if (di->txd64)
pci_free_consistent(di->pbus, di->txdalloc, dma_free_coherent(di->dmadev, di->txdalloc,
((s8 *)di->txd64 - di->txdalign), ((s8 *)di->txd64 - di->txdalign),
(di->txdpaorig)); (di->txdpaorig));
if (di->rxd64) if (di->rxd64)
pci_free_consistent(di->pbus, di->rxdalloc, dma_free_coherent(di->dmadev, di->rxdalloc,
((s8 *)di->rxd64 - di->rxdalign), ((s8 *)di->rxd64 - di->rxdalign),
(di->rxdpaorig)); (di->rxdpaorig));
/* free packet pointer vectors */ /* free packet pointer vectors */
kfree(di->txp); kfree(di->txp);
...@@ -780,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) ...@@ -780,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
if ((di->ddoffsetlow == 0) if ((di->ddoffsetlow == 0)
|| !(pa & PCI32ADDR_HIGH)) { || !(pa & PCI32ADDR_HIGH)) {
if (direction == DMA_TX) { if (direction == DMA_TX) {
W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); pa + di->ddoffsetlow);
bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
di->ddoffsethigh);
} else { } else {
W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); pa + di->ddoffsetlow);
bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
di->ddoffsethigh);
} }
} else { } else {
/* DMA64 32bits address extension */ /* DMA64 32bits address extension */
...@@ -795,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa) ...@@ -795,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
pa &= ~PCI32ADDR_HIGH; pa &= ~PCI32ADDR_HIGH;
if (direction == DMA_TX) { if (direction == DMA_TX) {
W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow); bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh); pa + di->ddoffsetlow);
SET_REG(&di->d64txregs->control, bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
D64_XC_AE, (ae << D64_XC_AE_SHIFT)); di->ddoffsethigh);
bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
D64_XC_AE, (ae << D64_XC_AE_SHIFT));
} else { } else {
W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow); bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh); pa + di->ddoffsetlow);
SET_REG(&di->d64rxregs->control, bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
D64_RC_AE, (ae << D64_RC_AE_SHIFT)); di->ddoffsethigh);
bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
D64_RC_AE, (ae << D64_RC_AE_SHIFT));
} }
} }
} }
...@@ -815,9 +836,9 @@ static void _dma_rxenable(struct dma_info *di) ...@@ -815,9 +836,9 @@ static void _dma_rxenable(struct dma_info *di)
DMA_TRACE("%s:\n", di->name); DMA_TRACE("%s:\n", di->name);
control = control = D64_RC_RE | (bcma_read32(di->core,
(R_REG(&di->d64rxregs->control) & D64_RC_AE) | DMA64RXREGOFFS(di, control)) &
D64_RC_RE; D64_RC_AE);
if ((dmactrlflags & DMA_CTRL_PEN) == 0) if ((dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_RC_PD; control |= D64_RC_PD;
...@@ -825,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di) ...@@ -825,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
if (dmactrlflags & DMA_CTRL_ROC) if (dmactrlflags & DMA_CTRL_ROC)
control |= D64_RC_OC; control |= D64_RC_OC;
W_REG(&di->d64rxregs->control, bcma_write32(di->core, DMA64RXREGOFFS(di, control),
((di->rxoffset << D64_RC_RO_SHIFT) | control)); ((di->rxoffset << D64_RC_RO_SHIFT) | control));
} }
...@@ -868,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) ...@@ -868,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
return NULL; return NULL;
curr = curr =
B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) - B2I(((bcma_read32(di->core,
DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc); di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
/* ignore curr if forceall */ /* ignore curr if forceall */
...@@ -882,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall) ...@@ -882,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow; pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
/* clear this packet from the descriptor ring */ /* clear this packet from the descriptor ring */
pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE); dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef); di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef); di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
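Throughout this file the PCI streaming-DMA helpers are likewise swapped for the generic API (dma_map_single()/dma_unmap_single() against di->dmadev, as in the rxfill and txfast hunks below). An illustrative pairing for a receive buffer:

/* Illustrative sketch: rx buffers are mapped against di->dmadev and
 * unmapped with the same size and direction when reclaimed.
 */
static dma_addr_t dma_example_map_rxbuf(struct dma_info *di, void *data)
{
	return dma_map_single(di->dmadev, data, di->rxbufsize,
			      DMA_FROM_DEVICE);
}

static void dma_example_unmap_rxbuf(struct dma_info *di, dma_addr_t pa)
{
	dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
}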
...@@ -950,12 +972,12 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list) ...@@ -950,12 +972,12 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
if (resid > 0) { if (resid > 0) {
uint cur; uint cur;
cur = cur =
B2I(((R_REG(&di->d64rxregs->status0) & B2I(((bcma_read32(di->core,
D64_RS0_CD_MASK) - DMA64RXREGOFFS(di, status0)) &
di->rcvptrbase) & D64_RS0_CD_MASK, D64_RS0_CD_MASK) - di->rcvptrbase) &
struct dma64desc); D64_RS0_CD_MASK, struct dma64desc);
DMA_ERROR("rxin %d rxout %d, hw_curr %d\n", DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
di->rxin, di->rxout, cur); di->rxin, di->rxout, cur);
} }
#endif /* BCMDBG */ #endif /* BCMDBG */
...@@ -983,8 +1005,10 @@ static bool dma64_rxidle(struct dma_info *di) ...@@ -983,8 +1005,10 @@ static bool dma64_rxidle(struct dma_info *di)
if (di->nrxd == 0) if (di->nrxd == 0)
return true; return true;
return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) == return ((bcma_read32(di->core,
(R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK)); DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
(bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
D64_RS0_CD_MASK));
} }
/* /*
...@@ -1048,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub) ...@@ -1048,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
*/ */
*(u32 *) (p->data) = 0; *(u32 *) (p->data) = 0;
pa = pci_map_single(di->pbus, p->data, pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
di->rxbufsize, PCI_DMA_FROMDEVICE); DMA_FROM_DEVICE);
/* save the free packet pointer */ /* save the free packet pointer */
di->rxp[rxout] = p; di->rxp[rxout] = p;
...@@ -1067,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub) ...@@ -1067,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
di->rxout = rxout; di->rxout = rxout;
/* update the chip lastdscr pointer */ /* update the chip lastdscr pointer */
W_REG(&di->d64rxregs->ptr, bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
di->rcvptrbase + I2B(rxout, struct dma64desc)); di->rcvptrbase + I2B(rxout, struct dma64desc));
return ring_empty; return ring_empty;
...@@ -1128,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub) ...@@ -1128,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0) if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_XC_PD; control |= D64_XC_PD;
OR_REG(&di->d64txregs->control, control); bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
/* DMA engine with alignment requirement requires table to be inited /* DMA engine with alignment requirement requires table to be inited
* before enabling the engine * before enabling the engine
...@@ -1146,7 +1170,7 @@ void dma_txsuspend(struct dma_pub *pub) ...@@ -1146,7 +1170,7 @@ void dma_txsuspend(struct dma_pub *pub)
if (di->ntxd == 0) if (di->ntxd == 0)
return; return;
OR_REG(&di->d64txregs->control, D64_XC_SE); bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
} }
void dma_txresume(struct dma_pub *pub) void dma_txresume(struct dma_pub *pub)
...@@ -1158,7 +1182,7 @@ void dma_txresume(struct dma_pub *pub) ...@@ -1158,7 +1182,7 @@ void dma_txresume(struct dma_pub *pub)
if (di->ntxd == 0) if (di->ntxd == 0)
return; return;
AND_REG(&di->d64txregs->control, ~D64_XC_SE); bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
} }
bool dma_txsuspended(struct dma_pub *pub) bool dma_txsuspended(struct dma_pub *pub)
...@@ -1166,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub) ...@@ -1166,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub; struct dma_info *di = (struct dma_info *)pub;
return (di->ntxd == 0) || return (di->ntxd == 0) ||
((R_REG(&di->d64txregs->control) & D64_XC_SE) == ((bcma_read32(di->core,
D64_XC_SE); DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
D64_XC_SE);
} }
void dma_txreclaim(struct dma_pub *pub, enum txd_range range) void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
...@@ -1200,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub) ...@@ -1200,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
return true; return true;
/* suspend tx DMA first */ /* suspend tx DMA first */
W_REG(&di->d64txregs->control, D64_XC_SE); bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
SPINWAIT(((status = SPINWAIT(((status =
(R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
!= D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE) D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
&& (status != D64_XS0_XS_STOPPED), 10000); (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
10000);
W_REG(&di->d64txregs->control, 0); bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
SPINWAIT(((status = SPINWAIT(((status =
(R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK)) (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
!= D64_XS0_XS_DISABLED), 10000); D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
/* wait for the last transaction to complete */ /* wait for the last transaction to complete */
udelay(300); udelay(300);
...@@ -1225,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub) ...@@ -1225,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
if (di->nrxd == 0) if (di->nrxd == 0)
return true; return true;
W_REG(&di->d64rxregs->control, 0); bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
SPINWAIT(((status = SPINWAIT(((status =
(R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK)) (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
!= D64_RS0_RS_DISABLED), 10000); D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
return status == D64_RS0_RS_DISABLED; return status == D64_RS0_RS_DISABLED;
} }
...@@ -1267,7 +1293,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) ...@@ -1267,7 +1293,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
goto outoftxd; goto outoftxd;
/* get physical address of buffer start */ /* get physical address of buffer start */
pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE); pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
/* With a DMA segment list, Descriptor table is filled /* With a DMA segment list, Descriptor table is filled
* using the segment list instead of looping over * using the segment list instead of looping over
...@@ -1290,7 +1316,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit) ...@@ -1290,7 +1316,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
/* kick the chip */ /* kick the chip */
if (commit) if (commit)
W_REG(&di->d64txregs->ptr, bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
di->xmtptrbase + I2B(txout, struct dma64desc)); di->xmtptrbase + I2B(txout, struct dma64desc));
/* tx flow control */ /* tx flow control */
...@@ -1338,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) ...@@ -1338,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
if (range == DMA_RANGE_ALL) if (range == DMA_RANGE_ALL)
end = di->txout; end = di->txout;
else { else {
struct dma64regs __iomem *dregs = di->d64txregs; end = (u16) (B2I(((bcma_read32(di->core,
DMA64TXREGOFFS(di, status0)) &
end = (u16) (B2I(((R_REG(&dregs->status0) & D64_XS0_CD_MASK) - di->xmtptrbase) &
D64_XS0_CD_MASK) - D64_XS0_CD_MASK, struct dma64desc));
di->xmtptrbase) & D64_XS0_CD_MASK,
struct dma64desc));
if (range == DMA_RANGE_TRANSFERED) { if (range == DMA_RANGE_TRANSFERED) {
active_desc = active_desc =
(u16) (R_REG(&dregs->status1) & (u16)(bcma_read32(di->core,
DMA64TXREGOFFS(di, status1)) &
D64_XS1_AD_MASK); D64_XS1_AD_MASK);
active_desc = active_desc =
(active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
...@@ -1376,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range) ...@@ -1376,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
txp = di->txp[i]; txp = di->txp[i];
di->txp[i] = NULL; di->txp[i] = NULL;
pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE); dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
} }
di->txin = i; di->txin = i;
......
...@@ -75,10 +75,11 @@ struct dma_pub { ...@@ -75,10 +75,11 @@ struct dma_pub {
}; };
extern struct dma_pub *dma_attach(char *name, struct si_pub *sih, extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
void __iomem *dmaregstx, void __iomem *dmaregsrx, struct bcma_device *d11core,
uint ntxd, uint nrxd, uint txregbase, uint rxregbase,
uint rxbufsize, int rxextheadroom, uint ntxd, uint nrxd,
uint nrxpost, uint rxoffset, uint *msg_level); uint rxbufsize, int rxextheadroom,
uint nrxpost, uint rxoffset, uint *msg_level);
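A call site under the new prototype passes the 802.11 bcma core plus byte offsets of the tx/rx engine register blocks instead of ioremapped pointers. A hypothetical sketch (the name string, register offsets and queue sizes are illustrative values only, not taken from the driver):

/* Hypothetical call under the new signature; all numeric values are
 * placeholders.
 */
static struct dma_pub *example_attach_txq(struct si_pub *sih,
					  struct bcma_device *d11core,
					  uint *msg_level)
{
	static char name[] = "example txq";

	return dma_attach(name, sih, d11core,
			  0x200, 0x220,	/* tx/rx engine reg offsets */
			  256, 256,	/* ntxd, nrxd */
			  2048, 0,	/* rxbufsize, rxextheadroom */
			  32, 0,	/* nrxpost, rxoffset */
			  msg_level);
}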
void dma_rxinit(struct dma_pub *pub); void dma_rxinit(struct dma_pub *pub);
int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list); int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
......
...@@ -68,8 +68,6 @@ struct brcms_info { ...@@ -68,8 +68,6 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */ spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */ spinlock_t isr_lock; /* per-device ISR synchronization lock */
/* regsva for unmap in brcms_free() */
void __iomem *regsva; /* opaque chip registers virtual address */
/* timer related fields */ /* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */ atomic_t callbacks; /* # outstanding callback functions */
......
...@@ -334,7 +334,7 @@ struct brcms_hardware { ...@@ -334,7 +334,7 @@ struct brcms_hardware {
u32 machwcap_backup; /* backup of machwcap */ u32 machwcap_backup; /* backup of machwcap */
struct si_pub *sih; /* SI handle (cookie for siutils calls) */ struct si_pub *sih; /* SI handle (cookie for siutils calls) */
struct d11regs __iomem *regs; /* pointer to device registers */ struct bcma_device *d11core; /* pointer to 802.11 core */
struct phy_shim_info *physhim; /* phy shim layer handler */ struct phy_shim_info *physhim; /* phy shim layer handler */
struct shared_phy *phy_sh; /* pointer to shared phy state */ struct shared_phy *phy_sh; /* pointer to shared phy state */
struct brcms_hw_band *band;/* pointer to active per-band state */ struct brcms_hw_band *band;/* pointer to active per-band state */
...@@ -400,7 +400,6 @@ struct brcms_txq_info { ...@@ -400,7 +400,6 @@ struct brcms_txq_info {
* *
* pub: pointer to driver public state. * pub: pointer to driver public state.
* wl: pointer to specific private state. * wl: pointer to specific private state.
* regs: pointer to device registers.
* hw: HW related state. * hw: HW related state.
* clkreq_override: setting for clkreq for PCIE : Auto, 0, 1. * clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
* fastpwrup_dly: time in us needed to bring up d11 fast clock. * fastpwrup_dly: time in us needed to bring up d11 fast clock.
...@@ -477,7 +476,6 @@ struct brcms_txq_info { ...@@ -477,7 +476,6 @@ struct brcms_txq_info {
struct brcms_c_info { struct brcms_c_info {
struct brcms_pub *pub; struct brcms_pub *pub;
struct brcms_info *wl; struct brcms_info *wl;
struct d11regs __iomem *regs;
struct brcms_hardware *hw; struct brcms_hardware *hw;
/* clock */ /* clock */
......
...@@ -62,8 +62,7 @@ struct sbpciregs; ...@@ -62,8 +62,7 @@ struct sbpciregs;
struct sbpcieregs; struct sbpcieregs;
extern struct pcicore_info *pcicore_init(struct si_pub *sih, extern struct pcicore_info *pcicore_init(struct si_pub *sih,
struct pci_dev *pdev, struct bcma_device *core);
void __iomem *regs);
extern void pcicore_deinit(struct pcicore_info *pch); extern void pcicore_deinit(struct pcicore_info *pch);
extern void pcicore_attach(struct pcicore_info *pch, int state); extern void pcicore_attach(struct pcicore_info *pch, int state);
extern void pcicore_hwup(struct pcicore_info *pch); extern void pcicore_hwup(struct pcicore_info *pch);
...@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch); ...@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch);
extern void pcicore_down(struct pcicore_info *pch, int state); extern void pcicore_down(struct pcicore_info *pch, int state);
extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id, extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen); unsigned char *buf, u32 *buflen);
extern void pcicore_fixcfg_pci(struct pcicore_info *pch, extern void pcicore_fixcfg(struct pcicore_info *pch);
struct sbpciregs __iomem *pciregs); extern void pcicore_pci_setup(struct pcicore_info *pch);
extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
struct sbpcieregs __iomem *pciregs);
extern void pcicore_pci_setup(struct pcicore_info *pch,
struct sbpciregs __iomem *pciregs);
#endif /* _BRCM_NICPCI_H_ */ #endif /* _BRCM_NICPCI_H_ */
...@@ -166,7 +166,6 @@ struct shared_phy_params { ...@@ -166,7 +166,6 @@ struct shared_phy_params {
struct phy_shim_info *physhim; struct phy_shim_info *physhim;
uint unit; uint unit;
uint corerev; uint corerev;
uint buscorerev;
u16 vid; u16 vid;
u16 did; u16 did;
uint chip; uint chip;
...@@ -175,7 +174,6 @@ struct shared_phy_params { ...@@ -175,7 +174,6 @@ struct shared_phy_params {
uint sromrev; uint sromrev;
uint boardtype; uint boardtype;
uint boardrev; uint boardrev;
uint boardvendor;
u32 boardflags; u32 boardflags;
u32 boardflags2; u32 boardflags2;
}; };
...@@ -183,7 +181,7 @@ struct shared_phy_params { ...@@ -183,7 +181,7 @@ struct shared_phy_params {
extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp); extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
struct d11regs __iomem *regs, struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy); int bandtype, struct wiphy *wiphy);
extern void wlc_phy_detach(struct brcms_phy_pub *ppi); extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
......
...@@ -1373,6 +1373,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, ...@@ -1373,6 +1373,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
static void iwl_trans_pcie_free(struct iwl_trans *trans) static void iwl_trans_pcie_free(struct iwl_trans *trans)
{ {
iwl_calib_free_results(trans);
iwl_trans_pcie_tx_free(trans); iwl_trans_pcie_tx_free(trans);
iwl_trans_pcie_rx_free(trans); iwl_trans_pcie_rx_free(trans);
free_irq(bus(trans)->irq, trans); free_irq(bus(trans)->irq, trans);
......
(The remaining file diffs in this merge are collapsed and not shown in this view.)