Commit c8ac18f2 authored by David S. Miller

Merge tag 'wireless-drivers-next-for-davem-2015-02-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Major changes:

iwlwifi:

* more work for new devices (4165 / 8260)
* cleanups / improvements in rate control
* fixes for TDLS
* major statistics work from Johannes - more to come
* improvements for the fw error dump infrastructure
* the usual amount of small fixes here and there (scan, D0i3, etc.)
* add support for beamforming
* enable stuck queue detection for iwlmvm
* a few fixes for EBS scan
* fixes for various failure paths
* improvements for TDLS Offchannel

wil6210:

* performance tuning
* some AP features

brcm80211:

* rework some code in the SDIO part of the brcmfmac driver related to
  suspend/resume issues that were found during stress testing
* in the PCIe part, scheduling of a worker thread needed to be relaxed
* minor fixes and exposing firmware revision information to
  user-space, i.e. via ethtool

mwifiex:

* enhancements for handling virtual interface changes
* remove coupling between netdev and firmware-supported interface
  combinations; conversion from any supported interface type to any
  other type is now possible
* DFS support in AP mode

ath9k:

* fix calibration issues on some boards
* Wake-on-WLAN improvements

ath10k:

* add support for qca6174 hardware
* enable RX batching to reduce CPU load

Conflicts:
	drivers/net/wireless/rtlwifi/pci.c

Conflict resolution is to get rid of the 'end' label and keep
the rest.
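
As an aside for reviewers, this is the usual pattern for dropping a
flow-control label. A minimal sketch of the before/after shape
(hypothetical C code, not the actual rtlwifi/pci.c hunk; the foo_*
names are invented for illustration):

    /* before: error paths jump to a shared 'end' label */
    static int foo_start(struct foo_priv *priv)
    {
            int err;

            err = foo_hw_init(priv);
            if (err)
                    goto end;

            err = foo_irq_enable(priv);
    end:
            return err;
    }

    /* after: the label is removed and each path returns directly */
    static int foo_start(struct foo_priv *priv)
    {
            int err;

            err = foo_hw_init(priv);
            if (err)
                    return err;

            return foo_irq_enable(priv);
    }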
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -107,6 +107,14 @@ extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
 #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
 bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
 void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+#else
+static inline bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+{
+	return false;
+}
+static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+{
+}
 #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
 #ifdef CONFIG_BCMA_DRIVER_GPIO
...
@@ -178,7 +178,6 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
 u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
 {
 	u32 maxt;
-	enum bcma_clkmode clkmode;

 	maxt = bcma_chipco_watchdog_get_max_timer(cc);
 	if (cc->capabilities & BCMA_CC_CAP_PMU) {
@@ -188,8 +187,13 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
 		ticks = maxt;
 		bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
 	} else {
-		clkmode = ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC;
-		bcma_core_set_clockmode(cc->core, clkmode);
+		struct bcma_bus *bus = cc->core->bus;
+
+		if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
+		    bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
+			bcma_core_set_clockmode(cc->core,
+						ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
+
 		if (ticks > maxt)
 			ticks = maxt;
 		/* instant NMI */
...
@@ -144,6 +144,47 @@ static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
 	return bcma_pcie_mdio_read(pc, device, address);
 }

+/**************************************************
+ * Early init.
+ **************************************************/
+
+static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
+{
+	struct bcma_device *core = pc->core;
+	u16 val16, core_index;
+	uint regoff;
+
+	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
+	core_index = (u16)core->core_index;
+
+	val16 = pcicore_read16(pc, regoff);
+	if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
+	     != core_index) {
+		val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
+			(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
+		pcicore_write16(pc, regoff, val16);
+	}
+}
+
+/*
+ * Apply some early fixes required before accessing SPROM.
+ * See also si_pci_fixcfg.
+ */
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
+{
+	if (pc->early_setup_done)
+		return;
+
+	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
+	if (pc->hostmode)
+		goto out;
+
+	bcma_core_pci_fixcfg(pc);
+
+out:
+	pc->early_setup_done = true;
+}
+
 /**************************************************
  * Workarounds.
  **************************************************/
@@ -175,24 +216,6 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
 		  tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
 }

-static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
-{
-	struct bcma_device *core = pc->core;
-	u16 val16, core_index;
-	uint regoff;
-
-	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
-	core_index = (u16)core->core_index;
-
-	val16 = pcicore_read16(pc, regoff);
-	if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
-	     != core_index) {
-		val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
-			(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
-		pcicore_write16(pc, regoff, val16);
-	}
-}
-
 /* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
 /* Needs to happen when coming out of 'standby'/'hibernate' */
 static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
@@ -216,7 +239,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
 static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
 {
-	bcma_core_pci_fixcfg(pc);
 	bcma_pcicore_serdes_workaround(pc);
 	bcma_core_pci_config_fixup(pc);
 }
@@ -226,13 +248,11 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
 	if (pc->setup_done)
 		return;

-#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
-	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
+	bcma_core_pci_early_init(pc);
+
 	if (pc->hostmode)
 		bcma_core_pci_hostmode_init(pc);
-#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
-
-	if (!pc->hostmode)
+	else
 		bcma_core_pci_clientmode_init(pc);
 }
...
@@ -13,10 +13,12 @@
 static void bcma_host_pci_switch_core(struct bcma_device *core)
 {
+	int win2 = core->bus->host_is_pcie2 ?
+		BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;
+
 	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
 			       core->addr);
-	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
-			       core->wrap);
+	pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
 	core->bus->mapped_core = core;
 	bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
 }
...
@@ -368,12 +368,19 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
 	struct bcma_device *core, *tmp;

 	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+		if (!core->dev_registered)
+			continue;
 		list_del(&core->list);
-		if (core->dev_registered)
-			device_unregister(&core->dev);
+		device_unregister(&core->dev);
 	}
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		platform_device_unregister(bus->drv_cc.watchdog);
+
+	/* Now noone uses internally-handled cores, we can free them */
+	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+		list_del(&core->list);
+		kfree(core);
+	}
 }

 int bcma_bus_register(struct bcma_bus *bus)
@@ -395,6 +402,13 @@ int bcma_bus_register(struct bcma_bus *bus)
 		bcma_core_chipcommon_early_init(&bus->drv_cc);
 	}

+	/* Early init PCIE core */
+	core = bcma_find_core(bus, BCMA_CORE_PCIE);
+	if (core) {
+		bus->drv_pci[0].core = core;
+		bcma_core_pci_early_init(&bus->drv_pci[0]);
+	}
+
 	/* Cores providing flash access go before SPROM init */
 	list_for_each_entry(core, &bus->cores, list) {
 		if (bcma_is_core_needed_early(core->id.id))
@@ -467,7 +481,6 @@ int bcma_bus_register(struct bcma_bus *bus)
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
-	struct bcma_device *cores[3];
 	int err;

 	err = bcma_gpio_unregister(&bus->drv_cc);
@@ -478,15 +491,7 @@ void bcma_bus_unregister(struct bcma_bus *bus)
 	bcma_core_chipcommon_b_free(&bus->drv_cc_b);

-	cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
-	cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
-	cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
-
 	bcma_unregister_cores(bus);
-
-	kfree(cores[2]);
-	kfree(cores[1]);
-	kfree(cores[0]);
 }

 /*
...
@@ -579,7 +579,8 @@ int bcma_sprom_get(struct bcma_bus *bus)
 	u16 offset = BCMA_CC_SPROM;
 	u16 *sprom;
 	size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4,
-				 SSB_SPROMSIZE_WORDS_R10, };
+				 SSB_SPROMSIZE_WORDS_R10,
+				 SSB_SPROMSIZE_WORDS_R11, };
 	int i, err = 0;

 	if (!bus->drv_cc.core)
...
@@ -64,6 +64,7 @@ enum ath_op_flags {
 	ATH_OP_HW_RESET,
 	ATH_OP_SCANNING,
 	ATH_OP_MULTI_CHANNEL,
+	ATH_OP_WOW_ENABLED,
 };

 enum ath_bus_type {
...
@@ -9,12 +9,14 @@ ath10k_core-y += mac.o \
 		 txrx.o \
 		 wmi.o \
 		 wmi-tlv.o \
-		 bmi.o
+		 bmi.o \
+		 hw.o

 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o

 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \
...
@@ -803,7 +803,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
 	int ce_id;

 	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
-		u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

 		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
 		ath10k_ce_error_intr_disable(ar, ctrl_addr);
@@ -832,7 +832,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
-	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

 	nentries = roundup_pow_of_two(attr->src_nentries);
@@ -869,7 +869,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
-	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

 	nentries = roundup_pow_of_two(attr->dest_nentries);
@@ -1051,7 +1051,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
 static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
 {
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

 	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
 	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
@@ -1061,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
 static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
 {
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

 	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
 	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
@@ -1098,7 +1098,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
 	ce_state->ar = ar;
 	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
 	ce_state->attr_flags = attr->flags;
 	ce_state->src_sz_max = attr->src_sz_max;
...
@@ -394,7 +394,7 @@ struct ce_attr {
 #define DST_WATERMARK_HIGH_RESET	0
 #define DST_WATERMARK_ADDRESS		0x0050

-static inline u32 ath10k_ce_base_address(unsigned int ce_id)
+static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 {
 	return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
 }
...
@@ -57,6 +57,49 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 			.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
 		},
 	},
+	{
+		.id = QCA6174_HW_2_1_VERSION,
+		.name = "qca6174 hw2.1",
+		.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.fw = {
+			.dir = QCA6174_HW_2_1_FW_DIR,
+			.fw = QCA6174_HW_2_1_FW_FILE,
+			.otp = QCA6174_HW_2_1_OTP_FILE,
+			.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_3_0_VERSION,
+		.name = "qca6174 hw3.0",
+		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.fw = {
+			.dir = QCA6174_HW_3_0_FW_DIR,
+			.fw = QCA6174_HW_3_0_FW_FILE,
+			.otp = QCA6174_HW_3_0_OTP_FILE,
+			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
+	{
+		.id = QCA6174_HW_3_2_VERSION,
+		.name = "qca6174 hw3.2",
+		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+		.uart_pin = 6,
+		.fw = {
+			/* uses same binaries as hw3.0 */
+			.dir = QCA6174_HW_3_0_FW_DIR,
+			.fw = QCA6174_HW_3_0_FW_FILE,
+			.otp = QCA6174_HW_3_0_OTP_FILE,
+			.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+			.board_size = QCA6174_BOARD_DATA_SZ,
+			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+		},
+	},
 };

 static void ath10k_send_suspend_complete(struct ath10k *ar)
@@ -927,6 +970,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
 	case ATH10K_FW_WMI_OP_VERSION_TLV:
 		ar->max_num_peers = TARGET_TLV_NUM_PEERS;
 		ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
+		ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
 		ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
@@ -1060,6 +1104,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
 		goto err_hif_stop;
 	}

+	/* If firmware indicates Full Rx Reorder support it must be used in a
+	 * slightly different manner. Let HTT code know.
+	 */
+	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+						ar->wmi.svc_map));
+
+	status = ath10k_htt_rx_ring_refill(ar);
+	if (status) {
+		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+		goto err_hif_stop;
+	}
+
 	/* we don't care about HTT in UTF mode */
 	if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
 		status = ath10k_htt_setup(&ar->htt);
@@ -1295,6 +1351,7 @@ EXPORT_SYMBOL(ath10k_core_unregister);
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
+				  enum ath10k_hw_rev hw_rev,
 				  const struct ath10k_hif_ops *hif_ops)
 {
 	struct ath10k *ar;
@@ -1307,9 +1364,24 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 	ar->ath_common.priv = ar;
 	ar->ath_common.hw = ar->hw;
 	ar->dev = dev;
+	ar->hw_rev = hw_rev;
 	ar->hif.ops = hif_ops;
 	ar->hif.bus = bus;
+
+	switch (hw_rev) {
+	case ATH10K_HW_QCA988X:
+		ar->regs = &qca988x_regs;
+		break;
+	case ATH10K_HW_QCA6174:
+		ar->regs = &qca6174_regs;
+		break;
+	default:
+		ath10k_err(ar, "unsupported core hardware revision %d\n",
+			   hw_rev);
+		ret = -ENOTSUPP;
+		goto err_free_mac;
+	}
+
 	init_completion(&ar->scan.started);
 	init_completion(&ar->scan.completed);
 	init_completion(&ar->scan.on_channel);
...
@@ -97,6 +97,11 @@ struct ath10k_skb_cb {
 	} bcn;
 } __packed;

+struct ath10k_skb_rxcb {
+	dma_addr_t paddr;
+	struct hlist_node hlist;
+};
+
 static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
 {
 	BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
@@ -104,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
 	return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
 }

+static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
+	return (struct ath10k_skb_rxcb *)skb->cb;
+}
+
+#define ATH10K_RXCB_SKB(rxcb) \
+		container_of((void *)rxcb, struct sk_buff, cb)
+
 static inline u32 host_interest_item_address(u32 item_offset)
 {
 	return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
@@ -239,10 +253,21 @@ struct ath10k_sta {
 	u32 smps;

 	struct work_struct update_wk;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* protected by conf_mutex */
+	bool aggr_mode;
+#endif
 };

 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)

+enum ath10k_beacon_state {
+	ATH10K_BEACON_SCHEDULED = 0,
+	ATH10K_BEACON_SENDING,
+	ATH10K_BEACON_SENT,
+};
+
 struct ath10k_vif {
 	struct list_head list;
@@ -253,7 +278,7 @@ struct ath10k_vif {
 	u32 dtim_period;
 	struct sk_buff *beacon;
 	/* protected by data_lock */
-	bool beacon_sent;
+	enum ath10k_beacon_state beacon_state;
 	void *beacon_buf;
 	dma_addr_t beacon_paddr;
@@ -266,10 +291,8 @@ struct ath10k_vif {
 	u32 aid;
 	u8 bssid[ETH_ALEN];

-	struct work_struct wep_key_work;
 	struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
-	u8 def_wep_key_idx;
-	u8 def_wep_key_newidx;
+	s8 def_wep_key_idx;

 	u16 tx_seq_no;
@@ -296,6 +319,7 @@ struct ath10k_vif {
 	bool use_cts_prot;
 	int num_legacy_stations;
 	int txpower;
+	struct wmi_wmm_params_all_arg wmm_params;
 };

 struct ath10k_vif_iter {
@@ -326,6 +350,7 @@ struct ath10k_debug {
 	/* protected by conf_mutex */
 	u32 fw_dbglog_mask;
+	u32 fw_dbglog_level;
 	u32 pktlog_filter;
 	u32 reg_addr;
 	u32 nf_cal_period;
@@ -452,6 +477,7 @@ struct ath10k {
 	struct device *dev;

 	u8 mac_addr[ETH_ALEN];
+	enum ath10k_hw_rev hw_rev;
 	u32 chip_id;
 	u32 target_version;
 	u8 fw_version_major;
@@ -467,9 +493,6 @@ struct ath10k {
 	DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);

-	struct targetdef *targetdef;
-	struct hostdef *hostdef;
-
 	bool p2p;

 	struct {
@@ -479,6 +502,7 @@ struct ath10k {
 	struct completion target_suspend;

+	const struct ath10k_hw_regs *regs;
 	struct ath10k_bmi bmi;
 	struct ath10k_wmi wmi;
 	struct ath10k_htc htc;
@@ -559,7 +583,6 @@ struct ath10k {
 	u8 cfg_tx_chainmask;
 	u8 cfg_rx_chainmask;

-	struct wmi_pdev_set_wmm_params_arg wmm_params;
 	struct completion install_key_done;
 	struct completion vdev_setup_done;
@@ -643,6 +666,7 @@ struct ath10k {
 struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 				  enum ath10k_bus bus,
+				  enum ath10k_hw_rev hw_rev,
 				  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
...
@@ -371,7 +371,7 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 		ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
 						  1*HZ);
-		if (ret <= 0)
+		if (ret == 0)
 			return -ETIMEDOUT;

 		spin_lock_bh(&ar->data_lock);
@@ -1318,10 +1318,10 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
 {
 	struct ath10k *ar = file->private_data;
 	unsigned int len;
-	char buf[32];
+	char buf[64];

-	len = scnprintf(buf, sizeof(buf), "0x%08x\n",
-			ar->debug.fw_dbglog_mask);
+	len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+			ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);

 	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -1331,19 +1331,32 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
 				      size_t count, loff_t *ppos)
 {
 	struct ath10k *ar = file->private_data;
-	unsigned long mask;
 	int ret;
+	char buf[64];
+	unsigned int log_level, mask;

-	ret = kstrtoul_from_user(user_buf, count, 0, &mask);
-	if (ret)
-		return ret;
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	ret = sscanf(buf, "%x %u", &mask, &log_level);
+
+	if (!ret)
+		return -EINVAL;
+
+	if (ret == 1)
+		/* default if user did not specify */
+		log_level = ATH10K_DBGLOG_LEVEL_WARN;

 	mutex_lock(&ar->conf_mutex);

 	ar->debug.fw_dbglog_mask = mask;
+	ar->debug.fw_dbglog_level = log_level;

 	if (ar->state == ATH10K_STATE_ON) {
-		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+					    ar->debug.fw_dbglog_level);
 		if (ret) {
 			ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
 				    ret);
@@ -1685,7 +1698,8 @@ int ath10k_debug_start(struct ath10k *ar)
 			    ret);

 	if (ar->debug.fw_dbglog_mask) {
-		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
+		ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+					    ATH10K_DBGLOG_LEVEL_WARN);
 		if (ret)
 			/* not serious */
 			ath10k_warn(ar, "failed to enable dbglog during start: %d",
...
@@ -48,6 +48,12 @@ enum ath10k_pktlog_filter {
 	ATH10K_PKTLOG_ANY = 0x00000001f,
 };

+enum ath10k_dbg_aggr_mode {
+	ATH10K_DBG_AGGR_MODE_AUTO,
+	ATH10K_DBG_AGGR_MODE_MANUAL,
+	ATH10K_DBG_AGGR_MODE_MAX,
+};
+
 extern unsigned int ath10k_debug_mask;

 __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
@@ -77,7 +83,6 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
 void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       struct ethtool_stats *stats, u64 *data);
-
 #else
 static inline int ath10k_debug_start(struct ath10k *ar)
 {
@@ -129,6 +134,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
 #define ath10k_debug_get_et_stats NULL
 #endif /* CONFIG_ATH10K_DEBUGFS */

+#ifdef CONFIG_MAC80211_DEBUGFS
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, struct dentry *dir);
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
 #ifdef CONFIG_ATH10K_DEBUG
 __printf(3, 4) void ath10k_dbg(struct ath10k *ar,
...
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "wmi-ops.h"
#include "debug.h"
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[32];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
(arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
"auto" : "manual");
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 aggr_mode;
int ret;
if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
return -EINVAL;
if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(aggr_mode == arsta->aggr_mode)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
if (ret) {
ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
goto out;
}
arsta->aggr_mode = aggr_mode;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_aggr_mode = {
.read = ath10k_dbg_sta_read_aggr_mode,
.write = ath10k_dbg_sta_write_aggr_mode,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
char buf[64];
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = '\0';
ret = sscanf(buf, "%u %u", &tid, &buf_size);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, buf_size);
if (ret) {
ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
arsta->arvif->vdev_id, sta->addr, tid, buf_size);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba = {
.write = ath10k_dbg_sta_write_addba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
char buf[64];
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = '\0';
ret = sscanf(buf, "%u %u", &tid, &status);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
tid, status);
if (ret) {
ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
arsta->arvif->vdev_id, sta->addr, tid, status);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba_resp = {
.write = ath10k_dbg_sta_write_addba_resp,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
char buf[64];
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = '\0';
ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
if (ret != 3)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, initiator, reason);
if (ret) {
ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
arsta->arvif->vdev_id, sta->addr, tid, initiator,
reason);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_delba = {
.write = ath10k_dbg_sta_write_delba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
&fops_aggr_mode);
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
}
@@ -703,11 +703,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 	/* wait for response */
 	status = wait_for_completion_timeout(&htc->ctl_resp,
 					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
-	if (status <= 0) {
-		if (status == 0)
-			status = -ETIMEDOUT;
+	if (status == 0) {
 		ath10k_err(ar, "Service connect timeout: %d\n", status);
-		return status;
+		return -ETIMEDOUT;
 	}

 	/* we controlled the buffer creation, it's aligned */
...
@@ -53,7 +53,6 @@ int ath10k_htt_init(struct ath10k *ar)
 	struct ath10k_htt *htt = &ar->htt;

 	htt->ar = ar;
-	htt->max_throughput_mbps = 800;

 	/*
 	 * Prefetch enough data to satisfy target
@@ -102,7 +101,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
 	status = wait_for_completion_timeout(&htt->target_version_received,
 					     HTT_TARGET_VERSION_TIMEOUT_HZ);
-	if (status <= 0) {
+	if (status == 0) {
 		ath10k_warn(ar, "htt version request timed out\n");
 		return -ETIMEDOUT;
 	}
...
@@ -21,6 +21,7 @@
 #include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/dmapool.h>
+#include <linux/hashtable.h>
 #include <net/mac80211.h>

 #include "htc.h"
@@ -286,7 +287,19 @@ enum htt_t2h_msg_type {
 	HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
 	HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
 	HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
+	HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+	HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+	HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+	HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+	/* 0x13 reservd */
+	HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+
+	/* FIXME: Do not depend on this event id. Numbering of this event id is
+	 * broken across different firmware revisions and HTT version fails to
+	 * indicate this.
+	 */
 	HTT_T2H_MSG_TYPE_TEST,
+
 	/* keep this last */
 	HTT_T2H_NUM_MSGS
 };
@@ -655,6 +668,53 @@ struct htt_rx_fragment_indication {
 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6

+struct htt_rx_pn_ind {
+	__le16 peer_id;
+	u8 tid;
+	u8 seqno_start;
+	u8 seqno_end;
+	u8 pn_ie_count;
+	u8 reserved;
+	u8 pn_ies[0];
+} __packed;
+
+struct htt_rx_offload_msdu {
+	__le16 msdu_len;
+	__le16 peer_id;
+	u8 vdev_id;
+	u8 tid;
+	u8 fw_desc;
+	u8 payload[0];
+} __packed;
+
+struct htt_rx_offload_ind {
+	u8 reserved;
+	__le16 msdu_count;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc {
+	__le32 msdu_paddr;
+	__le16 msdu_len;
+	u8 fw_desc;
+	u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_ind {
+	u8 info;
+	__le16 peer_id;
+	u8 vdev_id;
+	u8 reserved;
+	__le16 msdu_count;
+	struct htt_rx_in_ord_msdu_desc msdu_descs[0];
+} __packed;
+
+#define HTT_RX_IN_ORD_IND_INFO_TID_MASK		0x0000001f
+#define HTT_RX_IN_ORD_IND_INFO_TID_LSB		0
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK	0x00000020
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB	5
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK	0x00000040
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB		6
+
 /*
  * target -> host test message definition
  *
@@ -1150,6 +1210,9 @@ struct htt_resp {
 		struct htt_rx_test rx_test;
 		struct htt_pktlog_msg pktlog_msg;
 		struct htt_stats_conf stats_conf;
+		struct htt_rx_pn_ind rx_pn_ind;
+		struct htt_rx_offload_ind rx_offload_ind;
+		struct htt_rx_in_ord_ind rx_in_ord_ind;
 	};
 } __packed;
@@ -1182,7 +1245,6 @@ struct ath10k_htt {
 	struct ath10k *ar;
 	enum ath10k_htc_ep_id eid;

-	int max_throughput_mbps;
 	u8 target_version_major;
 	u8 target_version_minor;
 	struct completion target_version_received;
@@ -1198,6 +1260,20 @@ struct ath10k_htt {
 		 * filled.
 		 */
 		struct sk_buff **netbufs_ring;
+
+		/* This is used only with firmware supporting IN_ORD_IND.
+		 *
+		 * With Full Rx Reorder the HTT Rx Ring is more of a temporary
+		 * buffer ring from which buffer addresses are copied by the
+		 * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
+		 * pointing to specific (re-ordered) buffers.
+		 *
+		 * FIXME: With kernel generic hashing functions there's a lot
+		 * of hash collisions for sk_buffs.
+		 */
+		bool in_ord_rx;
+		DECLARE_HASHTABLE(skb_table, 4);
+
 		/*
 		 * Ring of buffer addresses -
 		 * This ring holds the "physical" device address of the
@@ -1252,12 +1328,11 @@ struct ath10k_htt {
 	unsigned int prefetch_len;

-	/* Protects access to %pending_tx, %used_msdu_ids */
+	/* Protects access to pending_tx, num_pending_tx */
 	spinlock_t tx_lock;
 	int max_num_pending_tx;
 	int num_pending_tx;
-	struct sk_buff **pending_tx;
-	unsigned long *used_msdu_ids; /* bitmap */
+	struct idr pending_tx;
 	wait_queue_head_t empty_tx_wq;
 	struct dma_pool *tx_pool;
@@ -1271,6 +1346,7 @@ struct ath10k_htt {
 	struct tasklet_struct txrx_compl_task;
 	struct sk_buff_head tx_compl_q;
 	struct sk_buff_head rx_compl_q;
+	struct sk_buff_head rx_in_ord_compl_q;

 	/* rx_status template */
 	struct ieee80211_rx_status rx_status;
@@ -1334,6 +1410,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
 void ath10k_htt_tx_free(struct ath10k_htt *htt);

 int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_rx_ring_refill(struct ath10k *ar);
 void ath10k_htt_rx_free(struct ath10k_htt *htt);

 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
@@ -1346,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
 				u8 max_subfrms_amsdu);
 void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
...
...@@ -25,8 +25,8 @@ ...@@ -25,8 +25,8 @@
#include <linux/log2.h> #include <linux/log2.h>
#define HTT_RX_RING_SIZE 1024 #define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL 1000 #define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
/* when under memory pressure rx ring refill may fail and needs a retry */ /* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50 #define HTT_RX_RING_REFILL_RETRY_MS 50
...@@ -34,31 +34,70 @@ ...@@ -34,31 +34,70 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb); static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr); static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
{
struct ath10k_skb_rxcb *rxcb;
hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
if (rxcb->paddr == paddr)
return ATH10K_RXCB_SKB(rxcb);
WARN_ON_ONCE(1);
return NULL;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt) static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct ath10k_skb_cb *cb; struct ath10k_skb_rxcb *rxcb;
struct hlist_node *n;
int i; int i;
for (i = 0; i < htt->rx_ring.fill_cnt; i++) { if (htt->rx_ring.in_ord_rx) {
skb = htt->rx_ring.netbufs_ring[i]; hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
cb = ATH10K_SKB_CB(skb); skb = ATH10K_RXCB_SKB(rxcb);
dma_unmap_single(htt->ar->dev, cb->paddr, dma_unmap_single(htt->ar->dev, rxcb->paddr,
skb->len + skb_tailroom(skb), skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
dev_kfree_skb_any(skb); hash_del(&rxcb->hlist);
dev_kfree_skb_any(skb);
}
} else {
for (i = 0; i < htt->rx_ring.size; i++) {
skb = htt->rx_ring.netbufs_ring[i];
if (!skb)
continue;
rxcb = ATH10K_SKB_RXCB(skb);
dma_unmap_single(htt->ar->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
} }
htt->rx_ring.fill_cnt = 0; htt->rx_ring.fill_cnt = 0;
hash_init(htt->rx_ring.skb_table);
memset(htt->rx_ring.netbufs_ring, 0,
htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
} }
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{ {
struct htt_rx_desc *rx_desc; struct htt_rx_desc *rx_desc;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t paddr; dma_addr_t paddr;
int ret = 0, idx; int ret = 0, idx;
/* The Full Rx Reorder firmware has no way of telling the host
* implicitly when it copied HTT Rx Ring buffers to MAC Rx Ring.
* To keep things simple make sure ring is always half empty. This
* guarantees there'll be no replenishment overruns possible.
*/
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
while (num > 0) { while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN); skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
...@@ -86,17 +125,29 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) ...@@ -86,17 +125,29 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
goto fail; goto fail;
} }
ATH10K_SKB_CB(skb)->paddr = paddr; rxcb = ATH10K_SKB_RXCB(skb);
rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb; htt->rx_ring.netbufs_ring[idx] = skb;
htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr); htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
htt->rx_ring.fill_cnt++; htt->rx_ring.fill_cnt++;
if (htt->rx_ring.in_ord_rx) {
hash_add(htt->rx_ring.skb_table,
&ATH10K_SKB_RXCB(skb)->hlist,
(u32)paddr);
}
num--; num--;
idx++; idx++;
idx &= htt->rx_ring.size_mask; idx &= htt->rx_ring.size_mask;
} }
fail: fail:
/*
* Make sure the rx buffer is updated before available buffer
* index to avoid any potential rx ring corruption.
*/
mb();
*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx); *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
return ret; return ret;
} }
...@@ -153,22 +204,20 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg) ...@@ -153,22 +204,20 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
ath10k_htt_rx_msdu_buff_replenish(htt); ath10k_htt_rx_msdu_buff_replenish(htt);
} }
static void ath10k_htt_rx_ring_clean_up(struct ath10k_htt *htt) int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{ {
struct sk_buff *skb; struct ath10k_htt *htt = &ar->htt;
int i; int ret;
for (i = 0; i < htt->rx_ring.size; i++) { spin_lock_bh(&htt->rx_ring.lock);
skb = htt->rx_ring.netbufs_ring[i]; ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
if (!skb) htt->rx_ring.fill_cnt));
continue; spin_unlock_bh(&htt->rx_ring.lock);
dma_unmap_single(htt->ar->dev, ATH10K_SKB_CB(skb)->paddr, if (ret)
skb->len + skb_tailroom(skb), ath10k_htt_rx_ring_free(htt);
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb); return ret;
htt->rx_ring.netbufs_ring[i] = NULL;
}
} }
void ath10k_htt_rx_free(struct ath10k_htt *htt) void ath10k_htt_rx_free(struct ath10k_htt *htt)
...@@ -179,8 +228,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) ...@@ -179,8 +228,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->tx_compl_q); skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q); skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
ath10k_htt_rx_ring_clean_up(htt); ath10k_htt_rx_ring_free(htt);
dma_free_coherent(htt->ar->dev, dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size * (htt->rx_ring.size *
...@@ -212,6 +262,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) ...@@ -212,6 +262,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld; idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx]; msdu = htt->rx_ring.netbufs_ring[idx];
htt->rx_ring.netbufs_ring[idx] = NULL; htt->rx_ring.netbufs_ring[idx] = NULL;
htt->rx_ring.paddrs_ring[idx] = 0;
idx++; idx++;
idx &= htt->rx_ring.size_mask; idx &= htt->rx_ring.size_mask;
...@@ -219,7 +270,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) ...@@ -219,7 +270,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.fill_cnt--; htt->rx_ring.fill_cnt--;
dma_unmap_single(htt->ar->dev, dma_unmap_single(htt->ar->dev,
ATH10K_SKB_CB(msdu)->paddr, ATH10K_SKB_RXCB(msdu)->paddr,
msdu->len + skb_tailroom(msdu), msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ", ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
...@@ -379,6 +430,82 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr) ...@@ -379,6 +430,82 @@ static void ath10k_htt_rx_replenish_task(unsigned long ptr)
ath10k_htt_rx_msdu_buff_replenish(htt); ath10k_htt_rx_msdu_buff_replenish(htt);
} }
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
u32 paddr)
{
struct ath10k *ar = htt->ar;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *msdu;
lockdep_assert_held(&htt->rx_ring.lock);
msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
if (!msdu)
return NULL;
rxcb = ATH10K_SKB_RXCB(msdu);
hash_del(&rxcb->hlist);
htt->rx_ring.fill_cnt--;
dma_unmap_single(htt->ar->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
return msdu;
}
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_ind *ev,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
struct htt_rx_desc *rxd;
struct sk_buff *msdu;
int msdu_count;
bool is_offload;
u32 paddr;
lockdep_assert_held(&htt->rx_ring.lock);
msdu_count = __le16_to_cpu(ev->msdu_count);
is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
while (msdu_count--) {
paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!msdu) {
__skb_queue_purge(list);
return -ENOENT;
}
__skb_queue_tail(list, msdu);
if (!is_offload) {
rxd = (void *)msdu->data;
trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
skb_put(msdu, sizeof(*rxd));
skb_pull(msdu, sizeof(*rxd));
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
if (!(__le32_to_cpu(rxd->attention.flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
}
}
msdu_desc++;
}
return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt) int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{ {
struct ath10k *ar = htt->ar; struct ath10k *ar = htt->ar;
...@@ -424,7 +551,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) ...@@ -424,7 +551,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.alloc_idx.vaddr = vaddr; htt->rx_ring.alloc_idx.vaddr = vaddr;
htt->rx_ring.alloc_idx.paddr = paddr; htt->rx_ring.alloc_idx.paddr = paddr;
htt->rx_ring.sw_rd_idx.msdu_payld = 0; htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
*htt->rx_ring.alloc_idx.vaddr = 0; *htt->rx_ring.alloc_idx.vaddr = 0;
/* Initialize the Rx refill retry timer */ /* Initialize the Rx refill retry timer */
...@@ -433,14 +560,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) ...@@ -433,14 +560,15 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
spin_lock_init(&htt->rx_ring.lock); spin_lock_init(&htt->rx_ring.lock);
htt->rx_ring.fill_cnt = 0; htt->rx_ring.fill_cnt = 0;
if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level)) htt->rx_ring.sw_rd_idx.msdu_payld = 0;
goto err_fill_ring; hash_init(htt->rx_ring.skb_table);
tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task, tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
(unsigned long)htt); (unsigned long)htt);
skb_queue_head_init(&htt->tx_compl_q); skb_queue_head_init(&htt->tx_compl_q);
skb_queue_head_init(&htt->rx_compl_q); skb_queue_head_init(&htt->rx_compl_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q);
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task, tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt); (unsigned long)htt);
...@@ -449,12 +577,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) ...@@ -449,12 +577,6 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.size, htt->rx_ring.fill_level); htt->rx_ring.size, htt->rx_ring.fill_level);
return 0; return 0;
err_fill_ring:
ath10k_htt_rx_ring_free(htt);
dma_free_coherent(htt->ar->dev,
sizeof(*htt->rx_ring.alloc_idx.vaddr),
htt->rx_ring.alloc_idx.vaddr,
htt->rx_ring.alloc_idx.paddr);
err_dma_idx: err_dma_idx:
dma_free_coherent(htt->ar->dev, dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size * (htt->rx_ring.size *
...@@ -691,7 +813,7 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar, ...@@ -691,7 +813,7 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
* *
* FIXME: Can we get/compute 64bit TSF? * FIXME: Can we get/compute 64bit TSF?
*/ */
status->mactime = __le32_to_cpu(rxd->ppdu_end.tsf_timestamp); status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END; status->flag |= RX_FLAG_MACTIME_END;
} }
...@@ -1578,6 +1700,194 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp) ...@@ -1578,6 +1700,194 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
spin_unlock_bh(&ar->data_lock); spin_unlock_bh(&ar->data_lock);
} }
static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
struct sk_buff_head *amsdu)
{
struct sk_buff *msdu;
struct htt_rx_desc *rxd;
if (skb_queue_empty(list))
return -ENOBUFS;
if (WARN_ON(!skb_queue_empty(amsdu)))
return -EINVAL;
while ((msdu = __skb_dequeue(list))) {
__skb_queue_tail(amsdu, msdu);
rxd = (void *)msdu->data - sizeof(*rxd);
if (rxd->msdu_end.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
rxd = (void *)msdu->data - sizeof(*rxd);
if (!(rxd->msdu_end.info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
}
return 0;
}
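The extraction helper above dequeues MSDUs until one carries RX_MSDU_END_INFO0_LAST_MSDU; if the list runs out first, everything is spliced back and -EAGAIN tells the caller the chain is incomplete. A minimal userspace model of that "collect until the last-flag" loop, with purely illustrative names, might look like this:

#include <stdio.h>

#define LAST_MSDU 0x1

struct msdu { unsigned flags; };

/* Returns number of msdus consumed into amsdu[], or -1 (retry later)
 * when the list ran out before a LAST_MSDU-style flag was seen. */
static int extract_amsdu(const struct msdu *list, int n, const struct msdu **amsdu)
{
        int used = 0;

        while (used < n) {
                amsdu[used] = &list[used];
                used++;
                if (list[used - 1].flags & LAST_MSDU)
                        return used; /* complete A-MSDU */
        }
        return -1; /* incomplete: caller should splice back and retry */
}

int main(void)
{
        const struct msdu complete[] = { {0}, {0}, {LAST_MSDU} };
        const struct msdu partial[] = { {0}, {0} };
        const struct msdu *out[8];

        printf("complete chain -> %d msdus\n", extract_amsdu(complete, 3, out));
        printf("partial chain  -> %d (retry)\n", extract_amsdu(partial, 2, out));
        return 0;
}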
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_has_protected(hdr->frame_control))
return;
/* Offloaded frames are already decrypted but firmware insists they are
* protected in the 802.11 header. Strip the flag. Otherwise mac80211
* will drop the frame.
*/
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
}
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have Rx descriptor. Instead they have
* a short meta information header.
*/
rx = (void *)msdu->data;
skb_put(msdu, sizeof(*rx));
skb_pull(msdu, sizeof(*rx));
if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
dev_kfree_skb_any(msdu);
continue;
}
skb_put(msdu, __le16_to_cpu(rx->msdu_len));
/* Offloaded rx header length isn't multiple of 2 nor 4 so the
* actual payload is unaligned. Align the frame. Otherwise
* mac80211 complains. This shouldn't reduce performance much
* because these offloaded frames are rare.
*/
offset = 4 - ((unsigned long)msdu->data & 3);
skb_put(msdu, offset);
memmove(msdu->data + offset, msdu->data, msdu->len);
skb_pull(msdu, offset);
/* FIXME: The frame is NWifi. Re-construct QoS Control
* if possible later.
*/
memset(status, 0, sizeof(*status));
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status);
ath10k_process_rx(ar, status, msdu);
}
}
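The alignment fix-up above relies on `4 - (addr & 3)` always yielding a shift of 1..4 bytes; note that an already-aligned payload still gets moved by 4 (harmless, never 0). A standalone demo of just that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long addrs[] = { 0x1000, 0x1001, 0x1002, 0x1003 };

        for (int i = 0; i < 4; i++) {
                /* Same expression as the driver uses on msdu->data. */
                unsigned long offset = 4 - (addrs[i] & 3);

                printf("data %#lx -> shift by %lu -> %#lx (4-byte aligned)\n",
                       addrs[i], offset, addrs[i] + offset);
        }
        return 0;
}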
static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (void *)skb->data;
struct ieee80211_rx_status *status = &htt->rx_status;
struct sk_buff_head list;
struct sk_buff_head amsdu;
u16 peer_id;
u16 msdu_count;
u8 vdev_id;
u8 tid;
bool offload;
bool frag;
int ret;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused)
return;
skb_pull(skb, sizeof(resp->hdr));
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
vdev_id = resp->rx_in_ord_ind.vdev_id;
tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
offload = !!(resp->rx_in_ord_ind.info &
HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
vdev_id, peer_id, tid, offload, frag, msdu_count);
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
return;
}
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
* extracted and processed.
*/
__skb_queue_head_init(&list);
ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
return;
}
/* Offloaded frames are very different and need to be handled
* separately.
*/
if (offload)
ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
switch (ret) {
case 0:
/* Note: The in-order indication may report interleaved
* frames from different PPDUs meaning reported rx rate
* to mac80211 isn't accurate/reliable. It's still
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
break;
case -EAGAIN:
/* fall through */
default:
/* Should not happen. */
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
htt->rx_confused = true;
__skb_queue_purge(&list);
return;
}
}
tasklet_schedule(&htt->rx_replenish_task);
}
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1700,6 +2010,20 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
*/
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
spin_lock_bh(&htt->rx_ring.lock);
__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
spin_unlock_bh(&htt->rx_ring.lock);
tasklet_schedule(&htt->txrx_compl_task);
return;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
/* FIXME: This WMI-TLV event is overlapping with 10.2
* CHAN_CHANGE - both being 0xF. Neither is being used in
* practice so no immediate action is necessary. Nevertheless
* HTT may need an abstraction layer like WMI has one day.
*/
break;
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
@@ -1715,6 +2039,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
struct ath10k *ar = htt->ar;
struct htt_resp *resp;
struct sk_buff *skb;
@@ -1731,5 +2056,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
ath10k_htt_rx_handler(htt, &resp->rx_ind);
dev_kfree_skb_any(skb);
}
while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
ath10k_htt_rx_in_ord_ind(ar, skb);
dev_kfree_skb_any(skb);
}
spin_unlock_bh(&htt->rx_ring.lock);
}
@@ -56,21 +56,18 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
return ret;
}
-int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
struct ath10k *ar = htt->ar;
-int msdu_id;
int ret;
lockdep_assert_held(&htt->tx_lock);
-msdu_id = find_first_zero_bit(htt->used_msdu_ids,
-htt->max_num_pending_tx);
-if (msdu_id == htt->max_num_pending_tx)
-return -ENOBUFS;
-ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
-__set_bit(msdu_id, htt->used_msdu_ids);
-return msdu_id;
ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
return ret;
}
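The idr_alloc() call above both reserves the lowest free id below 0x10000 and records the skb under it, which is why the old used_msdu_ids bitmap and pending_tx array can go away. A toy userspace model of what the IDR buys here (the real kernel IDR is a radix tree, not a flat array; names below are illustrative):

#include <stdio.h>

#define MAX_IDS 8 /* the driver allows up to 0x10000 */

static void *slots[MAX_IDS];

static int toy_idr_alloc(void *ptr)
{
        for (int id = 0; id < MAX_IDS; id++) {
                if (!slots[id]) {
                        slots[id] = ptr; /* id -> pointer in one step */
                        return id;
                }
        }
        return -1; /* the real API reports an errno here */
}

static void *toy_idr_find(int id)
{
        return (id >= 0 && id < MAX_IDS) ? slots[id] : NULL;
}

static void toy_idr_remove(int id)
{
        if (id >= 0 && id < MAX_IDS)
                slots[id] = NULL;
}

int main(void)
{
        char frame[] = "msdu payload";
        int id = toy_idr_alloc(frame);

        printf("alloc -> id %d\n", id);
        printf("find  -> %s\n", (char *)toy_idr_find(id));
        toy_idr_remove(id);
        printf("after remove -> %s\n", toy_idr_find(id) ? "found" : "empty");
        return 0;
}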
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
@@ -79,74 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
lockdep_assert_held(&htt->tx_lock);
-if (!test_bit(msdu_id, htt->used_msdu_ids))
-ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
-msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
-__clear_bit(msdu_id, htt->used_msdu_ids);
idr_remove(&htt->pending_tx, msdu_id);
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
-spin_lock_init(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
-htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
-htt->max_num_pending_tx, GFP_KERNEL);
-if (!htt->pending_tx)
-return -ENOMEM;
-htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
-BITS_TO_LONGS(htt->max_num_pending_tx),
-GFP_KERNEL);
-if (!htt->used_msdu_ids) {
-kfree(htt->pending_tx);
-return -ENOMEM;
-}
spin_lock_init(&htt->tx_lock);
idr_init(&htt->pending_tx);
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) {
-kfree(htt->used_msdu_ids);
-kfree(htt->pending_tx);
idr_destroy(&htt->pending_tx);
return -ENOMEM;
}
return 0;
}
-static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
-struct ath10k *ar = htt->ar;
struct ath10k *ar = ctx;
struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {0};
-int msdu_id;
-spin_lock_bh(&htt->tx_lock);
-for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
-if (!test_bit(msdu_id, htt->used_msdu_ids))
-continue;
-ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
-msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
-ath10k_txrx_tx_unref(htt, &tx_done);
-}
spin_lock_bh(&htt->tx_lock);
ath10k_txrx_tx_unref(htt, &tx_done);
spin_unlock_bh(&htt->tx_lock);
return 0;
}
void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
-ath10k_htt_tx_free_pending(htt);
idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
-kfree(htt->pending_tx);
-kfree(htt->used_msdu_ids);
idr_destroy(&htt->pending_tx);
dma_pool_destroy(htt->tx_pool);
}
@@ -378,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock);
-res = ath10k_htt_tx_alloc_msdu_id(htt);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
-htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len);
@@ -423,7 +398,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
dev_kfree_skb_any(txdesc);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
-htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
@@ -455,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
goto err;
spin_lock_bh(&htt->tx_lock);
-res = ath10k_htt_tx_alloc_msdu_id(htt);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) {
spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec;
}
msdu_id = res;
-htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len);
@@ -475,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr);
-if (!skb_cb->htt.txbuf)
if (!skb_cb->htt.txbuf) {
res = -ENOMEM;
goto err_free_msdu_id;
}
skb_cb->htt.txbuf_paddr = paddr;
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control))
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
@@ -534,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
-flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
if (msdu->ip_summed == CHECKSUM_PARTIAL) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
}
/* Prevent firmware from sending up tx inspection requests. There's
* nothing ath10k can do with frames requested for inspection so force
@@ -593,7 +576,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
-htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
......
/*
-* Copyright (c) 2014 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,11 +14,45 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#ifndef __WIL_PLATFORM__MSM_H__
-#define __WIL_PLATFORM_MSM_H__
-#include "wil_platform.h"
-void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops);
-#endif /* __WIL_PLATFORM__MSM_H__ */
#include <linux/types.h>
#include "hw.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_state_cold_reset_mask = 0x00000400,
.rtc_soc_base_address = 0x00004000,
.rtc_wmac_base_address = 0x00005000,
.soc_core_base_address = 0x00009000,
.ce_wrapper_base_address = 0x00057000,
.ce0_base_address = 0x00057400,
.ce1_base_address = 0x00057800,
.ce2_base_address = 0x00057c00,
.ce3_base_address = 0x00058000,
.ce4_base_address = 0x00058400,
.ce5_base_address = 0x00058800,
.ce6_base_address = 0x00058c00,
.ce7_base_address = 0x00059000,
.soc_reset_control_si0_rst_mask = 0x00000001,
.soc_reset_control_ce_rst_mask = 0x00040000,
.soc_chip_id_address = 0x00ec,
.scratch_3_address = 0x0030,
};
const struct ath10k_hw_regs qca6174_regs = {
.rtc_state_cold_reset_mask = 0x00002000,
.rtc_soc_base_address = 0x00000800,
.rtc_wmac_base_address = 0x00001000,
.soc_core_base_address = 0x0003a000,
.ce_wrapper_base_address = 0x00034000,
.ce0_base_address = 0x00034400,
.ce1_base_address = 0x00034800,
.ce2_base_address = 0x00034c00,
.ce3_base_address = 0x00035000,
.ce4_base_address = 0x00035400,
.ce5_base_address = 0x00035800,
.ce6_base_address = 0x00035c00,
.ce7_base_address = 0x00036000,
.soc_reset_control_si0_rst_mask = 0x00000000,
.soc_reset_control_ce_rst_mask = 0x00000001,
.soc_chip_id_address = 0x000f0,
.scratch_3_address = 0x0028,
};
@@ -34,6 +34,44 @@
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA6174 target BMI version signatures */
#define QCA6174_HW_1_0_VERSION 0x05000000
#define QCA6174_HW_1_1_VERSION 0x05000001
#define QCA6174_HW_1_3_VERSION 0x05000003
#define QCA6174_HW_2_1_VERSION 0x05010000
#define QCA6174_HW_3_0_VERSION 0x05020000
#define QCA6174_HW_3_2_VERSION 0x05030000
enum qca6174_pci_rev {
QCA6174_PCI_REV_1_1 = 0x11,
QCA6174_PCI_REV_1_3 = 0x13,
QCA6174_PCI_REV_2_0 = 0x20,
QCA6174_PCI_REV_3_0 = 0x30,
};
enum qca6174_chip_id_rev {
QCA6174_HW_1_0_CHIP_ID_REV = 0,
QCA6174_HW_1_1_CHIP_ID_REV = 1,
QCA6174_HW_1_3_CHIP_ID_REV = 2,
QCA6174_HW_2_1_CHIP_ID_REV = 4,
QCA6174_HW_2_2_CHIP_ID_REV = 5,
QCA6174_HW_3_0_CHIP_ID_REV = 8,
QCA6174_HW_3_1_CHIP_ID_REV = 9,
QCA6174_HW_3_2_CHIP_ID_REV = 10,
};
#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
#define QCA6174_HW_3_0_FW_FILE "firmware.bin"
#define QCA6174_HW_3_0_OTP_FILE "otp.bin"
#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@@ -81,6 +119,37 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_MAX,
};
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
};
struct ath10k_hw_regs {
u32 rtc_state_cold_reset_mask;
u32 rtc_soc_base_address;
u32 rtc_wmac_base_address;
u32 soc_core_base_address;
u32 ce_wrapper_base_address;
u32 ce0_base_address;
u32 ce1_base_address;
u32 ce2_base_address;
u32 ce3_base_address;
u32 ce4_base_address;
u32 ce5_base_address;
u32 ce6_base_address;
u32 ce7_base_address;
u32 soc_reset_control_si0_rst_mask;
u32 soc_reset_control_ce_rst_mask;
u32 soc_chip_id_address;
u32 scratch_3_address;
};
extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
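With the offsets moved behind ath10k_hw_regs, the same accessor resolves to different addresses depending on which chip's table is attached at probe time. A compact sketch of that indirection (pick_regs() is an illustrative helper, not driver code; the two CE0 offsets are the ones from the tables above):

#include <stdio.h>

enum hw_rev { HW_QCA988X, HW_QCA6174 };

struct hw_regs { unsigned ce0_base_address; };

static const struct hw_regs qca988x = { .ce0_base_address = 0x00057400 };
static const struct hw_regs qca6174 = { .ce0_base_address = 0x00034400 };

/* Stand-in for attaching ar->regs based on the detected hw revision. */
static const struct hw_regs *pick_regs(enum hw_rev rev)
{
        return rev == HW_QCA988X ? &qca988x : &qca6174;
}

int main(void)
{
        printf("QCA988X CE0 at %#x\n", pick_regs(HW_QCA988X)->ce0_base_address);
        printf("QCA6174 CE0 at %#x\n", pick_regs(HW_QCA6174)->ce0_base_address);
        return 0;
}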
/* Known pecularities:
* - current FW doesn't support raw rx mode (last tested v599)
* - current FW dumps upon raw tx mode (last tested v599)
@@ -183,6 +252,9 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10X_MAX_FRAG_ENTRIES 0
/* 10.2 parameters */
#define TARGET_10_2_DMA_BURST_SIZE 1
/* Target specific defines for WMI-TLV firmware */
#define TARGET_TLV_NUM_VDEVS 3
#define TARGET_TLV_NUM_STATIONS 32
@@ -222,7 +294,7 @@ struct ath10k_pktlog_hdr {
/* as of IP3.7.1 */
#define RTC_STATE_V_ON 3
-#define RTC_STATE_COLD_RESET_MASK 0x00000400
#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000
@@ -231,12 +303,12 @@ struct ath10k_pktlog_hdr {
#define PCIE_SOC_WAKE_RESET 0x00000000
#define SOC_GLOBAL_RESET_ADDRESS 0x0008
-#define RTC_SOC_BASE_ADDRESS 0x00004000
#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
-#define RTC_WMAC_BASE_ADDRESS 0x00005000
#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
#define MAC_COEX_BASE_ADDRESS 0x00006000
#define BT_COEX_BASE_ADDRESS 0x00007000
#define SOC_PCIE_BASE_ADDRESS 0x00008000
-#define SOC_CORE_BASE_ADDRESS 0x00009000
#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
#define WLAN_UART_BASE_ADDRESS 0x0000c000
#define WLAN_SI_BASE_ADDRESS 0x00010000
#define WLAN_GPIO_BASE_ADDRESS 0x00014000
@@ -245,23 +317,23 @@ struct ath10k_pktlog_hdr {
#define EFUSE_BASE_ADDRESS 0x00030000
#define FPGA_REG_BASE_ADDRESS 0x00039000
#define WLAN_UART2_BASE_ADDRESS 0x00054c00
-#define CE_WRAPPER_BASE_ADDRESS 0x00057000
#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
-#define CE0_BASE_ADDRESS 0x00057400
#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
-#define CE1_BASE_ADDRESS 0x00057800
#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
-#define CE2_BASE_ADDRESS 0x00057c00
#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
-#define CE3_BASE_ADDRESS 0x00058000
#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
-#define CE4_BASE_ADDRESS 0x00058400
#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
-#define CE5_BASE_ADDRESS 0x00058800
#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
-#define CE6_BASE_ADDRESS 0x00058c00
#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
-#define CE7_BASE_ADDRESS 0x00059000
#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
#define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000
#define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000
-#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001
#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
-#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000
#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0
@@ -275,7 +347,7 @@ struct ath10k_pktlog_hdr {
#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
-#define SOC_CHIP_ID_ADDRESS 0x000000ec
#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
#define SOC_CHIP_ID_REV_LSB 8
#define SOC_CHIP_ID_REV_MASK 0x00000f00
@@ -331,7 +403,7 @@ struct ath10k_pktlog_hdr {
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014
-#define SCRATCH_3_ADDRESS 0x0030
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
/* Firmware indications to the Host via SCRATCH_3 register. */
......
@@ -58,9 +58,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
#define QCA988X_2_0_DEVICE_ID (0x003c)
#define QCA6174_2_1_DEVICE_ID (0x003e)
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{0}
};
@@ -70,6 +72,11 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
* because of that.
*/
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
@@ -403,7 +410,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
return -EIO;
}
-ATH10K_SKB_CB(skb)->paddr = paddr;
ATH10K_SKB_RXCB(skb)->paddr = paddr;
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
if (ret) {
@@ -872,7 +879,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
&flags) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
-dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
@@ -1238,7 +1245,7 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
ce_ring->per_transfer_context[i] = NULL;
-dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1506,6 +1513,35 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
return 0;
}
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
return 1;
case QCA6174_2_1_DEVICE_ID:
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV:
return 3;
case QCA6174_HW_1_3_CHIP_ID_REV:
return 2;
case QCA6174_HW_2_1_CHIP_ID_REV:
case QCA6174_HW_2_2_CHIP_ID_REV:
return 6;
case QCA6174_HW_3_0_CHIP_ID_REV:
case QCA6174_HW_3_1_CHIP_ID_REV:
case QCA6174_HW_3_2_CHIP_ID_REV:
return 9;
}
break;
}
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
return 1;
}
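ath10k_pci_get_num_banks() keys off MS(ar->chip_id, SOC_CHIP_ID_REV), which masks and shifts using the SOC_CHIP_ID_REV_MASK (0x00000f00) and LSB (8) defined in hw.h above. A standalone demo of that mask-and-shift pattern, with a made-up chip id value for illustration:

#include <stdio.h>

#define SOC_CHIP_ID_REV_LSB     8
#define SOC_CHIP_ID_REV_MASK    0x00000f00

/* Same shape as the kernel's MS() helper: isolate a field, shift it down. */
#define MS(val, field) (((val) & field##_MASK) >> field##_LSB)

int main(void)
{
        unsigned chip_id = 0x00000a05; /* hypothetical sample value */

        /* rev field = 0xa = 10, i.e. QCA6174_HW_3_2_CHIP_ID_REV above. */
        printf("chip_id %#010x -> rev %lu\n",
               chip_id, (unsigned long)MS(chip_id, SOC_CHIP_ID_REV));
        return 0;
}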
static int ath10k_pci_init_config(struct ath10k *ar)
{
u32 interconnect_targ_addr;
@@ -1616,7 +1652,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
/* first bank is switched to IRAM */
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
HI_EARLY_ALLOC_MAGIC_MASK);
-ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
@@ -1812,12 +1849,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_chip_reset(struct ath10k *ar)
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
int i, ret;
u32 val;
-ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n");
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
* It is thus preferred to use warm reset which is safer but may not be
@@ -1881,11 +1918,53 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
return ret;
}
-ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n");
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
return 0;
}
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
/* FIXME: QCA6174 requires cold + warm reset to work. */
ret = ath10k_pci_cold_reset(ar);
if (ret) {
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
return ret;
}
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
ret);
return ret;
}
ret = ath10k_pci_warm_reset(ar);
if (ret) {
ath10k_warn(ar, "failed to warm reset: %d\n", ret);
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
return 0;
}
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
if (QCA_REV_988X(ar))
return ath10k_pci_qca988x_chip_reset(ar);
else if (QCA_REV_6174(ar))
return ath10k_pci_qca6174_chip_reset(ar);
else
return -ENOTSUPP;
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
int ret;
@@ -1910,6 +1989,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
*/
ret = ath10k_pci_chip_reset(ar);
if (ret) {
if (ath10k_pci_has_fw_crashed(ar)) {
ath10k_warn(ar, "firmware crashed during chip reset\n");
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
}
ath10k_err(ar, "failed to reset chip: %d\n", ret); ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_sleep; goto err_sleep;
} }
...@@ -2041,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data) ...@@ -2041,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data)
return; return;
} }
ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar); ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar); ath10k_pci_fw_crashed_dump(ar);
} }
...@@ -2110,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data) ...@@ -2110,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (ath10k_pci_has_fw_crashed(ar)) { if (ath10k_pci_has_fw_crashed(ar)) {
ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
return;
@@ -2352,8 +2439,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_EVENT_PENDING) {
ath10k_warn(ar, "device has crashed during init\n");
-ath10k_pci_fw_crashed_clear(ar);
-ath10k_pci_fw_crashed_dump(ar);
return -ECOMM;
}
@@ -2507,11 +2592,23 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
int ret = 0;
struct ath10k *ar;
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
u32 chip_id;
-ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
-ATH10K_BUS_PCI,
-&ath10k_pci_hif_ops);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
break;
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
break;
default:
WARN_ON(1);
return -ENOTSUPP;
}
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
hw_rev, &ath10k_pci_hif_ops);
if (!ar) {
dev_err(&pdev->dev, "failed to allocate core\n");
return -ENOMEM;
@@ -2540,18 +2637,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_release;
}
-chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
-if (chip_id == 0xffffffff) {
-ath10k_err(ar, "failed to get chip id\n");
-goto err_sleep;
-}
-if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
-ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
-pdev->device, chip_id);
-goto err_sleep;
-}
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2578,6 +2663,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
ret = ath10k_pci_chip_reset(ar);
if (ret) {
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_free_irq;
}
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
goto err_free_irq;
}
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
goto err_sleep;
}
ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id);
......
@@ -194,7 +194,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define ATH10K_PCI_RX_POST_RETRY_MS 50
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
-#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */
#define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
#define BAR_NUM 0
......
@@ -850,7 +850,7 @@ struct rx_ppdu_start {
#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
-struct rx_ppdu_end {
struct rx_ppdu_end_common {
__le32 evm_p0;
__le32 evm_p1;
__le32 evm_p2;
@@ -873,10 +873,33 @@ struct rx_ppdu_end {
u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */
} __packed;
struct rx_ppdu_end_qca988x {
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
#define RX_PPDU_END_RTT_UNUSED_LSB 24
#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
struct rx_ppdu_end_qca6174 {
__le32 rtt; /* %RX_PPDU_END_RTT_ */
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
} __packed;
} __packed;
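Splitting rx_ppdu_end into a shared prefix plus a per-chip union keeps a single struct type while letting the QCA6174 tail carry an extra 4-byte rtt word. A compile-and-run sketch of that layout idea (field set trimmed to the tail members shown above; the real structs carry many more common fields):

#include <stdio.h>
#include <stdint.h>

struct tail_qca988x {
        uint16_t bb_length;
        uint16_t info1;
} __attribute__((packed));

struct tail_qca6174 {
        uint32_t rtt; /* extra word only QCA6174 reports */
        uint16_t bb_length;
        uint16_t info1;
} __attribute__((packed));

struct ppdu_end {
        uint32_t common; /* stand-in for the shared rx_ppdu_end_common */
        union {
                struct tail_qca988x qca988x;
                struct tail_qca6174 qca6174;
        } __attribute__((packed)) u;
} __attribute__((packed));

int main(void)
{
        /* The union is sized for the larger (QCA6174) tail. */
        printf("qca988x tail: %zu bytes\n", sizeof(struct tail_qca988x));
        printf("qca6174 tail: %zu bytes\n", sizeof(struct tail_qca6174));
        printf("ppdu_end:     %zu bytes\n", sizeof(struct ppdu_end));
        return 0;
}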
/*
* evm_p0
* EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
......
@@ -18,6 +18,8 @@
#ifndef __TARGADDRS_H__
#define __TARGADDRS_H__
#include "hw.h"
/*
* xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
* host_interest structure. It must match the address of the _host_interest
@@ -445,4 +447,7 @@ Fw Mode/SubMode Mask
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
#endif /* __TARGADDRS_H__ */
@@ -98,7 +98,7 @@ static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
}
period = max(ATH10K_QUIET_PERIOD_MIN,
(ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
-duration = period * (duty_cycle / 100);
duration = (period * duty_cycle) / 100;
enabled = duration ? 1 : 0;
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
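The duty-cycle fix above matters because integer division truncates: for any duty_cycle below 100, `duty_cycle / 100` is 0, so the old expression always produced a zero duration and silently disabled quiet mode. A tiny demo of the two orderings (values are illustrative):

#include <stdio.h>

int main(void)
{
        unsigned period = 100, duty_cycle = 30;

        unsigned old_duration = period * (duty_cycle / 100); /* 100 * 0 = 0 */
        unsigned new_duration = (period * duty_cycle) / 100; /* 3000 / 100  */

        printf("old: %u (quiet mode silently disabled)\n", old_duration);
        printf("new: %u\n", new_duration);
        return 0;
}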
@@ -160,7 +160,8 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock);
-ret = snprintf(buf, PAGE_SIZE, "%d", temperature);
/* display in millidegree celcius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
return ret;
@@ -215,7 +216,7 @@ int ath10k_thermal_register(struct ath10k *ar)
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */
-if (!config_enabled(HWMON))
if (!config_enabled(CONFIG_HWMON))
return 0;
hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
......
@@ -453,6 +453,74 @@ TRACE_EVENT(ath10k_htt_rx_desc,
)
);
TRACE_EVENT(ath10k_wmi_diag_container,
TP_PROTO(struct ath10k *ar,
u8 type,
u32 timestamp,
u32 code,
u16 len,
const void *data),
TP_ARGS(ar, type, timestamp, code, len, data),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u8, type)
__field(u32, timestamp)
__field(u32, code)
__field(u16, len)
__dynamic_array(u8, data, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->type = type;
__entry->timestamp = timestamp;
__entry->code = code;
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
TP_printk(
"%s %s diag container type %hhu timestamp %u code %u len %d",
__get_str(driver),
__get_str(device),
__entry->type,
__entry->timestamp,
__entry->code,
__entry->len
)
);
TRACE_EVENT(ath10k_wmi_diag,
TP_PROTO(struct ath10k *ar, const void *data, size_t len),
TP_ARGS(ar, data, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u16, len)
__dynamic_array(u8, data, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
TP_printk(
"%s %s tlv diag len %d",
__get_str(driver),
__get_str(device),
__entry->len
)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
......
@@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
return;
}
-msdu = htt->pending_tx[tx_done->msdu_id];
msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
if (!msdu) {
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
return;
}
skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
@@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
/* we do not own the msdu anymore */
exit:
-htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
......
@@ -78,6 +78,8 @@ struct wmi_ops {
const struct wmi_vdev_spectral_conf_arg *arg);
struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
u32 trigger, u32 enable);
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
@@ -102,16 +104,20 @@ struct wmi_ops {
u32 value);
struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg);
-struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif);
struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
bool deliver_cab);
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
-const struct wmi_pdev_set_wmm_params_arg *arg);
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
enum wmi_stats_id stats_id);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
-struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable);
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
@@ -119,6 +125,30 @@ struct wmi_ops {
u32 next_offset,
u32 enabled);
struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
const u8 *mac);
struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid, u32 buf_size);
struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid,
u32 status);
struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid, u32 initiator,
u32 reason);
struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
u32 tim_ie_offset, struct sk_buff *bcn,
u32 prb_caps, u32 prb_erp,
void *prb_ies, size_t prb_ies_len);
struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
struct sk_buff *bcn);
struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
const u8 *p2p_ie);
struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
const struct wmi_sta_uapsd_auto_trig_arg *args,
u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -557,6 +587,42 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
const struct wmi_sta_uapsd_auto_trig_arg *args,
u32 num_ac)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_vdev_sta_uapsd)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
num_ac);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg)
{
struct sk_buff *skb;
u32 cmd_id;
skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
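Most of these inline wrappers follow one pattern: bail out with -EOPNOTSUPP when the active firmware flavour's ops table leaves a hook NULL, otherwise build the command buffer and send it. A compact userspace sketch of that optional-ops dispatch (names are illustrative; EOPNOTSUPP comes from <errno.h> on Linux):

#include <stdio.h>
#include <errno.h>

struct wmi_ops {
        int (*gen_dbglog_cfg)(unsigned module_enable, unsigned log_level);
};

static int tlv_gen_dbglog_cfg(unsigned module_enable, unsigned log_level)
{
        printf("dbglog cfg: modules %#x level %u\n", module_enable, log_level);
        return 0;
}

/* One flavour implements the hook; another may leave it NULL. */
static const struct wmi_ops tlv_ops = { .gen_dbglog_cfg = tlv_gen_dbglog_cfg };
static const struct wmi_ops bare_ops = { 0 };

static int wmi_dbglog_cfg(const struct wmi_ops *ops, unsigned en, unsigned lvl)
{
        if (!ops->gen_dbglog_cfg)
                return -EOPNOTSUPP; /* feature absent in this firmware */
        return ops->gen_dbglog_cfg(en, lvl);
}

int main(void)
{
        printf("tlv:  %d\n", wmi_dbglog_cfg(&tlv_ops, 0xffff, 2));
        printf("bare: %d (-EOPNOTSUPP)\n", wmi_dbglog_cfg(&bare_ops, 0xffff, 2));
        return 0;
}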
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN])
@@ -706,16 +772,19 @@ ath10k_wmi_peer_assoc(struct ath10k *ar,
}
static inline int
-ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
bool deliver_cab)
{
-struct ath10k *ar = arvif->ar;
struct sk_buff *skb;
int ret;
if (!ar->wmi.ops->gen_beacon_dma)
return -EOPNOTSUPP;
-skb = ar->wmi.ops->gen_beacon_dma(arvif);
skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
dtim_zero, deliver_cab);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -731,7 +800,7 @@ ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
-const struct wmi_pdev_set_wmm_params_arg *arg)
const struct wmi_wmm_params_all_arg *arg)
{
struct sk_buff *skb;
@@ -778,14 +847,14 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
}
static inline int
-ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_dbglog_cfg)
return -EOPNOTSUPP;
-skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable);
skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -857,4 +926,139 @@ ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
ar->wmi.cmd->pdev_get_temperature_cmdid);
}
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_clear_resp)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_clear_resp_cmdid);
}
static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 buf_size)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_send)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_send_cmdid);
}
static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 status)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_set_resp)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_set_resp_cmdid);
}
static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_delba_send)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
reason);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->delba_send_cmdid);
}
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
void *prb_ies, size_t prb_ies_len)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_bcn_tmpl)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
prb_caps, prb_erp, prb_ies,
prb_ies_len);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}
static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_prb_tmpl)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}
static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}
static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_sta_keepalive)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
#endif
@@ -58,6 +58,10 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wlan_host_mem_req) },
[WMI_TLV_TAG_STRUCT_READY_EVENT]
= { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
= { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
= { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
};
static int
@@ -156,6 +160,142 @@ static u16 ath10k_wmi_tlv_len(const void *ptr)
return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
}
/**************/
/* TLV events */
/**************/
static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_tlv_bcn_tx_status_ev *ev;
u32 vdev_id, tx_status;
int ret;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
if (!ev) {
kfree(tb);
return -EPROTO;
}
tx_status = __le32_to_cpu(ev->tx_status);
vdev_id = __le32_to_cpu(ev->vdev_id);
switch (tx_status) {
case WMI_TLV_BCN_TX_STATUS_OK:
break;
case WMI_TLV_BCN_TX_STATUS_XRETRY:
case WMI_TLV_BCN_TX_STATUS_DROP:
case WMI_TLV_BCN_TX_STATUS_FILTERED:
/* FIXME: It's probably worth telling mac80211 to stop the
* interface as it is crippled.
*/
ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
vdev_id, tx_status);
break;
}
kfree(tb);
return 0;
}
static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_tlv_diag_data_ev *ev;
const struct wmi_tlv_diag_item *item;
const void *data;
int ret, num_items, len;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
data = tb[WMI_TLV_TAG_ARRAY_BYTE];
if (!ev || !data) {
kfree(tb);
return -EPROTO;
}
num_items = __le32_to_cpu(ev->num_items);
len = ath10k_wmi_tlv_len(data);
while (num_items--) {
if (len == 0)
break;
if (len < sizeof(*item)) {
ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
break;
}
item = data;
if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
ath10k_warn(ar, "failed to parse diag data: item is too long\n");
break;
}
trace_ath10k_wmi_diag_container(ar,
item->type,
__le32_to_cpu(item->timestamp),
__le32_to_cpu(item->code),
__le16_to_cpu(item->len),
item->payload);
len -= sizeof(*item);
len -= roundup(__le16_to_cpu(item->len), 4);
data += sizeof(*item);
data += roundup(__le16_to_cpu(item->len), 4);
}
if (num_items != -1 || len != 0)
ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
num_items, len);
kfree(tb);
return 0;
}
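The container is parsed as a packed run of variable-length items, each padded out to a 4-byte boundary; num_items and the byte-array length are expected to run out together. A sketch of the per-item stride, assuming the struct wmi_tlv_diag_item layout declared later in wmi-tlv.h (helper name is illustrative, not from this patch):

/* Illustrative helper: bytes consumed by one diag item in the loop
 * above - the fixed header plus the payload padded to 4 bytes, so a
 * 5-byte payload consumes sizeof(*item) + 8 bytes in total.
 */
static size_t example_diag_item_stride(const struct wmi_tlv_diag_item *item)
{
	return sizeof(*item) + roundup(__le16_to_cpu(item->len), 4);
}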
static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
struct sk_buff *skb)
{
const void **tb;
const void *data;
int ret, len;
tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
return ret;
}
data = tb[WMI_TLV_TAG_ARRAY_BYTE];
if (!data) {
kfree(tb);
return -EPROTO;
}
len = ath10k_wmi_tlv_len(data);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
trace_ath10k_wmi_diag(ar, data, len);
kfree(tb);
return 0;
}
/***********/
/* TLV ops */
/***********/
...@@ -268,6 +408,15 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
	case WMI_TLV_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
break;
case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
ath10k_wmi_tlv_event_diag_data(ar, skb);
break;
case WMI_TLV_DIAG_EVENTID:
ath10k_wmi_tlv_event_diag(ar, skb);
break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
...@@ -903,8 +1052,15 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
-	cfg->num_offload_peers = __cpu_to_le32(0);
-	cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
+		cfg->num_offload_peers = __cpu_to_le32(3);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+	} else {
+		cfg->num_offload_peers = __cpu_to_le32(0);
+		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+	}
	cfg->num_peer_keys = __cpu_to_le32(2);
	cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->ast_skid_limit = __cpu_to_le32(0x10);
...@@ -1356,6 +1512,173 @@ ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
	return skb;
}
static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
const struct wmi_sta_uapsd_auto_trig_arg *arg)
{
struct wmi_sta_uapsd_auto_trig_param *ac;
struct wmi_tlv *tlv;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
tlv->len = __cpu_to_le16(sizeof(*ac));
ac = (void *)tlv->value;
ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
ac->user_priority = __cpu_to_le32(arg->user_priority);
ac->service_interval = __cpu_to_le32(arg->service_interval);
ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
ac->delay_interval = __cpu_to_le32(arg->delay_interval);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
ac->wmm_ac, ac->user_priority, ac->service_interval,
ac->suspend_interval, ac->delay_interval);
return ptr + sizeof(*tlv) + sizeof(*ac);
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
const struct wmi_sta_uapsd_auto_trig_arg *args,
u32 num_ac)
{
struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
struct wmi_sta_uapsd_auto_trig_param *ac;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
size_t ac_tlv_len;
void *ptr;
int i;
ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) + ac_tlv_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->num_ac = __cpu_to_le32(num_ac);
ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(ac_tlv_len);
ac = (void *)tlv->value;
ptr += sizeof(*tlv);
for (i = 0; i < num_ac; i++)
ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
return skb;
}
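A hypothetical caller sketch for the generator above (values are illustrative, not taken from this patch; real callers would dispatch through the gen_vdev_sta_uapsd op):

/* Hypothetical example: build an auto-trigger command for one AC on
 * vdev 0. AC index and intervals are illustrative only.
 */
static struct sk_buff *example_gen_uapsd_vo(struct ath10k *ar,
					    const u8 peer_addr[ETH_ALEN])
{
	struct wmi_sta_uapsd_auto_trig_arg arg = {
		.wmm_ac = 3,		/* illustrative AC index (VO) */
		.user_priority = 7,
		.service_interval = 20,
		.suspend_interval = 20,
		.delay_interval = 0,
	};

	return ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(ar, 0, peer_addr,
						    &arg, 1);
}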
static void *ath10k_wmi_tlv_put_wmm(void *ptr,
const struct wmi_wmm_params_arg *arg)
{
struct wmi_wmm_params *wmm;
struct wmi_tlv *tlv;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
tlv->len = __cpu_to_le16(sizeof(*wmm));
wmm = (void *)tlv->value;
ath10k_wmi_set_wmm_param(wmm, arg);
return ptr + sizeof(*tlv) + sizeof(*wmm);
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg)
{
struct wmi_tlv_vdev_set_wmm_cmd *cmd;
struct wmi_wmm_params *wmm;
struct wmi_tlv *tlv;
struct sk_buff *skb;
size_t len;
void *ptr;
len = (sizeof(*tlv) + sizeof(*cmd)) +
(4 * (sizeof(*tlv) + sizeof(*wmm)));
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg)
{
struct wmi_tlv_sta_keepalive_cmd *cmd;
struct wmi_sta_keepalive_arp_resp *arp;
struct sk_buff *skb;
struct wmi_tlv *tlv;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) + sizeof(*arp);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
cmd->enabled = __cpu_to_le32(arg->enabled);
cmd->method = __cpu_to_le32(arg->method);
cmd->interval = __cpu_to_le32(arg->interval);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
tlv->len = __cpu_to_le16(sizeof(*arp));
arp = (void *)tlv->value;
arp->src_ip4_addr = arg->src_ip4_addr;
arp->dest_ip4_addr = arg->dest_ip4_addr;
ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d inverval %d\n",
arg->vdev_id, arg->enabled, arg->method, arg->interval);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
				  const u8 peer_addr[ETH_ALEN])
...@@ -1678,13 +2001,15 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k_vif *arvif)
+ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
+				 const void *bcn, size_t bcn_len,
+				 u32 bcn_paddr, bool dtim_zero,
+				 bool deliver_cab)
{
-	struct ath10k *ar = arvif->ar;
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
-	struct sk_buff *beacon = arvif->beacon;
	struct ieee80211_hdr *hdr;
	u16 fc;
...@@ -1692,48 +2017,33 @@ ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k_vif *arvif)
	if (!skb)
		return ERR_PTR(-ENOMEM);
-	hdr = (struct ieee80211_hdr *)beacon->data;
+	hdr = (struct ieee80211_hdr *)bcn;
	fc = le16_to_cpu(hdr->frame_control);
	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
-	cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
-	cmd->data_len = __cpu_to_le32(beacon->len);
-	cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
+	cmd->vdev_id = __cpu_to_le32(vdev_id);
+	cmd->data_len = __cpu_to_le32(bcn_len);
+	cmd->data_ptr = __cpu_to_le32(bcn_paddr);
	cmd->msdu_id = 0;
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;
-	if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
+	if (dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
-	if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
+	if (deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
	return skb;
}
-static void *ath10k_wmi_tlv_put_wmm(void *ptr,
-				    const struct wmi_wmm_params_arg *arg)
-{
-	struct wmi_wmm_params *wmm;
-	struct wmi_tlv *tlv;
-	tlv = ptr;
-	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
-	tlv->len = __cpu_to_le16(sizeof(*wmm));
-	wmm = (void *)tlv->value;
-	ath10k_wmi_pdev_set_wmm_param(wmm, arg);
-	return ptr + sizeof(*tlv) + sizeof(*wmm);
-}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
-				   const struct wmi_pdev_set_wmm_params_arg *arg)
+				   const struct wmi_wmm_params_all_arg *arg)
{
	struct wmi_tlv_pdev_set_wmm_cmd *cmd;
	struct wmi_wmm_params *wmm;
...@@ -1816,8 +2126,8 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable)
-{
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+				 u32 log_level) {
	struct wmi_tlv_dbglog_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
...@@ -1922,6 +2232,159 @@ ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
	return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
u32 tim_ie_offset, struct sk_buff *bcn,
u32 prb_caps, u32 prb_erp, void *prb_ies,
size_t prb_ies_len)
{
struct wmi_tlv_bcn_tmpl_cmd *cmd;
struct wmi_tlv_bcn_prb_info *info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
if (WARN_ON(prb_ies_len > 0 && !prb_ies))
return ERR_PTR(-EINVAL);
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) + sizeof(*info) + prb_ies_len +
sizeof(*tlv) + roundup(bcn->len, 4);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
cmd->buf_len = __cpu_to_le32(bcn->len);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
/* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
 * but then it would be impossible to pass the original IE length.
 * This chunk is not used yet, so if setting the probe response
 * template causes beaconing problems or firmware crashes, look here.
 */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
info = (void *)tlv->value;
info->caps = __cpu_to_le32(prb_caps);
info->erp = __cpu_to_le32(prb_erp);
memcpy(info->ies, prb_ies, prb_ies_len);
ptr += sizeof(*tlv);
ptr += sizeof(*info);
ptr += prb_ies_len;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
memcpy(tlv->value, bcn->data, bcn->len);
/* FIXME: Adjust TSF? */
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
vdev_id);
return skb;
}
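For reference, the buffer length computed above decomposes per TLV; a sketch of the same math (helper name is illustrative; assumes the 4-byte struct wmi_tlv header used throughout this file):

/* Illustrative sizing helper mirroring the len computation above:
 * one TLV header per chunk, the beacon body padded to 4 bytes.
 */
static size_t example_bcn_tmpl_len(size_t bcn_len, size_t prb_ies_len)
{
	return sizeof(struct wmi_tlv) + sizeof(struct wmi_tlv_bcn_tmpl_cmd) +
	       sizeof(struct wmi_tlv) + sizeof(struct wmi_tlv_bcn_prb_info) +
	       prb_ies_len +
	       sizeof(struct wmi_tlv) + roundup(bcn_len, 4);
}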
static struct sk_buff *
ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
struct sk_buff *prb)
{
struct wmi_tlv_prb_tmpl_cmd *cmd;
struct wmi_tlv_bcn_prb_info *info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) + sizeof(*info) +
sizeof(*tlv) + roundup(prb->len, 4);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->buf_len = __cpu_to_le32(prb->len);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
tlv->len = __cpu_to_le16(sizeof(*info));
info = (void *)tlv->value;
info->caps = 0;
info->erp = 0;
ptr += sizeof(*tlv);
ptr += sizeof(*info);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
tlv->len = __cpu_to_le16(roundup(prb->len, 4));
memcpy(tlv->value, prb->data, prb->len);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
vdev_id);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
const u8 *p2p_ie)
{
struct wmi_tlv_p2p_go_bcn_ie *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
ptr += sizeof(*tlv);
ptr += roundup(p2p_ie[1] + 2, 4);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
vdev_id);
return skb;
}
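The p2p_ie[1] + 2 arithmetic is standard 802.11 information-element framing: one byte of element ID, one byte of length, then the payload. A trivial sketch (hypothetical helper, not from this patch):

/* Illustrative only: total size of one 802.11 information element,
 * mirroring the p2p_ie[1] + 2 expressions above.
 */
static size_t example_ie_total_len(const u8 *ie)
{
	return 2 + ie[1]; /* ID byte + length byte + payload */
}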
/****************/
/* TLV mappings */
/****************/
...@@ -2045,6 +2508,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
};
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
...@@ -2188,6 +2652,7 @@ static const struct wmi_ops wmi_tlv_ops = {
	.gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
	.gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
	.gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
.gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
	.gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
	.gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
...@@ -2207,6 +2672,15 @@ static const struct wmi_ops wmi_tlv_ops = {
	.gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
	/* .gen_pdev_set_quiet_mode not implemented */
	/* .gen_pdev_get_temperature not implemented */
/* .gen_addba_clear_resp not implemented */
/* .gen_addba_send not implemented */
/* .gen_addba_set_resp not implemented */
/* .gen_delba_send not implemented */
.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
.gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
.gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
.gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
.gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
};
/************/
...
...@@ -1302,6 +1302,10 @@ struct wmi_tlv_pdev_set_wmm_cmd {
	__le32 dg_type; /* no idea.. */
} __packed;
struct wmi_tlv_vdev_set_wmm_cmd {
__le32 vdev_id;
} __packed;
struct wmi_tlv_phyerr_ev {
	__le32 num_phyerrs;
	__le32 tsf_l32;
...@@ -1375,6 +1379,66 @@ struct wmi_tlv_pktlog_disable {
	__le32 reserved;
} __packed;
enum wmi_tlv_bcn_tx_status {
WMI_TLV_BCN_TX_STATUS_OK,
WMI_TLV_BCN_TX_STATUS_XRETRY,
WMI_TLV_BCN_TX_STATUS_DROP,
WMI_TLV_BCN_TX_STATUS_FILTERED,
};
struct wmi_tlv_bcn_tx_status_ev {
__le32 vdev_id;
__le32 tx_status;
} __packed;
struct wmi_tlv_bcn_prb_info {
__le32 caps;
__le32 erp;
u8 ies[0];
} __packed;
struct wmi_tlv_bcn_tmpl_cmd {
__le32 vdev_id;
__le32 tim_ie_offset;
__le32 buf_len;
} __packed;
struct wmi_tlv_prb_tmpl_cmd {
__le32 vdev_id;
__le32 buf_len;
} __packed;
struct wmi_tlv_p2p_go_bcn_ie {
__le32 vdev_id;
__le32 ie_len;
} __packed;
enum wmi_tlv_diag_item_type {
WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
};
struct wmi_tlv_diag_item {
u8 type;
u8 reserved;
__le16 len;
__le32 timestamp;
__le32 code;
u8 payload[0];
} __packed;
struct wmi_tlv_diag_data_ev {
__le32 num_items;
} __packed;
struct wmi_tlv_sta_keepalive_cmd {
__le32 vdev_id;
__le32 enabled;
__le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
__le32 interval; /* in seconds */
} __packed;
void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif
...@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
	regval = ioread32(reg);
	iowrite32(regval | val, reg);
	regval = ioread32(reg);
-	usleep_range(100, 150);
+	udelay(100); /* NB: should be atomic */
	/* Bring BB/MAC out of reset */
	iowrite32(regval & ~val, reg);
...
...@@ -1203,24 +1203,41 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{
	int offset[8] = {0}, total = 0, test;
-	int agc_out, i;
+	int agc_out, i, peak_detect_threshold;
+
+	if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
+		peak_detect_threshold = 8;
+	else
+		peak_detect_threshold = 0;
+
+	/*
+	 * Turn off LNA/SW.
+	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
		      AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
		      AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
-	if (is_2g)
-		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
-			      AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
-	else
-		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
-			      AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
+	if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9330_11(ah)) {
+		if (is_2g)
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+				      AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
+		else
+			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
+				      AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
+	}
+
+	/*
+	 * Turn off RXON.
+	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
		      AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
		      AR_PHY_65NM_RXTX2_RXON, 0x0);
+
+	/*
+	 * Turn on AGC for cal.
+	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
...@@ -1228,16 +1245,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
-	if (AR_SREV_9330_11(ah)) {
+	if (AR_SREV_9330_11(ah))
		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
			      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
-	} else {
+
+	if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
		if (is_2g)
			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-				      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
+				      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
+				      peak_detect_threshold);
		else
			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
-				      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
+				      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
+				      peak_detect_threshold);
	}
	for (i = 6; i > 0; i--) {
...@@ -1266,10 +1286,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
+
+	/*
+	 * Turn on LNA.
+	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
		      AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
+
+	/*
+	 * Turn off RXON.
+	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
		      AR_PHY_65NM_RXTX2_RXON_OVR, 0);
+
+	/*
+	 * Turn off peak detect calibration.
+	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
}
...@@ -1611,8 +1640,14 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
skip_tx_iqcal:
	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
-		if (AR_SREV_9330_11(ah))
-			ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
+		if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
+			for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+				if (!(ah->rxchainmask & (1 << i)))
+					continue;
+				ar9003_hw_manual_peak_cal(ah, i,
+							  IS_CHAN_2GHZ(chan));
+			}
+		}
		/*
		 * For non-AR9550 chips, we just trigger AGC calibration
...
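The new per-chain loop gates each calibration pass on rxchainmask. A standalone sketch of the same pattern (illustrative only; AR9300_MAX_CHAINS is 3, and the loop body is elided):

/* Illustrative only: visit each RX chain whose bit is set in the
 * mask, as the calibration loop above does.
 */
static void example_for_each_rx_chain(u32 rxchainmask)
{
	int i;

	for (i = 0; i < 3; i++) { /* AR9300_MAX_CHAINS == 3 */
		if (!(rxchainmask & (1 << i)))
			continue;
		/* per-chain work (e.g. manual peak cal) goes here */
	}
}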
...@@ -358,7 +358,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-	{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+	{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
	{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
...@@ -378,7 +378,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
	{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
-	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
	{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
	{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
	{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
...
...@@ -582,7 +582,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
-	if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
+	if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
		relay_close(spec_priv->rfs_chan_spec_scan);
		spec_priv->rfs_chan_spec_scan = NULL;
	}
...