Commit ebc7a496 authored by Kalle Valo

Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for v5.19. Major changes:

ath11k

* Wake-on-WLAN support for QCA6390 and WCN6855

* device recovery (firmware restart) support for QCA6390 and WCN6855

wcn36xx

* support for transmit rate reporting to user space
...@@ -728,20 +728,17 @@ static int ath10k_ahb_probe(struct platform_device *pdev) ...@@ -728,20 +728,17 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
struct ath10k *ar; struct ath10k *ar;
struct ath10k_ahb *ar_ahb; struct ath10k_ahb *ar_ahb;
struct ath10k_pci *ar_pci; struct ath10k_pci *ar_pci;
const struct of_device_id *of_id;
enum ath10k_hw_rev hw_rev; enum ath10k_hw_rev hw_rev;
size_t size; size_t size;
int ret; int ret;
struct ath10k_bus_params bus_params = {}; struct ath10k_bus_params bus_params = {};
of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev); hw_rev = (enum ath10k_hw_rev)of_device_get_match_data(&pdev->dev);
if (!of_id) { if (!hw_rev) {
dev_err(&pdev->dev, "failed to find matching device tree id\n"); dev_err(&pdev->dev, "OF data missing\n");
return -EINVAL; return -EINVAL;
} }
hw_rev = (enum ath10k_hw_rev)of_id->data;
size = sizeof(*ar_pci) + sizeof(*ar_ahb); size = sizeof(*ar_pci) + sizeof(*ar_ahb);
ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB, ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
hw_rev, &ath10k_ahb_hif_ops); hw_rev, &ath10k_ahb_hif_ops);
......
...@@ -94,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -94,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = true, .tx_stats_over_pktlog = true,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA988X_HW_2_0_VERSION, .id = QCA988X_HW_2_0_VERSION,
...@@ -131,6 +132,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -131,6 +132,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = true, .tx_stats_over_pktlog = true,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA9887_HW_1_0_VERSION, .id = QCA9887_HW_1_0_VERSION,
...@@ -169,6 +171,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -169,6 +171,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA6174_HW_3_2_VERSION, .id = QCA6174_HW_3_2_VERSION,
...@@ -202,6 +205,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -202,6 +205,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.bmi_large_size_download = true, .bmi_large_size_download = true,
.supports_peer_stats_info = true, .supports_peer_stats_info = true,
.dynamic_sar_support = true, .dynamic_sar_support = true,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA6174_HW_2_1_VERSION, .id = QCA6174_HW_2_1_VERSION,
...@@ -239,6 +243,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -239,6 +243,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA6174_HW_2_1_VERSION, .id = QCA6174_HW_2_1_VERSION,
...@@ -276,6 +281,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -276,6 +281,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA6174_HW_3_0_VERSION, .id = QCA6174_HW_3_0_VERSION,
...@@ -313,6 +319,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -313,6 +319,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA6174_HW_3_2_VERSION, .id = QCA6174_HW_3_2_VERSION,
...@@ -354,6 +361,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -354,6 +361,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.supports_peer_stats_info = true, .supports_peer_stats_info = true,
.dynamic_sar_support = true, .dynamic_sar_support = true,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA99X0_HW_2_0_DEV_VERSION, .id = QCA99X0_HW_2_0_DEV_VERSION,
...@@ -397,6 +405,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -397,6 +405,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA9984_HW_1_0_DEV_VERSION, .id = QCA9984_HW_1_0_DEV_VERSION,
...@@ -447,6 +456,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -447,6 +456,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA9888_HW_2_0_DEV_VERSION, .id = QCA9888_HW_2_0_DEV_VERSION,
...@@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -494,6 +504,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA9377_HW_1_0_DEV_VERSION, .id = QCA9377_HW_1_0_DEV_VERSION,
...@@ -531,6 +542,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -531,6 +542,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA9377_HW_1_1_DEV_VERSION, .id = QCA9377_HW_1_1_DEV_VERSION,
...@@ -570,6 +582,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -570,6 +582,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA9377_HW_1_1_DEV_VERSION, .id = QCA9377_HW_1_1_DEV_VERSION,
...@@ -600,6 +613,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -600,6 +613,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin_workaround = true, .uart_pin_workaround = true,
.credit_size_workaround = true, .credit_size_workaround = true,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = QCA4019_HW_1_0_DEV_VERSION, .id = QCA4019_HW_1_0_DEV_VERSION,
...@@ -644,6 +658,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -644,6 +658,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = false, .dynamic_sar_support = false,
.hw_restart_disconnect = false,
}, },
{ {
.id = WCN3990_HW_1_0_DEV_VERSION, .id = WCN3990_HW_1_0_DEV_VERSION,
...@@ -674,6 +689,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -674,6 +689,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.credit_size_workaround = false, .credit_size_workaround = false,
.tx_stats_over_pktlog = false, .tx_stats_over_pktlog = false,
.dynamic_sar_support = true, .dynamic_sar_support = true,
.hw_restart_disconnect = true,
}, },
}; };
...@@ -2442,6 +2458,7 @@ EXPORT_SYMBOL(ath10k_core_napi_sync_disable); ...@@ -2442,6 +2458,7 @@ EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
static void ath10k_core_restart(struct work_struct *work) static void ath10k_core_restart(struct work_struct *work)
{ {
struct ath10k *ar = container_of(work, struct ath10k, restart_work); struct ath10k *ar = container_of(work, struct ath10k, restart_work);
struct ath10k_vif *arvif;
int ret; int ret;
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
...@@ -2480,6 +2497,14 @@ static void ath10k_core_restart(struct work_struct *work) ...@@ -2480,6 +2497,14 @@ static void ath10k_core_restart(struct work_struct *work)
ar->state = ATH10K_STATE_RESTARTING; ar->state = ATH10K_STATE_RESTARTING;
ath10k_halt(ar); ath10k_halt(ar);
ath10k_scan_finish(ar); ath10k_scan_finish(ar);
if (ar->hw_params.hw_restart_disconnect) {
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->is_up &&
arvif->vdev_type == WMI_VDEV_TYPE_STA)
ieee80211_hw_restart_disconnect(arvif->vif);
}
}
ieee80211_restart_hw(ar->hw); ieee80211_restart_hw(ar->hw);
break; break;
case ATH10K_STATE_OFF: case ATH10K_STATE_OFF:
......
...@@ -633,6 +633,8 @@ struct ath10k_hw_params { ...@@ -633,6 +633,8 @@ struct ath10k_hw_params {
bool supports_peer_stats_info; bool supports_peer_stats_info;
bool dynamic_sar_support; bool dynamic_sar_support;
bool hw_restart_disconnect;
}; };
struct htt_resp; struct htt_resp;
......
...@@ -345,6 +345,12 @@ static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) ...@@ -345,6 +345,12 @@ static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb)
ep->ep_ops.ep_rx_complete(ar, skb); ep->ep_ops.ep_rx_complete(ar, skb);
/* The RX complete handler now owns the skb... */ /* The RX complete handler now owns the skb... */
if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
local_bh_disable();
napi_schedule(&ar->napi);
local_bh_enable();
}
return; return;
out_free_skb: out_free_skb:
...@@ -387,6 +393,7 @@ static int ath10k_usb_hif_start(struct ath10k *ar) ...@@ -387,6 +393,7 @@ static int ath10k_usb_hif_start(struct ath10k *ar)
int i; int i;
struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
ath10k_core_napi_enable(ar);
ath10k_usb_start_recv_pipes(ar); ath10k_usb_start_recv_pipes(ar);
/* set the TX resource avail threshold for each TX pipe */ /* set the TX resource avail threshold for each TX pipe */
...@@ -462,6 +469,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, ...@@ -462,6 +469,7 @@ static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
static void ath10k_usb_hif_stop(struct ath10k *ar) static void ath10k_usb_hif_stop(struct ath10k *ar)
{ {
ath10k_usb_flush_all(ar); ath10k_usb_flush_all(ar);
ath10k_core_napi_sync_disable(ar);
} }
static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id)
...@@ -966,6 +974,20 @@ static int ath10k_usb_create(struct ath10k *ar, ...@@ -966,6 +974,20 @@ static int ath10k_usb_create(struct ath10k *ar,
return ret; return ret;
} }
static int ath10k_usb_napi_poll(struct napi_struct *ctx, int budget)
{
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
int done;
done = ath10k_htt_rx_hl_indication(ar, budget);
ath10k_dbg(ar, ATH10K_DBG_USB, "napi poll: done: %d, budget:%d\n", done, budget);
if (done < budget)
napi_complete_done(ctx, done);
return done;
}
/* ath10k usb driver registered functions */ /* ath10k usb driver registered functions */
static int ath10k_usb_probe(struct usb_interface *interface, static int ath10k_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id) const struct usb_device_id *id)
...@@ -992,6 +1014,9 @@ static int ath10k_usb_probe(struct usb_interface *interface, ...@@ -992,6 +1014,9 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return -ENOMEM; return -ENOMEM;
} }
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll,
ATH10K_NAPI_BUDGET);
usb_get_dev(dev); usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor); vendor_id = le16_to_cpu(dev->descriptor.idVendor);
product_id = le16_to_cpu(dev->descriptor.idProduct); product_id = le16_to_cpu(dev->descriptor.idProduct);
...@@ -1013,6 +1038,7 @@ static int ath10k_usb_probe(struct usb_interface *interface, ...@@ -1013,6 +1038,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
bus_params.dev_type = ATH10K_DEV_TYPE_HL; bus_params.dev_type = ATH10K_DEV_TYPE_HL;
/* TODO: don't know yet how to get chip_id with USB */ /* TODO: don't know yet how to get chip_id with USB */
bus_params.chip_id = 0; bus_params.chip_id = 0;
bus_params.hl_msdu_ids = true;
ret = ath10k_core_register(ar, &bus_params); ret = ath10k_core_register(ar, &bus_params);
if (ret) { if (ret) {
ath10k_warn(ar, "failed to register driver core: %d\n", ret); ath10k_warn(ar, "failed to register driver core: %d\n", ret);
...@@ -1044,6 +1070,7 @@ static void ath10k_usb_remove(struct usb_interface *interface) ...@@ -1044,6 +1070,7 @@ static void ath10k_usb_remove(struct usb_interface *interface)
return; return;
ath10k_core_unregister(ar_usb->ar); ath10k_core_unregister(ar_usb->ar);
netif_napi_del(&ar_usb->ar->napi);
ath10k_usb_destroy(ar_usb->ar); ath10k_usb_destroy(ar_usb->ar);
usb_put_dev(interface_to_usbdev(interface)); usb_put_dev(interface_to_usbdev(interface));
ath10k_core_destroy(ar_usb->ar); ath10k_core_destroy(ar_usb->ar);
......
...@@ -16,20 +16,20 @@ ath11k-y += core.o \ ...@@ -16,20 +16,20 @@ ath11k-y += core.o \
ce.o \ ce.o \
peer.o \ peer.o \
dbring.o \ dbring.o \
hw.o \ hw.o
wow.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
ath11k-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
ath11k_ahb-y += ahb.o ath11k_ahb-y += ahb.o
obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o
ath11k_pci-y += mhi.o pci.o ath11k_pci-y += mhi.o pci.o pcic.o
# for tracing framework to find trace.h # for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src) CFLAGS_trace.o := -I$(src)
// SPDX-License-Identifier: BSD-3-Clause-Clear // SPDX-License-Identifier: BSD-3-Clause-Clear
/* /*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#include <linux/module.h> #include <linux/module.h>
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/remoteproc.h> #include <linux/remoteproc.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/of.h> #include <linux/of.h>
#include "core.h" #include "core.h"
#include "dp_tx.h" #include "dp_tx.h"
#include "dp_rx.h" #include "dp_rx.h"
...@@ -95,11 +96,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { ...@@ -95,11 +96,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074, .hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false, .supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true, .alloc_cacheable_memory = true,
.wakeup_mhi = false,
.supports_rssi_stats = false, .supports_rssi_stats = false,
.fw_wmi_diag_event = false, .fw_wmi_diag_event = false,
.current_cc_support = false, .current_cc_support = false,
.dbr_debug_support = true, .dbr_debug_support = true,
.global_reset = false,
}, },
{ {
.hw_rev = ATH11K_HW_IPQ6018_HW10, .hw_rev = ATH11K_HW_IPQ6018_HW10,
...@@ -161,11 +162,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { ...@@ -161,11 +162,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074, .hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = false, .supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = true, .alloc_cacheable_memory = true,
.wakeup_mhi = false,
.supports_rssi_stats = false, .supports_rssi_stats = false,
.fw_wmi_diag_event = false, .fw_wmi_diag_event = false,
.current_cc_support = false, .current_cc_support = false,
.dbr_debug_support = true, .dbr_debug_support = true,
.global_reset = false,
}, },
{ {
.name = "qca6390 hw2.0", .name = "qca6390 hw2.0",
...@@ -219,18 +220,18 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { ...@@ -219,18 +220,18 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.num_peers = 512, .num_peers = 512,
.supports_suspend = true, .supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074), .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.supports_regdb = true, .supports_regdb = false,
.fix_l1ss = true, .fix_l1ss = true,
.credit_flow = true, .credit_flow = true,
.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390, .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
.hal_params = &ath11k_hw_hal_params_qca6390, .hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false, .supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false, .alloc_cacheable_memory = false,
.wakeup_mhi = true,
.supports_rssi_stats = true, .supports_rssi_stats = true,
.fw_wmi_diag_event = true, .fw_wmi_diag_event = true,
.current_cc_support = true, .current_cc_support = true,
.dbr_debug_support = false, .dbr_debug_support = false,
.global_reset = true,
}, },
{ {
.name = "qcn9074 hw1.0", .name = "qcn9074 hw1.0",
...@@ -291,11 +292,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { ...@@ -291,11 +292,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_ipq8074, .hal_params = &ath11k_hw_hal_params_ipq8074,
.supports_dynamic_smps_6ghz = true, .supports_dynamic_smps_6ghz = true,
.alloc_cacheable_memory = true, .alloc_cacheable_memory = true,
.wakeup_mhi = false,
.supports_rssi_stats = false, .supports_rssi_stats = false,
.fw_wmi_diag_event = false, .fw_wmi_diag_event = false,
.current_cc_support = false, .current_cc_support = false,
.dbr_debug_support = true, .dbr_debug_support = true,
.global_reset = false,
}, },
{ {
.name = "wcn6855 hw2.0", .name = "wcn6855 hw2.0",
...@@ -356,11 +357,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { ...@@ -356,11 +357,11 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_qca6390, .hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false, .supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false, .alloc_cacheable_memory = false,
.wakeup_mhi = true,
.supports_rssi_stats = true, .supports_rssi_stats = true,
.fw_wmi_diag_event = true, .fw_wmi_diag_event = true,
.current_cc_support = true, .current_cc_support = true,
.dbr_debug_support = false, .dbr_debug_support = false,
.global_reset = true,
}, },
{ {
.name = "wcn6855 hw2.1", .name = "wcn6855 hw2.1",
...@@ -420,25 +421,37 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { ...@@ -420,25 +421,37 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.hal_params = &ath11k_hw_hal_params_qca6390, .hal_params = &ath11k_hw_hal_params_qca6390,
.supports_dynamic_smps_6ghz = false, .supports_dynamic_smps_6ghz = false,
.alloc_cacheable_memory = false, .alloc_cacheable_memory = false,
.wakeup_mhi = true,
.supports_rssi_stats = true, .supports_rssi_stats = true,
.fw_wmi_diag_event = true, .fw_wmi_diag_event = true,
.current_cc_support = true, .current_cc_support = true,
.dbr_debug_support = false, .dbr_debug_support = false,
.global_reset = true,
}, },
}; };
static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
{
WARN_ON(!ab->hw_params.single_pdev_only);
return &ab->pdevs[0];
}
int ath11k_core_suspend(struct ath11k_base *ab) int ath11k_core_suspend(struct ath11k_base *ab)
{ {
int ret; int ret;
struct ath11k_pdev *pdev;
struct ath11k *ar;
if (!ab->hw_params.supports_suspend) if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* TODO: there can frames in queues so for now add delay as a hack. /* so far single_pdev_only chips have supports_suspend as true
* Need to implement to handle and remove this delay. * and only the first pdev is valid.
*/ */
msleep(500); pdev = ath11k_core_get_single_pdev(ab);
ar = pdev->ar;
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
ret = ath11k_dp_rx_pktlog_stop(ab, true); ret = ath11k_dp_rx_pktlog_stop(ab, true);
if (ret) { if (ret) {
...@@ -447,6 +460,12 @@ int ath11k_core_suspend(struct ath11k_base *ab) ...@@ -447,6 +460,12 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret; return ret;
} }
ret = ath11k_mac_wait_tx_complete(ar);
if (ret) {
ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
return ret;
}
ret = ath11k_wow_enable(ab); ret = ath11k_wow_enable(ab);
if (ret) { if (ret) {
ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret); ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
...@@ -479,10 +498,20 @@ EXPORT_SYMBOL(ath11k_core_suspend); ...@@ -479,10 +498,20 @@ EXPORT_SYMBOL(ath11k_core_suspend);
int ath11k_core_resume(struct ath11k_base *ab) int ath11k_core_resume(struct ath11k_base *ab)
{ {
int ret; int ret;
struct ath11k_pdev *pdev;
struct ath11k *ar;
if (!ab->hw_params.supports_suspend) if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP; return -EOPNOTSUPP;
/* so far single_pdev_only chips have supports_suspend as true
* and only the first pdev is valid.
*/
pdev = ath11k_core_get_single_pdev(ab);
ar = pdev->ar;
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
ret = ath11k_hif_resume(ab); ret = ath11k_hif_resume(ab);
if (ret) { if (ret) {
ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret); ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
...@@ -509,6 +538,75 @@ int ath11k_core_resume(struct ath11k_base *ab) ...@@ -509,6 +538,75 @@ int ath11k_core_resume(struct ath11k_base *ab)
} }
EXPORT_SYMBOL(ath11k_core_resume); EXPORT_SYMBOL(ath11k_core_resume);
static void ath11k_core_check_bdfext(const struct dmi_header *hdr, void *data)
{
struct ath11k_base *ab = data;
const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC;
struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr;
ssize_t copied;
size_t len;
int i;
if (ab->qmi.target.bdf_ext[0] != '\0')
return;
if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE)
return;
if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"wrong smbios bdf ext type length (%d).\n",
hdr->length);
return;
}
if (!smbios->bdf_enabled) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n");
return;
}
/* Only one string exists (per spec) */
if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant magic does not match.\n");
return;
}
len = min_t(size_t,
strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
for (i = 0; i < len; i++) {
if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant name contains non ascii chars.\n");
return;
}
}
/* Copy extension name without magic prefix */
copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
sizeof(ab->qmi.target.bdf_ext));
if (copied < 0) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"bdf variant string is longer than the buffer can accommodate\n");
return;
}
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"found and validated bdf variant smbios_type 0x%x bdf %s\n",
ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
int ath11k_core_check_smbios(struct ath11k_base *ab)
{
ab->qmi.target.bdf_ext[0] = '\0';
dmi_walk(ath11k_core_check_bdfext, ab);
if (ab->qmi.target.bdf_ext[0] == '\0')
return -ENODATA;
return 0;
}
int ath11k_core_check_dt(struct ath11k_base *ab) int ath11k_core_check_dt(struct ath11k_base *ab)
{ {
size_t max_len = sizeof(ab->qmi.target.bdf_ext); size_t max_len = sizeof(ab->qmi.target.bdf_ext);
...@@ -532,13 +630,13 @@ int ath11k_core_check_dt(struct ath11k_base *ab) ...@@ -532,13 +630,13 @@ int ath11k_core_check_dt(struct ath11k_base *ab)
return 0; return 0;
} }
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len) size_t name_len, bool with_variant)
{ {
/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
if (ab->qmi.target.bdf_ext[0] != '\0') if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
scnprintf(variant, sizeof(variant), ",variant=%s", scnprintf(variant, sizeof(variant), ",variant=%s",
ab->qmi.target.bdf_ext); ab->qmi.target.bdf_ext);
...@@ -568,6 +666,18 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name, ...@@ -568,6 +666,18 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
return 0; return 0;
} }
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
return __ath11k_core_create_board_name(ab, name, name_len, true);
}
static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
return __ath11k_core_create_board_name(ab, name, name_len, false);
}
const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab, const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
const char *file) const char *file)
{ {
...@@ -602,7 +712,9 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, ...@@ -602,7 +712,9 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
struct ath11k_board_data *bd, struct ath11k_board_data *bd,
const void *buf, size_t buf_len, const void *buf, size_t buf_len,
const char *boardname, const char *boardname,
int bd_ie_type) int ie_id,
int name_id,
int data_id)
{ {
const struct ath11k_fw_ie *hdr; const struct ath11k_fw_ie *hdr;
bool name_match_found; bool name_match_found;
...@@ -612,7 +724,7 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, ...@@ -612,7 +724,7 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
name_match_found = false; name_match_found = false;
/* go through ATH11K_BD_IE_BOARD_ elements */ /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
while (buf_len > sizeof(struct ath11k_fw_ie)) { while (buf_len > sizeof(struct ath11k_fw_ie)) {
hdr = buf; hdr = buf;
board_ie_id = le32_to_cpu(hdr->id); board_ie_id = le32_to_cpu(hdr->id);
...@@ -623,48 +735,50 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, ...@@ -623,48 +735,50 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
buf += sizeof(*hdr); buf += sizeof(*hdr);
if (buf_len < ALIGN(board_ie_len, 4)) { if (buf_len < ALIGN(board_ie_len, 4)) {
ath11k_err(ab, "invalid ATH11K_BD_IE_BOARD length: %zu < %zu\n", ath11k_err(ab, "invalid %s length: %zu < %zu\n",
ath11k_bd_ie_type_str(ie_id),
buf_len, ALIGN(board_ie_len, 4)); buf_len, ALIGN(board_ie_len, 4));
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
switch (board_ie_id) { if (board_ie_id == name_id) {
case ATH11K_BD_IE_BOARD_NAME:
ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "", ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
board_ie_data, board_ie_len); board_ie_data, board_ie_len);
if (board_ie_len != strlen(boardname)) if (board_ie_len != strlen(boardname))
break; goto next;
ret = memcmp(board_ie_data, boardname, strlen(boardname)); ret = memcmp(board_ie_data, boardname, strlen(boardname));
if (ret) if (ret)
break; goto next;
name_match_found = true; name_match_found = true;
ath11k_dbg(ab, ATH11K_DBG_BOOT, ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot found match for name '%s'", "boot found match %s for name '%s'",
ath11k_bd_ie_type_str(ie_id),
boardname); boardname);
break; } else if (board_ie_id == data_id) {
case ATH11K_BD_IE_BOARD_DATA:
if (!name_match_found) if (!name_match_found)
/* no match found */ /* no match found */
break; goto next;
ath11k_dbg(ab, ATH11K_DBG_BOOT, ath11k_dbg(ab, ATH11K_DBG_BOOT,
"boot found board data for '%s'", boardname); "boot found %s for '%s'",
ath11k_bd_ie_type_str(ie_id),
boardname);
bd->data = board_ie_data; bd->data = board_ie_data;
bd->len = board_ie_len; bd->len = board_ie_len;
ret = 0; ret = 0;
goto out; goto out;
default: } else {
ath11k_warn(ab, "unknown ATH11K_BD_IE_BOARD found: %d\n", ath11k_warn(ab, "unknown %s id found: %d\n",
ath11k_bd_ie_type_str(ie_id),
board_ie_id); board_ie_id);
break;
} }
next:
/* jump over the padding */ /* jump over the padding */
board_ie_len = ALIGN(board_ie_len, 4); board_ie_len = ALIGN(board_ie_len, 4);
...@@ -681,7 +795,10 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab, ...@@ -681,7 +795,10 @@ static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
struct ath11k_board_data *bd, struct ath11k_board_data *bd,
const char *boardname) const char *boardname,
int ie_id_match,
int name_id,
int data_id)
{ {
size_t len, magic_len; size_t len, magic_len;
const u8 *data; const u8 *data;
...@@ -746,22 +863,23 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, ...@@ -746,22 +863,23 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
goto err; goto err;
} }
switch (ie_id) { if (ie_id == ie_id_match) {
case ATH11K_BD_IE_BOARD:
ret = ath11k_core_parse_bd_ie_board(ab, bd, data, ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
ie_len, ie_len,
boardname, boardname,
ATH11K_BD_IE_BOARD); ie_id_match,
name_id,
data_id);
if (ret == -ENOENT) if (ret == -ENOENT)
/* no match found, continue */ /* no match found, continue */
break; goto next;
else if (ret) else if (ret)
/* there was an error, bail out */ /* there was an error, bail out */
goto err; goto err;
/* either found or error, so stop searching */ /* either found or error, so stop searching */
goto out; goto out;
} }
next:
/* jump over the padding */ /* jump over the padding */
ie_len = ALIGN(ie_len, 4); ie_len = ALIGN(ie_len, 4);
...@@ -771,8 +889,9 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab, ...@@ -771,8 +889,9 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
out: out:
if (!bd->data || !bd->len) { if (!bd->data || !bd->len) {
ath11k_err(ab, ath11k_dbg(ab, ATH11K_DBG_BOOT,
"failed to fetch board data for %s from %s\n", "failed to fetch %s for %s from %s\n",
ath11k_bd_ie_type_str(ie_id_match),
boardname, filepath); boardname, filepath);
ret = -ENODATA; ret = -ENODATA;
goto err; goto err;
...@@ -803,24 +922,52 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, ...@@ -803,24 +922,52 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
#define BOARD_NAME_SIZE 200 #define BOARD_NAME_SIZE 200
int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
{ {
char boardname[BOARD_NAME_SIZE]; char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
char *filename, filepath[100];
int ret; int ret;
ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE); filename = ATH11K_BOARD_API2_FILE;
ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname));
if (ret) { if (ret) {
ath11k_err(ab, "failed to create board name: %d", ret); ath11k_err(ab, "failed to create board name: %d", ret);
return ret; return ret;
} }
ab->bd_api = 2; ab->bd_api = 2;
ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname); ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
ATH11K_BD_IE_BOARD,
ATH11K_BD_IE_BOARD_NAME,
ATH11K_BD_IE_BOARD_DATA);
if (!ret)
goto success;
ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname,
sizeof(fallback_boardname));
if (ret) {
ath11k_err(ab, "failed to create fallback board name: %d", ret);
return ret;
}
ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
ATH11K_BD_IE_BOARD,
ATH11K_BD_IE_BOARD_NAME,
ATH11K_BD_IE_BOARD_DATA);
if (!ret) if (!ret)
goto success; goto success;
ab->bd_api = 1; ab->bd_api = 1;
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE); ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE);
if (ret) { if (ret) {
ath11k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n", ath11k_core_create_firmware_path(ab, filename,
filepath, sizeof(filepath));
ath11k_err(ab, "failed to fetch board data for %s from %s\n",
boardname, filepath);
if (memcmp(boardname, fallback_boardname, strlen(boardname)))
ath11k_err(ab, "failed to fetch board data for %s from %s\n",
fallback_boardname, filepath);
ath11k_err(ab, "failed to fetch board.bin from %s\n",
ab->hw_params.fw.dir); ab->hw_params.fw.dir);
return ret; return ret;
} }
...@@ -832,13 +979,32 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd) ...@@ -832,13 +979,32 @@ int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd) int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
{ {
char boardname[BOARD_NAME_SIZE];
int ret; int ret;
ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"failed to create board name for regdb: %d", ret);
goto exit;
}
ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
ATH11K_BD_IE_REGDB,
ATH11K_BD_IE_REGDB_NAME,
ATH11K_BD_IE_REGDB_DATA);
if (!ret)
goto exit;
ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME); ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
if (ret) if (ret)
ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n", ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir); ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir);
exit:
if (!ret)
ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n");
return ret; return ret;
} }
...@@ -1261,6 +1427,7 @@ static void ath11k_update_11d(struct work_struct *work) ...@@ -1261,6 +1427,7 @@ static void ath11k_update_11d(struct work_struct *work)
pdev = &ab->pdevs[i]; pdev = &ab->pdevs[i];
ar = pdev->ar; ar = pdev->ar;
memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret) if (ret)
ath11k_warn(ar->ab, ath11k_warn(ar->ab,
...@@ -1269,12 +1436,11 @@ static void ath11k_update_11d(struct work_struct *work) ...@@ -1269,12 +1436,11 @@ static void ath11k_update_11d(struct work_struct *work)
} }
} }
static void ath11k_core_restart(struct work_struct *work) static void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
{ {
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
struct ath11k *ar; struct ath11k *ar;
struct ath11k_pdev *pdev; struct ath11k_pdev *pdev;
int i, ret = 0; int i;
spin_lock_bh(&ab->base_lock); spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++; ab->stats.fw_crash_counter++;
...@@ -1288,6 +1454,7 @@ static void ath11k_core_restart(struct work_struct *work) ...@@ -1288,6 +1454,7 @@ static void ath11k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw); ieee80211_stop_queues(ar->hw);
ath11k_mac_drain_tx(ar); ath11k_mac_drain_tx(ar);
complete(&ar->completed_11d_scan);
complete(&ar->scan.started); complete(&ar->scan.started);
complete(&ar->scan.completed); complete(&ar->scan.completed);
complete(&ar->peer_assoc_done); complete(&ar->peer_assoc_done);
...@@ -1307,12 +1474,13 @@ static void ath11k_core_restart(struct work_struct *work) ...@@ -1307,12 +1474,13 @@ static void ath11k_core_restart(struct work_struct *work)
wake_up(&ab->wmi_ab.tx_credits_wq); wake_up(&ab->wmi_ab.tx_credits_wq);
wake_up(&ab->peer_mapping_wq); wake_up(&ab->peer_mapping_wq);
}
ret = ath11k_core_reconfigure_on_crash(ab); static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
if (ret) { {
ath11k_err(ab, "failed to reconfigure driver on crash recovery\n"); struct ath11k *ar;
return; struct ath11k_pdev *pdev;
} int i;
for (i = 0; i < ab->num_radios; i++) { for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i]; pdev = &ab->pdevs[i];
...@@ -1348,6 +1516,98 @@ static void ath11k_core_restart(struct work_struct *work) ...@@ -1348,6 +1516,98 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ab->driver_recovery); complete(&ab->driver_recovery);
} }
static void ath11k_core_restart(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
int ret;
if (!ab->is_reset)
ath11k_core_pre_reconfigure_recovery(ab);
ret = ath11k_core_reconfigure_on_crash(ab);
if (ret) {
ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
return;
}
if (ab->is_reset)
complete_all(&ab->reconfigure_complete);
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
}
static void ath11k_core_reset(struct work_struct *work)
{
struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);
int reset_count, fail_cont_count;
long time_left;
if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) {
ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
return;
}
/* Sometimes the recovery will fail and then the next all recovery fail,
* this is to avoid infinite recovery since it can not recovery success.
*/
fail_cont_count = atomic_read(&ab->fail_cont_count);
if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL)
return;
if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST &&
time_before(jiffies, ab->reset_fail_timeout))
return;
reset_count = atomic_inc_return(&ab->reset_count);
if (reset_count > 1) {
/* Sometimes it happened another reset worker before the previous one
* completed, then the second reset worker will destroy the previous one,
* thus below is to avoid that.
*/
ath11k_warn(ab, "already reseting count %d\n", reset_count);
reinit_completion(&ab->reset_complete);
time_left = wait_for_completion_timeout(&ab->reset_complete,
ATH11K_RESET_TIMEOUT_HZ);
if (time_left) {
ath11k_dbg(ab, ATH11K_DBG_BOOT, "to skip reset\n");
atomic_dec(&ab->reset_count);
return;
}
ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ;
/* Record the continuous recovery fail count when recovery failed*/
atomic_inc(&ab->fail_cont_count);
}
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n");
ab->is_reset = true;
atomic_set(&ab->recovery_count, 0);
reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_start_count, 0);
ath11k_core_pre_reconfigure_recovery(ab);
reinit_completion(&ab->reconfigure_complete);
ath11k_core_post_reconfigure_recovery(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n");
time_left = wait_for_completion_timeout(&ab->recovery_start,
ATH11K_RECOVER_START_TIMEOUT_HZ);
ath11k_hif_power_down(ab);
ath11k_qmi_free_resource(ab);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
}
static int ath11k_init_hw_params(struct ath11k_base *ab) static int ath11k_init_hw_params(struct ath11k_base *ab)
{ {
const struct ath11k_hw_params *hw_params = NULL; const struct ath11k_hw_params *hw_params = NULL;
...@@ -1417,6 +1677,7 @@ EXPORT_SYMBOL(ath11k_core_deinit); ...@@ -1417,6 +1677,7 @@ EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab) void ath11k_core_free(struct ath11k_base *ab)
{ {
destroy_workqueue(ab->workqueue_aux);
destroy_workqueue(ab->workqueue); destroy_workqueue(ab->workqueue);
kfree(ab); kfree(ab);
...@@ -1439,9 +1700,17 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, ...@@ -1439,9 +1700,17 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
if (!ab->workqueue) if (!ab->workqueue)
goto err_sc_free; goto err_sc_free;
ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq");
if (!ab->workqueue_aux)
goto err_free_wq;
mutex_init(&ab->core_lock); mutex_init(&ab->core_lock);
mutex_init(&ab->tbl_mtx_lock);
spin_lock_init(&ab->base_lock); spin_lock_init(&ab->base_lock);
mutex_init(&ab->vdev_id_11d_lock); mutex_init(&ab->vdev_id_11d_lock);
init_completion(&ab->reset_complete);
init_completion(&ab->reconfigure_complete);
init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers); INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq); init_waitqueue_head(&ab->peer_mapping_wq);
...@@ -1450,6 +1719,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, ...@@ -1450,6 +1719,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart); INIT_WORK(&ab->restart_work, ath11k_core_restart);
INIT_WORK(&ab->update_11d_work, ath11k_update_11d); INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work); INIT_WORK(&ab->rfkill_work, ath11k_rfkill_work);
INIT_WORK(&ab->reset_work, ath11k_core_reset);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend); init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed); init_completion(&ab->wow.wakeup_completed);
...@@ -1460,6 +1730,8 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, ...@@ -1460,6 +1730,8 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
return ab; return ab;
err_free_wq:
destroy_workqueue(ab->workqueue);
err_sc_free: err_sc_free:
kfree(ab); kfree(ab);
return NULL; return NULL;
......
/* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* /*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#ifndef ATH11K_CORE_H #ifndef ATH11K_CORE_H
...@@ -10,6 +11,9 @@ ...@@ -10,6 +11,9 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/rhashtable.h>
#include "qmi.h" #include "qmi.h"
#include "htc.h" #include "htc.h"
#include "wmi.h" #include "wmi.h"
...@@ -23,6 +27,7 @@ ...@@ -23,6 +27,7 @@
#include "thermal.h" #include "thermal.h"
#include "dbring.h" #include "dbring.h"
#include "spectral.h" #include "spectral.h"
#include "wow.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
...@@ -36,9 +41,26 @@ ...@@ -36,9 +41,26 @@
#define ATH11K_INVALID_HW_MAC_ID 0xFF #define ATH11K_INVALID_HW_MAC_ID 0xFF
#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ) #define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
/* SMBIOS type containing Board Data File Name Extension */
#define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8
/* SMBIOS type structure length (excluding strings-set) */
#define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9
/* The magic used by QCA spec */
#define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_"
extern unsigned int ath11k_frame_mode; extern unsigned int ath11k_frame_mode;
#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
#define ATH11K_MON_TIMER_INTERVAL 10 #define ATH11K_MON_TIMER_INTERVAL 10
#define ATH11K_RESET_TIMEOUT_HZ (20 * HZ)
#define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3
#define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5
#define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
#define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
#define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
enum ath11k_supported_bw { enum ath11k_supported_bw {
ATH11K_BW_20 = 0, ATH11K_BW_20 = 0,
...@@ -147,6 +169,13 @@ struct ath11k_ext_irq_grp { ...@@ -147,6 +169,13 @@ struct ath11k_ext_irq_grp {
struct net_device napi_ndev; struct net_device napi_ndev;
}; };
struct ath11k_smbios_bdf {
struct dmi_header hdr;
u32 padding;
u8 bdf_enabled;
u8 bdf_ext[];
};
#define HEHANDLE_CAP_PHYINFO_SIZE 3 #define HEHANDLE_CAP_PHYINFO_SIZE 3
#define HECAP_PHYINFO_SIZE 9 #define HECAP_PHYINFO_SIZE 9
#define HECAP_MACINFO_SIZE 5 #define HECAP_MACINFO_SIZE 5
...@@ -189,6 +218,12 @@ enum ath11k_scan_state { ...@@ -189,6 +218,12 @@ enum ath11k_scan_state {
ATH11K_SCAN_ABORTING, ATH11K_SCAN_ABORTING,
}; };
enum ath11k_11d_state {
ATH11K_11D_IDLE,
ATH11K_11D_PREPARING,
ATH11K_11D_RUNNING,
};
enum ath11k_dev_flags { enum ath11k_dev_flags {
ATH11K_CAC_RUNNING, ATH11K_CAC_RUNNING,
ATH11K_FLAG_CORE_REGISTERED, ATH11K_FLAG_CORE_REGISTERED,
...@@ -204,6 +239,8 @@ enum ath11k_dev_flags { ...@@ -204,6 +239,8 @@ enum ath11k_dev_flags {
ATH11K_FLAG_CE_IRQ_ENABLED, ATH11K_FLAG_CE_IRQ_ENABLED,
ATH11K_FLAG_EXT_IRQ_ENABLED, ATH11K_FLAG_EXT_IRQ_ENABLED,
ATH11K_FLAG_FIXED_MEM_RGN, ATH11K_FLAG_FIXED_MEM_RGN,
ATH11K_FLAG_DEVICE_INIT_DONE,
ATH11K_FLAG_MULTI_MSI_VECTORS,
}; };
enum ath11k_monitor_flags { enum ath11k_monitor_flags {
...@@ -212,6 +249,30 @@ enum ath11k_monitor_flags { ...@@ -212,6 +249,30 @@ enum ath11k_monitor_flags {
ATH11K_FLAG_MONITOR_VDEV_CREATED, ATH11K_FLAG_MONITOR_VDEV_CREATED,
}; };
#define ATH11K_IPV6_UC_TYPE 0
#define ATH11K_IPV6_AC_TYPE 1
#define ATH11K_IPV6_MAX_COUNT 16
#define ATH11K_IPV4_MAX_COUNT 2
struct ath11k_arp_ns_offload {
u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4];
u32 ipv4_count;
u32 ipv6_count;
u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
u8 ipv6_type[ATH11K_IPV6_MAX_COUNT];
bool ipv6_valid[ATH11K_IPV6_MAX_COUNT];
u8 mac_addr[ETH_ALEN];
};
struct ath11k_rekey_data {
u8 kck[NL80211_KCK_LEN];
u8 kek[NL80211_KCK_LEN];
u64 replay_ctr;
bool enable_offload;
};
struct ath11k_vif { struct ath11k_vif {
u32 vdev_id; u32 vdev_id;
enum wmi_vdev_type vdev_type; enum wmi_vdev_type vdev_type;
...@@ -263,6 +324,9 @@ struct ath11k_vif { ...@@ -263,6 +324,9 @@ struct ath11k_vif {
bool bcca_zero_sent; bool bcca_zero_sent;
bool do_not_send_tmpl; bool do_not_send_tmpl;
struct ieee80211_chanctx_conf chanctx; struct ieee80211_chanctx_conf chanctx;
struct ath11k_arp_ns_offload arp_ns_offload;
struct ath11k_rekey_data rekey_data;
#ifdef CONFIG_ATH11K_DEBUGFS #ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_twt; struct dentry *debugfs_twt;
#endif /* CONFIG_ATH11K_DEBUGFS */ #endif /* CONFIG_ATH11K_DEBUGFS */
...@@ -590,6 +654,9 @@ struct ath11k { ...@@ -590,6 +654,9 @@ struct ath11k {
struct work_struct wmi_mgmt_tx_work; struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue; struct sk_buff_head wmi_mgmt_tx_queue;
struct ath11k_wow wow;
struct completion target_suspend;
bool target_suspend_ack;
struct ath11k_per_peer_tx_stats peer_tx_stats; struct ath11k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info; struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth; u32 ppdu_stat_list_depth;
...@@ -607,12 +674,13 @@ struct ath11k { ...@@ -607,12 +674,13 @@ struct ath11k {
bool dfs_block_radar_events; bool dfs_block_radar_events;
struct ath11k_thermal thermal; struct ath11k_thermal thermal;
u32 vdev_id_11d_scan; u32 vdev_id_11d_scan;
struct completion finish_11d_scan; struct completion completed_11d_scan;
struct completion finish_11d_ch_list; enum ath11k_11d_state state_11d;
bool pending_11d;
bool regdom_set_by_user; bool regdom_set_by_user;
int hw_rate_code; int hw_rate_code;
u8 twt_enabled; u8 twt_enabled;
bool nlo_enabled;
u8 alpha2[REG_ALPHA2_LEN + 1];
}; };
struct ath11k_band_cap { struct ath11k_band_cap {
...@@ -662,6 +730,14 @@ struct ath11k_bus_params { ...@@ -662,6 +730,14 @@ struct ath11k_bus_params {
bool static_window_map; bool static_window_map;
}; };
struct ath11k_pci_ops {
int (*wakeup)(struct ath11k_base *ab);
void (*release)(struct ath11k_base *ab);
int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector);
void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value);
u32 (*window_read32)(struct ath11k_base *ab, u32 offset);
};
/* IPQ8074 HW channel counters frequency value in hertz */ /* IPQ8074 HW channel counters frequency value in hertz */
#define IPQ8074_CC_FREQ_HERTZ 320000 #define IPQ8074_CC_FREQ_HERTZ 320000
...@@ -703,6 +779,19 @@ struct ath11k_soc_dp_stats { ...@@ -703,6 +779,19 @@ struct ath11k_soc_dp_stats {
struct ath11k_dp_ring_bp_stats bp_stats; struct ath11k_dp_ring_bp_stats bp_stats;
}; };
struct ath11k_msi_user {
char *name;
int num_vectors;
u32 base_vector;
};
struct ath11k_msi_config {
int total_vectors;
int total_users;
struct ath11k_msi_user *users;
u16 hw_rev;
};
/* Master structure to hold the hw data which may be used in core module */ /* Master structure to hold the hw data which may be used in core module */
struct ath11k_base { struct ath11k_base {
enum ath11k_hw_rev hw_rev; enum ath11k_hw_rev hw_rev;
...@@ -747,6 +836,18 @@ struct ath11k_base { ...@@ -747,6 +836,18 @@ struct ath11k_base {
struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS]; struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS]; struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
unsigned long long free_vdev_map; unsigned long long free_vdev_map;
/* To synchronize rhash tbl write operation */
struct mutex tbl_mtx_lock;
/* The rhashtable containing struct ath11k_peer keyed by mac addr */
struct rhashtable *rhead_peer_addr;
struct rhashtable_params rhash_peer_addr_param;
/* The rhashtable containing struct ath11k_peer keyed by id */
struct rhashtable *rhead_peer_id;
struct rhashtable_params rhash_peer_id_param;
struct list_head peers; struct list_head peers;
wait_queue_head_t peer_mapping_wq; wait_queue_head_t peer_mapping_wq;
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
...@@ -788,6 +889,18 @@ struct ath11k_base { ...@@ -788,6 +889,18 @@ struct ath11k_base {
struct work_struct restart_work; struct work_struct restart_work;
struct work_struct update_11d_work; struct work_struct update_11d_work;
u8 new_alpha2[3]; u8 new_alpha2[3];
struct workqueue_struct *workqueue_aux;
struct work_struct reset_work;
atomic_t reset_count;
atomic_t recovery_count;
atomic_t recovery_start_count;
bool is_reset;
struct completion reset_complete;
struct completion reconfigure_complete;
struct completion recovery_start;
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
struct { struct {
/* protected by data_lock */ /* protected by data_lock */
u32 fw_crash_counter; u32 fw_crash_counter;
...@@ -815,6 +928,17 @@ struct ath11k_base { ...@@ -815,6 +928,17 @@ struct ath11k_base {
u32 subsystem_device; u32 subsystem_device;
} id; } id;
struct {
struct {
const struct ath11k_msi_config *config;
u32 ep_base_data;
u32 addr_lo;
u32 addr_hi;
} msi;
const struct ath11k_pci_ops *ops;
} pci;
/* must be last */ /* must be last */
u8 drv_priv[] __aligned(sizeof(void *)); u8 drv_priv[] __aligned(sizeof(void *));
}; };
...@@ -996,7 +1120,7 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab, ...@@ -996,7 +1120,7 @@ int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
const char *name); const char *name);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd); void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k); int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar); void ath11k_core_halt(struct ath11k *ar);
int ath11k_core_resume(struct ath11k_base *ab); int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab); int ath11k_core_suspend(struct ath11k_base *ab);
......
...@@ -596,6 +596,10 @@ static ssize_t ath11k_write_simulate_fw_crash(struct file *file, ...@@ -596,6 +596,10 @@ static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
ret = ath11k_wmi_force_fw_hang_cmd(ar, ret = ath11k_wmi_force_fw_hang_cmd(ar,
ATH11K_WMI_FW_HANG_ASSERT_TYPE, ATH11K_WMI_FW_HANG_ASSERT_TYPE,
ATH11K_WMI_FW_HANG_DELAY); ATH11K_WMI_FW_HANG_DELAY);
} else if (!strcmp(buf, "hw-restart")) {
ath11k_info(ab, "user requested hw restart\n");
queue_work(ab->workqueue_aux, &ab->reset_work);
ret = 0;
} else { } else {
ret = -EINVAL; ret = -EINVAL;
goto exit; goto exit;
......
...@@ -272,6 +272,11 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab, ...@@ -272,6 +272,11 @@ void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
ep_tx_complete(htc->ab, skb); ep_tx_complete(htc->ab, skb);
} }
static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab)
{
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot wakeup from suspend is received\n");
}
void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
struct sk_buff *skb) struct sk_buff *skb)
{ {
...@@ -376,6 +381,7 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab, ...@@ -376,6 +381,7 @@ void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
ath11k_htc_suspend_complete(ab, false); ath11k_htc_suspend_complete(ab, false);
break; break;
case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
ath11k_htc_wakeup_from_suspend(ab);
break; break;
default: default:
ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n", ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n",
......
/* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* /*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#ifndef ATH11K_HW_H #ifndef ATH11K_HW_H
...@@ -189,11 +190,11 @@ struct ath11k_hw_params { ...@@ -189,11 +190,11 @@ struct ath11k_hw_params {
const struct ath11k_hw_hal_params *hal_params; const struct ath11k_hw_hal_params *hal_params;
bool supports_dynamic_smps_6ghz; bool supports_dynamic_smps_6ghz;
bool alloc_cacheable_memory; bool alloc_cacheable_memory;
bool wakeup_mhi;
bool supports_rssi_stats; bool supports_rssi_stats;
bool fw_wmi_diag_event; bool fw_wmi_diag_event;
bool current_cc_support; bool current_cc_support;
bool dbr_debug_support; bool dbr_debug_support;
bool global_reset;
}; };
struct ath11k_hw_ops { struct ath11k_hw_ops {
...@@ -290,10 +291,16 @@ enum ath11k_bd_ie_board_type { ...@@ -290,10 +291,16 @@ enum ath11k_bd_ie_board_type {
ATH11K_BD_IE_BOARD_DATA = 1, ATH11K_BD_IE_BOARD_DATA = 1,
}; };
enum ath11k_bd_ie_regdb_type {
ATH11K_BD_IE_REGDB_NAME = 0,
ATH11K_BD_IE_REGDB_DATA = 1,
};
enum ath11k_bd_ie_type { enum ath11k_bd_ie_type {
/* contains sub IEs of enum ath11k_bd_ie_board_type */ /* contains sub IEs of enum ath11k_bd_ie_board_type */
ATH11K_BD_IE_BOARD = 0, ATH11K_BD_IE_BOARD = 0,
ATH11K_BD_IE_BOARD_EXT = 1, /* contains sub IEs of enum ath11k_bd_ie_regdb_type */
ATH11K_BD_IE_REGDB = 1,
}; };
struct ath11k_hw_regs { struct ath11k_hw_regs {
...@@ -361,4 +368,16 @@ extern const struct ath11k_hw_regs qca6390_regs; ...@@ -361,4 +368,16 @@ extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs; extern const struct ath11k_hw_regs qcn9074_regs;
extern const struct ath11k_hw_regs wcn6855_regs; extern const struct ath11k_hw_regs wcn6855_regs;
static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
{
switch (type) {
case ATH11K_BD_IE_BOARD:
return "board data";
case ATH11K_BD_IE_REGDB:
return "regdb data";
}
return "unknown";
}
#endif #endif
// SPDX-License-Identifier: BSD-3-Clause-Clear // SPDX-License-Identifier: BSD-3-Clause-Clear
/* /*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#include <net/mac80211.h> #include <net/mac80211.h>
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/bitfield.h>
#include <linux/inetdevice.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>
#include "mac.h" #include "mac.h"
#include "core.h" #include "core.h"
#include "debug.h" #include "debug.h"
...@@ -16,6 +21,8 @@ ...@@ -16,6 +21,8 @@
#include "testmode.h" #include "testmode.h"
#include "peer.h" #include "peer.h"
#include "debugfs_sta.h" #include "debugfs_sta.h"
#include "hif.h"
#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \ #define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \ .band = NL80211_BAND_2GHZ, \
...@@ -868,13 +875,16 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar) ...@@ -868,13 +875,16 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
lockdep_assert_held(&ar->conf_mutex); lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock); spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) { list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
ath11k_peer_rx_tid_cleanup(ar, peer); ath11k_peer_rx_tid_cleanup(ar, peer);
ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list); list_del(&peer->list);
kfree(peer); kfree(peer);
} }
spin_unlock_bh(&ab->base_lock); spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
ar->num_peers = 0; ar->num_peers = 0;
ar->num_stations = 0; ar->num_stations = 0;
...@@ -2750,6 +2760,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw, ...@@ -2750,6 +2760,7 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
} }
arvif->is_up = true; arvif->is_up = true;
arvif->rekey_data.enable_offload = false;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n", "mac vdev %d up (associated) bssid %pM aid %d\n",
...@@ -2807,6 +2818,8 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw, ...@@ -2807,6 +2818,8 @@ static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
arvif->is_up = false; arvif->is_up = false;
memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
cancel_delayed_work_sync(&arvif->connection_loss_work); cancel_delayed_work_sync(&arvif->connection_loss_work);
} }
...@@ -3093,6 +3106,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, ...@@ -3093,6 +3106,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
int ret = 0; int ret = 0;
u8 rateidx; u8 rateidx;
u32 rate; u32 rate;
u32 ipv4_cnt;
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
...@@ -3385,6 +3399,18 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, ...@@ -3385,6 +3399,18 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath11k_mac_fils_discovery(arvif, info); ath11k_mac_fils_discovery(arvif, info);
if (changed & BSS_CHANGED_ARP_FILTER) {
ipv4_cnt = min(info->arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
memcpy(arvif->arp_ns_offload.ipv4_addr, info->arp_addr_list,
ipv4_cnt * sizeof(u32));
memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN);
arvif->arp_ns_offload.ipv4_count = ipv4_cnt;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
info->arp_addr_cnt,
vif->addr, arvif->arp_ns_offload.ipv4_addr);
}
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
} }
...@@ -3595,26 +3621,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, ...@@ -3595,26 +3621,6 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret) if (ret)
goto exit; goto exit;
/* Currently the pending_11d=true only happened 1 time while
* wlan interface up in ath11k_mac_11d_scan_start(), it is called by
* ath11k_mac_op_add_interface(), after wlan interface up,
* pending_11d=false always.
* If remove below wait, it always happened scan fail and lead connect
* fail while wlan interface up, because it has a 11d scan which is running
* in firmware, and lead this scan failed.
*/
if (ar->pending_11d) {
long time_left;
unsigned long timeout = 5 * HZ;
if (ar->supports_6ghz)
timeout += 5 * HZ;
time_left = wait_for_completion_timeout(&ar->finish_11d_ch_list, timeout);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac wait 11d channel list time left %ld\n", time_left);
}
memset(&arg, 0, sizeof(arg)); memset(&arg, 0, sizeof(arg));
ath11k_wmi_start_scan_init(ar, &arg); ath11k_wmi_start_scan_init(ar, &arg);
arg.vdev_id = arvif->vdev_id; arg.vdev_id = arvif->vdev_id;
...@@ -3680,6 +3686,10 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw, ...@@ -3680,6 +3686,10 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
kfree(arg.extraie.ptr); kfree(arg.extraie.ptr);
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
if (ar->state_11d == ATH11K_11D_PREPARING)
ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
return ret; return ret;
} }
...@@ -4531,6 +4541,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ...@@ -4531,6 +4541,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
} }
ath11k_mac_dec_num_stations(arvif, sta); ath11k_mac_dec_num_stations(arvif, sta);
mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock); spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr); peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
if (skip_peer_delete && peer) { if (skip_peer_delete && peer) {
...@@ -4538,12 +4549,14 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw, ...@@ -4538,12 +4549,14 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
} else if (peer && peer->sta == sta) { } else if (peer && peer->sta == sta) {
ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id); vif->addr, arvif->vdev_id);
ath11k_peer_rhash_delete(ar->ab, peer);
peer->sta = NULL; peer->sta = NULL;
list_del(&peer->list); list_del(&peer->list);
kfree(peer); kfree(peer);
ar->num_peers--; ar->num_peers--;
} }
spin_unlock_bh(&ar->ab->base_lock); spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
kfree(arsta->tx_stats); kfree(arsta->tx_stats);
arsta->tx_stats = NULL; arsta->tx_stats = NULL;
...@@ -5726,6 +5739,27 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) ...@@ -5726,6 +5739,27 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
return ret; return ret;
} }
static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab)
{
int recovery_start_count;
if (!ab->is_reset)
return;
recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
if (recovery_start_count == ab->num_radios) {
complete(&ab->recovery_start);
ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started success\n");
}
ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting reconfigure...\n");
wait_for_completion_timeout(&ab->reconfigure_complete,
ATH11K_RECONFIGURE_TIMEOUT_HZ);
}
static int ath11k_mac_op_start(struct ieee80211_hw *hw) static int ath11k_mac_op_start(struct ieee80211_hw *hw)
{ {
struct ath11k *ar = hw->priv; struct ath11k *ar = hw->priv;
...@@ -5742,6 +5776,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw) ...@@ -5742,6 +5776,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
break; break;
case ATH11K_STATE_RESTARTING: case ATH11K_STATE_RESTARTING:
ar->state = ATH11K_STATE_RESTARTED; ar->state = ATH11K_STATE_RESTARTED;
ath11k_mac_wait_reconfigure(ab);
break; break;
case ATH11K_STATE_RESTARTED: case ATH11K_STATE_RESTARTED:
case ATH11K_STATE_WEDGED: case ATH11K_STATE_WEDGED:
...@@ -5808,7 +5843,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw) ...@@ -5808,7 +5843,7 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
/* TODO: Do we need to enable ANI? */ /* TODO: Do we need to enable ANI? */
ath11k_reg_update_chan_list(ar); ath11k_reg_update_chan_list(ar, false);
ar->num_started_vdevs = 0; ar->num_started_vdevs = 0;
ar->num_created_vdevs = 0; ar->num_created_vdevs = 0;
...@@ -5875,6 +5910,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw) ...@@ -5875,6 +5910,11 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&ar->ab->update_11d_work); cancel_work_sync(&ar->ab->update_11d_work);
cancel_work_sync(&ar->ab->rfkill_work); cancel_work_sync(&ar->ab->rfkill_work);
if (ar->state_11d == ATH11K_11D_PREPARING) {
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
spin_lock_bh(&ar->data_lock); spin_lock_bh(&ar->data_lock);
list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
list_del(&ppdu_stats->list); list_del(&ppdu_stats->list);
...@@ -6045,7 +6085,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab) ...@@ -6045,7 +6085,7 @@ static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
return false; return false;
} }
void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait) void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
{ {
struct wmi_11d_scan_start_params param; struct wmi_11d_scan_start_params param;
int ret; int ret;
...@@ -6073,28 +6113,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait) ...@@ -6073,28 +6113,22 @@ void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait)
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n"); ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac start 11d scan\n");
if (wait)
reinit_completion(&ar->finish_11d_scan);
ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param); ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
if (ret) { if (ret) {
ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n", ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
vdev_id, ret); vdev_id, ret);
} else { } else {
ar->vdev_id_11d_scan = vdev_id; ar->vdev_id_11d_scan = vdev_id;
if (wait) { if (ar->state_11d == ATH11K_11D_PREPARING)
ar->pending_11d = true; ar->state_11d = ATH11K_11D_RUNNING;
ret = wait_for_completion_timeout(&ar->finish_11d_scan,
5 * HZ);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"mac 11d scan left time %d\n", ret);
if (!ret)
ar->pending_11d = false;
}
} }
fin: fin:
if (ar->state_11d == ATH11K_11D_PREPARING) {
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
mutex_unlock(&ar->ab->vdev_id_11d_lock); mutex_unlock(&ar->ab->vdev_id_11d_lock);
} }
...@@ -6117,12 +6151,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar) ...@@ -6117,12 +6151,15 @@ void ath11k_mac_11d_scan_stop(struct ath11k *ar)
vdev_id = ar->vdev_id_11d_scan; vdev_id = ar->vdev_id_11d_scan;
ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id); ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
if (ret) if (ret) {
ath11k_warn(ar->ab, ath11k_warn(ar->ab,
"failed to stopt 11d scan vdev %d ret: %d\n", "failed to stopt 11d scan vdev %d ret: %d\n",
vdev_id, ret); vdev_id, ret);
else } else {
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
} }
mutex_unlock(&ar->ab->vdev_id_11d_lock); mutex_unlock(&ar->ab->vdev_id_11d_lock);
} }
...@@ -6318,8 +6355,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, ...@@ -6318,8 +6355,10 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del; goto err_peer_del;
} }
ath11k_mac_11d_scan_start(ar, arvif->vdev_id, true); if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
reinit_completion(&ar->completed_11d_scan);
ar->state_11d = ATH11K_11D_PREPARING;
}
break; break;
case WMI_VDEV_TYPE_MONITOR: case WMI_VDEV_TYPE_MONITOR:
set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
...@@ -6364,22 +6403,12 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw, ...@@ -6364,22 +6403,12 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
err_peer_del: err_peer_del:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
reinit_completion(&ar->peer_delete_done); fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
fbret = ath11k_wmi_send_peer_delete_cmd(ar, vif->addr,
arvif->vdev_id);
if (fbret) { if (fbret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n",
arvif->vdev_id, vif->addr); vif->addr, arvif->vdev_id, fbret);
goto err; goto err;
} }
fbret = ath11k_wait_for_peer_delete_done(ar, arvif->vdev_id,
vif->addr);
if (fbret)
goto err;
ar->num_peers--;
} }
err_vdev_del: err_vdev_del:
...@@ -7120,6 +7149,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, ...@@ -7120,6 +7149,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv; struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab; struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv; struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct ath11k_peer *peer;
int ret; int ret;
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
...@@ -7131,9 +7161,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, ...@@ -7131,9 +7161,13 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started); WARN_ON(!arvif->is_started);
if (ab->hw_params.vdev_start_delay && if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ath11k_peer_find_by_addr(ab, ar->mac_addr)) spin_lock_bh(&ab->base_lock);
ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
spin_unlock_bh(&ab->base_lock);
if (peer)
ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_mac_monitor_stop(ar); ret = ath11k_mac_monitor_stop(ar);
...@@ -7184,7 +7218,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, ...@@ -7184,7 +7218,7 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
} }
if (arvif->vdev_type == WMI_VDEV_TYPE_STA) if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
ath11k_mac_11d_scan_start(ar, arvif->vdev_id, false); ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
} }
...@@ -7258,31 +7292,47 @@ static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) ...@@ -7258,31 +7292,47 @@ static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static int ath11k_mac_flush_tx_complete(struct ath11k *ar)
u32 queues, bool drop)
{ {
struct ath11k *ar = hw->priv;
long time_left; long time_left;
int ret = 0;
if (drop)
return;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq, time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0), (atomic_read(&ar->dp.num_tx_pending) == 0),
ATH11K_FLUSH_TIMEOUT); ATH11K_FLUSH_TIMEOUT);
if (time_left == 0) if (time_left == 0) {
ath11k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left); ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n",
atomic_read(&ar->dp.num_tx_pending));
ret = -ETIMEDOUT;
}
time_left = wait_event_timeout(ar->txmgmt_empty_waitq, time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0), (atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH11K_FLUSH_TIMEOUT); ATH11K_FLUSH_TIMEOUT);
if (time_left == 0) if (time_left == 0) {
ath11k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n", ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
time_left); atomic_read(&ar->num_pending_mgmt_tx));
ret = -ETIMEDOUT;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, return ret;
"mac mgmt tx flush mgmt pending %d\n", }
atomic_read(&ar->num_pending_mgmt_tx));
int ath11k_mac_wait_tx_complete(struct ath11k *ar)
{
ath11k_mac_drain_tx(ar);
return ath11k_mac_flush_tx_complete(ar);
}
static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath11k *ar = hw->priv;
if (drop)
return;
ath11k_mac_flush_tx_complete(ar);
} }
static int static int
...@@ -7881,6 +7931,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ...@@ -7881,6 +7931,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type) enum ieee80211_reconfig_type reconfig_type)
{ {
struct ath11k *ar = hw->priv; struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
int recovery_count;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return; return;
...@@ -7892,6 +7944,30 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ...@@ -7892,6 +7944,30 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ar->pdev->pdev_id); ar->pdev->pdev_id);
ar->state = ATH11K_STATE_ON; ar->state = ATH11K_STATE_ON;
ieee80211_wake_queues(ar->hw); ieee80211_wake_queues(ar->hw);
if (ar->ab->hw_params.current_cc_support &&
ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
struct wmi_set_current_country_params set_current_param = {};
memcpy(&set_current_param.alpha2, ar->alpha2, 2);
ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
}
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
ath11k_dbg(ab, ATH11K_DBG_BOOT,
"recovery count %d\n", recovery_count);
/* When there are multiple radios in an SOC,
* the recovery has to be done for each radio
*/
if (recovery_count == ab->num_radios) {
atomic_dec(&ab->reset_count);
complete(&ab->reset_complete);
ab->is_reset = false;
atomic_set(&ab->fail_cont_count, 0);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
}
}
} }
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
...@@ -8069,6 +8145,134 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, ...@@ -8069,6 +8145,134 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
} }
} }
static void ath11k_generate_ns_mc_addr(struct ath11k *ar,
struct ath11k_arp_ns_offload *offload)
{
int i;
for (i = 0; i < offload->ipv6_count; i++) {
offload->self_ipv6_addr[i][0] = 0xff;
offload->self_ipv6_addr[i][1] = 0x02;
offload->self_ipv6_addr[i][11] = 0x01;
offload->self_ipv6_addr[i][12] = 0xff;
offload->self_ipv6_addr[i][13] =
offload->ipv6_addr[i][13];
offload->self_ipv6_addr[i][14] =
offload->ipv6_addr[i][14];
offload->self_ipv6_addr[i][15] =
offload->ipv6_addr[i][15];
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n",
offload->self_ipv6_addr[i]);
}
}
static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct inet6_dev *idev)
{
struct ath11k *ar = hw->priv;
struct ath11k_arp_ns_offload *offload;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct inet6_ifaddr *ifa6;
struct ifacaddr6 *ifaca6;
struct list_head *p;
u32 count, scope;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac op ipv6 changed\n");
offload = &arvif->arp_ns_offload;
count = 0;
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr));
memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
/* get unicast address */
list_for_each(p, &idev->addr_list) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
if (ifa6->flags & IFA_F_DADFAILED)
continue;
scope = ipv6_addr_src_scope(&ifa6->addr);
if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
scope == IPV6_ADDR_SCOPE_GLOBAL) {
memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
sizeof(ifa6->addr.s6_addr));
offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 uc %pI6 scope %d\n",
count, offload->ipv6_addr[count],
scope);
count++;
} else {
ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope);
}
}
/* get anycast address */
for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
scope == IPV6_ADDR_SCOPE_GLOBAL) {
memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
sizeof(ifaca6->aca_addr));
offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac count %d ipv6 ac %pI6 scope %d\n",
count, offload->ipv6_addr[count],
scope);
count++;
} else {
ath11k_warn(ar->ab, "Unsupported ipv scope: %d\n", scope);
}
}
generate:
offload->ipv6_count = count;
read_unlock_bh(&idev->lock);
/* generate ns multicast address */
ath11k_generate_ns_mc_addr(ar, offload);
}
static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *data)
{
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set rekey data vdev %d\n",
arvif->vdev_id);
mutex_lock(&ar->conf_mutex);
memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
/* The supplicant works on big-endian, the firmware expects it on
* little endian.
*/
rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
arvif->rekey_data.enable_offload = true;
ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL,
rekey_data->kck, NL80211_KCK_LEN);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL,
rekey_data->kck, NL80211_KEK_LEN);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL,
&rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
mutex_unlock(&ar->conf_mutex);
}
static const struct ieee80211_ops ath11k_ops = { static const struct ieee80211_ops ath11k_ops = {
.tx = ath11k_mac_op_tx, .tx = ath11k_mac_op_tx,
.start = ath11k_mac_op_start, .start = ath11k_mac_op_start,
...@@ -8083,6 +8287,7 @@ static const struct ieee80211_ops ath11k_ops = { ...@@ -8083,6 +8287,7 @@ static const struct ieee80211_ops ath11k_ops = {
.hw_scan = ath11k_mac_op_hw_scan, .hw_scan = ath11k_mac_op_hw_scan,
.cancel_hw_scan = ath11k_mac_op_cancel_hw_scan, .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
.set_key = ath11k_mac_op_set_key, .set_key = ath11k_mac_op_set_key,
.set_rekey_data = ath11k_mac_op_set_rekey_data,
.sta_state = ath11k_mac_op_sta_state, .sta_state = ath11k_mac_op_sta_state,
.sta_set_4addr = ath11k_mac_op_sta_set_4addr, .sta_set_4addr = ath11k_mac_op_sta_set_4addr,
.sta_set_txpwr = ath11k_mac_op_sta_set_txpwr, .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
...@@ -8104,9 +8309,21 @@ static const struct ieee80211_ops ath11k_ops = { ...@@ -8104,9 +8309,21 @@ static const struct ieee80211_ops ath11k_ops = {
.flush = ath11k_mac_op_flush, .flush = ath11k_mac_op_flush,
.sta_statistics = ath11k_mac_op_sta_statistics, .sta_statistics = ath11k_mac_op_sta_statistics,
CFG80211_TESTMODE_CMD(ath11k_tm_cmd) CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
#ifdef CONFIG_PM
.suspend = ath11k_wow_op_suspend,
.resume = ath11k_wow_op_resume,
.set_wakeup = ath11k_wow_op_set_wakeup,
#endif
#ifdef CONFIG_ATH11K_DEBUGFS #ifdef CONFIG_ATH11K_DEBUGFS
.sta_add_debugfs = ath11k_debugfs_sta_op_add, .sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif #endif
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = ath11k_mac_op_ipv6_changed,
#endif
}; };
static void ath11k_mac_update_ch_list(struct ath11k *ar, static void ath11k_mac_update_ch_list(struct ath11k *ar,
...@@ -8359,6 +8576,8 @@ void ath11k_mac_unregister(struct ath11k_base *ab) ...@@ -8359,6 +8576,8 @@ void ath11k_mac_unregister(struct ath11k_base *ab)
__ath11k_mac_unregister(ar); __ath11k_mac_unregister(ar);
} }
ath11k_peer_rhash_tbl_destroy(ab);
} }
static int __ath11k_mac_register(struct ath11k *ar) static int __ath11k_mac_register(struct ath11k *ar)
...@@ -8473,6 +8692,24 @@ static int __ath11k_mac_register(struct ath11k *ar) ...@@ -8473,6 +8692,24 @@ static int __ath11k_mac_register(struct ath11k *ar)
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
} }
if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
ar->hw->wiphy->max_sched_scan_plan_interval =
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
}
ret = ath11k_wow_init(ar);
if (ret) {
ath11k_warn(ar->ab, "failed to init wow: %d\n", ret);
goto err_free_if_combs;
}
ar->hw->queues = ATH11K_HW_MAX_QUEUES; ar->hw->queues = ATH11K_HW_MAX_QUEUES;
ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN; ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1; ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
...@@ -8569,6 +8806,10 @@ int ath11k_mac_register(struct ath11k_base *ab) ...@@ -8569,6 +8806,10 @@ int ath11k_mac_register(struct ath11k_base *ab)
ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ; ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1; ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
ret = ath11k_peer_rhash_tbl_init(ab);
if (ret)
return ret;
for (i = 0; i < ab->num_radios; i++) { for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i]; pdev = &ab->pdevs[i];
ar = pdev->ar; ar = pdev->ar;
...@@ -8598,6 +8839,8 @@ int ath11k_mac_register(struct ath11k_base *ab) ...@@ -8598,6 +8839,8 @@ int ath11k_mac_register(struct ath11k_base *ab)
__ath11k_mac_unregister(ar); __ath11k_mac_unregister(ar);
} }
ath11k_peer_rhash_tbl_destroy(ab);
return ret; return ret;
} }
...@@ -8665,8 +8908,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab) ...@@ -8665,8 +8908,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->monitor_vdev_id = -1; ar->monitor_vdev_id = -1;
clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags); clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID; ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
init_completion(&ar->finish_11d_scan); init_completion(&ar->completed_11d_scan);
init_completion(&ar->finish_11d_ch_list);
} }
return 0; return 0;
......
...@@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default; ...@@ -130,7 +130,7 @@ extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
#define ATH11K_SCAN_11D_INTERVAL 600000 #define ATH11K_SCAN_11D_INTERVAL 600000
#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF #define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id, bool wait); void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_11d_scan_stop(struct ath11k *ar); void ath11k_mac_11d_scan_stop(struct ath11k *ar);
void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab); void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
...@@ -172,4 +172,5 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher); ...@@ -172,4 +172,5 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb); void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id); void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif); void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
int ath11k_mac_wait_tx_complete(struct ath11k *ar);
#endif #endif
// SPDX-License-Identifier: BSD-3-Clause-Clear // SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */ /*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h> #include <linux/msi.h>
#include <linux/pci.h> #include <linux/pci.h>
...@@ -11,6 +14,7 @@ ...@@ -11,6 +14,7 @@
#include "debug.h" #include "debug.h"
#include "mhi.h" #include "mhi.h"
#include "pci.h" #include "pci.h"
#include "pcic.h"
#define MHI_TIMEOUT_DEFAULT_MS 90000 #define MHI_TIMEOUT_DEFAULT_MS 90000
#define RDDM_DUMP_SIZE 0x420000 #define RDDM_DUMP_SIZE 0x420000
...@@ -205,7 +209,7 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab) ...@@ -205,7 +209,7 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
{ {
u32 val; u32 val;
val = ath11k_pci_read32(ab, MHISTATUS); val = ath11k_pcic_read32(ab, MHISTATUS);
ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val); ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val);
...@@ -213,29 +217,29 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab) ...@@ -213,29 +217,29 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
* has SYSERR bit set and thus need to set MHICTRL_RESET * has SYSERR bit set and thus need to set MHICTRL_RESET
* to clear SYSERR. * to clear SYSERR.
*/ */
ath11k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK); ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
mdelay(10); mdelay(10);
} }
static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab) static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
{ {
ath11k_pci_write32(ab, PCIE_TXVECDB, 0); ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
} }
static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab) static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
{ {
ath11k_pci_write32(ab, PCIE_TXVECSTATUS, 0); ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
} }
static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab) static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
{ {
ath11k_pci_write32(ab, PCIE_RXVECDB, 0); ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
} }
static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab) static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
{ {
ath11k_pci_write32(ab, PCIE_RXVECSTATUS, 0); ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
} }
void ath11k_mhi_clear_vector(struct ath11k_base *ab) void ath11k_mhi_clear_vector(struct ath11k_base *ab)
...@@ -254,9 +258,8 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) ...@@ -254,9 +258,8 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
int *irq; int *irq;
unsigned int msi_data; unsigned int msi_data;
ret = ath11k_pci_get_user_msi_assignment(ab_pci, ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
"MHI", &num_vectors, &user_base_data, &base_vector);
&user_base_data, &base_vector);
if (ret) if (ret)
return ret; return ret;
...@@ -270,11 +273,10 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci) ...@@ -270,11 +273,10 @@ static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
for (i = 0; i < num_vectors; i++) { for (i = 0; i < num_vectors; i++) {
msi_data = base_vector; msi_data = base_vector;
if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
msi_data += i; msi_data += i;
irq[i] = ath11k_pci_get_msi_irq(ab->dev, irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
msi_data);
} }
ab_pci->mhi_ctrl->irq = irq; ab_pci->mhi_ctrl->irq = irq;
...@@ -292,15 +294,48 @@ static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl) ...@@ -292,15 +294,48 @@ static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
{ {
} }
static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
{
switch (reason) {
case MHI_CB_IDLE:
return "MHI_CB_IDLE";
case MHI_CB_PENDING_DATA:
return "MHI_CB_PENDING_DATA";
case MHI_CB_LPM_ENTER:
return "MHI_CB_LPM_ENTER";
case MHI_CB_LPM_EXIT:
return "MHI_CB_LPM_EXIT";
case MHI_CB_EE_RDDM:
return "MHI_CB_EE_RDDM";
case MHI_CB_EE_MISSION_MODE:
return "MHI_CB_EE_MISSION_MODE";
case MHI_CB_SYS_ERROR:
return "MHI_CB_SYS_ERROR";
case MHI_CB_FATAL_ERROR:
return "MHI_CB_FATAL_ERROR";
case MHI_CB_BW_REQ:
return "MHI_CB_BW_REQ";
default:
return "UNKNOWN";
}
};
static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb) enum mhi_callback cb)
{ {
struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev); struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "mhi notify status reason %s\n",
ath11k_mhi_op_callback_to_str(cb));
switch (cb) { switch (cb) {
case MHI_CB_SYS_ERROR: case MHI_CB_SYS_ERROR:
ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break; break;
case MHI_CB_EE_RDDM:
if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default: default:
break; break;
} }
...@@ -371,7 +406,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci) ...@@ -371,7 +406,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
return ret; return ret;
} }
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
...@@ -428,216 +463,62 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci) ...@@ -428,216 +463,62 @@ void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
mhi_free_controller(mhi_ctrl); mhi_free_controller(mhi_ctrl);
} }
static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state) int ath11k_mhi_start(struct ath11k_pci *ab_pci)
{
switch (mhi_state) {
case ATH11K_MHI_INIT:
return "INIT";
case ATH11K_MHI_DEINIT:
return "DEINIT";
case ATH11K_MHI_POWER_ON:
return "POWER_ON";
case ATH11K_MHI_POWER_OFF:
return "POWER_OFF";
case ATH11K_MHI_FORCE_POWER_OFF:
return "FORCE_POWER_OFF";
case ATH11K_MHI_SUSPEND:
return "SUSPEND";
case ATH11K_MHI_RESUME:
return "RESUME";
case ATH11K_MHI_TRIGGER_RDDM:
return "TRIGGER_RDDM";
case ATH11K_MHI_RDDM_DONE:
return "RDDM_DONE";
default:
return "UNKNOWN";
}
};
static void ath11k_mhi_set_state_bit(struct ath11k_pci *ab_pci,
enum ath11k_mhi_state mhi_state)
{ {
struct ath11k_base *ab = ab_pci->ab; struct ath11k_base *ab = ab_pci->ab;
int ret;
switch (mhi_state) { ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
case ATH11K_MHI_INIT:
set_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
break;
case ATH11K_MHI_DEINIT:
clear_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
break;
case ATH11K_MHI_POWER_ON:
set_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
break;
case ATH11K_MHI_POWER_OFF:
case ATH11K_MHI_FORCE_POWER_OFF:
clear_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
clear_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
clear_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
break;
case ATH11K_MHI_SUSPEND:
set_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
break;
case ATH11K_MHI_RESUME:
clear_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
break;
case ATH11K_MHI_TRIGGER_RDDM:
set_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
break;
case ATH11K_MHI_RDDM_DONE:
set_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
break;
default:
ath11k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
}
}
static int ath11k_mhi_check_state_bit(struct ath11k_pci *ab_pci, ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
enum ath11k_mhi_state mhi_state) if (ret) {
{ ath11k_warn(ab, "failed to prepare mhi: %d", ret);
struct ath11k_base *ab = ab_pci->ab; return ret;
}
switch (mhi_state) { ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
case ATH11K_MHI_INIT: if (ret) {
if (!test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state)) ath11k_warn(ab, "failed to power up mhi: %d", ret);
return 0; return ret;
break;
case ATH11K_MHI_DEINIT:
case ATH11K_MHI_POWER_ON:
if (test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state) &&
!test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_FORCE_POWER_OFF:
if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_POWER_OFF:
case ATH11K_MHI_SUSPEND:
if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_RESUME:
if (test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_TRIGGER_RDDM:
if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
return 0;
break;
case ATH11K_MHI_RDDM_DONE:
return 0;
default:
ath11k_err(ab, "unhandled mhi state: %s(%d)\n",
ath11k_mhi_state_to_str(mhi_state), mhi_state);
} }
ath11k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n", return 0;
ath11k_mhi_state_to_str(mhi_state), mhi_state, }
ab_pci->mhi_state);
return -EINVAL; void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
{
mhi_power_down(ab_pci->mhi_ctrl, true);
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
} }
static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci, int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
enum ath11k_mhi_state mhi_state)
{ {
struct ath11k_base *ab = ab_pci->ab; struct ath11k_base *ab = ab_pci->ab;
int ret; int ret;
ret = ath11k_mhi_check_state_bit(ab_pci, mhi_state); ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
if (ret) if (ret) {
goto out; ath11k_warn(ab, "failed to suspend mhi: %d", ret);
return ret;
ath11k_dbg(ab, ATH11K_DBG_PCI, "setting mhi state: %s(%d)\n",
ath11k_mhi_state_to_str(mhi_state), mhi_state);
switch (mhi_state) {
case ATH11K_MHI_INIT:
ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_DEINIT:
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
ret = 0;
break;
case ATH11K_MHI_POWER_ON:
ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
ret = 0;
break;
case ATH11K_MHI_FORCE_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, false);
ret = 0;
break;
case ATH11K_MHI_SUSPEND:
ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_RESUME:
/* Do force MHI resume as some devices like QCA6390, WCN6855
* are not in M3 state but they are functional. So just ignore
* the MHI state while resuming.
*/
ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
break;
case ATH11K_MHI_RDDM_DONE:
break;
default:
ath11k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
ret = -EINVAL;
} }
if (ret)
goto out;
ath11k_mhi_set_state_bit(ab_pci, mhi_state);
return 0; return 0;
out:
ath11k_err(ab, "failed to set mhi state: %s(%d)\n",
ath11k_mhi_state_to_str(mhi_state), mhi_state);
return ret;
} }
int ath11k_mhi_start(struct ath11k_pci *ab_pci) int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
{ {
struct ath11k_base *ab = ab_pci->ab;
int ret; int ret;
ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS; /* Do force MHI resume as some devices like QCA6390, WCN6855
* are not in M3 state but they are functional. So just ignore
ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_INIT); * the MHI state while resuming.
if (ret) */
goto out; ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
if (ret) {
ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_ON); ath11k_warn(ab, "failed to resume mhi: %d", ret);
if (ret) return ret;
goto out; }
return 0; return 0;
out:
return ret;
}
void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
{
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_OFF);
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT);
}
void ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
{
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_SUSPEND);
}
void ath11k_mhi_resume(struct ath11k_pci *ab_pci)
{
ath11k_mhi_set_state(ab_pci, ATH11K_MHI_RESUME);
} }
...@@ -16,19 +16,6 @@ ...@@ -16,19 +16,6 @@
#define MHICTRL 0x38 #define MHICTRL 0x38
#define MHICTRL_RESET_MASK 0x2 #define MHICTRL_RESET_MASK 0x2
enum ath11k_mhi_state {
ATH11K_MHI_INIT,
ATH11K_MHI_DEINIT,
ATH11K_MHI_POWER_ON,
ATH11K_MHI_POWER_OFF,
ATH11K_MHI_FORCE_POWER_OFF,
ATH11K_MHI_SUSPEND,
ATH11K_MHI_RESUME,
ATH11K_MHI_TRIGGER_RDDM,
ATH11K_MHI_RDDM,
ATH11K_MHI_RDDM_DONE,
};
int ath11k_mhi_start(struct ath11k_pci *ar_pci); int ath11k_mhi_start(struct ath11k_pci *ar_pci);
void ath11k_mhi_stop(struct ath11k_pci *ar_pci); void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
int ath11k_mhi_register(struct ath11k_pci *ar_pci); int ath11k_mhi_register(struct ath11k_pci *ar_pci);
...@@ -36,7 +23,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ar_pci); ...@@ -36,7 +23,7 @@ void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab); void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
void ath11k_mhi_clear_vector(struct ath11k_base *ab); void ath11k_mhi_clear_vector(struct ath11k_base *ab);
void ath11k_mhi_suspend(struct ath11k_pci *ar_pci); int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
void ath11k_mhi_resume(struct ath11k_pci *ar_pci); int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
#endif #endif
// SPDX-License-Identifier: BSD-3-Clause-Clear // SPDX-License-Identifier: BSD-3-Clause-Clear
/* /*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#include <linux/module.h> #include <linux/module.h>
...@@ -13,29 +14,15 @@ ...@@ -13,29 +14,15 @@
#include "hif.h" #include "hif.h"
#include "mhi.h" #include "mhi.h"
#include "debug.h" #include "debug.h"
#include "pcic.h"
#define ATH11K_PCI_BAR_NUM 0 #define ATH11K_PCI_BAR_NUM 0
#define ATH11K_PCI_DMA_MASK 32 #define ATH11K_PCI_DMA_MASK 32
#define ATH11K_PCI_IRQ_CE0_OFFSET 3
#define ATH11K_PCI_IRQ_DP_OFFSET 14
#define WINDOW_ENABLE_BIT 0x40000000
#define WINDOW_REG_ADDRESS 0x310c
#define WINDOW_VALUE_MASK GENMASK(24, 19)
#define WINDOW_START 0x80000
#define WINDOW_RANGE_MASK GENMASK(18, 0)
#define TCSR_SOC_HW_VERSION 0x0224 #define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0) #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
/* BAR0 + 4k is always accessible, and no
* need to force wakeup.
* 4K - 32 = 0xFE0
*/
#define ACCESS_ALWAYS_OFF 0xFE0
#define QCA6390_DEVICE_ID 0x1101 #define QCA6390_DEVICE_ID 0x1101
#define QCN9074_DEVICE_ID 0x1104 #define QCN9074_DEVICE_ID 0x1104
#define WCN6855_DEVICE_ID 0x1103 #define WCN6855_DEVICE_ID 0x1103
...@@ -49,233 +36,126 @@ static const struct pci_device_id ath11k_pci_id_table[] = { ...@@ -49,233 +36,126 @@ static const struct pci_device_id ath11k_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table); MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
static const struct ath11k_bus_params ath11k_pci_bus_params = { static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
.mhi_support = true, {
.m3_fw_support = true, struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
.fixed_bdf_addr = false,
.fixed_mem_region = false,
};
static const struct ath11k_msi_config ath11k_msi_config[] = { return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
{ }
.total_vectors = 32,
.total_users = 4,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
},
},
{
.total_vectors = 16,
.total_users = 3,
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
},
},
};
static const struct ath11k_msi_config msi_config_one_msi = { static void ath11k_pci_bus_release(struct ath11k_base *ab)
.total_vectors = 1, {
.total_users = 4, struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
.users = (struct ath11k_msi_user[]) {
{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
},
};
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
"bhi", }
"mhi-er0",
"mhi-er1",
"ce0",
"ce1",
"ce2",
"ce3",
"ce4",
"ce5",
"ce6",
"ce7",
"ce8",
"ce9",
"ce10",
"ce11",
"host2wbm-desc-feed",
"host2reo-re-injection",
"host2reo-command",
"host2rxdma-monitor-ring3",
"host2rxdma-monitor-ring2",
"host2rxdma-monitor-ring1",
"reo2ost-exception",
"wbm2host-rx-release",
"reo2host-status",
"reo2host-destination-ring4",
"reo2host-destination-ring3",
"reo2host-destination-ring2",
"reo2host-destination-ring1",
"rxdma2host-monitor-destination-mac3",
"rxdma2host-monitor-destination-mac2",
"rxdma2host-monitor-destination-mac1",
"ppdu-end-interrupts-mac3",
"ppdu-end-interrupts-mac2",
"ppdu-end-interrupts-mac1",
"rxdma2host-monitor-status-ring-mac3",
"rxdma2host-monitor-status-ring-mac2",
"rxdma2host-monitor-status-ring-mac1",
"host2rxdma-host-buf-ring-mac3",
"host2rxdma-host-buf-ring-mac2",
"host2rxdma-host-buf-ring-mac1",
"rxdma2host-destination-ring-mac3",
"rxdma2host-destination-ring-mac2",
"rxdma2host-destination-ring-mac1",
"host2tcl-input-ring4",
"host2tcl-input-ring3",
"host2tcl-input-ring2",
"host2tcl-input-ring1",
"wbm2host-tx-completions-ring3",
"wbm2host-tx-completions-ring2",
"wbm2host-tx-completions-ring1",
"tcl2host-status-ring",
};
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset) static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{ {
struct ath11k_base *ab = ab_pci->ab; struct ath11k_base *ab = ab_pci->ab;
u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset); u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);
lockdep_assert_held(&ab_pci->window_lock); lockdep_assert_held(&ab_pci->window_lock);
if (window != ab_pci->register_window) { if (window != ab_pci->register_window) {
iowrite32(WINDOW_ENABLE_BIT | window, iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
ab->mem + WINDOW_REG_ADDRESS); ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
ioread32(ab->mem + WINDOW_REG_ADDRESS); ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
ab_pci->register_window = window; ab_pci->register_window = window;
} }
} }
static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) static void
ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
{ {
u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 ce_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); u32 window_start = ATH11K_PCI_WINDOW_START;
u32 window;
window = (umac_window << 12) | (ce_window << 6);
iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); spin_lock_bh(&ab_pci->window_lock);
ath11k_pci_select_window(ab_pci, offset);
iowrite32(value, ab->mem + window_start +
(offset & ATH11K_PCI_WINDOW_RANGE_MASK));
spin_unlock_bh(&ab_pci->window_lock);
} }
static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab, static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
u32 offset)
{ {
u32 window_start; struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 window_start = ATH11K_PCI_WINDOW_START;
/* If offset lies within DP register range, use 3rd window */ u32 val;
if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
window_start = 3 * WINDOW_START; spin_lock_bh(&ab_pci->window_lock);
/* If offset lies within CE register range, use 2nd window */ ath11k_pci_select_window(ab_pci, offset);
else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) val = ioread32(ab->mem + window_start +
window_start = 2 * WINDOW_START; (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
else spin_unlock_bh(&ab_pci->window_lock);
window_start = WINDOW_START;
return window_start; return val;
} }
void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{ {
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct pci_dev *pci_dev = to_pci_dev(ab->dev);
u32 window_start;
/* for offset beyond BAR + 4K - 32, may return pci_irq_vector(pci_dev, vector);
* need to wakeup MHI to access. }
*/
if (ab->hw_params.wakeup_mhi &&
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
offset >= ACCESS_ALWAYS_OFF)
mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
if (offset < WINDOW_START) { static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
iowrite32(value, ab->mem + offset); .wakeup = ath11k_pci_bus_wake_up,
} else { .release = ath11k_pci_bus_release,
if (ab->bus_params.static_window_map) .get_msi_irq = ath11k_pci_get_msi_irq,
window_start = ath11k_pci_get_window_start(ab, offset); .window_write32 = ath11k_pci_window_write32,
else .window_read32 = ath11k_pci_window_read32,
window_start = WINDOW_START; };
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath11k_pci_select_window(ab_pci, offset);
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
spin_unlock_bh(&ab_pci->window_lock);
} else {
iowrite32(value, ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
}
if (ab->hw_params.wakeup_mhi && static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && .get_msi_irq = ath11k_pci_get_msi_irq,
offset >= ACCESS_ALWAYS_OFF) .window_write32 = ath11k_pci_window_write32,
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); .window_read32 = ath11k_pci_window_read32,
} };
u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) static const struct ath11k_bus_params ath11k_pci_bus_params = {
{ .mhi_support = true,
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); .m3_fw_support = true,
u32 val, window_start; .fixed_bdf_addr = false,
.fixed_mem_region = false,
};
/* for offset beyond BAR + 4K - 32, may static const struct ath11k_msi_config msi_config_one_msi = {
* need to wakeup MHI to access. .total_vectors = 1,
*/ .total_users = 4,
if (ab->hw_params.wakeup_mhi && .users = (struct ath11k_msi_user[]) {
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
offset >= ACCESS_ALWAYS_OFF) { .name = "CE", .num_vectors = 1, .base_vector = 0 },
mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
},
};
if (offset < WINDOW_START) { static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
val = ioread32(ab->mem + offset); {
} else { u32 umac_window;
if (ab->bus_params.static_window_map) u32 ce_window;
window_start = ath11k_pci_get_window_start(ab, offset); u32 window;
else
window_start = WINDOW_START;
if (window_start == WINDOW_START) {
spin_lock_bh(&ab_pci->window_lock);
ath11k_pci_select_window(ab_pci, offset);
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
spin_unlock_bh(&ab_pci->window_lock);
} else {
val = ioread32(ab->mem + window_start +
(offset & WINDOW_RANGE_MASK));
}
}
if (ab->hw_params.wakeup_mhi && umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
offset >= ACCESS_ALWAYS_OFF) window = (umac_window << 12) | (ce_window << 6);
mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
return val; iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
} }
static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{ {
u32 val, delay; u32 val, delay;
val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
val |= PCIE_SOC_GLOBAL_RESET_V; val |= PCIE_SOC_GLOBAL_RESET_V;
ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
/* TODO: exact time to sleep is uncertain */ /* TODO: exact time to sleep is uncertain */
delay = 10; delay = 10;
...@@ -284,11 +164,11 @@ static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) ...@@ -284,11 +164,11 @@ static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
/* Need to toggle V bit back otherwise stuck in reset status */ /* Need to toggle V bit back otherwise stuck in reset status */
val &= ~PCIE_SOC_GLOBAL_RESET_V; val &= ~PCIE_SOC_GLOBAL_RESET_V;
ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
mdelay(delay); mdelay(delay);
val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
if (val == 0xffffffff) if (val == 0xffffffff)
ath11k_warn(ab, "link down error during global reset\n"); ath11k_warn(ab, "link down error during global reset\n");
} }
...@@ -298,10 +178,10 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) ...@@ -298,10 +178,10 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
u32 val; u32 val;
/* read cookie */ /* read cookie */
val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR); val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val); ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);
val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* TODO: exact time to sleep is uncertain */ /* TODO: exact time to sleep is uncertain */
...@@ -310,16 +190,16 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) ...@@ -310,16 +190,16 @@ static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
* continuing warm path and entering dead loop. * continuing warm path and entering dead loop.
*/ */
ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0); ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
mdelay(10); mdelay(10);
val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
/* A read clear register. clear the register to prevent /* A read clear register. clear the register to prevent
* Q6 from entering wrong code path. * Q6 from entering wrong code path.
*/ */
val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG); val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val); ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
} }
...@@ -329,14 +209,14 @@ static int ath11k_pci_set_link_reg(struct ath11k_base *ab, ...@@ -329,14 +209,14 @@ static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
u32 v; u32 v;
int i; int i;
v = ath11k_pci_read32(ab, offset); v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value) if ((v & mask) == value)
return 0; return 0;
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
ath11k_pci_write32(ab, offset, (v & ~mask) | value); ath11k_pcic_write32(ab, offset, (v & ~mask) | value);
v = ath11k_pci_read32(ab, offset); v = ath11k_pcic_read32(ab, offset);
if ((v & mask) == value) if ((v & mask) == value)
return 0; return 0;
...@@ -397,23 +277,23 @@ static void ath11k_pci_enable_ltssm(struct ath11k_base *ab) ...@@ -397,23 +277,23 @@ static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
u32 val; u32 val;
int i; int i;
val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
/* PCIE link seems very unstable after the Hot Reset*/ /* PCIE link seems very unstable after the Hot Reset*/
for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
if (val == 0xffffffff) if (val == 0xffffffff)
mdelay(5); mdelay(5);
ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
} }
ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val); ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val);
val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
val |= GCC_GCC_PCIE_HOT_RST_VAL; val |= GCC_GCC_PCIE_HOT_RST_VAL;
ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
...@@ -427,21 +307,21 @@ static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab) ...@@ -427,21 +307,21 @@ static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
* So when download SBL again, SBL will open Interrupt and * So when download SBL again, SBL will open Interrupt and
* receive it, and crash immediately. * receive it, and crash immediately.
*/ */
ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
} }
static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab) static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
{ {
u32 val; u32 val;
val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
} }
static void ath11k_pci_force_wake(struct ath11k_base *ab) static void ath11k_pci_force_wake(struct ath11k_base *ab)
{ {
ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
mdelay(5); mdelay(5);
} }
...@@ -463,463 +343,6 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on) ...@@ -463,463 +343,6 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
ath11k_mhi_set_mhictrl_reset(ab); ath11k_mhi_set_mhictrl_reset(ab);
} }
int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
return pci_irq_vector(pci_dev, vector);
}
static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
u32 *msi_addr_hi)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
struct pci_dev *pci_dev = to_pci_dev(ab->dev);
pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
msi_addr_lo);
if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
msi_addr_hi);
} else {
*msi_addr_hi = 0;
}
}
int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector)
{
struct ath11k_base *ab = ab_pci->ab;
const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
int idx;
for (idx = 0; idx < msi_config->total_users; idx++) {
if (strcmp(user_name, msi_config->users[idx].name) == 0) {
*num_vectors = msi_config->users[idx].num_vectors;
*base_vector = msi_config->users[idx].base_vector;
*user_base_data = *base_vector + ab_pci->msi_ep_base_data;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
user_name, *num_vectors, *user_base_data,
*base_vector);
return 0;
}
}
ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
return -EINVAL;
}
static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
u32 *msi_idx)
{
u32 i, msi_data_idx;
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
if (ce_id == i)
break;
msi_data_idx++;
}
*msi_idx = msi_data_idx;
}
static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
int *num_vectors, u32 *user_base_data,
u32 *base_vector)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
num_vectors, user_base_data,
base_vector);
}
static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
{
int i, j;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
}
}
static void ath11k_pci_free_irq(struct ath11k_base *ab)
{
int i, irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
}
ath11k_pci_free_ext_irq(ab);
}
static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
enable_irq(ab->irq_num[irq_idx]);
}
static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
u32 irq_idx;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
disable_irq_nosync(ab->irq_num[irq_idx]);
}
static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pci_ce_irq_disable(ab, i);
}
}
static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
{
int i;
int irq_idx;
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
synchronize_irq(ab->irq_num[irq_idx]);
}
}
static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
{
struct ath11k_ce_pipe *ce_pipe = arg;
struct ath11k_base *ab = ce_pipe->ab;
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
/* last interrupt received for this CE */
ce_pipe->timestamp = jiffies;
disable_irq_nosync(ab->irq_num[irq_idx]);
tasklet_schedule(&ce_pipe->intr_tq);
return IRQ_HANDLED;
}
static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
int i;
/* In case of one MSI vector, we handle irq enable/disable
* in a uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
{
int i;
clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
ath11k_pci_ext_grp_disable(irq_grp);
if (irq_grp->napi_enabled) {
napi_synchronize(&irq_grp->napi);
napi_disable(&irq_grp->napi);
irq_grp->napi_enabled = false;
}
}
}
static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(irq_grp->ab);
int i;
/* In case of one MSI vector, we handle irq enable/disable in a
* uniform way since we only have one irq
*/
if (!test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return;
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
if (!irq_grp->napi_enabled) {
napi_enable(&irq_grp->napi);
irq_grp->napi_enabled = true;
}
ath11k_pci_ext_grp_enable(irq_grp);
}
}
static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
{
int i, j, irq_idx;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
for (j = 0; j < irq_grp->num_irq; j++) {
irq_idx = irq_grp->irqs[j];
synchronize_irq(ab->irq_num[irq_idx]);
}
}
}
static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
{
__ath11k_pci_ext_irq_disable(ab);
ath11k_pci_sync_ext_irqs(ab);
}
static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
struct ath11k_ext_irq_grp,
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
int i;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
work_done = budget;
return work_done;
}
static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
{
struct ath11k_ext_irq_grp *irq_grp = arg;
struct ath11k_base *ab = irq_grp->ab;
int i;
if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
return IRQ_HANDLED;
ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
/* last interrupt received for this group */
irq_grp->timestamp = jiffies;
for (i = 0; i < irq_grp->num_irq; i++)
disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
napi_schedule(&irq_grp->napi);
return IRQ_HANDLED;
}
static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
int i, j, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
&num_vectors,
&user_base_data,
&base_vector);
if (ret < 0)
return ret;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
init_dummy_netdev(&irq_grp->napi_ndev);
netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
if (ab->hw_params.ring_mask->tx[i] ||
ab->hw_params.ring_mask->rx[i] ||
ab->hw_params.ring_mask->rx_err[i] ||
ab->hw_params.ring_mask->rx_wbm_rel[i] ||
ab->hw_params.ring_mask->reo_status[i] ||
ab->hw_params.ring_mask->rxdma2host[i] ||
ab->hw_params.ring_mask->host2rxdma[i] ||
ab->hw_params.ring_mask->rx_mon_status[i]) {
num_irq = 1;
}
irq_grp->num_irq = num_irq;
irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
for (j = 0; j < irq_grp->num_irq; j++) {
int irq_idx = irq_grp->irqs[j];
int vector = (i % num_vectors) + base_vector;
int irq = ath11k_pci_get_msi_irq(ab->dev, vector);
ab->irq_num[irq_idx] = irq;
ath11k_dbg(ab, ATH11K_DBG_PCI,
"irq:%d group:%d\n", irq, i);
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
ab_pci->irq_flags,
"DP_EXT_IRQ", irq_grp);
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
return ret;
}
}
ath11k_pci_ext_grp_disable(irq_grp);
}
return 0;
}
static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
const struct cpumask *m)
{
if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
return 0;
return irq_set_affinity_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_config_irq(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
struct ath11k_ce_pipe *ce_pipe;
u32 msi_data_start;
u32 msi_data_count, msi_data_idx;
u32 msi_irq_start;
unsigned int msi_data;
int irq, i, ret, irq_idx;
ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
"CE", &msi_data_count,
&msi_data_start, &msi_irq_start);
if (ret)
return ret;
ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
if (ret) {
ath11k_err(ab, "failed to set irq affinity %d\n", ret);
return ret;
}
/* Configure CE irqs */
for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
ce_pipe = &ab->ce.ce_pipe[i];
irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);
ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
ab_pci->irq_flags, irq_name[irq_idx],
ce_pipe);
if (ret) {
ath11k_err(ab, "failed to request irq %d: %d\n",
irq_idx, ret);
goto err_irq_affinity_cleanup;
}
ab->irq_num[irq_idx] = irq;
msi_data_idx++;
ath11k_pci_ce_irq_disable(ab, i);
}
ret = ath11k_pci_ext_irq_config(ab);
if (ret)
goto err_irq_affinity_cleanup;
return 0;
err_irq_affinity_cleanup:
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
return ret;
}
static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
{ {
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
...@@ -935,19 +358,6 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) ...@@ -935,19 +358,6 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
&cfg->shadow_reg_v2_len); &cfg->shadow_reg_v2_len);
} }
static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_pci_ce_irq_enable(ab, i);
}
}
static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable) static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
{ {
struct pci_dev *dev = ab_pci->pdev; struct pci_dev *dev = ab_pci->pdev;
...@@ -976,18 +386,18 @@ static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci) ...@@ -976,18 +386,18 @@ static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
{ {
struct ath11k_base *ab = ab_pci->ab; struct ath11k_base *ab = ab_pci->ab;
const struct ath11k_msi_config *msi_config = ab_pci->msi_config; const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
struct pci_dev *pci_dev = ab_pci->pdev;
struct msi_desc *msi_desc; struct msi_desc *msi_desc;
int num_vectors; int num_vectors;
int ret; int ret;
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, num_vectors = pci_alloc_irq_vectors(pci_dev,
msi_config->total_vectors, msi_config->total_vectors,
msi_config->total_vectors, msi_config->total_vectors,
PCI_IRQ_MSI); PCI_IRQ_MSI);
if (num_vectors == msi_config->total_vectors) { if (num_vectors == msi_config->total_vectors) {
set_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
ab_pci->irq_flags = IRQF_SHARED;
} else { } else {
num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
1, 1,
...@@ -997,9 +407,8 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) ...@@ -997,9 +407,8 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
ret = -EINVAL; ret = -EINVAL;
goto reset_msi_config; goto reset_msi_config;
} }
clear_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
ab_pci->msi_config = &msi_config_one_msi; ab->pci.msi.config = &msi_config_one_msi;
ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n"); ath11k_dbg(ab, ATH11K_DBG_PCI, "request MSI one vector\n");
} }
ath11k_info(ab, "MSI vectors: %d\n", num_vectors); ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
...@@ -1013,11 +422,19 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) ...@@ -1013,11 +422,19 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
goto free_msi_vector; goto free_msi_vector;
} }
ab_pci->msi_ep_base_data = msi_desc->msg.data; ab->pci.msi.ep_base_data = msi_desc->msg.data;
if (msi_desc->pci.msi_attrib.is_64)
set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
&ab->pci.msi.addr_lo);
if (msi_desc->pci.msi_attrib.is_64) {
pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
&ab->pci.msi.addr_hi);
} else {
ab->pci.msi.addr_hi = 0;
}
ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);
return 0; return 0;
...@@ -1044,10 +461,10 @@ static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci) ...@@ -1044,10 +461,10 @@ static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
return -EINVAL; return -EINVAL;
} }
ab_pci->msi_ep_base_data = msi_desc->msg.data; ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;
ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n", ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
ab_pci->msi_ep_base_data); ab_pci->ab->pci.msi.ep_base_data);
return 0; return 0;
} }
...@@ -1160,7 +577,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab) ...@@ -1160,7 +577,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
int ret; int ret;
ab_pci->register_window = 0; ab_pci->register_window = 0;
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, true); ath11k_pci_sw_reset(ab_pci->ab, true);
/* Disable ASPM during firmware download due to problems switching /* Disable ASPM during firmware download due to problems switching
...@@ -1194,7 +611,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab) ...@@ -1194,7 +611,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci); ath11k_pci_msi_disable(ab_pci);
ath11k_mhi_stop(ab_pci); ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false); ath11k_pci_sw_reset(ab_pci->ab, false);
} }
...@@ -1202,144 +619,67 @@ static int ath11k_pci_hif_suspend(struct ath11k_base *ab) ...@@ -1202,144 +619,67 @@ static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{ {
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
ath11k_mhi_suspend(ar_pci); return ath11k_mhi_suspend(ar_pci);
return 0;
} }
static int ath11k_pci_hif_resume(struct ath11k_base *ab) static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{ {
struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
ath11k_mhi_resume(ar_pci); return ath11k_mhi_resume(ar_pci);
return 0;
} }
static void ath11k_pci_kill_tasklets(struct ath11k_base *ab) static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
}
}
static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab)
{ {
ath11k_pci_ce_irqs_disable(ab); ath11k_pcic_ce_irqs_enable(ab);
ath11k_pci_sync_ce_irqs(ab);
ath11k_pci_kill_tasklets(ab);
} }
static void ath11k_pci_stop(struct ath11k_base *ab) static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{ {
ath11k_pci_ce_irq_disable_sync(ab); ath11k_pcic_ce_irq_disable_sync(ab);
ath11k_ce_cleanup_pipes(ab);
} }
static int ath11k_pci_start(struct ath11k_base *ab) static int ath11k_pci_start(struct ath11k_base *ab)
{ {
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
/* TODO: for now don't restore ASPM in case of single MSI /* TODO: for now don't restore ASPM in case of single MSI
* vector as MHI register reading in M2 causes system hang. * vector as MHI register reading in M2 causes system hang.
*/ */
if (test_bit(ATH11K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
ath11k_pci_aspm_restore(ab_pci); ath11k_pci_aspm_restore(ab_pci);
else else
ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
ath11k_pci_ce_irqs_enable(ab); ath11k_pcic_start(ab);
ath11k_ce_rx_post_buf(ab);
return 0;
}
static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
ath11k_pci_ce_irqs_enable(ab);
}
static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
ath11k_pci_ce_irq_disable_sync(ab);
}
static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
int i;
for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
entry = &ab->hw_params.svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
switch (__le32_to_cpu(entry->pipedir)) {
case PIPEDIR_NONE:
break;
case PIPEDIR_IN:
WARN_ON(dl_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
break;
case PIPEDIR_OUT:
WARN_ON(ul_set);
*ul_pipe = __le32_to_cpu(entry->pipenum);
ul_set = true;
break;
case PIPEDIR_INOUT:
WARN_ON(dl_set);
WARN_ON(ul_set);
*dl_pipe = __le32_to_cpu(entry->pipenum);
*ul_pipe = __le32_to_cpu(entry->pipenum);
dl_set = true;
ul_set = true;
break;
}
}
if (WARN_ON(!ul_set || !dl_set))
return -ENOENT;
return 0; return 0;
} }
static const struct ath11k_hif_ops ath11k_pci_hif_ops = { static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.start = ath11k_pci_start, .start = ath11k_pci_start,
.stop = ath11k_pci_stop, .stop = ath11k_pcic_stop,
.read32 = ath11k_pci_read32, .read32 = ath11k_pcic_read32,
.write32 = ath11k_pci_write32, .write32 = ath11k_pcic_write32,
.power_down = ath11k_pci_power_down, .power_down = ath11k_pci_power_down,
.power_up = ath11k_pci_power_up, .power_up = ath11k_pci_power_up,
.suspend = ath11k_pci_hif_suspend, .suspend = ath11k_pci_hif_suspend,
.resume = ath11k_pci_hif_resume, .resume = ath11k_pci_hif_resume,
.irq_enable = ath11k_pci_ext_irq_enable, .irq_enable = ath11k_pcic_ext_irq_enable,
.irq_disable = ath11k_pci_ext_irq_disable, .irq_disable = ath11k_pcic_ext_irq_disable,
.get_msi_address = ath11k_pci_get_msi_address, .get_msi_address = ath11k_pcic_get_msi_address,
.get_user_msi_vector = ath11k_get_user_msi_assignment, .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
.map_service_to_pipe = ath11k_pci_map_service_to_pipe, .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
.ce_irq_enable = ath11k_pci_hif_ce_irq_enable, .ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath11k_pci_hif_ce_irq_disable, .ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath11k_pci_get_ce_msi_idx, .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
}; };
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor) static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
{ {
u32 soc_hw_version; u32 soc_hw_version;
soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION); soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
soc_hw_version); soc_hw_version);
*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
...@@ -1349,6 +689,15 @@ static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 * ...@@ -1349,6 +689,15 @@ static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *
*major, *minor); *major, *minor);
} }
static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
const struct cpumask *m)
{
if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
return 0;
return irq_set_affinity_hint(ab_pci->pdev->irq, m);
}
static int ath11k_pci_probe(struct pci_dev *pdev, static int ath11k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev) const struct pci_device_id *pci_dev)
{ {
...@@ -1411,11 +760,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ...@@ -1411,11 +760,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto err_pci_free_region; goto err_pci_free_region;
} }
ab_pci->msi_config = &ath11k_msi_config[0];
ab->pci.ops = &ath11k_pci_ops_qca6390;
break; break;
case QCN9074_DEVICE_ID: case QCN9074_DEVICE_ID:
ab_pci->msi_config = &ath11k_msi_config[1];
ab->bus_params.static_window_map = true; ab->bus_params.static_window_map = true;
ab->pci.ops = &ath11k_pci_ops_qcn9074;
ab->hw_rev = ATH11K_HW_QCN9074_HW10; ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break; break;
case WCN6855_DEVICE_ID: case WCN6855_DEVICE_ID:
...@@ -1444,7 +794,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ...@@ -1444,7 +794,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ret = -EOPNOTSUPP; ret = -EOPNOTSUPP;
goto err_pci_free_region; goto err_pci_free_region;
} }
ab_pci->msi_config = &ath11k_msi_config[0];
ab->pci.ops = &ath11k_pci_ops_qca6390;
break; break;
default: default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
...@@ -1453,6 +804,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ...@@ -1453,6 +804,12 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region; goto err_pci_free_region;
} }
ret = ath11k_pcic_init_msi_config(ab);
if (ret) {
ath11k_err(ab, "failed to init msi config: %d\n", ret);
goto err_pci_free_region;
}
ret = ath11k_pci_alloc_msi(ab_pci); ret = ath11k_pci_alloc_msi(ab_pci);
if (ret) { if (ret) {
ath11k_err(ab, "failed to enable msi: %d\n", ret); ath11k_err(ab, "failed to enable msi: %d\n", ret);
...@@ -1481,12 +838,18 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ...@@ -1481,12 +838,18 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ath11k_pci_init_qmi_ce_config(ab); ath11k_pci_init_qmi_ce_config(ab);
ret = ath11k_pci_config_irq(ab); ret = ath11k_pcic_config_irq(ab);
if (ret) { if (ret) {
ath11k_err(ab, "failed to config irq: %d\n", ret); ath11k_err(ab, "failed to config irq: %d\n", ret);
goto err_ce_free; goto err_ce_free;
} }
ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
if (ret) {
ath11k_err(ab, "failed to set irq affinity %d\n", ret);
goto err_free_irq;
}
/* kernel may allocate a dummy vector before request_irq and /* kernel may allocate a dummy vector before request_irq and
* then allocate a real vector when request_irq is called. * then allocate a real vector when request_irq is called.
* So get msi_data here again to avoid spurious interrupt * So get msi_data here again to avoid spurious interrupt
...@@ -1495,18 +858,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev, ...@@ -1495,18 +858,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ret = ath11k_pci_config_msi_data(ab_pci); ret = ath11k_pci_config_msi_data(ab_pci);
if (ret) { if (ret) {
ath11k_err(ab, "failed to config msi_data: %d\n", ret); ath11k_err(ab, "failed to config msi_data: %d\n", ret);
goto err_free_irq; goto err_irq_affinity_cleanup;
} }
ret = ath11k_core_init(ab); ret = ath11k_core_init(ab);
if (ret) { if (ret) {
ath11k_err(ab, "failed to init core: %d\n", ret); ath11k_err(ab, "failed to init core: %d\n", ret);
goto err_free_irq; goto err_irq_affinity_cleanup;
} }
return 0; return 0;
err_irq_affinity_cleanup:
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
err_free_irq: err_free_irq:
ath11k_pci_free_irq(ab); ath11k_pcic_free_irq(ab);
err_ce_free: err_ce_free:
ath11k_ce_free_pipes(ab); ath11k_ce_free_pipes(ab);
...@@ -1550,7 +916,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev) ...@@ -1550,7 +916,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
qmi_fail: qmi_fail:
ath11k_mhi_unregister(ab_pci); ath11k_mhi_unregister(ab_pci);
ath11k_pci_free_irq(ab); ath11k_pcic_free_irq(ab);
ath11k_pci_free_msi(ab_pci); ath11k_pci_free_msi(ab_pci);
ath11k_pci_free_region(ab_pci); ath11k_pci_free_region(ab_pci);
......
/* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* /*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#ifndef _ATH11K_PCI_H #ifndef _ATH11K_PCI_H
#define _ATH11K_PCI_H #define _ATH11K_PCI_H
...@@ -52,23 +53,8 @@ ...@@ -52,23 +53,8 @@
#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c #define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4 #define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
struct ath11k_msi_user {
char *name;
int num_vectors;
u32 base_vector;
};
struct ath11k_msi_config {
int total_vectors;
int total_users;
struct ath11k_msi_user *users;
};
enum ath11k_pci_flags { enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
ATH11K_PCI_ASPM_RESTORE, ATH11K_PCI_ASPM_RESTORE,
ATH11K_PCI_FLAG_MULTI_MSI_VECTORS,
}; };
struct ath11k_pci { struct ath11k_pci {
...@@ -76,10 +62,8 @@ struct ath11k_pci { ...@@ -76,10 +62,8 @@ struct ath11k_pci {
struct ath11k_base *ab; struct ath11k_base *ab;
u16 dev_id; u16 dev_id;
char amss_path[100]; char amss_path[100];
u32 msi_ep_base_data;
struct mhi_controller *mhi_ctrl; struct mhi_controller *mhi_ctrl;
const struct ath11k_msi_config *msi_config; const struct ath11k_msi_config *msi_config;
unsigned long mhi_state;
u32 register_window; u32 register_window;
/* protects register_window above */ /* protects register_window above */
...@@ -88,8 +72,6 @@ struct ath11k_pci { ...@@ -88,8 +72,6 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */ /* enum ath11k_pci_flags */
unsigned long flags; unsigned long flags;
u16 link_ctl; u16 link_ctl;
unsigned long irq_flags;
}; };
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab) static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
...@@ -97,11 +79,5 @@ static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab) ...@@ -97,11 +79,5 @@ static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
return (struct ath11k_pci *)ab->drv_priv; return (struct ath11k_pci *)ab->drv_priv;
} }
int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name, int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector);
int *num_vectors, u32 *user_base_data,
u32 *base_vector);
int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector);
void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value);
u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset);
#endif #endif
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "pcic.h"
#include "debug.h"
/* Human-readable names for the host interrupt lines, indexed by the
 * logical irq index used throughout this file (ATH11K_PCI_IRQ_* offsets).
 * NOTE(review): ordering is positional - the "ce*" entries must line up
 * with ATH11K_PCI_IRQ_CE0_OFFSET and the DP entries with
 * ATH11K_PCI_IRQ_DP_OFFSET; verify before reordering.
 */
static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
/* Per-chip MSI vector layout: total number of MSI vectors the device
 * exposes and how they are partitioned between the "MHI", "CE", "WAKE"
 * and "DP" users.  Selected by hw_rev in ath11k_pcic_init_msi_config().
 */
static const struct ath11k_msi_config ath11k_msi_config[] = {
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_QCA6390_HW20,
	},
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
		.hw_rev = ATH11K_HW_QCN9074_HW10,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW20,
	},
	{
		.total_vectors = 32,
		.total_users = 4,
		.users = (struct ath11k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
		},
		.hw_rev = ATH11K_HW_WCN6855_HW21,
	},
};
/* Select the MSI vector layout matching this device's hw revision and
 * cache it in ab->pci.msi.config.  Returns 0 on success, -EINVAL when
 * the hw revision has no known layout.
 */
int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
		if (ath11k_msi_config[i].hw_rev == ab->hw_rev) {
			ab->pci.msi.config = &ath11k_msi_config[i];
			return 0;
		}
	}

	ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
		   ab->hw_rev);
	return -EINVAL;
}
EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
/* Map a register offset onto its static BAR window base: DP registers
 * use the third window, CE registers the second, everything else the
 * first.
 */
static inline u32 ath11k_pcic_get_window_start(struct ath11k_base *ab,
					       u32 offset)
{
	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
		return 3 * ATH11K_PCI_WINDOW_START;

	if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < ATH11K_PCI_WINDOW_RANGE_MASK)
		return 2 * ATH11K_PCI_WINDOW_START;

	return ATH11K_PCI_WINDOW_START;
}
/* Write a 32-bit device register, waking the device first when the
 * offset lies outside the always-accessible region and the bus provides
 * a wakeup op.
 */
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	bool needs_wakeup;
	int ret = 0;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	needs_wakeup = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
		       offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (needs_wakeup && ab->pci.ops->wakeup)
		ret = ab->pci.ops->wakeup(ab);

	if (offset < ATH11K_PCI_WINDOW_START) {
		iowrite32(value, ab->mem + offset);
	} else {
		u32 window_start = ab->bus_params.static_window_map ?
				   ath11k_pcic_get_window_start(ab, offset) :
				   ATH11K_PCI_WINDOW_START;

		if (window_start == ATH11K_PCI_WINDOW_START &&
		    ab->pci.ops->window_write32)
			ab->pci.ops->window_write32(ab, offset, value);
		else
			iowrite32(value, ab->mem + window_start +
				  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
	}

	/* release pairs with a successful (or absent) wakeup only */
	if (needs_wakeup && ab->pci.ops->release && !ret)
		ab->pci.ops->release(ab);
}
/* Read a 32-bit device register, waking the device first when the
 * offset lies outside the always-accessible region and the bus provides
 * a wakeup op.
 */
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
{
	bool needs_wakeup;
	int ret = 0;
	u32 val;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup the device to access.
	 */
	needs_wakeup = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
		       offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
	if (needs_wakeup && ab->pci.ops->wakeup)
		ret = ab->pci.ops->wakeup(ab);

	if (offset < ATH11K_PCI_WINDOW_START) {
		val = ioread32(ab->mem + offset);
	} else {
		u32 window_start = ab->bus_params.static_window_map ?
				   ath11k_pcic_get_window_start(ab, offset) :
				   ATH11K_PCI_WINDOW_START;

		if (window_start == ATH11K_PCI_WINDOW_START &&
		    ab->pci.ops->window_read32)
			val = ab->pci.ops->window_read32(ab, offset);
		else
			val = ioread32(ab->mem + window_start +
				       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
	}

	/* release pairs with a successful (or absent) wakeup only */
	if (needs_wakeup && ab->pci.ops->release && !ret)
		ab->pci.ops->release(ab);

	return val;
}
/* Report the MSI target address captured when MSI vectors were allocated */
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				 u32 *msi_addr_hi)
{
	*msi_addr_hi = ab->pci.msi.addr_hi;
	*msi_addr_lo = ab->pci.msi.addr_lo;
}
/* Look up the MSI vector range owned by @user_name ("MHI", "CE", "DP", ...)
 * in the active MSI config.  Returns 0 and fills the out parameters on
 * success, -EINVAL when the user is unknown.
 */
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
					int *num_vectors, u32 *user_base_data,
					u32 *base_vector)
{
	const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
	const struct ath11k_msi_user *user;
	int idx;

	for (idx = 0; idx < msi_config->total_users; idx++) {
		user = &msi_config->users[idx];
		if (strcmp(user_name, user->name) != 0)
			continue;

		*num_vectors = user->num_vectors;
		*base_vector = user->base_vector;
		/* user-visible vector numbers are offset by the endpoint base */
		*user_base_data = *base_vector + ab->pci.msi.ep_base_data;

		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
			   user_name, *num_vectors, *user_base_data,
			   *base_vector);

		return 0;
	}

	ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}
/* Translate a CE pipe id into its MSI vector index.  Vectors are handed
 * out only to interrupt-capable CEs, so pipes flagged CE_ATTR_DIS_INTR
 * are skipped when counting.
 */
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
{
	u32 id;
	u32 idx = 0;

	for (id = 0; id < ab->hw_params.ce_count; id++) {
		if (ath11k_ce_get_attr_flags(ab, id) & CE_ATTR_DIS_INTR)
			continue;

		if (id == ce_id)
			break;

		idx++;
	}

	*msi_idx = idx;
}
/* Free every DP group irq and delete the per-group NAPI contexts */
static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
{
	struct ath11k_ext_irq_grp *irq_grp;
	int grp;
	int n;

	for (grp = 0; grp < ATH11K_EXT_IRQ_GRP_NUM_MAX; grp++) {
		irq_grp = &ab->ext_irq_grp[grp];

		for (n = 0; n < irq_grp->num_irq; n++)
			free_irq(ab->irq_num[irq_grp->irqs[n]], irq_grp);

		netif_napi_del(&irq_grp->napi);
	}
}
/* Free all CE irqs (interrupt-capable pipes only) followed by the DP
 * group irqs.
 */
void ath11k_pcic_free_irq(struct ath11k_base *ab)
{
	int ce_id;

	for (ce_id = 0; ce_id < ab->hw_params.ce_count; ce_id++) {
		if (ath11k_ce_get_attr_flags(ab, ce_id) & CE_ATTR_DIS_INTR)
			continue;

		free_irq(ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ce_id],
			 &ab->ce.ce_pipe[ce_id]);
	}

	ath11k_pcic_free_ext_irq(ab);
}
static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	enable_irq(ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ce_id]);
}
static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	disable_irq_nosync(ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ce_id]);
}
/* Clear the CE-irq-enabled flag and mask every interrupt-capable CE irq */
static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
{
	int ce_id;

	clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (ce_id = 0; ce_id < ab->hw_params.ce_count; ce_id++) {
		if (!(ath11k_ce_get_attr_flags(ab, ce_id) & CE_ATTR_DIS_INTR))
			ath11k_pcic_ce_irq_disable(ab, ce_id);
	}
}
/* Wait for any in-flight CE hard irq handlers to finish */
static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
{
	int ce_id;

	for (ce_id = 0; ce_id < ab->hw_params.ce_count; ce_id++) {
		if (ath11k_ce_get_attr_flags(ab, ce_id) & CE_ATTR_DIS_INTR)
			continue;

		synchronize_irq(ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ce_id]);
	}
}
static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
{
struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
/* Hard irq handler for a CE pipe: mask the line and defer servicing to
 * the per-pipe tasklet.  Interrupts arriving while CE irqs are flagged
 * disabled are acknowledged and dropped.
 */
static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;
	struct ath11k_base *ab = ce_pipe->ab;

	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	disable_irq_nosync(ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET +
				       ce_pipe->pipe_num]);
	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}
static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	int n;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &irq_grp->ab->dev_flags))
		return;

	for (n = 0; n < irq_grp->num_irq; n++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[n]]);
}
/* Clear the ext-irq-enabled flag, mask every DP group irq and quiesce
 * each group's NAPI context.
 */
static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc)
{
	struct ath11k_ext_irq_grp *irq_grp;
	int grp;

	clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);

	for (grp = 0; grp < ATH11K_EXT_IRQ_GRP_NUM_MAX; grp++) {
		irq_grp = &sc->ext_irq_grp[grp];

		ath11k_pcic_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}
static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	int n;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &irq_grp->ab->dev_flags))
		return;

	for (n = 0; n < irq_grp->num_irq; n++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[n]]);
}
/* Enable DP interrupt processing: start each group's NAPI context (once)
 * and unmask its irqs.
 */
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
{
	struct ath11k_ext_irq_grp *irq_grp;
	int grp;

	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (grp = 0; grp < ATH11K_EXT_IRQ_GRP_NUM_MAX; grp++) {
		irq_grp = &ab->ext_irq_grp[grp];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}

		ath11k_pcic_ext_grp_enable(irq_grp);
	}
}
/* Wait for any in-flight DP group hard irq handlers to finish */
static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
{
	struct ath11k_ext_irq_grp *irq_grp;
	int grp;
	int n;

	for (grp = 0; grp < ATH11K_EXT_IRQ_GRP_NUM_MAX; grp++) {
		irq_grp = &ab->ext_irq_grp[grp];

		for (n = 0; n < irq_grp->num_irq; n++)
			synchronize_irq(ab->irq_num[irq_grp->irqs[n]]);
	}
}
/* Disable DP interrupt processing: mask the group irqs and stop NAPI,
 * then wait for any handlers still running to complete.
 */
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
struct ath11k_ext_irq_grp,
napi);
struct ath11k_base *ab = irq_grp->ab;
int work_done;
int i;
work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
for (i = 0; i < irq_grp->num_irq; i++)
enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
if (work_done > budget)
work_done = budget;
return work_done;
}
/* Hard irq handler for a DP interrupt group: mask the whole group and
 * hand processing over to NAPI.  Interrupts arriving while ext irqs are
 * flagged disabled are acknowledged and dropped.
 */
static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;
	struct ath11k_base *ab = irq_grp->ab;
	int n;

	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	for (n = 0; n < irq_grp->num_irq; n++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[n]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
/* Translate an MSI vector number into a Linux irq number via the
 * bus-specific get_msi_irq op; -EOPNOTSUPP when the op is missing.
 */
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	if (ab->pci.ops->get_msi_irq)
		return ab->pci.ops->get_msi_irq(ab, vector);

	WARN_ONCE(1, "get_msi_irq pci op not defined");
	return -EOPNOTSUPP;
}
/* Set up the DP ("ext") interrupt groups: one NAPI context per group and
 * one MSI vector per group, assigned round-robin over the "DP" MSI user's
 * vector range.  Each group irq is requested and then left disabled until
 * ath11k_pcic_ext_irq_enable().
 *
 * NOTE(review): on request_irq() failure the irqs registered by earlier
 * iterations are not freed here - presumably the caller's error path
 * handles cleanup; verify against the probe unwind sequence.
 */
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;
	unsigned long irq_flags;

	/* how many vectors does the "DP" user own, and where do they start */
	ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
						  &user_base_data,
						  &base_vector);
	if (ret < 0)
		return ret;

	irq_flags = IRQF_SHARED;
	/* single shared MSI vector must not be migrated between CPUs */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		/* NAPI requires a netdev; each group gets a dummy one */
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pcic_ext_grp_napi_poll, NAPI_POLL_WEIGHT);

		/* a group needs an irq only if some ring mask routes work to it */
		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			/* groups are spread round-robin over the DP vectors */
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pcic_get_msi_irq(ab, vector);

			if (irq < 0)
				return irq;

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);

			/* make disable_irq_nosync() mask at the chip immediately */
			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
					  irq_flags, "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				return ret;
			}
		}
		/* leave the group masked until the irq_enable hif op runs */
		ath11k_pcic_ext_grp_disable(irq_grp);
	}

	return 0;
}
/* Request one MSI vector per interrupt-capable CE pipe, then configure the
 * DP ("ext") interrupt groups.  CE irqs are registered but immediately
 * disabled; they are unmasked later via the ce_irq_enable hif op.
 *
 * NOTE(review): on request_irq() failure the irqs registered by earlier
 * iterations are not freed here - presumably the caller's error path
 * handles cleanup; verify against the probe unwind sequence.
 */
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;
	unsigned long irq_flags;

	/* how many vectors does the "CE" user own, and where do they start */
	ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
						  &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	irq_flags = IRQF_SHARED;
	/* single shared MSI vector must not be migrated between CPUs */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	/* Configure CE irqs */
	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		/* msi_data_idx counts only interrupt-capable CEs, wrapped
		 * round-robin over the CE vector range
		 */
		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath11k_pcic_get_msi_irq(ab, msi_data);
		if (irq < 0)
			return irq;

		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);

		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
				  irq_flags, irq_name[irq_idx], ce_pipe);
		if (ret) {
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		/* keep the irq masked until the ce_irq_enable hif op runs */
		ath11k_pcic_ce_irq_disable(ab, i);
	}

	ret = ath11k_pcic_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
/* Unmask the per-pipe interrupt of every copy engine that has interrupts
 * enabled in its attributes, and record the enabled state in dev_flags.
 */
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
{
	int ce_id;

	set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (ce_id = 0; ce_id < ab->hw_params.ce_count; ce_id++) {
		/* Pipes flagged CE_ATTR_DIS_INTR never get an IRQ */
		if (!(ath11k_ce_get_attr_flags(ab, ce_id) & CE_ATTR_DIS_INTR))
			ath11k_pcic_ce_irq_enable(ab, ce_id);
	}
}
/* Kill the completion tasklet of every interrupt-enabled CE pipe,
 * waiting for any currently-running instance to finish.
 */
static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
{
	int ce_id;

	for (ce_id = 0; ce_id < ab->hw_params.ce_count; ce_id++) {
		struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

		/* No tasklet was ever set up for interrupt-disabled pipes */
		if (!(ath11k_ce_get_attr_flags(ab, ce_id) & CE_ATTR_DIS_INTR))
			tasklet_kill(&pipe->intr_tq);
	}
}
/* Quiesce all CE interrupt processing.
 *
 * Order matters: mask the CE IRQs first so nothing new fires, then
 * synchronize (presumably waits for in-flight handlers — confirm in
 * ath11k_pcic_sync_ce_irqs), and finally kill the tasklets so no
 * deferred work runs after this returns.
 */
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_disable(ab);
	ath11k_pcic_sync_ce_irqs(ab);
	ath11k_pcic_kill_tasklets(ab);
}
/* Stop the PCI core: fully quiesce CE interrupts and tasklets before
 * cleaning up the copy engine pipes, so no handler touches a freed pipe.
 */
void ath11k_pcic_stop(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}
/* Start the PCI core: mark device init as done, unmask the CE
 * interrupts, then post the initial RX buffers to the CE rings.
 * Always returns 0 (kept int for the HIF ops signature).
 */
int ath11k_pcic_start(struct ath11k_base *ab)
{
	set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);

	ath11k_pcic_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}
/* Resolve the uplink (host->target) and downlink (target->host) CE pipe
 * numbers for a given HTC service by scanning the hw_params service map.
 *
 * Warns if the map assigns the same direction twice for one service.
 * Returns 0 when both directions were found, -ENOENT otherwise.
 */
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
				    u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *map;
	bool found_ul = false, found_dl = false;
	u32 dir, pipenum;
	int idx;

	for (idx = 0; idx < ab->hw_params.svc_to_ce_map_len; idx++) {
		map = &ab->hw_params.svc_to_ce_map[idx];

		if (__le32_to_cpu(map->service_id) != service_id)
			continue;

		dir = __le32_to_cpu(map->pipedir);
		pipenum = __le32_to_cpu(map->pipenum);

		/* PIPEDIR_INOUT contributes to both directions; the original
		 * switch is expressed here as two direction predicates.
		 */
		if (dir == PIPEDIR_IN || dir == PIPEDIR_INOUT) {
			WARN_ON(found_dl);
			*dl_pipe = pipenum;
			found_dl = true;
		}

		if (dir == PIPEDIR_OUT || dir == PIPEDIR_INOUT) {
			WARN_ON(found_ul);
			*ul_pipe = pipenum;
			found_ul = true;
		}
	}

	if (WARN_ON(!found_ul || !found_dl))
		return -ENOENT;

	return 0;
}
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _ATH11K_PCI_CMN_H
#define _ATH11K_PCI_CMN_H

#include "core.h"

/* Offsets into ab->irq_num[] for CE and DP interrupts */
#define ATH11K_PCI_IRQ_CE0_OFFSET	3
#define ATH11K_PCI_IRQ_DP_OFFSET	14

/* Register-window access: bits [24:19] of an offset select the window,
 * bits [18:0] address within it. NOTE(review): semantics inferred from
 * the masks below — confirm against the register access helpers.
 */
#define ATH11K_PCI_WINDOW_ENABLE_BIT	0x40000000
#define ATH11K_PCI_WINDOW_REG_ADDRESS	0x310c
#define ATH11K_PCI_WINDOW_VALUE_MASK	GENMASK(24, 19)
#define ATH11K_PCI_WINDOW_START		0x80000
#define ATH11K_PCI_WINDOW_RANGE_MASK	GENMASK(18, 0)

/* BAR0 + 4k is always accessible, and no
 * need to force wakeup.
 * 4K - 32 = 0xFE0
 */
#define ATH11K_PCI_ACCESS_ALWAYS_OFF	0xFE0

/* MSI vector discovery and register access helpers */
int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
					int *num_vectors, u32 *user_base_data,
					u32 *base_vector);
void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value);
u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset);
void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				 u32 *msi_addr_hi);
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);

/* IRQ setup/teardown and enable/disable for CE and DP interrupts */
void ath11k_pcic_free_irq(struct ath11k_base *ab);
int ath11k_pcic_config_irq(struct ath11k_base *ab);
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);

/* HIF start/stop entry points */
void ath11k_pcic_stop(struct ath11k_base *ab);
int ath11k_pcic_start(struct ath11k_base *ab);
int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
				    u8 *ul_pipe, u8 *dl_pipe);
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
#endif
// SPDX-License-Identifier: BSD-3-Clause-Clear // SPDX-License-Identifier: BSD-3-Clause-Clear
/* /*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#include "core.h" #include "core.h"
#include "peer.h" #include "peer.h"
#include "debug.h" #include "debug.h"
struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
const u8 *addr) int peer_id)
{ {
struct ath11k_peer *peer; struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock); lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) { list_for_each_entry(peer, &ab->peers, list) {
if (peer->vdev_id != vdev_id) if (peer->peer_id != peer_id)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue; continue;
return peer; return peer;
...@@ -26,15 +25,15 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id, ...@@ -26,15 +25,15 @@ struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
return NULL; return NULL;
} }
static struct ath11k_peer *ath11k_peer_find_by_pdev_idx(struct ath11k_base *ab, struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
u8 pdev_idx, const u8 *addr) const u8 *addr)
{ {
struct ath11k_peer *peer; struct ath11k_peer *peer;
lockdep_assert_held(&ab->base_lock); lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) { list_for_each_entry(peer, &ab->peers, list) {
if (peer->pdev_idx != pdev_idx) if (peer->vdev_id != vdev_id)
continue; continue;
if (!ether_addr_equal(peer->addr, addr)) if (!ether_addr_equal(peer->addr, addr))
continue; continue;
...@@ -52,14 +51,13 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab, ...@@ -52,14 +51,13 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock); lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) { if (!ab->rhead_peer_addr)
if (!ether_addr_equal(peer->addr, addr)) return NULL;
continue;
return peer; peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
} ab->rhash_peer_addr_param);
return NULL; return peer;
} }
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
...@@ -69,11 +67,13 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, ...@@ -69,11 +67,13 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
lockdep_assert_held(&ab->base_lock); lockdep_assert_held(&ab->base_lock);
list_for_each_entry(peer, &ab->peers, list) if (!ab->rhead_peer_id)
if (peer_id == peer->peer_id) return NULL;
return peer;
return NULL; peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
ab->rhash_peer_id_param);
return peer;
} }
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab, struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
...@@ -99,7 +99,7 @@ void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id) ...@@ -99,7 +99,7 @@ void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
spin_lock_bh(&ab->base_lock); spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_id(ab, peer_id); peer = ath11k_peer_find_list_by_id(ab, peer_id);
if (!peer) { if (!peer) {
ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n", ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
peer_id); peer_id);
...@@ -167,6 +167,76 @@ static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id, ...@@ -167,6 +167,76 @@ static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
return 0; return 0;
} }
static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
struct rhashtable *rtbl,
struct rhash_head *rhead,
struct rhashtable_params *params,
void *key)
{
struct ath11k_peer *tmp;
lockdep_assert_held(&ab->tbl_mtx_lock);
tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
if (!tmp)
return 0;
else if (IS_ERR(tmp))
return PTR_ERR(tmp);
else
return -EEXIST;
}
static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
struct rhashtable *rtbl,
struct rhash_head *rhead,
struct rhashtable_params *params)
{
int ret;
lockdep_assert_held(&ab->tbl_mtx_lock);
ret = rhashtable_remove_fast(rtbl, rhead, *params);
if (ret && ret != -ENOENT)
return ret;
return 0;
}
static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
{
int ret;
lockdep_assert_held(&ab->base_lock);
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
return -EPERM;
ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param, &peer->peer_id);
if (ret) {
ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
}
ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
&ab->rhash_peer_addr_param, &peer->addr);
if (ret) {
ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
peer->addr, peer->peer_id, ret);
goto err_clean;
}
return 0;
err_clean:
ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
&ab->rhash_peer_id_param);
return ret;
}
void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id) void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
{ {
struct ath11k_peer *peer, *tmp; struct ath11k_peer *peer, *tmp;
...@@ -174,6 +244,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id) ...@@ -174,6 +244,7 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
lockdep_assert_held(&ar->conf_mutex); lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock); spin_lock_bh(&ab->base_lock);
list_for_each_entry_safe(peer, tmp, &ab->peers, list) { list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
if (peer->vdev_id != vdev_id) if (peer->vdev_id != vdev_id)
...@@ -182,12 +253,14 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id) ...@@ -182,12 +253,14 @@ void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n", ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id); peer->addr, vdev_id);
ath11k_peer_rhash_delete(ab, peer);
list_del(&peer->list); list_del(&peer->list);
kfree(peer); kfree(peer);
ar->num_peers--; ar->num_peers--;
} }
spin_unlock_bh(&ab->base_lock); spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
} }
static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr) static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
...@@ -217,17 +290,38 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id, ...@@ -217,17 +290,38 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
return 0; return 0;
} }
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
{ {
int ret; int ret;
struct ath11k_peer *peer;
struct ath11k_base *ab = ar->ab;
lockdep_assert_held(&ar->conf_mutex); lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ab->tbl_mtx_lock);
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find_by_addr(ab, addr);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
ath11k_warn(ab,
"failed to find peer vdev_id %d addr %pM in delete\n",
vdev_id, addr);
return -EINVAL;
}
ath11k_peer_rhash_delete(ab, peer);
spin_unlock_bh(&ab->base_lock);
mutex_unlock(&ab->tbl_mtx_lock);
reinit_completion(&ar->peer_delete_done); reinit_completion(&ar->peer_delete_done);
ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id); ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
if (ret) { if (ret) {
ath11k_warn(ar->ab, ath11k_warn(ab,
"failed to delete peer vdev_id %d addr %pM ret %d\n", "failed to delete peer vdev_id %d addr %pM ret %d\n",
vdev_id, addr, ret); vdev_id, addr, ret);
return ret; return ret;
...@@ -237,6 +331,19 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr) ...@@ -237,6 +331,19 @@ int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
if (ret) if (ret)
return ret; return ret;
return 0;
}
int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = __ath11k_peer_delete(ar, vdev_id, addr);
if (ret)
return ret;
ar->num_peers--; ar->num_peers--;
return 0; return 0;
...@@ -263,7 +370,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, ...@@ -263,7 +370,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
} }
spin_lock_bh(&ar->ab->base_lock); spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr); peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
if (peer) { if (peer) {
spin_unlock_bh(&ar->ab->base_lock); spin_unlock_bh(&ar->ab->base_lock);
return -EINVAL; return -EINVAL;
...@@ -283,11 +390,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, ...@@ -283,11 +390,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
if (ret) if (ret)
return ret; return ret;
mutex_lock(&ar->ab->tbl_mtx_lock);
spin_lock_bh(&ar->ab->base_lock); spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr); peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
if (!peer) { if (!peer) {
spin_unlock_bh(&ar->ab->base_lock); spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n", ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id); param->peer_addr, param->vdev_id);
...@@ -295,6 +404,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, ...@@ -295,6 +404,13 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
goto cleanup; goto cleanup;
} }
ret = ath11k_peer_rhash_add(ar->ab, peer);
if (ret) {
spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
goto cleanup;
}
peer->pdev_idx = ar->pdev_idx; peer->pdev_idx = ar->pdev_idx;
peer->sta = sta; peer->sta = sta;
...@@ -319,26 +435,213 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif, ...@@ -319,26 +435,213 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
ar->num_peers++; ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock); spin_unlock_bh(&ar->ab->base_lock);
mutex_unlock(&ar->ab->tbl_mtx_lock);
return 0; return 0;
cleanup: cleanup:
reinit_completion(&ar->peer_delete_done); fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
if (fbret)
ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
param->peer_addr, param->vdev_id, fbret);
fbret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr, return ret;
param->vdev_id); }
if (fbret) {
ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
param->vdev_id, param->peer_addr); {
goto exit; int ret;
lockdep_assert_held(&ab->base_lock);
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
return -EPERM;
ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
&ab->rhash_peer_addr_param);
if (ret) {
ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
peer->addr, peer->peer_id, ret);
return ret;
} }
fbret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id, ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
param->peer_addr); &ab->rhash_peer_id_param);
if (fbret) if (ret) {
ath11k_warn(ar->ab, "failed wait for peer %pM delete done id %d fallback ret %d\n", ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
param->peer_addr, param->vdev_id, fbret); peer->addr, peer->peer_id, ret);
return ret;
}
return 0;
}
static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
{
struct rhashtable_params *param;
struct rhashtable *rhash_id_tbl;
int ret;
size_t size;
lockdep_assert_held(&ab->tbl_mtx_lock);
if (ab->rhead_peer_id)
return 0;
size = sizeof(*ab->rhead_peer_id);
rhash_id_tbl = kzalloc(size, GFP_KERNEL);
if (!rhash_id_tbl) {
ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
size);
return -ENOMEM;
}
param = &ab->rhash_peer_id_param;
param->key_offset = offsetof(struct ath11k_peer, peer_id);
param->head_offset = offsetof(struct ath11k_peer, rhash_id);
param->key_len = sizeof_field(struct ath11k_peer, peer_id);
param->automatic_shrinking = true;
param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
ret = rhashtable_init(rhash_id_tbl, param);
if (ret) {
ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
goto err_free;
}
spin_lock_bh(&ab->base_lock);
if (!ab->rhead_peer_id) {
ab->rhead_peer_id = rhash_id_tbl;
} else {
spin_unlock_bh(&ab->base_lock);
goto cleanup_tbl;
}
spin_unlock_bh(&ab->base_lock);
return 0;
cleanup_tbl:
rhashtable_destroy(rhash_id_tbl);
err_free:
kfree(rhash_id_tbl);
exit:
return ret; return ret;
} }
static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
{
struct rhashtable_params *param;
struct rhashtable *rhash_addr_tbl;
int ret;
size_t size;
lockdep_assert_held(&ab->tbl_mtx_lock);
if (ab->rhead_peer_addr)
return 0;
size = sizeof(*ab->rhead_peer_addr);
rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
if (!rhash_addr_tbl) {
ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
size);
return -ENOMEM;
}
param = &ab->rhash_peer_addr_param;
param->key_offset = offsetof(struct ath11k_peer, addr);
param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
param->key_len = sizeof_field(struct ath11k_peer, addr);
param->automatic_shrinking = true;
param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
ret = rhashtable_init(rhash_addr_tbl, param);
if (ret) {
ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
goto err_free;
}
spin_lock_bh(&ab->base_lock);
if (!ab->rhead_peer_addr) {
ab->rhead_peer_addr = rhash_addr_tbl;
} else {
spin_unlock_bh(&ab->base_lock);
goto cleanup_tbl;
}
spin_unlock_bh(&ab->base_lock);
return 0;
cleanup_tbl:
rhashtable_destroy(rhash_addr_tbl);
err_free:
kfree(rhash_addr_tbl);
return ret;
}
static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
{
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_id)
return;
rhashtable_destroy(ab->rhead_peer_id);
kfree(ab->rhead_peer_id);
ab->rhead_peer_id = NULL;
}
static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
{
lockdep_assert_held(&ab->tbl_mtx_lock);
if (!ab->rhead_peer_addr)
return;
rhashtable_destroy(ab->rhead_peer_addr);
kfree(ab->rhead_peer_addr);
ab->rhead_peer_addr = NULL;
}
int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
{
int ret;
mutex_lock(&ab->tbl_mtx_lock);
ret = ath11k_peer_rhash_id_tbl_init(ab);
if (ret)
goto out;
ret = ath11k_peer_rhash_addr_tbl_init(ab);
if (ret)
goto cleanup_tbl;
mutex_unlock(&ab->tbl_mtx_lock);
return 0;
cleanup_tbl:
ath11k_peer_rhash_id_tbl_destroy(ab);
out:
mutex_unlock(&ab->tbl_mtx_lock);
return ret;
}
void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
{
mutex_lock(&ab->tbl_mtx_lock);
ath11k_peer_rhash_addr_tbl_destroy(ab);
ath11k_peer_rhash_id_tbl_destroy(ab);
mutex_unlock(&ab->tbl_mtx_lock);
}
/* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* SPDX-License-Identifier: BSD-3-Clause-Clear */
/* /*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/ */
#ifndef ATH11K_PEER_H #ifndef ATH11K_PEER_H
...@@ -20,6 +21,11 @@ struct ath11k_peer { ...@@ -20,6 +21,11 @@ struct ath11k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1]; struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1]; struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
/* peer id based rhashtable list pointer */
struct rhash_head rhash_id;
/* peer addr based rhashtable list pointer */
struct rhash_head rhash_addr;
/* Info used in MMIC verification of /* Info used in MMIC verification of
* RX fragments * RX fragments
*/ */
...@@ -47,5 +53,7 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id, ...@@ -47,5 +53,7 @@ int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr); const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab, struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
int vdev_id); int vdev_id);
int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab);
void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab);
int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer);
#endif /* _PEER_H_ */ #endif /* _PEER_H_ */
...@@ -15,6 +15,9 @@ ...@@ -15,6 +15,9 @@
#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02 #define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
#define HOST_CSTATE_BIT 0x04 #define HOST_CSTATE_BIT 0x04
#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
bool ath11k_cold_boot_cal = 1; bool ath11k_cold_boot_cal = 1;
EXPORT_SYMBOL(ath11k_cold_boot_cal); EXPORT_SYMBOL(ath11k_cold_boot_cal);
...@@ -1674,6 +1677,9 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab) ...@@ -1674,6 +1677,9 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT; req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
} }
if (ab->hw_params.global_reset)
req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n"); ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi host cap request\n");
ret = qmi_txn_init(&ab->qmi.handle, &txn, ret = qmi_txn_init(&ab->qmi.handle, &txn,
...@@ -2008,6 +2014,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) ...@@ -2008,6 +2014,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
struct qmi_txn txn; struct qmi_txn txn;
int ret = 0; int ret = 0;
int r; int r;
char *fw_build_id;
int fw_build_id_mask_len;
memset(&req, 0, sizeof(req)); memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp)); memset(&resp, 0, sizeof(resp));
...@@ -2073,6 +2081,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) ...@@ -2073,6 +2081,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n"); ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n");
} }
fw_build_id = ab->qmi.target.fw_build_id;
fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK);
if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len))
fw_build_id = fw_build_id + fw_build_id_mask_len;
ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n", ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family, ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id); ab->qmi.target.board_id, ab->qmi.target.soc_id);
...@@ -2080,7 +2093,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab) ...@@ -2080,7 +2093,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s", ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
ab->qmi.target.fw_version, ab->qmi.target.fw_version,
ab->qmi.target.fw_build_timestamp, ab->qmi.target.fw_build_timestamp,
ab->qmi.target.fw_build_id); fw_build_id);
r = ath11k_core_check_smbios(ab);
if (r)
ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
r = ath11k_core_check_dt(ab); r = ath11k_core_check_dt(ab);
if (r) if (r)
......
...@@ -83,6 +83,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) ...@@ -83,6 +83,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
*/ */
if (ar->ab->hw_params.current_cc_support) { if (ar->ab->hw_params.current_cc_support) {
memcpy(&set_current_param.alpha2, request->alpha2, 2); memcpy(&set_current_param.alpha2, request->alpha2, 2);
memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
if (ret) if (ret)
ath11k_warn(ar->ab, ath11k_warn(ar->ab,
...@@ -102,7 +103,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) ...@@ -102,7 +103,7 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
ar->regdom_set_by_user = true; ar->regdom_set_by_user = true;
} }
int ath11k_reg_update_chan_list(struct ath11k *ar) int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
{ {
struct ieee80211_supported_band **bands; struct ieee80211_supported_band **bands;
struct scan_chan_list_params *params; struct scan_chan_list_params *params;
...@@ -111,7 +112,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) ...@@ -111,7 +112,32 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch; struct channel_param *ch;
enum nl80211_band band; enum nl80211_band band;
int num_channels = 0; int num_channels = 0;
int i, ret; int i, ret, left;
if (wait && ar->state_11d != ATH11K_11D_IDLE) {
left = wait_for_completion_timeout(&ar->completed_11d_scan,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive 11d scan complete: timed out\n");
ar->state_11d = ATH11K_11D_IDLE;
}
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"reg 11d scan wait left time %d\n", left);
}
if (wait &&
(ar->scan.state == ATH11K_SCAN_STARTING ||
ar->scan.state == ATH11K_SCAN_RUNNING)) {
left = wait_for_completion_timeout(&ar->scan.completed,
ATH11K_SCAN_TIMEOUT_HZ);
if (!left)
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"failed to receive hw scan complete: timed out\n");
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"reg hw scan wait left time %d\n", left);
}
bands = hw->wiphy->bands; bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) { for (band = 0; band < NUM_NL80211_BANDS; band++) {
...@@ -193,11 +219,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) ...@@ -193,11 +219,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params); ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
kfree(params); kfree(params);
if (ar->pending_11d) {
complete(&ar->finish_11d_ch_list);
ar->pending_11d = false;
}
return ret; return ret;
} }
...@@ -263,15 +284,8 @@ int ath11k_regd_update(struct ath11k *ar) ...@@ -263,15 +284,8 @@ int ath11k_regd_update(struct ath11k *ar)
goto err; goto err;
} }
if (ar->pending_11d)
complete(&ar->finish_11d_scan);
rtnl_lock(); rtnl_lock();
wiphy_lock(ar->hw->wiphy); wiphy_lock(ar->hw->wiphy);
if (ar->pending_11d)
reinit_completion(&ar->finish_11d_ch_list);
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy); ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy); wiphy_unlock(ar->hw->wiphy);
rtnl_unlock(); rtnl_unlock();
...@@ -282,7 +296,7 @@ int ath11k_regd_update(struct ath11k *ar) ...@@ -282,7 +296,7 @@ int ath11k_regd_update(struct ath11k *ar)
goto err; goto err;
if (ar->state == ATH11K_STATE_ON) { if (ar->state == ATH11K_STATE_ON) {
ret = ath11k_reg_update_chan_list(ar); ret = ath11k_reg_update_chan_list(ar, true);
if (ret) if (ret)
goto err; goto err;
} }
......
...@@ -32,5 +32,5 @@ struct ieee80211_regdomain * ...@@ -32,5 +32,5 @@ struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab, ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect); struct cur_regulatory_info *reg_info, bool intersect);
int ath11k_regd_update(struct ath11k *ar); int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar); int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
#endif #endif
...@@ -2015,7 +2015,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar, ...@@ -2015,7 +2015,10 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
{ {
/* setup commonly used values */ /* setup commonly used values */
arg->scan_req_id = 1; arg->scan_req_id = 1;
arg->scan_priority = WMI_SCAN_PRIORITY_LOW; if (ar->state_11d == ATH11K_11D_PREPARING)
arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
else
arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
arg->dwell_time_active = 50; arg->dwell_time_active = 50;
arg->dwell_time_active_2g = 0; arg->dwell_time_active_2g = 0;
arg->dwell_time_passive = 150; arg->dwell_time_passive = 150;
...@@ -6350,8 +6353,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab) ...@@ -6350,8 +6353,10 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb) static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
{ {
const struct wmi_11d_new_cc_ev *ev; const struct wmi_11d_new_cc_ev *ev;
struct ath11k *ar;
struct ath11k_pdev *pdev;
const void **tb; const void **tb;
int ret; int ret, i;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC); tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) { if (IS_ERR(tb)) {
...@@ -6377,6 +6382,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s ...@@ -6377,6 +6382,13 @@ static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *s
kfree(tb); kfree(tb);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
ar->state_11d = ATH11K_11D_IDLE;
complete(&ar->completed_11d_scan);
}
queue_work(ab->workqueue, &ab->update_11d_work); queue_work(ab->workqueue, &ab->update_11d_work);
return 0; return 0;
...@@ -7765,6 +7777,56 @@ static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab, ...@@ -7765,6 +7777,56 @@ static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
kfree(tb); kfree(tb);
} }
static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
struct sk_buff *skb)
{
const void **tb;
const struct wmi_gtk_offload_status_event *ev;
struct ath11k_vif *arvif;
__be64 replay_ctr_be;
u64 replay_ctr;
int ret;
tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
if (IS_ERR(tb)) {
ret = PTR_ERR(tb);
ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
return;
}
ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
if (!ev) {
ath11k_warn(ab, "failed to fetch gtk offload status ev");
kfree(tb);
return;
}
arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
if (!arvif) {
ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
ev->vdev_id);
kfree(tb);
return;
}
ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi gtk offload event refresh_cnt %d\n",
ev->refresh_cnt);
ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
replay_ctr = ev->replay_ctr.word1;
replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
arvif->rekey_data.replay_ctr = replay_ctr;
/* supplicant expects big-endian replay counter */
replay_ctr_be = cpu_to_be64(replay_ctr);
ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
(void *)&replay_ctr_be, GFP_KERNEL);
kfree(tb);
}
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{ {
struct wmi_cmd_hdr *cmd_hdr; struct wmi_cmd_hdr *cmd_hdr;
...@@ -7896,6 +7958,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) ...@@ -7896,6 +7958,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID: case WMI_DIAG_EVENTID:
ath11k_wmi_diag_event(ab, skb); ath11k_wmi_diag_event(ab, skb);
break; break;
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath11k_wmi_gtk_offload_status_event(ab, skb);
break;
/* TODO: Add remaining events */ /* TODO: Add remaining events */
default: default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id); ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
...@@ -8165,6 +8230,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab) ...@@ -8165,6 +8230,39 @@ void ath11k_wmi_detach(struct ath11k_base *ab)
ath11k_wmi_free_dbring_caps(ab); ath11k_wmi_free_dbring_caps(ab);
} }
/* Program the firmware hardware data filter for a vdev.
 *
 * When @enable is true only @filter_bitmap is applied; when disabling,
 * all filter modes are written so that firmware clears every filter.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
				  u32 filter_bitmap, bool enable)
{
	struct sk_buff *skb;
	struct wmi_hw_data_filter_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->enable = enable;

	/* Set all modes in case of disable */
	if (!cmd->enable)
		cmd->hw_filter_bitmap = ((u32)~0U);
	else
		cmd->hw_filter_bitmap = filter_bitmap;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi hw data filter enable %d filter_bitmap 0x%x\n",
		   enable, filter_bitmap);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
}
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar) int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
{ {
struct wmi_wow_host_wakeup_ind *cmd; struct wmi_wow_host_wakeup_ind *cmd;
...@@ -8235,3 +8333,536 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, ...@@ -8235,3 +8333,536 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID); return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
} }
/* Enable or disable a single WoW wakeup @event on @vdev_id.
 *
 * @enable: non-zero adds the event to the wakeup bitmap, zero removes it.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
				    enum wmi_wow_wakeup_event event,
				    u32 enable)
{
	struct sk_buff *skb;
	struct wmi_wow_add_del_event_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->is_add = enable;
	cmd->event_bitmap = (1 << event);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
}
/* Install a WoW bitmap wakeup pattern in firmware.
 *
 * @vdev_id: vdev the pattern applies to
 * @pattern_id: slot index of the pattern
 * @pattern/@mask: pattern bytes and per-byte match mask, both @pattern_len long
 * @pattern_offset: offset into the received frame where matching starts
 *
 * The command carries one bitmap-pattern TLV followed by empty placeholder
 * TLVs (ipv4 sync, ipv6 sync, magic, info timeout) and a ratelimit-interval
 * TLV; firmware expects all of them to be present even when unused.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
			       const u8 *pattern, const u8 *mask,
			       int pattern_len, int pattern_offset)
{
	struct wmi_wow_add_pattern_cmd *cmd;
	struct wmi_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *ptr;
	size_t len;

	/* Guard the fixed-size firmware buffers (patternbuf/bitmaskbuf are
	 * WOW_DEFAULT_BITMAP_PATTERN_SIZE bytes).  cfg80211 normally enforces
	 * this via wiphy pattern_max_len, but don't rely on callers.
	 */
	if (pattern_len < 0 || pattern_len > WOW_DEFAULT_BITMAP_PATTERN_SIZE)
		return -EINVAL;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*bitmap) +			/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	/* cmd */
	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_WOW_ADD_PATTERN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->pattern_id = pattern_id;
	cmd->pattern_type = WOW_BITMAP_PATTERN;

	ptr += sizeof(*cmd);

	/* bitmap */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));

	ptr += sizeof(*tlv);

	bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
	bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					WMI_TAG_WOW_BITMAP_PATTERN_T) |
			     FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);

	/* CE requires the payload to be 4-byte-swapped on some targets */
	memcpy(bitmap->patternbuf, pattern, pattern_len);
	ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
	memcpy(bitmap->bitmaskbuf, mask, pattern_len);
	ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));

	bitmap->pattern_offset = pattern_offset;
	bitmap->pattern_len = pattern_len;
	bitmap->bitmask_len = pattern_len;
	bitmap->pattern_id = pattern_id;

	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	ptr += sizeof(*tlv);

	/* ratelimit interval */
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(u32));

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
}
/* Remove the WoW bitmap pattern in slot @pattern_id from @vdev_id.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	struct wmi_wow_del_pattern_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_WOW_DEL_PATTERN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->pattern_id = pattern_id;
	cmd->pattern_type = WOW_BITMAP_PATTERN;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
}
/* Build the WMI_TAG_NLO_CONFIG_CMD skb that starts PNO (preferred network
 * offload) scanning for @vdev_id.  The command is followed by two TLVs:
 * an array of nlo_configured_parameters (one per configured SSID) and a
 * u32 channel list.  Caller sends the returned skb and owns it on error.
 *
 * Return: skb ready to send, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *
ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
				   u32 vdev_id,
				   struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 *channel_list;
	size_t len, nlo_list_len, channel_list_len;
	u8 *ptr;
	u32 i;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	/* All networks share the channel list of a_networks[0]. */
	channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
	len += channel_list_len;

	nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
	len += nlo_list_len;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (u8 *)skb->data;
	cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	/* NOTE(review): cmd->vdev_id is taken from pno->vdev_id while the
	 * debug print at the bottom uses the vdev_id argument — presumably
	 * both are always equal at the call site; confirm against caller.
	 */
	cmd->vdev_id = pno->vdev_id;
	cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = pno->active_max_time;
	cmd->passive_dwell_time = pno->passive_max_time;

	if (pno->do_passive_scan)
		cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;

	cmd->fast_scan_period = pno->fast_scan_period;
	cmd->slow_scan_period = pno->slow_scan_period;
	cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
	cmd->delay_start_time = pno->delay_start_time;

	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
			      WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
		/* CE byte-swap covers the full 8-byte wmi_mac_addr */
		ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
		ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
	}

	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = pno->uc_networks_count;
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG,
				 WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
	ptr += sizeof(*tlv);

	nlo_list = (struct nlo_configured_parameters *)ptr;
	for (i = 0; i < cmd->no_of_ssids; i++) {
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
			      FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));

		nlo_list[i].ssid.valid = true;
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       nlo_list[i].ssid.ssid.ssid_len);
		ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
				    roundup(nlo_list[i].ssid.ssid.ssid_len, 4));

		/* Only attach an RSSI condition for sane thresholds. */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = true;
			nlo_list[i].rssi_cond.rssi =
				pno->a_networks[i].rssi_threshold;
		}

		nlo_list[i].bcast_nw_type.valid = true;
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			pno->a_networks[i].bcast_nw_type;
	}

	ptr += nlo_list_len;
	cmd->num_of_channels = pno->a_networks[0].channel_count;
	tlv = (struct wmi_tlv *)ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, channel_list_len);
	ptr += sizeof(*tlv);

	channel_list = (u32 *)ptr;
	for (i = 0; i < cmd->num_of_channels; i++)
		channel_list[i] = pno->a_networks[0].channels[i];

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}
/* Build the NLO-config skb that stops PNO scanning on @vdev_id.
 *
 * Return: skb ready to send, or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
							 u32 vdev_id)
{
	struct sk_buff *skb;
	struct wmi_wow_nlo_config_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->flags = WMI_NLO_CONFIG_STOP;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}
/* Start or stop PNO scanning on @vdev_id based on @pno_scan->enable.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
			      struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;

	if (pno_scan->enable)
		skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
	else
		skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);

	/* Propagate the real error from the gen helpers instead of
	 * collapsing every failure to -ENOMEM.
	 */
	if (IS_ERR_OR_NULL(skb))
		return skb ? PTR_ERR(skb) : -ENOMEM;

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
}
/* Append a TLV array of NS (IPv6 neighbour solicitation) offload tuples
 * at *@ptr and advance *@ptr past the written data.
 *
 * @ext = false writes the fixed WMI_MAX_NS_OFFLOADS tuples (slots
 * 0..WMI_MAX_NS_OFFLOADS-1); @ext = true writes the extended tuples for
 * the remaining configured addresses.  Only slots below ipv6_count get
 * WMI_NSOL_FLAGS_VALID; the others are sent but left invalid.
 */
static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
				       struct ath11k_arp_ns_offload *offload,
				       u8 **ptr,
				       bool enable,
				       bool ext)
{
	struct wmi_ns_offload_tuple *ns;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	u32 ns_cnt, ns_ext_tuples;
	int i, max_offloads;

	ns_cnt = offload->ipv6_count;

	tlv = (struct wmi_tlv *)buf_ptr;

	if (ext) {
		/* extended array covers addresses beyond the fixed slots */
		ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
		i = WMI_MAX_NS_OFFLOADS;
		max_offloads = offload->ipv6_count;
	} else {
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
		i = 0;
		max_offloads = WMI_MAX_NS_OFFLOADS;
	}

	buf_ptr += sizeof(*tlv);

	for (; i < max_offloads; i++) {
		ns = (struct wmi_ns_offload_tuple *)buf_ptr;
		ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
				 FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);

		if (enable) {
			if (i < ns_cnt)
				ns->flags |= WMI_NSOL_FLAGS_VALID;

			/* NOTE(review): addresses are copied even for slots
			 * >= ns_cnt (only the VALID flag is withheld) —
			 * assumes offload->ipv6_addr[] is sized for all
			 * slots written here; confirm against the struct.
			 */
			memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
			memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
			ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
			ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);

			if (offload->ipv6_type[i])
				ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;

			memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
			ath11k_ce_byte_swap(ns->target_mac.addr, 8);

			/* A non-zero MAC marks the tuple's MAC as valid. */
			if (ns->target_mac.word0 != 0 ||
			    ns->target_mac.word1 != 0) {
				ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
			}

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "wmi index %d ns_solicited %pI6 target %pI6",
				   i, ns->solicitation_ipaddr,
				   ns->target_ipaddr[0]);
		}

		buf_ptr += sizeof(*ns);
	}

	*ptr = buf_ptr;
}
/* Append a TLV array of WMI_MAX_ARP_OFFLOADS ARP offload tuples at *@ptr
 * and advance *@ptr past the written data.  Slots beyond ipv4_count (or
 * all slots when @enable is false) are sent with flags cleared.
 */
static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
					struct ath11k_arp_ns_offload *offload,
					u8 **ptr,
					bool enable)
{
	struct wmi_arp_offload_tuple *arp;
	struct wmi_tlv *tlv;
	u8 *buf_ptr = *ptr;
	int i;

	/* fill arp tuple */
	tlv = (struct wmi_tlv *)buf_ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
	buf_ptr += sizeof(*tlv);

	for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
		arp = (struct wmi_arp_offload_tuple *)buf_ptr;
		arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
				  FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);

		if (enable && i < offload->ipv4_count) {
			/* Copy the target ip addr and flags */
			arp->flags = WMI_ARPOL_FLAGS_VALID;
			memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
			ath11k_ce_byte_swap(arp->target_ipaddr, 4);
			ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi arp offload address %pI4",
				   arp->target_ipaddr);
		}

		buf_ptr += sizeof(*arp);
	}

	*ptr = buf_ptr;
}
/* Program (or clear, when @enable is false) the ARP and NS offload
 * tuples for @arvif in firmware.
 *
 * Layout: cmd, fixed NS tuple array, ARP tuple array, then an optional
 * extended NS tuple array when more than WMI_MAX_NS_OFFLOADS IPv6
 * addresses are configured — matching struct wmi_set_arp_ns_offload_cmd.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
			      struct ath11k_vif *arvif, bool enable)
{
	struct ath11k_arp_ns_offload *offload;
	struct wmi_set_arp_ns_offload_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u8 *buf_ptr;
	size_t len;
	u8 ns_cnt, ns_ext_tuples = 0;

	offload = &arvif->arp_ns_offload;
	ns_cnt = offload->ipv6_count;

	len = sizeof(*cmd) +
	      sizeof(*tlv) +
	      WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
	      sizeof(*tlv) +
	      WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);

	if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
		ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
		len += sizeof(*tlv) +
		       ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
	}

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	buf_ptr = skb->data;
	cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->flags = 0;
	cmd->vdev_id = arvif->vdev_id;
	cmd->num_ns_ext_tuples = ns_ext_tuples;

	buf_ptr += sizeof(*cmd);

	/* use bool literals for the 'ext' parameter rather than 0/1 */
	ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
	ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);

	if (ns_ext_tuples)
		ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
}
/* Enable or disable firmware GTK rekey offload for @arvif.
 *
 * When enabling, the KCK/KEK and the current replay counter from
 * arvif->rekey_data are handed to firmware.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
				 struct ath11k_vif *arvif, bool enable)
{
	struct wmi_gtk_rekey_offload_cmd *cmd;
	struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
	int len;
	struct sk_buff *skb;
	__le64 replay_ctr;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arvif->vdev_id;

	if (enable) {
		cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;

		/* the length in rekey_data and cmd is equal */
		memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
		/* swap the KCK with its own length constant (KCK, not KEK) */
		ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KCK_BYTES);
		memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
		ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);

		replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
		memcpy(cmd->replay_ctr, &replay_ctr,
		       sizeof(replay_ctr));
		ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
	} else {
		cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
		   arvif->vdev_id, enable);
	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
/* Ask firmware to report the current GTK rekey status for @arvif; the
 * reply arrives as WMI_GTK_OFFLOAD_STATUS_EVENTID.
 *
 * Return: 0 on success or a negative error code.
 */
int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
				 struct ath11k_vif *arvif)
{
	struct sk_buff *skb;
	struct wmi_gtk_rekey_offload_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = arvif->vdev_id;
	cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
		   arvif->vdev_id);
	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
}
...@@ -13,6 +13,7 @@ struct ath11k_base; ...@@ -13,6 +13,7 @@ struct ath11k_base;
struct ath11k; struct ath11k;
struct ath11k_fw_stats; struct ath11k_fw_stats;
struct ath11k_fw_dbglog; struct ath11k_fw_dbglog;
struct ath11k_vif;
#define PSOC_HOST_MAX_NUM_SS (8) #define PSOC_HOST_MAX_NUM_SS (8)
...@@ -3088,9 +3089,6 @@ enum scan_dwelltime_adaptive_mode { ...@@ -3088,9 +3089,6 @@ enum scan_dwelltime_adaptive_mode {
SCAN_DWELL_MODE_STATIC = 4 SCAN_DWELL_MODE_STATIC = 4
}; };
#define WLAN_SCAN_MAX_NUM_SSID 10
#define WLAN_SCAN_MAX_NUM_BSSID 10
#define WLAN_SSID_MAX_LEN 32 #define WLAN_SSID_MAX_LEN 32
struct element_info { struct element_info {
...@@ -3105,7 +3103,6 @@ struct wlan_ssid { ...@@ -3105,7 +3103,6 @@ struct wlan_ssid {
#define WMI_IE_BITMAP_SIZE 8 #define WMI_IE_BITMAP_SIZE 8
#define WMI_SCAN_MAX_NUM_SSID 0x0A
/* prefix used by scan requestor ids on the host */ /* prefix used by scan requestor ids on the host */
#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000 #define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
...@@ -3113,10 +3110,6 @@ struct wlan_ssid { ...@@ -3113,10 +3110,6 @@ struct wlan_ssid {
/* host cycles through the lower 12 bits to generate ids */ /* host cycles through the lower 12 bits to generate ids */
#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000 #define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
/* Values lower than this may be refused by some firmware revisions with a scan /* Values lower than this may be refused by some firmware revisions with a scan
* completion with a timedout reason. * completion with a timedout reason.
*/ */
...@@ -3312,8 +3305,8 @@ struct scan_req_params { ...@@ -3312,8 +3305,8 @@ struct scan_req_params {
u32 n_probes; u32 n_probes;
u32 *chan_list; u32 *chan_list;
u32 notify_scan_events; u32 notify_scan_events;
struct wlan_ssid ssid[WLAN_SCAN_MAX_NUM_SSID]; struct wlan_ssid ssid[WLAN_SCAN_PARAMS_MAX_SSID];
struct wmi_mac_addr bssid_list[WLAN_SCAN_MAX_NUM_BSSID]; struct wmi_mac_addr bssid_list[WLAN_SCAN_PARAMS_MAX_BSSID];
struct element_info extraie; struct element_info extraie;
struct element_info htcap; struct element_info htcap;
struct element_info vhtcap; struct element_info vhtcap;
...@@ -5390,6 +5383,19 @@ struct ath11k_wmi_base { ...@@ -5390,6 +5383,19 @@ struct ath11k_wmi_base {
struct ath11k_targ_cap *targ_cap; struct ath11k_targ_cap *targ_cap;
}; };
/* Definition of HW data filtering */
enum hw_data_filter_type {
	WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),	/* drop non-ARP broadcast */
	WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),	/* drop non-ICMPv6 multicast */
};

/* WMI_HW_DATA_FILTER_CMDID payload */
struct wmi_hw_data_filter_cmd {
	u32 tlv_header;
	u32 vdev_id;
	u32 enable;		/* non-zero: apply hw_filter_bitmap */
	u32 hw_filter_bitmap;	/* bitmap of enum hw_data_filter_type */
} __packed;
/* WOW structures */ /* WOW structures */
enum wmi_wow_wakeup_event { enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0, WOW_BMISS_EVENT = 0,
...@@ -5534,6 +5540,45 @@ static inline const char *wow_reason(enum wmi_wow_wake_reason reason) ...@@ -5534,6 +5540,45 @@ static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
#undef C2S #undef C2S
/* Parsed host-side view of a WoW wakeup event. */
struct wmi_wow_ev_arg {
	u32 vdev_id;
	u32 flag;
	enum wmi_wow_wake_reason wake_reason;
	u32 data_len;
};

/* Pattern types accepted by the WOW add/del pattern commands. */
enum wmi_tlv_pattern_type {
	WOW_PATTERN_MIN = 0,
	WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
	WOW_IPV4_SYNC_PATTERN,
	WOW_IPV6_SYNC_PATTERN,
	WOW_WILD_CARD_PATTERN,
	WOW_TIMER_PATTERN,
	WOW_MAGIC_PATTERN,
	WOW_IPV6_RA_PATTERN,
	WOW_IOAC_PKT_PATTERN,
	WOW_IOAC_TMR_PATTERN,
	WOW_PATTERN_MAX
};

/* Sizes of the fixed buffers inside struct wmi_wow_bitmap_pattern. */
#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
#define WOW_DEFAULT_BITMASK_SIZE 148

/* Limits advertised to cfg80211 in ath11k_wowlan_support. */
#define WOW_MIN_PATTERN_SIZE 1
#define WOW_MAX_PATTERN_SIZE 148
#define WOW_MAX_PKT_OFFSET 128

/* 802.11 3-addr header plus RFC 1042 SNAP header — used when converting
 * 802.3 wakeup patterns to 802.11 framing.
 */
#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
	sizeof(struct rfc1042_hdr))
#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
	offsetof(struct ieee80211_hdr_3addr, addr1))

/* WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID payload */
struct wmi_wow_add_del_event_cmd {
	u32 tlv_header;
	u32 vdev_id;
	u32 is_add;		/* non-zero adds the events, zero removes */
	u32 event_bitmap;	/* bitmap of enum wmi_wow_wakeup_event */
} __packed;
struct wmi_wow_enable_cmd { struct wmi_wow_enable_cmd {
u32 tlv_header; u32 tlv_header;
u32 enable; u32 enable;
...@@ -5546,13 +5591,292 @@ struct wmi_wow_host_wakeup_ind { ...@@ -5546,13 +5591,292 @@ struct wmi_wow_host_wakeup_ind {
u32 reserved; u32 reserved;
} __packed; } __packed;
struct wmi_wow_ev_arg { struct wmi_tlv_wow_event_info {
u32 vdev_id; u32 vdev_id;
u32 flag; u32 flag;
enum wmi_wow_wake_reason wake_reason; u32 wake_reason;
u32 data_len; u32 data_len;
} __packed;
/* Bitmap-pattern TLV carried inside WMI_WOW_ADD_WAKE_PATTERN_CMDID. */
struct wmi_wow_bitmap_pattern {
	u32 tlv_header;
	u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];	/* pattern bytes */
	u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];	/* per-byte match mask */
	u32 pattern_offset;	/* offset in the frame to match at */
	u32 pattern_len;
	u32 bitmask_len;
	u32 pattern_id;		/* pattern slot index */
} __packed;

/* WMI_WOW_ADD_WAKE_PATTERN_CMDID header (TLVs follow) */
struct wmi_wow_add_pattern_cmd {
	u32 tlv_header;
	u32 vdev_id;
	u32 pattern_id;
	u32 pattern_type;	/* enum wmi_tlv_pattern_type */
} __packed;

/* WMI_WOW_DEL_WAKE_PATTERN_CMDID payload */
struct wmi_wow_del_pattern_cmd {
	u32 tlv_header;
	u32 vdev_id;
	u32 pattern_id;
	u32 pattern_type;	/* enum wmi_tlv_pattern_type */
} __packed;
/* PNO (preferred network offload) limits. */
#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
#define WMI_PNO_MAX_NETW_CHANNELS 26
#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN

/* size based of dot11 declaration without extra IEs as we will not carry those for PNO */
#define WMI_PNO_MAX_PB_REQ_SIZE 450

#define WMI_PNO_24G_DEFAULT_CH 1
#define WMI_PNO_5G_DEFAULT_CH 36

#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
#define WMI_PASSIVE_MAX_CHANNEL_TIME 110

/* SSID broadcast type */
enum wmi_ssid_bcast_type {
	BCAST_UNKNOWN = 0,
	BCAST_NORMAL = 1,
	BCAST_HIDDEN = 2,
};

#define WMI_NLO_MAX_SSIDS 16
#define WMI_NLO_MAX_CHAN 48

/* Flag bits for wmi_wow_nlo_config_cmd.flags. */
#define WMI_NLO_CONFIG_STOP BIT(0)
#define WMI_NLO_CONFIG_START BIT(1)
#define WMI_NLO_CONFIG_RESET BIT(2)
#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)

/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
 * Only one of them can be enabled at a given time
 */
#define WMI_NLO_CONFIG_ENLO BIT(7)
#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)

/* Optional sub-fields of nlo_configured_parameters; each carries a
 * 'valid' flag telling firmware whether the value is meaningful.
 */
struct wmi_nlo_ssid_param {
	u32 valid;
	struct wmi_ssid ssid;
} __packed;

struct wmi_nlo_enc_param {
	u32 valid;
	u32 enc_type;
} __packed;

struct wmi_nlo_auth_param {
	u32 valid;
	u32 auth_type;
} __packed;

struct wmi_nlo_bcast_nw_param {
	u32 valid;
	u32 bcast_nw_type;
} __packed;

struct wmi_nlo_rssi_param {
	u32 valid;
	s32 rssi;
} __packed;

/* One configured PNO network, sent as an ARRAY_BYTE TLV element. */
struct nlo_configured_parameters {
	/* TLV tag and len;*/
	u32 tlv_header;
	struct wmi_nlo_ssid_param ssid;
	struct wmi_nlo_enc_param enc_type;
	struct wmi_nlo_auth_param auth_type;
	struct wmi_nlo_rssi_param rssi_cond;
	/* indicates if the SSID is hidden or not */
	struct wmi_nlo_bcast_nw_param bcast_nw_type;
} __packed;
struct wmi_network_type {
struct wmi_ssid ssid;
u32 authentication;
u32 encryption;
u32 bcast_nw_type;
u8 channel_count;
u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
s32 rssi_threshold;
}; };
/* Host-side PNO scan request, translated into a WMI NLO config command
 * by ath11k_wmi_wow_config_pno().
 */
struct wmi_pno_scan_req {
	u8 enable;		/* non-zero: start PNO, zero: stop */
	u8 vdev_id;
	u8 uc_networks_count;	/* number of valid a_networks entries */
	struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
	u32 fast_scan_period;
	u32 slow_scan_period;
	u8 fast_scan_max_cycles;

	bool do_passive_scan;

	u32 delay_start_time;
	u32 active_min_time;
	u32 active_max_time;
	u32 passive_min_time;
	u32 passive_max_time;

	/* mac address randomization attributes */
	u32 enable_pno_scan_randomization;
	u8 mac_addr[ETH_ALEN];
	u8 mac_addr_mask[ETH_ALEN];
};
/* WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID payload (NLO/PNO config). */
struct wmi_wow_nlo_config_cmd {
	u32 tlv_header;
	u32 flags;		/* WMI_NLO_CONFIG_* bits */
	u32 vdev_id;
	u32 fast_scan_max_cycles;
	u32 active_dwell_time;
	u32 passive_dwell_time;
	u32 probe_bundle_size;

	/* ART = IRT */
	u32 rest_time;

	/* Max value that can be reached after SBM */
	u32 max_rest_time;

	/* SBM */
	u32 scan_backoff_multiplier;

	/* SCBM */
	u32 fast_scan_period;

	/* specific to windows */
	u32 slow_scan_period;

	u32 no_of_ssids;

	u32 num_of_channels;

	/* NLO scan start delay time in milliseconds */
	u32 delay_start_time;

	/* MAC Address to use in Probe Req as SA */
	struct wmi_mac_addr mac_addr;

	/* Mask on which MAC has to be randomized */
	struct wmi_mac_addr mac_mask;

	/* IE bitmap to use in Probe Req */
	u32 ie_bitmap[8];

	/* Number of vendor OUIs. In the TLV vendor_oui[] */
	u32 num_vendor_oui;

	/* Number of connected NLO band preferences */
	u32 num_cnlo_band_pref;

	/* The TLVs will follow.
	 * nlo_configured_parameters nlo_list[];
	 * u32 channel_list[num_of_channels];
	 */
} __packed;
/* Fixed tuple counts in the ARP/NS offload command; additional IPv6
 * addresses go into the extended NS tuple array.
 */
#define WMI_MAX_NS_OFFLOADS 2
#define WMI_MAX_ARP_OFFLOADS 2

/* Flag bits for wmi_arp_offload_tuple.flags. */
#define WMI_ARPOL_FLAGS_VALID BIT(0)
#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)

struct wmi_arp_offload_tuple {
	u32 tlv_header;
	u32 flags;
	u8 target_ipaddr[4];
	u8 remote_ipaddr[4];
	struct wmi_mac_addr target_mac;
} __packed;

/* Flag bits for wmi_ns_offload_tuple.flags. */
#define WMI_NSOL_FLAGS_VALID BIT(0)
#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)

#define WMI_NSOL_MAX_TARGET_IPS 2

struct wmi_ns_offload_tuple {
	u32 tlv_header;
	u32 flags;
	u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
	u8 solicitation_ipaddr[16];
	u8 remote_ipaddr[16];
	struct wmi_mac_addr target_mac;
} __packed;

/* WMI_SET_ARP_NS_OFFLOAD_CMDID header (tuple TLVs follow). */
struct wmi_set_arp_ns_offload_cmd {
	u32 tlv_header;
	u32 flags;
	u32 vdev_id;
	u32 num_ns_ext_tuples;
	/* The TLVs follow:
	 * wmi_ns_offload_tuple ns_tuples[WMI_MAX_NS_OFFLOADS];
	 * wmi_arp_offload_tuple arp_tuples[WMI_MAX_ARP_OFFLOADS];
	 * wmi_ns_offload_tuple ns_ext_tuples[num_ns_ext_tuples];
	 */
} __packed;
/* Opcodes carried in the flags field of WMI_GTK_OFFLOAD_CMDID. */
#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000

#define GTK_OFFLOAD_KEK_BYTES 16
#define GTK_OFFLOAD_KCK_BYTES 16
#define GTK_REPLAY_COUNTER_BYTES 8
#define WMI_MAX_KEY_LEN 32
#define IGTK_PN_SIZE 6

/* 64-bit replay counter, accessible either as raw bytes or as two
 * 32-bit words (word1 = high half, word0 = low half).
 */
struct wmi_replayc_cnt {
	union {
		u8 counter[GTK_REPLAY_COUNTER_BYTES];
		struct {
			u32 word0;
			u32 word1;
		} __packed;
	} __packed;
} __packed;

/* WMI_GTK_OFFLOAD_STATUS_EVENTID payload. */
struct wmi_gtk_offload_status_event {
	u32 vdev_id;
	u32 flags;
	u32 refresh_cnt;	/* number of GTK refreshes since enable */
	struct wmi_replayc_cnt replay_ctr;
	u8 igtk_key_index;
	u8 igtk_key_length;
	u8 igtk_key_rsc[IGTK_PN_SIZE];
	u8 igtk_key[WMI_MAX_KEY_LEN];
	u8 gtk_key_index;
	u8 gtk_key_length;
	u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
	u8 gtk_key[WMI_MAX_KEY_LEN];
} __packed;

/* WMI_GTK_OFFLOAD_CMDID payload. */
struct wmi_gtk_rekey_offload_cmd {
	u32 tlv_header;
	u32 vdev_id;
	u32 flags;		/* GTK_OFFLOAD_*_OPCODE */
	u8 kek[GTK_OFFLOAD_KEK_BYTES];
	u8 kck[GTK_OFFLOAD_KCK_BYTES];
	u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
} __packed;
int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
u32 cmd_id); u32 cmd_id);
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len); struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
...@@ -5714,4 +6038,22 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar, ...@@ -5714,4 +6038,22 @@ int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
const u8 mac_addr[ETH_ALEN]); const u8 mac_addr[ETH_ALEN]);
int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap, int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
struct ath11k_fw_dbglog *dbglog); struct ath11k_fw_dbglog *dbglog);
int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
struct wmi_pno_scan_req *pno_scan);
int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id);
int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
const u8 *pattern, const u8 *mask,
int pattern_len, int pattern_offset);
int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
enum wmi_wow_wakeup_event event,
u32 enable);
int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
u32 filter_bitmap, bool enable);
int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
struct ath11k_vif *arvif, bool enable);
int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
struct ath11k_vif *arvif, bool enable);
int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
struct ath11k_vif *arvif);
#endif #endif
...@@ -6,11 +6,24 @@ ...@@ -6,11 +6,24 @@
#include <linux/delay.h> #include <linux/delay.h>
#include "mac.h" #include "mac.h"
#include <net/mac80211.h>
#include "core.h" #include "core.h"
#include "hif.h" #include "hif.h"
#include "debug.h" #include "debug.h"
#include "wmi.h" #include "wmi.h"
#include "wow.h" #include "wow.h"
#include "dp_rx.h"
/* WoWLAN capabilities advertised to cfg80211: disconnect and magic-packet
 * wakeups, GTK rekey offload, and user-defined patterns within the
 * firmware's pattern size/offset limits.
 */
static const struct wiphy_wowlan_support ath11k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
int ath11k_wow_enable(struct ath11k_base *ab) int ath11k_wow_enable(struct ath11k_base *ab)
{ {
...@@ -71,3 +84,753 @@ int ath11k_wow_wakeup(struct ath11k_base *ab) ...@@ -71,3 +84,753 @@ int ath11k_wow_wakeup(struct ath11k_base *ab)
return 0; return 0;
} }
/* Bring one vif back to a clean WoW state: disable every wakeup event
 * and delete every installed wakeup pattern.
 *
 * Return: 0 on success or the first error from the WMI calls.
 */
static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
{
	struct ath11k *ar = arvif->ar;
	int ev, pat, ret;

	for (ev = 0; ev < WOW_EVENT_MAX; ev++) {
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id,
						      ev, 0);
		if (ret) {
			ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(ev), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (pat = 0; pat < ar->wow.max_num_patterns; pat++) {
		ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, pat);
		if (ret) {
			ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
				    pat, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
/* Run ath11k_wow_vif_cleanup() on every active vif; stop at the first
 * failure.  Caller must hold ar->conf_mutex.
 */
static int ath11k_wow_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wow_vif_cleanup(arvif);
		if (ret) {
			ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			break;
		}
	}

	return ret;
}
/* Convert a 802.3 format to a 802.11 format.
* +------------+-----------+--------+----------------+
* 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
* +------------+-----------+--------+----------------+
* |__ |_______ |____________ |________
* | | | |
* +--+------------+----+-----------+---------------+-----------+
* 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
* +--+------------+----+-----------+---------------+-----------+
*/
/* Translate an 802.3-format WoW packet pattern (and its byte mask) into
 * the 802.11 "native wifi" layout expected by the firmware.
 *
 * Only the Ethernet header bytes (dest MAC, src MAC, ethertype) are
 * relocated into the 802.11 3-address header plus RFC 1042 SNAP header;
 * any pattern bytes beyond the 802.3 header are appended unchanged as
 * frame body.  The caller guarantees old->pkt_offset < ETH_HLEN and that
 * new->pattern / new->mask point at buffers of at least
 * WOW_MAX_PATTERN_SIZE bytes.
 */
static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
					     const struct cfg80211_pkt_pattern *old)
{
	u8 hdr_8023_pattern[ETH_HLEN] = {};
	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
	/* total span of the old pattern measured from frame start */
	int total_len = old->pkt_offset + old->pattern_len;
	int hdr_80211_end_offset;
	/* overlay views: build the 802.11 header in the local buffers */
	struct ieee80211_hdr_3addr *new_hdr_pattern =
		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
	struct ieee80211_hdr_3addr *new_hdr_mask =
		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
	int hdr_len = sizeof(*new_hdr_pattern);
	/* RFC 1042 SNAP header immediately follows the 802.11 MAC header */
	struct rfc1042_hdr *new_rfc_pattern =
		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
	struct rfc1042_hdr *new_rfc_mask =
		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
	int rfc_len = sizeof(*new_rfc_pattern);

	/* Reconstruct the (partial) 802.3 header at its real offset so the
	 * ethhdr overlay fields line up.
	 */
	memcpy(hdr_8023_pattern + old->pkt_offset,
	       old->pattern, ETH_HLEN - old->pkt_offset);
	memcpy(hdr_8023_bit_mask + old->pkt_offset,
	       old->mask, ETH_HLEN - old->pkt_offset);

	/* Copy destination address */
	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

	/* Copy source address */
	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

	/* Copy logic link type */
	memcpy(&new_rfc_pattern->snap_type,
	       &old_hdr_pattern->h_proto,
	       sizeof(old_hdr_pattern->h_proto));
	memcpy(&new_rfc_mask->snap_type,
	       &old_hdr_mask->h_proto,
	       sizeof(old_hdr_mask->h_proto));

	/* Compute new pkt_offset: map the 802.3 offset onto whichever 802.11
	 * field the first pattern byte landed in (addr1, addr3 or SNAP type).
	 */
	if (old->pkt_offset < ETH_ALEN)
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr1);
	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr3) -
			offsetof(struct ethhdr, h_source);
	else
		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

	/* Compute new hdr end offset: where in the rebuilt 802.11 header the
	 * header part of the pattern stops.
	 */
	if (total_len > ETH_HLEN)
		hdr_80211_end_offset = hdr_len + rfc_len;
	else if (total_len > offsetof(struct ethhdr, h_proto))
		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
	else if (total_len > ETH_ALEN)
		hdr_80211_end_offset = total_len - ETH_ALEN +
			offsetof(struct ieee80211_hdr_3addr, addr3);
	else
		hdr_80211_end_offset = total_len +
			offsetof(struct ieee80211_hdr_3addr, addr1);

	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

	memcpy((u8 *)new->pattern,
	       hdr_80211_pattern + new->pkt_offset,
	       new->pattern_len);
	memcpy((u8 *)new->mask,
	       hdr_80211_bit_mask + new->pkt_offset,
	       new->pattern_len);

	if (total_len > ETH_HLEN) {
		/* Copy frame body */
		memcpy((u8 *)new->pattern + new->pattern_len,
		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);
		memcpy((u8 *)new->mask + new->pattern_len,
		       (void *)old->mask + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);

		new->pattern_len += total_len - ETH_HLEN;
	}
}
/* Validate a cfg80211 net-detect (scheduled scan) request and convert it
 * into the firmware's wmi_pno_scan_req layout.
 *
 * Returns 0 on success, -EINVAL if the request exceeds firmware limits
 * (match-set count, channel count, SSID length).  On the invalid-scan-plan
 * path only a warning is logged and defaults remain in *pno (zeroed by the
 * caller's kzalloc).
 */
static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
					    struct cfg80211_sched_scan_request *nd_config,
					    struct wmi_pno_scan_req *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		/* 32 == max 802.11 SSID length accepted by the firmware */
		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption = 0;
		pno->a_networks[i].bcast_nw_type = 0;

		/* Copying list of valid channel into request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
				nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	/* Mark match-sets that are also probed-for SSIDs as hidden networks
	 * so the firmware sends directed probe requests for them.
	 */
	for (i = 0; i < nd_config->n_ssids; i++) {
		j = 0;
		while (j < pno->uc_networks_count) {
			if (pno->a_networks[j].ssid.ssid_len ==
			    nd_config->ssids[i].ssid_len &&
			    (memcmp(pno->a_networks[j].ssid.ssid,
				    nd_config->ssids[i].ssid,
				    pno->a_networks[j].ssid.ssid_len) == 0)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
			j++;
		}
	}

	/* cfg80211 supplies at most two scan plans: fast (repeated
	 * fast_scan_max_cycles times) then slow until a match.
	 */
	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}
/* Program the firmware with the WoW wakeup triggers requested by
 * userspace for one vdev: wakeup events matching the vdev type,
 * net-detect (PNO) scans, and packet patterns.
 *
 * Returns 0 on success or a negative errno.  On failure some wakeups may
 * already be installed; the caller recovers via ath11k_wow_cleanup().
 *
 * Fix vs. previous revision: the nd_config branch declared a second
 * "int ret" that shadowed the function-scope one; the inner status now
 * has a distinct name so the two cannot be confused.
 */
static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	int ret, i;
	unsigned long wow_mask = 0;
	struct ath11k *ar = arvif->ar;
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	int pattern_id = 0;

	/* Setup requested WOW features */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req *pno;
			int pno_ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			pno_ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
								   wowlan->nd_config,
								   pno);
			if (!pno_ret) {
				ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
		struct cfg80211_pkt_pattern new_pattern = {};
		struct cfg80211_pkt_pattern old_pattern = patterns[i];
		int j;

		new_pattern.pattern = ath_pattern;
		new_pattern.mask = ath_bitmask;
		/* silently skip over-long patterns, matching existing policy */
		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
			continue;
		/* convert bytemask to bitmask */
		for (j = 0; j < patterns[i].pattern_len; j++)
			if (patterns[i].mask[j / 8] & BIT(j % 8))
				bitmask[j] = 0xff;
		old_pattern.mask = bitmask;

		if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
		    ATH11K_HW_TXRX_NATIVE_WIFI) {
			if (patterns[i].pkt_offset < ETH_HLEN) {
				u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};

				memcpy(pattern_ext, old_pattern.pattern,
				       old_pattern.pattern_len);
				old_pattern.pattern = pattern_ext;

				ath11k_wow_convert_8023_to_80211(&new_pattern,
								 &old_pattern);
			} else {
				/* pattern lies past the header: just shift
				 * the offset by the 802.11 header growth
				 */
				new_pattern = old_pattern;
				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
			}
		}

		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.mask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	/* arm every wakeup event collected above */
	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
/* Apply the requested WoW configuration to every vdev on this radio.
 * Must be called with conf_mutex held.
 */
static int ath11k_wow_set_wakeups(struct ath11k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
		if (!ret)
			continue;

		ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}
/* Disable net-detect (PNO) offload on one vdev if it was enabled at
 * suspend time.  Only station vdevs can have PNO active; everything
 * else is a no-op.
 */
static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
{
	struct ath11k *ar = arvif->ar;
	struct wmi_pno_scan_req *pno;
	int ret;

	if (arvif->vdev_type != WMI_VDEV_TYPE_STA || !ar->nlo_enabled)
		return 0;

	pno = kzalloc(sizeof(*pno), GFP_KERNEL);
	if (!pno)
		return -ENOMEM;

	pno->enable = 0;
	ar->nlo_enabled = false;
	ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
	kfree(pno);

	return ret;
}
/* Undo PNO offload on every vdev after wakeup.  Must be called with
 * conf_mutex held.
 */
static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_clean_nlo(arvif);
		if (!ret)
			continue;

		ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}
static int ath11k_wow_set_hw_filter(struct ath11k *ar)
{
struct ath11k_vif *arvif;
u32 bitmap;
int ret;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
bitmap,
true);
if (ret) {
ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
/* Remove the hw data filters installed by ath11k_wow_set_hw_filter().
 * Must be called with conf_mutex held.
 */
static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
		if (!ret)
			continue;

		ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}
/* Enable or disable ARP/NS (IPv4 ARP reply + IPv6 neighbour
 * solicitation) offload on all station vdevs.  Non-station vdevs are
 * skipped.  Must be called with conf_mutex held.
 */
static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
		if (!ret)
			continue;

		ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
			    arvif->vdev_id, enable, ret);
		return ret;
	}

	return 0;
}
/* Enable or disable GTK rekey offload on every associated station vdev
 * that has rekey data uploaded.  When disabling (on resume), the current
 * replay counters are fetched from firmware first so they can be
 * reported back to userspace.  Must be called with conf_mutex held.
 *
 * Also fixes the "gtk reky" typo in the failure warning.
 */
static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disable rekey offload */
		if (!enable) {
			ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);

		if (ret) {
			ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}
/* Convenience wrapper: toggle both ARP/NS and GTK-rekey offloads
 * together.  Called with enable=true on suspend and enable=false on
 * resume.  Returns the first failing sub-step's error.
 */
static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
{
	int ret;

	ret = ath11k_wow_arp_ns_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath11k_gtk_rekey_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
			    enable, ret);
		return ret;
	}

	return 0;
}
/* mac80211 suspend hook: quiesce the datapath, program the requested WoW
 * wakeups/patterns/offloads into firmware, enable WoW mode and suspend
 * the host interface.
 *
 * Returns 0 on success; any failure is reported to mac80211 as 1 (after
 * best-effort rollback), which makes it tear down and reconfigure the
 * device instead of suspending.
 */
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath11k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	/* stop pktlog and its reap timer before touching wow state */
	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
			    ret);
		goto exit;
	}

	/* start from a clean slate: no stale wakeups or patterns */
	ret = ath11k_wow_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_protocol_offload(ar, true);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	/* flush pending tx before handing control to firmware */
	ath11k_mac_drain_tx(ar);
	ret = ath11k_mac_wait_tx_complete(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_wow_set_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_enable(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	/* second pktlog stop (without timer) now that wow is armed */
	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx pktlog during wow suspend: %d\n",
			    ret);
		goto cleanup;
	}

	/* silence the host side completely before suspending the bus */
	ath11k_ce_stop_shadow_timers(ar->ab);
	ath11k_dp_stop_shadow_timers(ar->ab);
	ath11k_hif_irq_disable(ar->ab);
	ath11k_hif_ce_irq_disable(ar->ab);

	ret = ath11k_hif_suspend(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	/* hif suspend failed: bring firmware out of wow before cleanup */
	ath11k_wow_wakeup(ar->ab);

cleanup:
	ath11k_wow_cleanup(ar);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret ? 1 : 0;
}
/* mac80211 set_wakeup hook: mirror mac80211's wakeup decision into the
 * device's wakeup-enable flag so the platform keeps (or drops) the
 * wake-capable configuration across suspend.
 */
void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath11k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);
	device_set_wakeup_enable(ar->ab->dev, enabled);
	mutex_unlock(&ar->conf_mutex);
}
/* mac80211 resume hook: resume the host interface, re-enable interrupts,
 * wake the firmware out of WoW and undo the suspend-time offloads and
 * filters.
 *
 * Returns 0 on success, 1 to request a device restart (mac80211 then
 * reconfigures the hardware), or a negative errno when the device state
 * is unrecoverable.
 *
 * Fix: the pktlog-start failure path used "return ret", which exited
 * with conf_mutex still held (deadlocking the next config operation) and
 * bypassed the restart handling.  It now takes the common exit path.
 */
int ath11k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath11k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	ret = ath11k_hif_resume(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath11k_hif_ce_irq_enable(ar->ab);
	ath11k_hif_irq_enable(ar->ab);

	ret = ath11k_dp_rx_pktlog_start(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_wakeup(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_nlo_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_clear_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_protocol_offload(ar, false);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

exit:
	if (ret) {
		/* resume failed: decide between firmware restart and giving up */
		switch (ar->state) {
		case ATH11K_STATE_ON:
			ar->state = ATH11K_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH11K_STATE_OFF:
		case ATH11K_STATE_RESTARTING:
		case ATH11K_STATE_RESTARTED:
		case ATH11K_STATE_WEDGED:
			ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ar->state);
			ret = -EIO;
			break;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}
int ath11k_wow_init(struct ath11k *ar)
{
if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
return 0;
ar->wow.wowlan_support = ath11k_wowlan_support;
if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
ATH11K_HW_TXRX_NATIVE_WIFI) {
ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
}
if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
}
ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
device_set_wakeup_capable(ar->ab->dev, true);
return 0;
}
...@@ -3,8 +3,53 @@ ...@@ -3,8 +3,53 @@
* Copyright (c) 2020 The Linux Foundation. All rights reserved. * Copyright (c) 2020 The Linux Foundation. All rights reserved.
*/ */
#ifndef _WOW_H_
#define _WOW_H_
/* Per-radio Wake-on-WLAN state. */
struct ath11k_wow {
	u32 max_num_patterns;			/* fw pattern slots advertised to cfg80211 */
	struct completion wakeup_completed;	/* signalled when fw leaves wow mode */
	struct wiphy_wowlan_support wowlan_support; /* capabilities exported via wiphy */
};
/* LLC/SNAP (RFC 1042) encapsulation header that follows the 802.11 MAC
 * header in a native-wifi frame; used when converting 802.3 WoW patterns
 * to the 802.11 layout.
 */
struct rfc1042_hdr {
	u8 llc_dsap;
	u8 llc_ssap;
	u8 llc_ctrl;
	u8 snap_oui[3];
	__be16 snap_type;	/* carries the original ethertype */
} __packed;
#define ATH11K_WOW_RETRY_NUM 3 #define ATH11K_WOW_RETRY_NUM 3
#define ATH11K_WOW_RETRY_WAIT_MS 200 #define ATH11K_WOW_RETRY_WAIT_MS 200
#define ATH11K_WOW_PATTERNS 22
#ifdef CONFIG_PM
int ath11k_wow_init(struct ath11k *ar);
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan);
int ath11k_wow_op_resume(struct ieee80211_hw *hw);
void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
int ath11k_wow_enable(struct ath11k_base *ab); int ath11k_wow_enable(struct ath11k_base *ab);
int ath11k_wow_wakeup(struct ath11k_base *ab); int ath11k_wow_wakeup(struct ath11k_base *ab);
#else
static inline int ath11k_wow_init(struct ath11k *ar)
{
return 0;
}
static inline int ath11k_wow_enable(struct ath11k_base *ab)
{
return 0;
}
static inline int ath11k_wow_wakeup(struct ath11k_base *ab)
{
return 0;
}
#endif /* CONFIG_PM */
#endif /* _WOW_H_ */
...@@ -1538,7 +1538,7 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target, ...@@ -1538,7 +1538,7 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
queue, n_msg); queue, n_msg);
/* /*
* This is due to unavailabilty of buffers to rx entire data. * This is due to unavailability of buffers to rx entire data.
* Return no error so that free buffers from queue can be used * Return no error so that free buffers from queue can be used
* to receive partial data. * to receive partial data.
*/ */
......
...@@ -98,14 +98,12 @@ static int ath_ahb_probe(struct platform_device *pdev) ...@@ -98,14 +98,12 @@ static int ath_ahb_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
} }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); irq = platform_get_irq(pdev, 0);
if (res == NULL) { if (irq < 0) {
dev_err(&pdev->dev, "no IRQ resource found\n"); dev_err(&pdev->dev, "no IRQ resource found\n");
return -ENXIO; return irq;
} }
irq = res->start;
ath9k_fill_chanctx_ops(); ath9k_fill_chanctx_ops();
hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) { if (hw == NULL) {
......
...@@ -301,10 +301,11 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ...@@ -301,10 +301,11 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2) WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2)
| set11nPktDurRTSCTS(i->rates, 3)); | set11nPktDurRTSCTS(i->rates, 3));
WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0) WRITE_ONCE(ads->ds_ctl7,
| set11nRateFlags(i->rates, 1) set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0)
| set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1)
| set11nRateFlags(i->rates, 3) | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2)
| set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate)); | SM(i->rtscts_rate, AR_RTSCTSRate));
WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1)); WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1));
......
...@@ -177,7 +177,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah) ...@@ -177,7 +177,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
int i; int i;
/* Accumulate IQ cal measures for active chains */ /* Accumulate IQ cal measures for active chains */
for (i = 0; i < AR5416_MAX_CHAINS; i++) { for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->txchainmask & BIT(i)) { if (ah->txchainmask & BIT(i)) {
ah->totalPowerMeasI[i] += ah->totalPowerMeasI[i] +=
REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
......
...@@ -3911,7 +3911,7 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan) ...@@ -3911,7 +3911,7 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
} }
/* Test value. if 0 then attenuation is unused. Don't load anything. */ /* Test value. if 0 then attenuation is unused. Don't load anything. */
for (i = 0; i < 3; i++) { for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (ah->txchainmask & BIT(i)) { if (ah->txchainmask & BIT(i)) {
value = ar9003_hw_atten_chain_get(ah, i, chan); value = ar9003_hw_atten_chain_get(ah, i, chan);
REG_RMW_FIELD(ah, ext_atten_reg[i], REG_RMW_FIELD(ah, ext_atten_reg[i],
...@@ -4747,7 +4747,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah, ...@@ -4747,7 +4747,7 @@ static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
} }
static int ar9003_hw_cal_pier_get(struct ath_hw *ah, static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
int mode, bool is2ghz,
int ipier, int ipier,
int ichain, int ichain,
int *pfrequency, int *pfrequency,
...@@ -4757,7 +4757,6 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, ...@@ -4757,7 +4757,6 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
{ {
u8 *pCalPier; u8 *pCalPier;
struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct; struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
int is2GHz;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
struct ath_common *common = ath9k_hw_common(ah); struct ath_common *common = ath9k_hw_common(ah);
...@@ -4768,17 +4767,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, ...@@ -4768,17 +4767,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
return -1; return -1;
} }
if (mode) { /* 5GHz */ if (is2ghz) {
if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
ath_dbg(common, EEPROM,
"Invalid 5GHz cal pier index, must be less than %d\n",
AR9300_NUM_5G_CAL_PIERS);
return -1;
}
pCalPier = &(eep->calFreqPier5G[ipier]);
pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
is2GHz = 0;
} else {
if (ipier >= AR9300_NUM_2G_CAL_PIERS) { if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
ath_dbg(common, EEPROM, ath_dbg(common, EEPROM,
"Invalid 2GHz cal pier index, must be less than %d\n", "Invalid 2GHz cal pier index, must be less than %d\n",
...@@ -4788,10 +4777,18 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah, ...@@ -4788,10 +4777,18 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
pCalPier = &(eep->calFreqPier2G[ipier]); pCalPier = &(eep->calFreqPier2G[ipier]);
pCalPierStruct = &(eep->calPierData2G[ichain][ipier]); pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
is2GHz = 1; } else {
if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
ath_dbg(common, EEPROM,
"Invalid 5GHz cal pier index, must be less than %d\n",
AR9300_NUM_5G_CAL_PIERS);
return -1;
}
pCalPier = &(eep->calFreqPier5G[ipier]);
pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
} }
*pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz); *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2ghz);
*pcorrection = pCalPierStruct->refPower; *pcorrection = pCalPierStruct->refPower;
*ptemperature = pCalPierStruct->tempMeas; *ptemperature = pCalPierStruct->tempMeas;
*pvoltage = pCalPierStruct->voltMeas; *pvoltage = pCalPierStruct->voltMeas;
...@@ -4960,7 +4957,6 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah, ...@@ -4960,7 +4957,6 @@ static void ar9003_hw_power_control_override(struct ath_hw *ah,
static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
{ {
int ichain, ipier, npier; int ichain, ipier, npier;
int mode;
int lfrequency[AR9300_MAX_CHAINS], int lfrequency[AR9300_MAX_CHAINS],
lcorrection[AR9300_MAX_CHAINS], lcorrection[AR9300_MAX_CHAINS],
ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS], ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS],
...@@ -4976,12 +4972,12 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) ...@@ -4976,12 +4972,12 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
int pfrequency, pcorrection, ptemperature, pvoltage, int pfrequency, pcorrection, ptemperature, pvoltage,
pnf_cal, pnf_pwr; pnf_cal, pnf_pwr;
struct ath_common *common = ath9k_hw_common(ah); struct ath_common *common = ath9k_hw_common(ah);
bool is2ghz = frequency < 4000;
mode = (frequency >= 4000); if (is2ghz)
if (mode)
npier = AR9300_NUM_5G_CAL_PIERS;
else
npier = AR9300_NUM_2G_CAL_PIERS; npier = AR9300_NUM_2G_CAL_PIERS;
else
npier = AR9300_NUM_5G_CAL_PIERS;
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
lfrequency[ichain] = 0; lfrequency[ichain] = 0;
...@@ -4990,7 +4986,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) ...@@ -4990,7 +4986,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* identify best lower and higher frequency calibration measurement */ /* identify best lower and higher frequency calibration measurement */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
for (ipier = 0; ipier < npier; ipier++) { for (ipier = 0; ipier < npier; ipier++) {
if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain, if (!ar9003_hw_cal_pier_get(ah, is2ghz, ipier, ichain,
&pfrequency, &pcorrection, &pfrequency, &pcorrection,
&ptemperature, &pvoltage, &ptemperature, &pvoltage,
&pnf_cal, &pnf_pwr)) { &pnf_cal, &pnf_pwr)) {
...@@ -5126,13 +5122,13 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency) ...@@ -5126,13 +5122,13 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
frequency, correction[0], correction[1], correction[2]); frequency, correction[0], correction[1], correction[2]);
/* Store calibrated noise floor values */ /* Store calibrated noise floor values */
for (ichain = 0; ichain < AR5416_MAX_CHAINS; ichain++) for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++)
if (mode) { if (is2ghz) {
ah->nf_5g.cal[ichain] = nf_cal[ichain];
ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
} else {
ah->nf_2g.cal[ichain] = nf_cal[ichain]; ah->nf_2g.cal[ichain] = nf_cal[ichain];
ah->nf_2g.pwr[ichain] = nf_pwr[ichain]; ah->nf_2g.pwr[ichain] = nf_pwr[ichain];
} else {
ah->nf_5g.cal[ichain] = nf_cal[ichain];
ah->nf_5g.pwr[ichain] = nf_pwr[ichain];
} }
return 0; return 0;
...@@ -5449,8 +5445,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, ...@@ -5449,8 +5445,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
{ {
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah); struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
struct ar9300_modal_eep_header *modal_hdr;
u8 targetPowerValT2[ar9300RateSize]; u8 targetPowerValT2[ar9300RateSize];
u8 target_power_val_t2_eep[ar9300RateSize]; u8 target_power_val_t2_eep[ar9300RateSize];
u8 targetPowerValT2_tpc[ar9300RateSize]; u8 targetPowerValT2_tpc[ar9300RateSize];
...@@ -5465,17 +5459,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, ...@@ -5465,17 +5459,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2); ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
if (ar9003_is_paprd_enabled(ah)) { if (ar9003_is_paprd_enabled(ah)) {
if (IS_CHAN_2GHZ(chan))
modal_hdr = &eep->modalHeader2G;
else
modal_hdr = &eep->modalHeader5G;
ah->paprd_ratemask = ah->paprd_ratemask =
le32_to_cpu(modal_hdr->papdRateMaskHt20) & ar9003_get_paprd_rate_mask_ht20(ah, IS_CHAN_2GHZ(chan)) &
AR9300_PAPRD_RATE_MASK; AR9300_PAPRD_RATE_MASK;
ah->paprd_ratemask_ht40 = ah->paprd_ratemask_ht40 =
le32_to_cpu(modal_hdr->papdRateMaskHt40) & ar9003_get_paprd_rate_mask_ht40(ah, IS_CHAN_2GHZ(chan)) &
AR9300_PAPRD_RATE_MASK; AR9300_PAPRD_RATE_MASK;
paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan); paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan);
...@@ -5592,30 +5581,40 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz) ...@@ -5592,30 +5581,40 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz)
return ar9003_modal_header(ah, is2ghz)->spurChans; return ar9003_modal_header(ah, is2ghz)->spurChans;
} }
u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz)
{
return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt20);
}
u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz)
{
return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->papdRateMaskHt40);
}
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan) struct ath9k_channel *chan)
{ {
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; bool is2ghz = IS_CHAN_2GHZ(chan);
if (IS_CHAN_2GHZ(chan)) if (is2ghz)
return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20), return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz),
AR9300_PAPRD_SCALE_1); AR9300_PAPRD_SCALE_1);
else { else {
if (chan->channel >= 5700) if (chan->channel >= 5700)
return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), return MS(ar9003_get_paprd_rate_mask_ht20(ah, is2ghz),
AR9300_PAPRD_SCALE_1); AR9300_PAPRD_SCALE_1);
else if (chan->channel >= 5400) else if (chan->channel >= 5400)
return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz),
AR9300_PAPRD_SCALE_2); AR9300_PAPRD_SCALE_2);
else else
return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), return MS(ar9003_get_paprd_rate_mask_ht40(ah, is2ghz),
AR9300_PAPRD_SCALE_1); AR9300_PAPRD_SCALE_1);
} }
} }
static u8 ar9003_get_eepmisc(struct ath_hw *ah) static u8 ar9003_get_eepmisc(struct ath_hw *ah)
{ {
return ah->eeprom.map4k.baseEepHeader.eepMisc; return ah->eeprom.ar9300_eep.baseEepHeader.opCapFlags.eepMisc;
} }
const struct eeprom_ops eep_ar9300_ops = { const struct eeprom_ops eep_ar9300_ops = {
......
...@@ -363,6 +363,8 @@ u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz); ...@@ -363,6 +363,8 @@ u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz); u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
u32 ar9003_get_paprd_rate_mask_ht20(struct ath_hw *ah, bool is2ghz);
u32 ar9003_get_paprd_rate_mask_ht40(struct ath_hw *ah, bool is2ghz);
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan); struct ath9k_channel *chan);
......
...@@ -144,10 +144,11 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i) ...@@ -144,10 +144,11 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2) WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2)
| set11nPktDurRTSCTS(i->rates, 3)); | set11nPktDurRTSCTS(i->rates, 3));
WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0) WRITE_ONCE(ads->ctl18,
| set11nRateFlags(i->rates, 1) set11nRateFlags(i->rates, 0) | set11nChainSel(i->rates, 0)
| set11nRateFlags(i->rates, 2) | set11nRateFlags(i->rates, 1) | set11nChainSel(i->rates, 1)
| set11nRateFlags(i->rates, 3) | set11nRateFlags(i->rates, 2) | set11nChainSel(i->rates, 2)
| set11nRateFlags(i->rates, 3) | set11nChainSel(i->rates, 3)
| SM(i->rtscts_rate, AR_RTSCTSRate)); | SM(i->rtscts_rate, AR_RTSCTSRate));
WRITE_ONCE(ads->ctl19, AR_Not_Sounding); WRITE_ONCE(ads->ctl19, AR_Not_Sounding);
......
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
void ar9003_paprd_enable(struct ath_hw *ah, bool val) void ar9003_paprd_enable(struct ath_hw *ah, bool val)
{ {
struct ath9k_channel *chan = ah->curchan; struct ath9k_channel *chan = ah->curchan;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; bool is2ghz = IS_CHAN_2GHZ(chan);
/* /*
* 3 bits for modalHeader5G.papdRateMaskHt20 * 3 bits for modalHeader5G.papdRateMaskHt20
...@@ -36,17 +36,17 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val) ...@@ -36,17 +36,17 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
* -- disable PAPRD for lower band 5GHz * -- disable PAPRD for lower band 5GHz
*/ */
if (IS_CHAN_5GHZ(chan)) { if (!is2ghz) {
if (chan->channel >= UPPER_5G_SUB_BAND_START) { if (chan->channel >= UPPER_5G_SUB_BAND_START) {
if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20) if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(30)) & BIT(30))
val = false; val = false;
} else if (chan->channel >= MID_5G_SUB_BAND_START) { } else if (chan->channel >= MID_5G_SUB_BAND_START) {
if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20) if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(29)) & BIT(29))
val = false; val = false;
} else { } else {
if (le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20) if (ar9003_get_paprd_rate_mask_ht20(ah, is2ghz)
& BIT(28)) & BIT(28))
val = false; val = false;
} }
......
...@@ -523,21 +523,10 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah, ...@@ -523,21 +523,10 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
int synth_freq; int synth_freq;
int range = 10; int range = 10;
int freq_offset = 0; int freq_offset = 0;
int mode; u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
u8* spurChansPtr;
unsigned int i; unsigned int i;
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
if (IS_CHAN_5GHZ(chan)) {
spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
mode = 0;
}
else {
spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
mode = 1;
}
if (spurChansPtr[0] == 0) if (spur_fbin_ptr[0] == 0)
return; /* No spur in the mode */ return; /* No spur in the mode */
if (IS_CHAN_HT40(chan)) { if (IS_CHAN_HT40(chan)) {
...@@ -554,16 +543,18 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah, ...@@ -554,16 +543,18 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
ar9003_hw_spur_ofdm_clear(ah); ar9003_hw_spur_ofdm_clear(ah);
for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) { for (i = 0; i < AR_EEPROM_MODAL_SPURS && spur_fbin_ptr[i]; i++) {
freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode); freq_offset = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
IS_CHAN_2GHZ(chan));
freq_offset -= synth_freq; freq_offset -= synth_freq;
if (abs(freq_offset) < range) { if (abs(freq_offset) < range) {
ar9003_hw_spur_ofdm_work(ah, chan, freq_offset, ar9003_hw_spur_ofdm_work(ah, chan, freq_offset,
range, synth_freq); range, synth_freq);
if (AR_SREV_9565(ah) && (i < 4)) { if (AR_SREV_9565(ah) && (i < 4)) {
freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i + 1], freq_offset =
mode); ath9k_hw_fbin2freq(spur_fbin_ptr[i + 1],
IS_CHAN_2GHZ(chan));
freq_offset -= synth_freq; freq_offset -= synth_freq;
if (abs(freq_offset) < range) if (abs(freq_offset) < range)
ar9003_hw_spur_ofdm_9565(ah, freq_offset); ar9003_hw_spur_ofdm_9565(ah, freq_offset);
......
...@@ -35,8 +35,10 @@ ...@@ -35,8 +35,10 @@
|((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
AR_GI##_index : 0) \ AR_GI##_index : 0) \
|((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \ |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
AR_STBC##_index : 0) \ AR_STBC##_index : 0))
|SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define set11nChainSel(_series, _index) \
(SM((_series)[_index].ChSel, AR_ChainSel##_index))
#define CCK_SIFS_TIME 10 #define CCK_SIFS_TIME 10
#define CCK_PREAMBLE_BITS 144 #define CCK_PREAMBLE_BITS 144
......
...@@ -834,8 +834,8 @@ ...@@ -834,8 +834,8 @@
((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_5416_22)) || \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100)) ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
#define AR_SREV_9100(ah) \ #define AR_SREV_9100(_ah) \
((ah->hw_version.macVersion) == AR_SREV_VERSION_9100) (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9100))
#define AR_SREV_9100_OR_LATER(_ah) \ #define AR_SREV_9100_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100)) (((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9100))
...@@ -891,7 +891,7 @@ ...@@ -891,7 +891,7 @@
#define AR_SREV_9300_20_OR_LATER(_ah) \ #define AR_SREV_9300_20_OR_LATER(_ah) \
((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300) ((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9300)
#define AR_SREV_9300_22(_ah) \ #define AR_SREV_9300_22(_ah) \
(AR_SREV_9300(ah) && \ (AR_SREV_9300((_ah)) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22)) ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_22))
#define AR_SREV_9330(_ah) \ #define AR_SREV_9330(_ah) \
...@@ -994,8 +994,8 @@ ...@@ -994,8 +994,8 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561)) (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
#define AR_SREV_SOC(_ah) \ #define AR_SREV_SOC(_ah) \
(AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(ah) || \ (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(_ah) || \
AR_SREV_9561(ah)) AR_SREV_9561(_ah))
/* NOTE: When adding chips newer than Peacock, add chip check here */ /* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \ #define AR_SREV_9580_10_OR_LATER(_ah) \
......
...@@ -2626,7 +2626,12 @@ enum tx_rate_info { ...@@ -2626,7 +2626,12 @@ enum tx_rate_info {
HAL_TX_RATE_SGI = 0x8, HAL_TX_RATE_SGI = 0x8,
/* Rate with Long guard interval */ /* Rate with Long guard interval */
HAL_TX_RATE_LGI = 0x10 HAL_TX_RATE_LGI = 0x10,
/* VHT rates */
HAL_TX_RATE_VHT20 = 0x20,
HAL_TX_RATE_VHT40 = 0x40,
HAL_TX_RATE_VHT80 = 0x80,
}; };
struct ani_global_class_a_stats_info { struct ani_global_class_a_stats_info {
......
...@@ -192,70 +192,74 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif, ...@@ -192,70 +192,74 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index; sta_priv->sta_index;
} }
#define DEFINE(s) [s] = #s
static const char * const wcn36xx_caps_names[] = { static const char * const wcn36xx_caps_names[] = {
"MCC", /* 0 */ DEFINE(MCC),
"P2P", /* 1 */ DEFINE(P2P),
"DOT11AC", /* 2 */ DEFINE(DOT11AC),
"SLM_SESSIONIZATION", /* 3 */ DEFINE(SLM_SESSIONIZATION),
"DOT11AC_OPMODE", /* 4 */ DEFINE(DOT11AC_OPMODE),
"SAP32STA", /* 5 */ DEFINE(SAP32STA),
"TDLS", /* 6 */ DEFINE(TDLS),
"P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */ DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
"WLANACTIVE_OFFLOAD", /* 8 */ DEFINE(WLANACTIVE_OFFLOAD),
"BEACON_OFFLOAD", /* 9 */ DEFINE(BEACON_OFFLOAD),
"SCAN_OFFLOAD", /* 10 */ DEFINE(SCAN_OFFLOAD),
"ROAM_OFFLOAD", /* 11 */ DEFINE(ROAM_OFFLOAD),
"BCN_MISS_OFFLOAD", /* 12 */ DEFINE(BCN_MISS_OFFLOAD),
"STA_POWERSAVE", /* 13 */ DEFINE(STA_POWERSAVE),
"STA_ADVANCED_PWRSAVE", /* 14 */ DEFINE(STA_ADVANCED_PWRSAVE),
"AP_UAPSD", /* 15 */ DEFINE(AP_UAPSD),
"AP_DFS", /* 16 */ DEFINE(AP_DFS),
"BLOCKACK", /* 17 */ DEFINE(BLOCKACK),
"PHY_ERR", /* 18 */ DEFINE(PHY_ERR),
"BCN_FILTER", /* 19 */ DEFINE(BCN_FILTER),
"RTT", /* 20 */ DEFINE(RTT),
"RATECTRL", /* 21 */ DEFINE(RATECTRL),
"WOW", /* 22 */ DEFINE(WOW),
"WLAN_ROAM_SCAN_OFFLOAD", /* 23 */ DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
"SPECULATIVE_PS_POLL", /* 24 */ DEFINE(SPECULATIVE_PS_POLL),
"SCAN_SCH", /* 25 */ DEFINE(SCAN_SCH),
"IBSS_HEARTBEAT_OFFLOAD", /* 26 */ DEFINE(IBSS_HEARTBEAT_OFFLOAD),
"WLAN_SCAN_OFFLOAD", /* 27 */ DEFINE(WLAN_SCAN_OFFLOAD),
"WLAN_PERIODIC_TX_PTRN", /* 28 */ DEFINE(WLAN_PERIODIC_TX_PTRN),
"ADVANCE_TDLS", /* 29 */ DEFINE(ADVANCE_TDLS),
"BATCH_SCAN", /* 30 */ DEFINE(BATCH_SCAN),
"FW_IN_TX_PATH", /* 31 */ DEFINE(FW_IN_TX_PATH),
"EXTENDED_NSOFFLOAD_SLOT", /* 32 */ DEFINE(EXTENDED_NSOFFLOAD_SLOT),
"CH_SWITCH_V1", /* 33 */ DEFINE(CH_SWITCH_V1),
"HT40_OBSS_SCAN", /* 34 */ DEFINE(HT40_OBSS_SCAN),
"UPDATE_CHANNEL_LIST", /* 35 */ DEFINE(UPDATE_CHANNEL_LIST),
"WLAN_MCADDR_FLT", /* 36 */ DEFINE(WLAN_MCADDR_FLT),
"WLAN_CH144", /* 37 */ DEFINE(WLAN_CH144),
"NAN", /* 38 */ DEFINE(NAN),
"TDLS_SCAN_COEXISTENCE", /* 39 */ DEFINE(TDLS_SCAN_COEXISTENCE),
"LINK_LAYER_STATS_MEAS", /* 40 */ DEFINE(LINK_LAYER_STATS_MEAS),
"MU_MIMO", /* 41 */ DEFINE(MU_MIMO),
"EXTENDED_SCAN", /* 42 */ DEFINE(EXTENDED_SCAN),
"DYNAMIC_WMM_PS", /* 43 */ DEFINE(DYNAMIC_WMM_PS),
"MAC_SPOOFED_SCAN", /* 44 */ DEFINE(MAC_SPOOFED_SCAN),
"BMU_ERROR_GENERIC_RECOVERY", /* 45 */ DEFINE(BMU_ERROR_GENERIC_RECOVERY),
"DISA", /* 46 */ DEFINE(DISA),
"FW_STATS", /* 47 */ DEFINE(FW_STATS),
"WPS_PRBRSP_TMPL", /* 48 */ DEFINE(WPS_PRBRSP_TMPL),
"BCN_IE_FLT_DELTA", /* 49 */ DEFINE(BCN_IE_FLT_DELTA),
"TDLS_OFF_CHANNEL", /* 51 */ DEFINE(TDLS_OFF_CHANNEL),
"RTT3", /* 52 */ DEFINE(RTT3),
"MGMT_FRAME_LOGGING", /* 53 */ DEFINE(MGMT_FRAME_LOGGING),
"ENHANCED_TXBD_COMPLETION", /* 54 */ DEFINE(ENHANCED_TXBD_COMPLETION),
"LOGGING_ENHANCEMENT", /* 55 */ DEFINE(LOGGING_ENHANCEMENT),
"EXT_SCAN_ENHANCED", /* 56 */ DEFINE(EXT_SCAN_ENHANCED),
"MEMORY_DUMP_SUPPORTED", /* 57 */ DEFINE(MEMORY_DUMP_SUPPORTED),
"PER_PKT_STATS_SUPPORTED", /* 58 */ DEFINE(PER_PKT_STATS_SUPPORTED),
"EXT_LL_STAT", /* 60 */ DEFINE(EXT_LL_STAT),
"WIFI_CONFIG", /* 61 */ DEFINE(WIFI_CONFIG),
"ANTENNA_DIVERSITY_SELECTION", /* 62 */ DEFINE(ANTENNA_DIVERSITY_SELECTION),
}; };
#undef DEFINE
static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x) static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
{ {
if (x >= ARRAY_SIZE(wcn36xx_caps_names)) if (x >= ARRAY_SIZE(wcn36xx_caps_names))
...@@ -1400,6 +1404,21 @@ static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx, ...@@ -1400,6 +1404,21 @@ static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
return 0; return 0;
} }
static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct station_info *sinfo)
{
struct wcn36xx *wcn;
u8 sta_index;
int status;
wcn = hw->priv;
sta_index = get_sta_index(vif, wcn36xx_sta_to_priv(sta));
status = wcn36xx_smd_get_stats(wcn, sta_index, HAL_GLOBAL_CLASS_A_STATS_INFO, sinfo);
if (status)
wcn36xx_err("wcn36xx_smd_get_stats failed\n");
}
static const struct ieee80211_ops wcn36xx_ops = { static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start, .start = wcn36xx_start,
.stop = wcn36xx_stop, .stop = wcn36xx_stop,
...@@ -1423,6 +1442,7 @@ static const struct ieee80211_ops wcn36xx_ops = { ...@@ -1423,6 +1442,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.set_rts_threshold = wcn36xx_set_rts_threshold, .set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add, .sta_add = wcn36xx_sta_add,
.sta_remove = wcn36xx_sta_remove, .sta_remove = wcn36xx_sta_remove,
.sta_statistics = wcn36xx_sta_statistics,
.ampdu_action = wcn36xx_ampdu_action, .ampdu_action = wcn36xx_ampdu_action,
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = wcn36xx_ipv6_addr_change, .ipv6_addr_change = wcn36xx_ipv6_addr_change,
......
...@@ -2627,6 +2627,62 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index) ...@@ -2627,6 +2627,62 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index)
return ret; return ret;
} }
int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask,
struct station_info *sinfo)
{
struct wcn36xx_hal_stats_req_msg msg_body;
struct wcn36xx_hal_stats_rsp_msg *rsp;
void *rsp_body;
int ret;
if (stats_mask & ~HAL_GLOBAL_CLASS_A_STATS_INFO) {
wcn36xx_err("stats_mask 0x%x contains unimplemented types\n",
stats_mask);
return -EINVAL;
}
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_GET_STATS_REQ);
msg_body.sta_id = sta_index;
msg_body.stats_mask = stats_mask;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("sending hal_get_stats failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_get_stats response failed err=%d\n", ret);
goto out;
}
rsp = (struct wcn36xx_hal_stats_rsp_msg *)wcn->hal_buf;
rsp_body = (wcn->hal_buf + sizeof(struct wcn36xx_hal_stats_rsp_msg));
if (rsp->stats_mask != stats_mask) {
wcn36xx_err("stats_mask 0x%x differs from requested 0x%x\n",
rsp->stats_mask, stats_mask);
goto out;
}
if (rsp->stats_mask & HAL_GLOBAL_CLASS_A_STATS_INFO) {
struct ani_global_class_a_stats_info *stats_info = rsp_body;
wcn36xx_process_tx_rate(stats_info, &sinfo->txrate);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
rsp_body += sizeof(struct ani_global_class_a_stats_info);
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info) static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info)
{ {
struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate; struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate;
...@@ -3316,6 +3372,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, ...@@ -3316,6 +3372,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_ADD_BA_SESSION_RSP: case WCN36XX_HAL_ADD_BA_SESSION_RSP:
case WCN36XX_HAL_ADD_BA_RSP: case WCN36XX_HAL_ADD_BA_RSP:
case WCN36XX_HAL_DEL_BA_RSP: case WCN36XX_HAL_DEL_BA_RSP:
case WCN36XX_HAL_GET_STATS_RSP:
case WCN36XX_HAL_TRIGGER_BA_RSP: case WCN36XX_HAL_TRIGGER_BA_RSP:
case WCN36XX_HAL_UPDATE_CFG_RSP: case WCN36XX_HAL_UPDATE_CFG_RSP:
case WCN36XX_HAL_JOIN_RSP: case WCN36XX_HAL_JOIN_RSP:
......
...@@ -138,6 +138,8 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn, ...@@ -138,6 +138,8 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id); int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index); int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn); int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn);
int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask,
struct station_info *sinfo);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value); int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
......
...@@ -699,3 +699,32 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, ...@@ -699,3 +699,32 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
return ret; return ret;
} }
void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info)
{
/* tx_rate is in units of 500kbps; mac80211 wants them in 100kbps */
if (stats->tx_rate_flags & HAL_TX_RATE_LEGACY)
info->legacy = stats->tx_rate * 5;
info->flags = 0;
info->mcs = stats->mcs_index;
info->nss = 1;
if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_HT40))
info->flags |= RATE_INFO_FLAGS_MCS;
if (stats->tx_rate_flags & (HAL_TX_RATE_VHT20 | HAL_TX_RATE_VHT40 | HAL_TX_RATE_VHT80))
info->flags |= RATE_INFO_FLAGS_VHT_MCS;
if (stats->tx_rate_flags & HAL_TX_RATE_SGI)
info->flags |= RATE_INFO_FLAGS_SHORT_GI;
if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_VHT20))
info->bw = RATE_INFO_BW_20;
if (stats->tx_rate_flags & (HAL_TX_RATE_HT40 | HAL_TX_RATE_VHT40))
info->bw = RATE_INFO_BW_40;
if (stats->tx_rate_flags & HAL_TX_RATE_VHT80)
info->bw = RATE_INFO_BW_80;
}
...@@ -164,5 +164,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb); ...@@ -164,5 +164,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
int wcn36xx_start_tx(struct wcn36xx *wcn, int wcn36xx_start_tx(struct wcn36xx *wcn,
struct wcn36xx_sta *sta_priv, struct wcn36xx_sta *sta_priv,
struct sk_buff *skb); struct sk_buff *skb);
void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info);
#endif /* _TXRX_H_ */ #endif /* _TXRX_H_ */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册