Commit e604d912 authored by Kalle Valo


Merge tag 'iwlwifi-for-kalle-2015-10-25' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-fixes

* some fixes for suspend/resume with unified FW images;
* a fix for a false-positive lockdep report;
* a fix for multi-queue that caused an unnecessary 1 second latency;
* a fix for an ACPI parsing bug that caused a misleading error message;
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
 		ret = iwl_mvm_switch_to_d3(mvm);
 		if (ret)
 			return ret;
+	} else {
+		/* In theory, we wouldn't have to stop a running sched
+		 * scan in order to start another one (for
+		 * net-detect). But in practice this doesn't seem to
+		 * work properly, so stop any running sched_scan now.
+		 */
+		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+		if (ret)
+			return ret;
 	}
 
 	/* rfkill release can be either for wowlan or netdetect */
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 out:
 	if (ret < 0) {
 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
-		ieee80211_restart_hw(mvm->hw);
+		if (mvm->restart_fw > 0) {
+			mvm->restart_fw--;
+			ieee80211_restart_hw(mvm->hw);
+		}
 		iwl_mvm_free_nd(mvm);
 	}
 out_noreset:
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 	iwl_mvm_update_changed_regdom(mvm);
 
 	if (mvm->net_detect) {
+		/* If this is a non-unified image, we restart the FW,
+		 * so no need to stop the netdetect scan. If that
+		 * fails, continue and try to get the wake-up reasons,
+		 * but trigger a HW restart by keeping a failure code
+		 * in ret.
+		 */
+		if (unified_image)
+			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+						false);
+
 		iwl_mvm_query_netdetect_reasons(mvm, vif);
 		/* has unlocked the mutex, so skip that */
 		goto out;
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 {
 	struct iwl_mvm *mvm = inode->i_private;
-	int remaining_time = 10;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
 	mvm->d3_test_active = false;
@@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
 	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
 
 	iwl_abort_notification_waits(&mvm->notif_wait);
-	ieee80211_restart_hw(mvm->hw);
+	if (!unified_image) {
+		int remaining_time = 10;
 
-	/* wait for restart and disconnect all interfaces */
-	while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
-	       remaining_time > 0) {
-		remaining_time--;
-		msleep(1000);
-	}
+		ieee80211_restart_hw(mvm->hw);
 
-	if (remaining_time == 0)
-		IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");
+		/* wait for restart and disconnect all interfaces */
+		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		       remaining_time > 0) {
+			remaining_time--;
+			msleep(1000);
+		}
+
+		if (remaining_time == 0)
+			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
+	}
 
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
......
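Several of the d3.c hunks above, and the scan.c hunk further down, hinge on the same firmware capability check: a "unified" image stays loaded across D0/D3, so the driver must stop scans and clean up explicitly instead of relying on a full firmware restart. A minimal sketch of how that check reads, using only identifiers that appear in the diff (the wrapper function name is made up for illustration and is not part of iwlwifi):

	/* Illustrative helper only; the driver open-codes this check where needed. */
	static bool my_mvm_has_unified_image(struct iwl_mvm *mvm)
	{
		/* one image serves both D0 and D3, so no FW restart on resume */
		return fw_has_capa(&mvm->fw->ucode_capa,
				   IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
	}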
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
 		.data = { &cmd, },
 		.len = { sizeof(cmd) },
 	};
-	size_t delta, len;
-	ssize_t ret;
+	size_t delta;
+	ssize_t ret, len;
 
 	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
 			     DEBUG_GROUP, 0);
......
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 				     struct iwl_mvm_internal_rxq_notif *notif,
 				     u32 size)
 {
-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
 	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
 	int ret;
 
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 	}
 
 	if (notif->sync)
-		ret = wait_event_timeout(notif_waitq,
+		ret = wait_event_timeout(mvm->rx_sync_waitq,
 					 atomic_read(&mvm->queue_sync_counter) == 0,
 					 HZ);
 	WARN_ON_ONCE(!ret);
......
@@ -937,6 +937,7 @@ struct iwl_mvm {
 	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
 	spinlock_t d0i3_tx_lock;
 	wait_queue_head_t d0i3_exit_waitq;
+	wait_queue_head_t rx_sync_waitq;
 
 	/* BT-Coex */
 	struct iwl_bt_coex_profile_notif last_bt_notif;
......
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	spin_lock_init(&mvm->refs_lock);
 	skb_queue_head_init(&mvm->d0i3_tx);
 	init_waitqueue_head(&mvm->d0i3_exit_waitq);
+	init_waitqueue_head(&mvm->rx_sync_waitq);
 	atomic_set(&mvm->queue_sync_counter, 0);
......
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 				  "Received expired RX queue sync message\n");
 			return;
 		}
-		atomic_dec(&mvm->queue_sync_counter);
+		if (!atomic_dec_return(&mvm->queue_sync_counter))
+			wake_up(&mvm->rx_sync_waitq);
 	}
 
 	switch (internal_notif->type) {
......
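The last four hunks (the new mvm.h field, its initialization in the op-mode start, the wake-up in the RX-queue notification handler, and the wait in iwl_mvm_sync_rx_queues_internal) are the multi-queue latency fix from the commit message: the old code declared the wait queue on the waiter's stack while the notification handler only decremented the counter, so nothing ever woke the waiter and wait_event_timeout() always slept for the full HZ. Keeping the wait queue in struct iwl_mvm and waking it when the counter reaches zero lets the sync return as soon as the last queue answers. A minimal sketch of the pattern, with made-up names (my_dev, my_dev_sync_queues, my_dev_queue_notif) that are not part of iwlwifi:

	#include <linux/wait.h>
	#include <linux/atomic.h>
	#include <linux/jiffies.h>

	struct my_dev {
		atomic_t pending;		/* queues that have not answered yet */
		wait_queue_head_t sync_waitq;	/* lives as long as the device;
						 * init_waitqueue_head() at probe time */
	};

	/* sender: ask all queues to sync, then wait for the last answer */
	static long my_dev_sync_queues(struct my_dev *dev, int num_queues)
	{
		atomic_set(&dev->pending, num_queues);

		/* ... send the sync notification to every queue here ... */

		/* returns 0 on timeout, remaining jiffies otherwise */
		return wait_event_timeout(dev->sync_waitq,
					  atomic_read(&dev->pending) == 0, HZ);
	}

	/* per-queue notification handler: wake the sender on the last answer */
	static void my_dev_queue_notif(struct my_dev *dev)
	{
		if (!atomic_dec_return(&dev->pending))
			wake_up(&dev->sync_waitq);
	}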
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
 
 static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 {
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
 	/* This looks a bit arbitrary, but the idea is that if we run
 	 * out of possible simultaneous scans and the userspace is
 	 * trying to run a scan type that is already running, we
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
 			return -EBUSY;
 
 		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 	case IWL_MVM_SCAN_NETDETECT:
-		/* No need to stop anything for net-detect since the
-		 * firmware is restarted anyway. This way, any sched
-		 * scans that were running will be restarted when we
-		 * resume.
+		/* For non-unified images, there's no need to stop
+		 * anything for net-detect since the firmware is
+		 * restarted anyway. This way, any sched scans that
+		 * were running will be restarted when we resume.
 		 */
-		return 0;
+		if (!unified_image)
+			return 0;
+
+		/* If this is a unified image and we ran out of scans,
+		 * we need to stop something. Prefer stopping regular
+		 * scans, because the results are useless at this
+		 * point, and we should be able to keep running
+		 * another scheduled scan while suspended.
+		 */
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+						 true);
+		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+						 true);
+
+		/* fall through, something is wrong if no scan was
+		 * running but we ran out of scans.
+		 */
 	default:
 		WARN_ON(1);
 		break;
......
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
 
 #ifdef CONFIG_ACPI
-#define SPL_METHOD		"SPLC"
-#define SPL_DOMAINTYPE_MODULE	BIT(0)
-#define SPL_DOMAINTYPE_WIFI	BIT(1)
-#define SPL_DOMAINTYPE_WIGIG	BIT(2)
-#define SPL_DOMAINTYPE_RFEM	BIT(3)
+#define ACPI_SPLC_METHOD	"SPLC"
+#define ACPI_SPLC_DOMAIN_WIFI	(0x07)
 
-static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc)
 {
-	union acpi_object *limits, *domain_type, *power_limit;
+	union acpi_object *data_pkg, *dflt_pwr_limit;
+	int i;
 
-	if (splx->type != ACPI_TYPE_PACKAGE ||
-	    splx->package.count != 2 ||
-	    splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
-	    splx->package.elements[0].integer.value != 0) {
-		IWL_ERR(trans, "Unsupported splx structure\n");
+	/* We need at least two elements, one for the revision and one
+	 * for the data itself. Also check that the revision is
+	 * supported (currently only revision 0).
+	 */
+	if (splc->type != ACPI_TYPE_PACKAGE ||
+	    splc->package.count < 2 ||
+	    splc->package.elements[0].type != ACPI_TYPE_INTEGER ||
+	    splc->package.elements[0].integer.value != 0) {
+		IWL_DEBUG_INFO(trans,
+			       "Unsupported structure returned by the SPLC method. Ignoring.\n");
 		return 0;
 	}
 
-	limits = &splx->package.elements[1];
-	if (limits->type != ACPI_TYPE_PACKAGE ||
-	    limits->package.count < 2 ||
-	    limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
-	    limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
-		IWL_ERR(trans, "Invalid limits element\n");
-		return 0;
+	/* loop through all the packages to find the one for WiFi */
+	for (i = 1; i < splc->package.count; i++) {
+		union acpi_object *domain;
+
+		data_pkg = &splc->package.elements[i];
+
+		/* Skip anything that is not a package with the right
+		 * amount of elements (i.e. at least 2 integers).
+		 */
+		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
+		    data_pkg->package.count < 2 ||
+		    data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+		    data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+			continue;
+
+		domain = &data_pkg->package.elements[0];
+		if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI)
+			break;
+
+		data_pkg = NULL;
 	}
 
-	domain_type = &limits->package.elements[0];
-	power_limit = &limits->package.elements[1];
-	if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
-		IWL_DEBUG_INFO(trans, "WiFi power is not limited\n");
+	if (!data_pkg) {
+		IWL_DEBUG_INFO(trans,
+			       "No element for the WiFi domain returned by the SPLC method.\n");
 		return 0;
 	}
 
-	return power_limit->integer.value;
+	dflt_pwr_limit = &data_pkg->package.elements[1];
+	return dflt_pwr_limit->integer.value;
 }
 
 static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
 {
 	acpi_handle pxsx_handle;
 	acpi_handle handle;
-	struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+	struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL};
 	acpi_status status;
 
 	pxsx_handle = ACPI_HANDLE(&pdev->dev);
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
 	}
 
 	/* Get the method's handle */
-	status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+	status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD,
+				 &handle);
 	if (ACPI_FAILURE(status)) {
-		IWL_DEBUG_INFO(trans, "SPL method not found\n");
+		IWL_DEBUG_INFO(trans, "SPLC method not found\n");
 		return;
 	}
 
 	/* Call SPLC with no arguments */
-	status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+	status = acpi_evaluate_object(handle, NULL, NULL, &splc);
 	if (ACPI_FAILURE(status)) {
 		IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status);
 		return;
 	}
 
-	trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+	trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer);
 	IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n",
 		       trans->dflt_pwr_limit);
-	kfree(splx.pointer);
+	kfree(splc.pointer);
 }
 
 #else /* CONFIG_ACPI */
......
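For readability, this is the SPLC package layout that the rewritten splc_get_pwr_limit() expects, reconstructed from its checks; the concrete numbers are illustrative only, not taken from any real platform:

	/* Package () {
	 *     0,                                // element 0: revision, must be 0
	 *     Package () { 0x07, 1000 },        // domain id 0x07 = ACPI_SPLC_DOMAIN_WIFI,
	 *                                       //   element 1 = default power limit
	 *     Package () { <domain>, <limit> }, // other domains are skipped
	 * }
	 */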
@@ -592,6 +592,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
 static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 			     int slots_num, u32 txq_id)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
 
 	txq->need_update = false;
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		return ret;
 
 	spin_lock_init(&txq->lock);
+
+	if (txq_id == trans_pcie->cmd_queue) {
+		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
+
+		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
+	}
+
 	__skb_queue_head_init(&txq->overflow_q);
 
 	/*
......
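The pcie/tx hunk is the false-positive lockdep fix mentioned in the commit message: every TX queue's spinlock gets the same lock class from spin_lock_init(), so legitimately nesting the command queue's lock under another queue's lock looks like recursive locking to lockdep. Giving the command queue's lock its own lock_class_key separates the classes. A minimal sketch of the idiom, with made-up names (my_queue, my_queue_init) rather than the driver's actual code:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct my_queue {
		spinlock_t lock;
		/* ... */
	};

	/* Give one special queue its own lockdep class so that taking its lock
	 * while another queue's lock is held is not reported as recursion.
	 */
	static void my_queue_init(struct my_queue *q, bool is_cmd_queue)
	{
		spin_lock_init(&q->lock);

		if (is_cmd_queue) {
			/* static: one key per class, not per instance */
			static struct lock_class_key my_cmd_queue_key;

			lockdep_set_class(&q->lock, &my_cmd_queue_key);
		}
	}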