Commit d2e5cb4e authored by John W. Linville
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
 	if (IWL_MVM_BT_COEX_CORUNNING) {
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-						    BT_VALID_CORUN_LUT_40);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+						     BT_VALID_CORUN_LUT_40);
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
 	}
 
 	if (IWL_MVM_BT_COEX_MPLUT) {
		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 	}
 
 	if (mvm->cfg->bt_shared_single_ant)
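The switch from '=' to '|=' above matters because valid_bit_msk is accumulated across several independent feature blocks in iwl_send_bt_init_conf(); a plain assignment silently drops any validity bits set earlier in the function. A minimal userspace sketch of the difference follows; the bit values (and the "earlier feature" flag) are illustrative stand-ins, not the driver's real BT_VALID_* definitions.

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit values only; the real BT_VALID_* masks live in the
 * driver's firmware API headers. */
#define BT_VALID_EARLIER_FEATURE (1u << 0) /* hypothetical bit set before this block */
#define BT_VALID_CORUN_LUT_20    (1u << 1)
#define BT_VALID_CORUN_LUT_40    (1u << 2)

int main(void)
{
	uint32_t buggy = BT_VALID_EARLIER_FEATURE;
	uint32_t fixed = BT_VALID_EARLIER_FEATURE;

	/* '=' (the old code) discards the earlier bit ... */
	buggy = BT_VALID_CORUN_LUT_20 | BT_VALID_CORUN_LUT_40;
	/* ... while '|=' (the fix) accumulates it. */
	fixed |= BT_VALID_CORUN_LUT_20 | BT_VALID_CORUN_LUT_40;

	printf("old: 0x%x, fixed: 0x%x\n", buggy, fixed); /* prints 0x6 vs 0x7 */
	return 0;
}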
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *	this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *	bits 0-19: beacon interval in usecs (suspend before executing)
+ *	bits 0-19: beacon interval in TUs (suspend before executing)
  *	bits 20-23: reserved
  *	bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
  * @quiet_plcp_th: quiet channel num of packets threshold
  * @good_CRC_th: passive to active promotion threshold
  * @rx_chain: RXON rx chain.
- * @max_out_time: max uSec to be out of associated channel
- * @suspend_time: pause scan this long when returning to service channel
+ * @max_out_time: max TUs to be out of associated channel
+ * @suspend_time: pause scan this many TUs when returning to service channel
  * @flags: RXON flags
  * @filter_flags: RXON filter
  * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
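For reference, one TU (802.11 time unit) is 1024 microseconds, which is why the fields above moved from microsecond to TU granularity. The sketch below packs and unpacks a suspend_time word with the bit layout documented in the comment (bits 0-19 beacon interval, bits 24-31 number of beacons); the helper and mask names are mine for illustration, not part of the driver's API.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helpers mirroring the documented layout:
 *   bits 0-19  : beacon interval in TUs (1 TU = 1024 usec)
 *   bits 20-23 : reserved
 *   bits 24-31 : number of beacons
 */
#define SUSPEND_INTERVAL_MASK	0x000FFFFFu
#define SUSPEND_NBEACONS_SHIFT	24

static uint32_t pack_suspend_time(uint32_t beacon_int_tu, uint8_t n_beacons)
{
	return (beacon_int_tu & SUSPEND_INTERVAL_MASK) |
	       ((uint32_t)n_beacons << SUSPEND_NBEACONS_SHIFT);
}

int main(void)
{
	uint32_t w = pack_suspend_time(100, 2);

	/* prints suspend_time=0x02000064: interval=100 TUs (102400 usec), beacons=2 */
	printf("suspend_time=0x%08x: interval=%u TUs (%u usec), beacons=%u\n",
	       w, w & SUSPEND_INTERVAL_MASK,
	       (w & SUSPEND_INTERVAL_MASK) * 1024,
	       w >> SUSPEND_NBEACONS_SHIFT);
	return 0;
}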
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 	mutex_lock(&mvm->mutex);
 
+	if (iwl_mvm_is_associated(mvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (mvm->scan_status) {
 	case IWL_MVM_SCAN_OS:
 		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 	return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_associated(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		return;
 	}
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
 	/* Disable last tx check if we are debugging with fixed rate */
 	if (lq_sta->dbg_fixed_rate) {
 		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_bound);
-	/*
-	 * Under low latency traffic passive scan is fragmented meaning
-	 * that dwell on a particular channel will be fragmented. Each fragment
-	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
-	 * channel will be delayed by the same period - 105ms. So suspend_time
-	 * parameter describing both fragments and channels skipping periods is
-	 * set to 105ms. This value is chosen so that overall passive scan
-	 * duration will not be too long. Max_out_time in this case is set to
-	 * 70ms, so for active scanning operating channel will be left for 70ms
-	 * while for passive still for 20ms (fragment dwell).
-	 */
-	if (global_bound) {
-		if (!iwl_mvm_low_latency(mvm)) {
-			params->suspend_time = ieee80211_tu_to_usec(100);
-			params->max_out_time = ieee80211_tu_to_usec(600);
-		} else {
-			params->suspend_time = ieee80211_tu_to_usec(105);
-			/* P2P doesn't support fragmented passive scan, so
-			 * configure max_out_time to be at least longest dwell
-			 * time for passive scan.
-			 */
-			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-				params->max_out_time = ieee80211_tu_to_usec(70);
-				params->passive_fragmented = true;
-			} else {
-				u32 passive_dwell;
-
-				/*
-				 * Use band G so that passive channel dwell time
-				 * will be assigned with maximum value.
-				 */
-				band = IEEE80211_BAND_2GHZ;
-				passive_dwell = iwl_mvm_get_passive_dwell(band);
-				params->max_out_time =
-					ieee80211_tu_to_usec(passive_dwell);
-			}
-		}
-	}
+	if (!global_bound)
+		goto not_bound;
+
+	params->suspend_time = 100;
+	params->max_out_time = 600;
+
+	if (iwl_mvm_low_latency(mvm)) {
+		params->suspend_time = 250;
+		params->max_out_time = 250;
+	}
 
+not_bound:
 	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].passive = 20;
-		else
-			params->dwell[band].passive =
-				iwl_mvm_get_passive_dwell(band);
+		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
 		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
 								      n_ssids);
 	}
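Note that the new values above are plain TUs (100/600 when bound, 250/250 under low latency) where the old code stored microseconds via ieee80211_tu_to_usec(); since 1 TU is 1024 usec, that helper is just a multiplication. Below is a small userspace model of the simplified decision, with a stand-in struct in place of the driver's scan-parameter type; it is a sketch of the logic in the hunk, not driver code.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the driver's scan parameter struct; after this patch both
 * fields are in TUs (previously usecs, 1 TU = 1024 usec). */
struct scan_timing {
	uint32_t suspend_time;
	uint32_t max_out_time;
};

/* Userspace model of the simplified logic above: leave the values untouched
 * when no interface is bound, 100/600 TUs when bound, 250/250 TUs under
 * low latency traffic. */
static void calc_out_of_channel_time(struct scan_timing *t,
				     bool bound, bool low_latency)
{
	if (!bound)
		return;

	t->suspend_time = 100;
	t->max_out_time = 600;

	if (low_latency) {
		t->suspend_time = 250;
		t->max_out_time = 250;
	}
}

int main(void)
{
	struct scan_timing t = { 0, 0 };

	calc_out_of_channel_time(&t, true, true);
	printf("suspend=%u TUs, max_out=%u TUs\n", t.suspend_time, t.max_out_time);
	return 0;
}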
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
 	return result;
 }
+
+static void iwl_mvm_assoc_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	bool *assoc = _data;
+
+	if (vif->bss_conf.assoc)
+		*assoc = true;
+}
+
+bool iwl_mvm_is_associated(struct iwl_mvm *mvm)
+{
+	bool assoc = false;
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_assoc_iter, &assoc);
+
+	return assoc;
+}
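The new helper relies on mac80211's ieee80211_iterate_active_interfaces_atomic(), which invokes the supplied callback once per active interface. For readers unfamiliar with that idiom, here is a minimal userspace model of the same callback-iteration pattern; the types and helper names are invented stand-ins, not mac80211's.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an interface; only the association flag matters. */
struct fake_vif {
	bool assoc;
};

typedef void (*vif_iter_fn)(void *data, struct fake_vif *vif);

/* Models the iterator: walk every "active" vif and call the callback. */
static void iterate_vifs(struct fake_vif *vifs, int n, vif_iter_fn fn, void *data)
{
	for (int i = 0; i < n; i++)
		fn(data, &vifs[i]);
}

/* Mirrors iwl_mvm_assoc_iter(): OR each vif's association state into a bool. */
static void assoc_iter(void *data, struct fake_vif *vif)
{
	bool *assoc = data;

	if (vif->assoc)
		*assoc = true;
}

int main(void)
{
	struct fake_vif vifs[] = { { false }, { true }, { false } };
	bool assoc = false;

	iterate_vifs(vifs, 3, assoc_iter, &assoc);
	printf("associated: %s\n", assoc ? "yes" : "no"); /* prints "yes" */
	return 0;
}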
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+	trans->dev = &pdev->dev;
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
 	err = pci_enable_msi(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}
 
-	trans->dev = &pdev->dev;
-	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_disable_msi;
 	}
 
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_free_ict;
 	}
 
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
 	return trans;
 
 out_free_ict: