Commit 41837ca9 authored by Emmanuel Grumbach

iwlwifi: pcie: allow to pretend to have Tx CSUM for debug

Allow configuring the driver to pretend it has TX CSUM
offload support. This will be useful for testing the TSO flows
that will come in later patches.
This configuration is disabled by default.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Parent cb2f8277
@@ -478,6 +478,7 @@ struct iwl_hcmd_arr {
* in DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @wide_cmd_header: firmware supports wide host command header
* @sw_csum_tx: transport should compute the TCP checksum
* @command_groups: array of command groups, each member is an array of the
* commands in the group; for debugging only
* @command_groups_size: number of command groups, to avoid illegal access
@@ -497,6 +498,7 @@ struct iwl_trans_config {
bool bc_table_dword;
bool scd_set_active;
bool wide_cmd_header;
bool sw_csum_tx;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
......
@@ -106,6 +106,7 @@
#define IWL_MVM_RS_RSSI_BASED_INIT_RATE 0
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
......
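The new option is compile-time only: to exercise the software-checksum path, a tester would locally flip the constant defined above. A minimal sketch of such a local debug change (assumption about how it would be enabled; this is not part of the patch):

	/* local debug change, not part of this patch: pretend to have TX CSUM offload */
	#define IWL_MVM_SW_TX_CSUM_OFFLOAD	1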
@@ -667,6 +667,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (!iwl_mvm_is_csum_supported(mvm))
hw->netdev_features &= ~NETIF_F_RXCSUM;
if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
ret = ieee80211_register_hw(mvm->hw);
if (ret)
iwl_mvm_leds_exit(mvm);
......
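Advertising NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM lets the network stack hand the driver skbs whose L4 checksum has not been finalized (skb->ip_summed == CHECKSUM_PARTIAL); the skb only records where the checksum field lives, which is what the PCIe transport consumes further down. A minimal illustration of reading that information, assuming a pr_debug print that is not in the patch:

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* start of the region the checksum must cover */
		int offs = skb_checksum_start_offset(skb);
		/* offset of the 16-bit checksum field inside the packet */
		int csum_offs = offs + skb->csum_offset;

		pr_debug("csum covers %u bytes, field at offset %d\n",
			 skb->len - offs, csum_offs);
	}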
@@ -541,6 +541,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.scd_set_active = true;
trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
/* Set a short watchdog for the command queue */
trans_cfg.cmd_q_wdg_timeout =
......
@@ -307,6 +307,8 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
* @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @wide_cmd_header: true when ucode supports wide command header format
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
* @rx_page_order: page order for receive buffer size
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
@@ -361,6 +363,7 @@ struct iwl_trans_pcie {
bool bc_table_dword;
bool scd_set_active;
bool wide_cmd_header;
bool sw_csum_tx;
u32 rx_page_order;
/*protect hw register */
......
@@ -1442,6 +1442,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
......
@@ -1823,6 +1823,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
"TX on unused queue %d\n", txq_id))
return -EINVAL;
if (unlikely(trans_pcie->sw_csum_tx &&
skb->ip_summed == CHECKSUM_PARTIAL)) {
int offs = skb_checksum_start_offset(skb);
int csum_offs = offs + skb->csum_offset;
__wsum csum;
if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
return -1;
csum = skb_checksum(skb, offs, skb->len - offs, 0);
*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
}
if (skb_is_nonlinear(skb) &&
skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
__skb_linearize(skb))
......
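The block added here computes the full one's-complement Internet checksum in software: skb_checksum() accumulates a 32-bit sum over the bytes starting at the checksum-start offset, and csum_fold() collapses it to the 16-bit value written into the TCP/UDP header. A standalone user-space sketch of the same arithmetic (RFC 1071 style; illustration only, not the kernel implementation):

	#include <stdint.h>
	#include <stddef.h>

	/* sum 16-bit words, fold the carries, and complement - roughly what
	 * csum_fold(skb_checksum(...)) produces for the covered bytes */
	static uint16_t inet_csum(const uint8_t *data, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += ((uint32_t)data[i] << 8) | data[i + 1];
		if (len & 1)			/* odd trailing byte, zero-padded */
			sum += (uint32_t)data[len - 1] << 8;

		while (sum >> 16)		/* fold carries back into the low 16 bits */
			sum = (sum & 0xffff) + (sum >> 16);

		return (uint16_t)~sum;
	}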