Commit fc4ac67b authored by Alexander Duyck, committed by Jeff Kirsher

i40e: Do not directly increment Tx next_to_use

Avoid directly incrementing next_to_use, for multiple reasons.  The main
reason is that a direct increment can leave it equal to the ring count.
Technically that is a state it should never be able to reach, but as
currently written the code makes it possible.

This patch pulls the value into a local register, increments it, and
writes back either the incremented value or 0, depending on whether the
incremented value has reached the ring count.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent 35a1e2ad
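As a minimal sketch of the change (the struct and function names below are illustrative stand-ins, not the driver's own types), the two variants look like this:

#include <stdint.h>

struct tx_ring_sketch {
        uint16_t next_to_use;   /* index the next descriptor comes from */
        uint16_t count;         /* number of descriptors in the ring */
};

/* Old variant: between the increment and the wrap check, next_to_use
 * is momentarily equal to ring->count, an index that does not exist. */
static void advance_direct(struct tx_ring_sketch *ring)
{
        ring->next_to_use++;
        if (ring->next_to_use == ring->count)
                ring->next_to_use = 0;
}

/* New variant: do the arithmetic on a local copy and publish only a
 * value that is already wrapped back into [0, count). */
static void advance_via_local(struct tx_ring_sketch *ring)
{
        uint16_t i = ring->next_to_use;

        i++;
        ring->next_to_use = (i < ring->count) ? i : 0;
}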
@@ -73,11 +73,12 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
                goto dma_fail;

        /* grab the next descriptor */
-       fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-       tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+       tx_buf = &tx_ring->tx_bi[i];
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
                                             << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
@@ -134,11 +135,11 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);

        /* Now program a dummy descriptor */
-       tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
-       tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       tx_desc = I40E_TX_DESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        tx_desc->buffer_addr = cpu_to_le64(dma);
        td_cmd = I40E_TX_DESC_CMD_EOP |
@@ -148,9 +149,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);

-       /* Mark the data descriptor to be watched */
-       tx_buf->next_to_watch = tx_desc;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -158,6 +156,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
         */
        wmb();

+       /* Mark the data descriptor to be watched */
+       tx_buf->next_to_watch = tx_desc;
+
        writel(tx_ring->next_to_use, tx_ring->tail);

        return 0;
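The two hunks above also move the next_to_watch assignment from before the wmb() barrier to after it, which keeps the descriptor writes ordered ahead of both the next_to_watch publication and the tail bump. A condensed view of the resulting order in i40e_program_fdir_filter(), assembled only from the lines shown above, is:

tx_desc->cmd_type_offset_bsz =
        build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);

/* Force memory writes to complete before letting h/w know there
 * are new descriptors to fetch. */
wmb();

/* Mark the data descriptor to be watched */
tx_buf->next_to_watch = tx_desc;

writel(tx_ring->next_to_use, tx_ring->tail);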
@@ -1143,6 +1144,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        struct tcphdr *th;
        unsigned int hlen;
        u32 flex_ptype, dtype_cmd;
+       u16 i;

        /* make sure ATR is enabled */
        if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1182,10 +1184,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        tx_ring->atr_count = 0;

        /* grab the next descriptor */
-       fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
                     I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1481,15 +1484,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
                               const u32 cd_tunneling, const u32 cd_l2tag2)
 {
        struct i40e_tx_context_desc *context_desc;
+       int i = tx_ring->next_to_use;

        if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
                return;

        /* grab the next descriptor */
-       context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
...
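For completeness, a tiny self-contained check (the demo_ring type and the loop are hypothetical, not part of the driver) that walks an 8-entry ring twice and confirms the ternary write-back never publishes an out-of-range index:

#include <assert.h>
#include <stdint.h>

struct demo_ring {
        uint16_t next_to_use;
        uint16_t count;
};

int main(void)
{
        struct demo_ring ring = { .next_to_use = 0, .count = 8 };

        for (int n = 0; n < 16; n++) {
                uint16_t i = ring.next_to_use;

                i++;
                ring.next_to_use = (i < ring.count) ? i : 0;
                /* the invariant the patch restores: always a valid index */
                assert(ring.next_to_use < ring.count);
        }
        return 0;
}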