Commit e3cc6e37 authored by David S. Miller

Merge branch 'qed-next'

Manish Chopra says:

====================
qede: Enhancements

This patch series adds support for a few small fastpath features
and some code refactoring.

Note - regarding get/set tunable configuration via ethtool:
surprisingly, there is NO ethtool application support for
such configuration, even though the kernel support exists.
Do let us know if we should add support for that in userspace ethtool.

Please consider applying this series to "net-next".
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,7 +24,7 @@
 #include <linux/qed/qed_eth_if.h>

 #define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 7
+#define QEDE_MINOR_VERSION 10
 #define QEDE_REVISION_VERSION 1
 #define QEDE_ENGINEERING_VERSION 20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -143,6 +143,8 @@ struct qede_dev {
        struct mutex qede_lock;
        u32 state; /* Protected by qede_lock */
        u16 rx_buf_size;
+       u32 rx_copybreak;
+
        /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
        /* Max supported alignment is 256 (8 shift)
@@ -235,6 +237,7 @@ struct qede_rx_queue {
        u64 rx_hw_errors;
        u64 rx_alloc_errors;
+       u64 rx_ip_frags;
 };

 union db_prod {
@@ -332,6 +335,7 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
 #define NUM_TX_BDS_MIN 128
 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX

+#define QEDE_MIN_PKT_LEN 64
 #define QEDE_RX_HDR_SIZE 256
 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -37,6 +37,7 @@ static const struct {
 } qede_rqstats_arr[] = {
        QEDE_RQSTAT(rx_hw_errors),
        QEDE_RQSTAT(rx_alloc_errors),
+       QEDE_RQSTAT(rx_ip_frags),
 };

 #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
@@ -1184,6 +1185,48 @@ static void qede_self_test(struct net_device *dev,
        }
 }

+static int qede_set_tunable(struct net_device *dev,
+                           const struct ethtool_tunable *tuna,
+                           const void *data)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u32 val;
+
+       switch (tuna->id) {
+       case ETHTOOL_RX_COPYBREAK:
+               val = *(u32 *)data;
+               if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+                       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                                  "Invalid rx copy break value, range is [%u, %u]",
+                                  QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+                       return -EINVAL;
+               }
+
+               edev->rx_copybreak = *(u32 *)data;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+                           const struct ethtool_tunable *tuna, void *data)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (tuna->id) {
+       case ETHTOOL_RX_COPYBREAK:
+               *(u32 *)data = edev->rx_copybreak;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
 static const struct ethtool_ops qede_ethtool_ops = {
        .get_settings = qede_get_settings,
        .set_settings = qede_set_settings,
@@ -1212,6 +1255,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
        .get_channels = qede_get_channels,
        .set_channels = qede_set_channels,
        .self_test = qede_self_test,
+       .get_tunable = qede_get_tunable,
+       .set_tunable = qede_set_tunable,
 };

 static const struct ethtool_ops qede_vf_ethtool_ops = {
@@ -1234,6 +1279,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
        .set_rxfh = qede_set_rxfh,
        .get_channels = qede_get_channels,
        .set_channels = qede_set_channels,
+       .get_tunable = qede_get_tunable,
+       .set_tunable = qede_set_tunable,
 };

 void qede_set_ethtool_ops(struct net_device *dev)
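Since ethtool(8) offered no get/set-tunable commands when this series was posted (as the cover letter notes), the new callbacks can still be exercised from userspace through the raw SIOCETHTOOL ioctl. Below is a minimal sketch, not part of the patch; the interface name "eth0" and the value 128 are placeholders, and error handling is kept to a minimum:

/* Hypothetical example: set rx-copybreak via ETHTOOL_STUNABLE.
 * qede accepts values in [QEDE_MIN_PKT_LEN (64), QEDE_RX_HDR_SIZE (256)];
 * ETHTOOL_GTUNABLE reads the current value back the same way.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct {
                struct ethtool_tunable hdr;
                __u32 val;                      /* payload follows the header */
        } req;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.hdr.cmd = ETHTOOL_STUNABLE;
        req.hdr.id = ETHTOOL_RX_COPYBREAK;
        req.hdr.type_id = ETHTOOL_TUNABLE_U32;
        req.hdr.len = sizeof(req.val);
        req.val = 128;                          /* placeholder value */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder netdev */
        ifr.ifr_data = (void *)&req;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_STUNABLE");

        close(fd);
        return 0;
}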
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -485,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
 }
 #endif

+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+       /* wmb makes sure that the BDs data is updated before updating the
+        * producer, otherwise FW may read old data from the BDs.
+        */
+       wmb();
+       barrier();
+       writel(txq->tx_db.raw, txq->doorbell_addr);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the queue lock is released and another start_xmit is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
+
 /* Main transmit function */
 static
 netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -543,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
                DP_NOTICE(edev, "SKB mapping failed\n");
                qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+               qede_update_tx_producer(txq);
                return NETDEV_TX_OK;
        }
        nbd++;
@@ -657,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                if (rc) {
                        qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
                                                data_split);
+                       qede_update_tx_producer(txq);
                        return NETDEV_TX_OK;
                }
@@ -681,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                if (rc) {
                        qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
                                                data_split);
+                       qede_update_tx_producer(txq);
                        return NETDEV_TX_OK;
                }
        }
@@ -701,20 +722,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        txq->tx_db.data.bd_prod =
                cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

-       /* wmb makes sure that the BDs data is updated before updating the
-        * producer, otherwise FW may read old data from the BDs.
-        */
-       wmb();
-       barrier();
-       writel(txq->tx_db.raw, txq->doorbell_addr);
-
-       /* mmiowb is needed to synchronize doorbell writes from more than one
-        * processor. It guarantees that the write arrives to the device before
-        * the queue lock is released and another start_xmit is called (possibly
-        * on another CPU). Without this barrier, the next doorbell can bypass
-        * this doorbell. This is applicable to IA64/Altix systems.
-        */
-       mmiowb();
+       if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+               qede_update_tx_producer(txq);

        if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
                      < (MAX_SKB_FRAGS + 1))) {
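The hunk above is the heart of the Tx batching change: the doorbell MMIO write is skipped while the stack signals that more packets are queued (skb->xmit_more) and is issued on the last skb of the batch, or when the Tx queue stops. A generic sketch of the pattern follows; the my_* helpers are hypothetical, not qede functions, and the error-path doorbell mirrors the qede_update_tx_producer() calls added above so BDs posted by earlier skbs are never left without a producer update:

/* Illustrative xmit_more doorbell coalescing (hypothetical my_* helpers). */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_txq *txq = my_select_txq(dev, skb);
        struct netdev_queue *ndq = netdev_get_tx_queue(dev, txq->index);

        if (my_post_bds(txq, skb)) {    /* e.g. a DMA mapping error */
                my_unwind_bds(txq, skb);
                /* Still ring: flush BDs posted by earlier skbs that
                 * deferred their doorbell.
                 */
                my_ring_doorbell(txq);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        /* Ring only on the last packet of a batch, or when the queue
         * stops and no later xmit will arrive to flush the doorbell.
         */
        if (!skb->xmit_more || netif_tx_queue_stopped(ndq))
                my_ring_doorbell(txq);

        return NETDEV_TX_OK;
}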
@@ -1348,6 +1357,20 @@ static u8 qede_check_csum(u16 flag)
        return qede_check_tunn_csum(flag);
 }

+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+                                     u16 flag)
+{
+       u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+       if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+                            ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+           (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+               return true;
+
+       return false;
+}
+
 static int qede_rx_int(struct qede_fastpath *fp, int budget)
 {
        struct qede_dev *edev = fp->edev;
@@ -1426,6 +1449,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                csum_flag = qede_check_csum(parse_flag);
                if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+                       if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+                                                     parse_flag)) {
+                               rxq->rx_ip_frags++;
+                               goto alloc_skb;
+                       }
+
                        DP_NOTICE(edev,
                                  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
                                  sw_comp_cons, parse_flag);
@@ -1434,6 +1463,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                        goto next_cqe;
                }

+alloc_skb:
                skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
                if (unlikely(!skb)) {
                        DP_NOTICE(edev,
@@ -1444,7 +1474,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
                }

                /* Copy data into SKB */
-               if (len + pad <= QEDE_RX_HDR_SIZE) {
+               if (len + pad <= edev->rx_copybreak) {
                        memcpy(skb_put(skb, len),
                               page_address(data) + pad +
                               sw_rx_data->page_offset, len);
@@ -1576,56 +1606,49 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 static int qede_poll(struct napi_struct *napi, int budget)
 {
-       int work_done = 0;
        struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
                                                napi);
        struct qede_dev *edev = fp->edev;
+       int rx_work_done = 0;
+       u8 tc;

-       while (1) {
-               u8 tc;
-
-               for (tc = 0; tc < edev->num_tc; tc++)
-                       if (qede_txq_has_work(&fp->txqs[tc]))
-                               qede_tx_int(edev, &fp->txqs[tc]);
-
-               if (qede_has_rx_work(fp->rxq)) {
-                       work_done += qede_rx_int(fp, budget - work_done);
-
-                       /* must not complete if we consumed full budget */
-                       if (work_done >= budget)
-                               break;
-               }
+       for (tc = 0; tc < edev->num_tc; tc++)
+               if (qede_txq_has_work(&fp->txqs[tc]))
+                       qede_tx_int(edev, &fp->txqs[tc]);

-               /* Fall out from the NAPI loop if needed */
-               if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
-                       qed_sb_update_sb_idx(fp->sb_info);
-                       /* *_has_*_work() reads the status block,
-                        * thus we need to ensure that status block indices
-                        * have been actually read (qed_sb_update_sb_idx)
-                        * prior to this check (*_has_*_work) so that
-                        * we won't write the "newer" value of the status block
-                        * to HW (if there was a DMA right after
-                        * qede_has_rx_work and if there is no rmb, the memory
-                        * reading (qed_sb_update_sb_idx) may be postponed
-                        * to right before *_ack_sb). In this case there
-                        * will never be another interrupt until there is
-                        * another update of the status block, while there
-                        * is still unhandled work.
-                        */
-                       rmb();
-
-                       if (!(qede_has_rx_work(fp->rxq) ||
-                             qede_has_tx_work(fp))) {
-                               napi_complete(napi);
-
-                               /* Update and reenable interrupts */
-                               qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
-                                          1 /*update*/);
-                               break;
-                       }
-               }
-       }
+       rx_work_done = qede_has_rx_work(fp->rxq) ?
+                       qede_rx_int(fp, budget) : 0;
+       if (rx_work_done < budget) {
+               qed_sb_update_sb_idx(fp->sb_info);
+               /* *_has_*_work() reads the status block,
+                * thus we need to ensure that status block indices
+                * have been actually read (qed_sb_update_sb_idx)
+                * prior to this check (*_has_*_work) so that
+                * we won't write the "newer" value of the status block
+                * to HW (if there was a DMA right after
+                * qede_has_rx_work and if there is no rmb, the memory
+                * reading (qed_sb_update_sb_idx) may be postponed
+                * to right before *_ack_sb). In this case there
+                * will never be another interrupt until there is
+                * another update of the status block, while there
+                * is still unhandled work.
+                */
+               rmb();
+
+               /* Fall out from the NAPI loop if needed */
+               if (!(qede_has_rx_work(fp->rxq) ||
+                     qede_has_tx_work(fp))) {
+                       napi_complete(napi);
+
+                       /* Update and reenable interrupts */
+                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+                                  1 /*update*/);
+               } else {
+                       rx_work_done = budget;
+               }
+       }

-       return work_done;
+       return rx_work_done;
 }

 static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
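The reworked qede_poll() above follows the standard NAPI contract: consume at most "budget" Rx packets; if less than the budget was used and no work remains, complete NAPI and re-enable the interrupt, otherwise report the full budget so the core schedules another poll. A stripped-down sketch of that contract, with hypothetical my_* helpers standing in for the driver specifics:

/* Illustrative NAPI poll skeleton (hypothetical my_* helpers). */
static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_queue *q = container_of(napi, struct my_queue, napi);
        int done = my_clean_rx(q, budget);      /* handles <= budget packets */

        if (done < budget) {
                if (!my_work_pending(q)) {
                        /* Idle: stop polling and unmask the device IRQ. */
                        napi_complete(napi);
                        my_enable_irq(q);
                } else {
                        /* Work raced in: keep ourselves scheduled. */
                        done = budget;
                }
        }

        return done;    /* returning budget means "poll me again" */
}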
@@ -2496,6 +2519,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
        mutex_init(&edev->qede_lock);
+       edev->rx_copybreak = QEDE_RX_HDR_SIZE;

        DP_INFO(edev, "Ending successfully qede probe\n");