Commit f586a336 authored by Govindarajulu Varadarajan, committed by David S. Miller

enic: use atomic_t instead of spin_lock in busy poll

We use a spinlock to protect what is effectively a single state flag. The
spinlock can be avoided by turning the flag into an atomic variable and using
atomic_cmpxchg(): atomic_cmpxchg() moves the state from idle to the polling
owner (acquiring the lock), and a plain atomic_set() back to idle releases it.
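
The change reduces the lock to a single compare-and-swap on a three-state
ownership word. Below is a minimal userspace sketch of the same pattern, using
C11 <stdatomic.h> in place of the kernel's atomic_t API (all names are
illustrative, not taken from the driver):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum poll_state { STATE_IDLE, STATE_NAPI, STATE_POLL };

static atomic_int bpoll_state = STATE_IDLE;

/* Claim ownership: succeeds only on an IDLE -> owner transition,
 * mirroring atomic_cmpxchg(&rq->bpoll_state, IDLE, owner). */
static bool poll_trylock(int owner)
{
	int expected = STATE_IDLE;

	return atomic_compare_exchange_strong(&bpoll_state, &expected, owner);
}

/* Release ownership: a plain store back to IDLE suffices, because
 * only the current owner is allowed to unlock. */
static void poll_unlock(void)
{
	atomic_store(&bpoll_state, STATE_IDLE);
}

int main(void)
{
	if (poll_trylock(STATE_NAPI))
		printf("NAPI claimed the queue\n");
	if (!poll_trylock(STATE_POLL))
		printf("busy poll backs off while NAPI owns it\n");
	poll_unlock();
	if (poll_trylock(STATE_POLL))
		printf("busy poll claims the queue once idle\n");
	return 0;
}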

In NAPI poll, if GRO is enabled, we call napi_gro_receive() to deliver packets.
Before we call napi_complete(), i.e. while re-polling, low-latency busy poll
may run and deliver packets with netif_receive_skb() instead. At this point,
if some skbs are still held in GRO, busy poll could deliver packets out of
order. So we call napi_gro_flush() to flush the held skbs before moving the
NAPI poll state back to idle.
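
The reordering hazard is easiest to see with a toy model: packets routed
through GRO sit in a held list until flushed, while busy poll hands packets
straight to the stack. A hypothetical sketch follows — gro_hold(), gro_flush()
and deliver() are stand-ins for napi_gro_receive(), napi_gro_flush() and
netif_receive_skb(), not kernel APIs:

#include <stdio.h>

#define MAX_HELD 8

static int held[MAX_HELD];	/* packets parked in GRO, unseen by the stack */
static int nheld;

static void deliver(int pkt)	/* hand a packet to the stack */
{
	printf("delivered packet %d\n", pkt);
}

static void gro_hold(int pkt)	/* the GRO path may park the packet */
{
	held[nheld++] = pkt;
}

static void gro_flush(void)	/* push held packets out, in order */
{
	int i;

	for (i = 0; i < nheld; i++)
		deliver(held[i]);
	nheld = 0;
}

int main(void)
{
	/* NAPI poll receives packets 1..3; GRO holds on to them. */
	gro_hold(1);
	gro_hold(2);
	gro_hold(3);

	/* Flush before releasing ownership. Without this step, busy
	 * poll would deliver packet 4 ahead of the held 1..3. */
	gro_flush();

	/* Busy poll now owns the queue and delivers directly. */
	deliver(4);
	return 0;
}
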
Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 1298267b
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1208,7 +1208,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
 		napi_complete(napi);
 		vnic_intr_unmask(&enic->intr[intr]);
 	}
-	enic_poll_unlock_napi(&enic->rq[cq_rq]);
+	enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
 
 	return rq_work_done;
 }
@@ -1414,7 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 		 */
 		enic_calc_int_moderation(enic, &enic->rq[rq]);
 
-	enic_poll_unlock_napi(&enic->rq[rq]);
+	enic_poll_unlock_napi(&enic->rq[rq], napi);
 	if (work_done < work_to_do) {
 
 		/* Some work done, but not enough to stay in polling,
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -21,6 +21,7 @@
 #define _VNIC_RQ_H_
 
 #include <linux/pci.h>
+#include <linux/netdevice.h>
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
@@ -75,6 +76,12 @@ struct vnic_rq_buf {
 	uint64_t wr_id;
 };
 
+enum enic_poll_state {
+	ENIC_POLL_STATE_IDLE,
+	ENIC_POLL_STATE_NAPI,
+	ENIC_POLL_STATE_POLL
+};
+
 struct vnic_rq {
 	unsigned int index;
 	struct vnic_dev *vdev;
@@ -86,19 +93,7 @@ struct vnic_rq {
 	void *os_buf_head;
 	unsigned int pkts_outstanding;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define ENIC_POLL_STATE_IDLE		0
-#define ENIC_POLL_STATE_NAPI		(1 << 0) /* NAPI owns this poll */
-#define ENIC_POLL_STATE_POLL		(1 << 1) /* poll owns this poll */
-#define ENIC_POLL_STATE_NAPI_YIELD	(1 << 2) /* NAPI yielded this poll */
-#define ENIC_POLL_STATE_POLL_YIELD	(1 << 3) /* poll yielded this poll */
-#define ENIC_POLL_YIELD			(ENIC_POLL_STATE_NAPI_YIELD |	\
-					 ENIC_POLL_STATE_POLL_YIELD)
-#define ENIC_POLL_LOCKED		(ENIC_POLL_STATE_NAPI |		\
-					 ENIC_POLL_STATE_POLL)
-#define ENIC_POLL_USER_PEND		(ENIC_POLL_STATE_POLL |		\
-					 ENIC_POLL_STATE_POLL_YIELD)
-	unsigned int bpoll_state;
-	spinlock_t bpoll_lock;
+	atomic_t bpoll_state;
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
@@ -215,76 +210,43 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
 #ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
 {
-	spin_lock_init(&rq->bpoll_lock);
-	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
 {
-	bool rc = true;
-
-	spin_lock(&rq->bpoll_lock);
-	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
-		WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
-		rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		rq->bpoll_state = ENIC_POLL_STATE_NAPI;
-	}
-	spin_unlock(&rq->bpoll_lock);
+	int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+				ENIC_POLL_STATE_NAPI);
 
-	return rc;
+	return (rc == ENIC_POLL_STATE_IDLE);
 }
 
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
+					 struct napi_struct *napi)
 {
-	bool rc = false;
-
-	spin_lock(&rq->bpoll_lock);
-	WARN_ON(rq->bpoll_state &
-		(ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
-	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
-		rc = true;
-	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
-	spin_unlock(&rq->bpoll_lock);
-
-	return rc;
+	WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI);
+	napi_gro_flush(napi, false);
+	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
 {
-	bool rc = true;
-
-	spin_lock_bh(&rq->bpoll_lock);
-	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
-		rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		rq->bpoll_state |= ENIC_POLL_STATE_POLL;
-	}
-	spin_unlock_bh(&rq->bpoll_lock);
+	int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
+				ENIC_POLL_STATE_POLL);
 
-	return rc;
+	return (rc == ENIC_POLL_STATE_IDLE);
 }
 
-static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
+static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
 {
-	bool rc = false;
-
-	spin_lock_bh(&rq->bpoll_lock);
-	WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
-	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
-		rc = true;
-	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
-	spin_unlock_bh(&rq->bpoll_lock);
-
-	return rc;
+	WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL);
+	atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
 }
 
 static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
 {
-	WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
-	return rq->bpoll_state & ENIC_POLL_USER_PEND;
+	return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL;
 }
 
 #else
@@ -298,7 +260,8 @@ static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
 	return true;
 }
 
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq,
+					 struct napi_struct *napi)
 {
 	return false;
 }