Commit 3ef0eb0d authored by Jesper Dangaard Brouer, committed by David S. Miller

net: frag, move LRU list maintenance outside of rwlock

Updating the fragmentation queues' LRU (Least-Recently-Used) list
required taking the hash writer lock.  However, the LRU list isn't
tied to the hash at all, so we can use a separate lock for it.
Original-idea-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 6d7b857d
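For readers who want to experiment with the locking split outside the kernel, below is a minimal userspace sketch of the same idea, assuming pthreads as a stand-in for kernel locks: the hash table would keep its reader/writer lock, while the LRU list is guarded exclusively by its own lock, so an LRU update never touches the hash lock.  All names in the sketch (frag_table, frag_entry, lru_touch, ...) are illustrative only and are not part of the kernel patch.

/*
 * Userspace sketch only; not kernel code.  hash_lock mirrors the hash
 * rwlock, lru_lock mirrors the new per-netns LRU spinlock.
 */
#include <pthread.h>
#include <stdio.h>

struct frag_entry {
	int id;
	struct frag_entry *prev, *next;		/* LRU linkage */
};

struct frag_table {
	pthread_rwlock_t hash_lock;		/* would protect hash chains only */
	pthread_mutex_t  lru_lock;		/* protects only the LRU list */
	struct frag_entry lru;			/* circular list head, oldest first */
};

static void frag_table_init(struct frag_table *t)
{
	pthread_rwlock_init(&t->hash_lock, NULL);
	pthread_mutex_init(&t->lru_lock, NULL);
	t->lru.prev = t->lru.next = &t->lru;
}

/* Callers of the *_locked helpers hold lru_lock. */
static void lru_del_locked(struct frag_entry *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void lru_add_tail_locked(struct frag_table *t, struct frag_entry *e)
{
	e->prev = t->lru.prev;
	e->next = &t->lru;
	t->lru.prev->next = e;
	t->lru.prev = e;
}

/* Analogue of inet_frag_lru_move(): only lru_lock is taken,
 * the hash rwlock is never involved in an LRU update. */
static void lru_touch(struct frag_table *t, struct frag_entry *e)
{
	pthread_mutex_lock(&t->lru_lock);
	lru_del_locked(e);
	lru_add_tail_locked(t, e);
	pthread_mutex_unlock(&t->lru_lock);
}

int main(void)
{
	struct frag_table t;
	struct frag_entry a = { .id = 1 }, b = { .id = 2 };
	struct frag_entry *e;

	frag_table_init(&t);

	pthread_mutex_lock(&t.lru_lock);
	lru_add_tail_locked(&t, &a);
	lru_add_tail_locked(&t, &b);
	pthread_mutex_unlock(&t.lru_lock);

	lru_touch(&t, &a);	/* "a" becomes most recently used */

	for (e = t.lru.next; e != &t.lru; e = e->next)
		printf("id=%d\n", e->id);	/* prints 2 then 1 */

	return 0;
}

Assuming LRU updates are far more frequent than hash insertions and removals, keeping them off the hash writer lock avoids serializing unrelated work, which is the point of the patch below.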
@@ -6,6 +6,7 @@
 struct netns_frags {
 	int			nqueues;
 	struct list_head	lru_list;
+	spinlock_t		lru_lock;
 
 	/* The percpu_counter "mem" need to be cacheline aligned.
 	 *  mem.count must not share cacheline with other writers
@@ -116,4 +117,25 @@ static inline int sum_frag_mem_limit(struct netns_frags *nf)
 	return percpu_counter_sum_positive(&nf->mem);
 }
 
+static inline void inet_frag_lru_move(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_move_tail(&q->lru_list, &q->net->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_del(struct inet_frag_queue *q)
+{
+	spin_lock(&q->net->lru_lock);
+	list_del(&q->lru_list);
+	spin_unlock(&q->net->lru_lock);
+}
+
+static inline void inet_frag_lru_add(struct netns_frags *nf,
+				     struct inet_frag_queue *q)
+{
+	spin_lock(&nf->lru_lock);
+	list_add_tail(&q->lru_list, &nf->lru_list);
+	spin_unlock(&nf->lru_lock);
+}
 #endif
@@ -75,6 +75,7 @@ void inet_frags_init_net(struct netns_frags *nf)
 	nf->nqueues = 0;
 	init_frag_mem_limit(nf);
 	INIT_LIST_HEAD(&nf->lru_list);
+	spin_lock_init(&nf->lru_lock);
 }
 EXPORT_SYMBOL(inet_frags_init_net);
@@ -100,9 +101,9 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
 {
 	write_lock(&f->lock);
 	hlist_del(&fq->list);
-	list_del(&fq->lru_list);
 	fq->net->nqueues--;
 	write_unlock(&f->lock);
+	inet_frag_lru_del(fq);
 }
 
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
@@ -170,16 +171,17 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
 
 	work = frag_mem_limit(nf) - nf->low_thresh;
 	while (work > 0) {
-		read_lock(&f->lock);
+		spin_lock(&nf->lru_lock);
+
 		if (list_empty(&nf->lru_list)) {
-			read_unlock(&f->lock);
+			spin_unlock(&nf->lru_lock);
 			break;
 		}
 
 		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
 		atomic_inc(&q->refcnt);
-		read_unlock(&f->lock);
+		spin_unlock(&nf->lru_lock);
 
 		spin_lock(&q->lock);
 		if (!(q->last_in & INET_FRAG_COMPLETE))
@@ -233,9 +235,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 	atomic_inc(&qp->refcnt);
 	hlist_add_head(&qp->list, &f->hash[hash]);
-	list_add_tail(&qp->lru_list, &nf->lru_list);
 	nf->nqueues++;
 	write_unlock(&f->lock);
+	inet_frag_lru_add(nf, qp);
 	return qp;
 }
@@ -529,9 +529,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	    qp->q.meat == qp->q.len)
 		return ip_frag_reasm(qp, prev, dev);
 
-	write_lock(&ip4_frags.lock);
-	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
-	write_unlock(&ip4_frags.lock);
+	inet_frag_lru_move(&qp->q);
 	return -EINPROGRESS;
 
 err:
@@ -328,9 +328,8 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->nhoffset = nhoff;
 		fq->q.last_in |= INET_FRAG_FIRST_IN;
 	}
-	write_lock(&nf_frags.lock);
-	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
-	write_unlock(&nf_frags.lock);
+
+	inet_frag_lru_move(&fq->q);
 	return 0;
 
 discard_fq:
@@ -341,9 +341,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	    fq->q.meat == fq->q.len)
 		return ip6_frag_reasm(fq, prev, dev);
 
-	write_lock(&ip6_frags.lock);
-	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
-	write_unlock(&ip6_frags.lock);
+	inet_frag_lru_move(&fq->q);
 	return -1;
 
 discard_fq: