Commit 6610f720 authored by J. Bruce Fields

svcrpc: minor cache cleanup

Pull out some code into helper functions, fix a typo.
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Parent: f16b6e8d
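
The two new helpers factor out list bookkeeping that the old code open-coded in several places. Below is a minimal, self-contained userspace sketch of the pattern they encapsulate: every deferred request is linked on a per-item hash chain and on a global most-recent-first list, with a shared counter kept in step. The toy list helpers, struct deferred_req, and DFR_HASHSIZE here are illustrative stand-ins for <linux/list.h>, struct cache_deferred_req, and the kernel's constants, not the kernel's own code:

#include <stdio.h>

/* Toy doubly-linked list, standing in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
        /* Insert n right after head, so the list runs newest to oldest. */
        n->next = head->next; n->prev = head;
        head->next->prev = n; head->next = n;
}

static void list_del_init(struct list_head *n)
{
        /* Unlink n and leave it pointing at itself (safe to re-delete). */
        n->prev->next = n->next; n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
}

#define DFR_HASHSIZE 8          /* stand-in for the kernel's value */

struct deferred_req {            /* stand-in for struct cache_deferred_req */
        struct list_head hash;   /* chain of requests deferred on one item */
        struct list_head recent; /* global list, most recent first */
        int item_hash;           /* stand-in for DFR_HASH(item) */
};

static struct list_head defer_list;                /* cache_defer_list */
static struct list_head defer_hash[DFR_HASHSIZE];  /* cache_defer_hash */
static int defer_cnt;                              /* cache_defer_cnt */

/* Mirrors __hash_deferred_req: the one place that links a request in.
 * Hash buckets are initialized lazily, as in the patch. */
static void hash_deferred_req(struct deferred_req *dreq)
{
        list_add(&dreq->recent, &defer_list);
        if (defer_hash[dreq->item_hash].next == NULL)
                INIT_LIST_HEAD(&defer_hash[dreq->item_hash]);
        list_add(&dreq->hash, &defer_hash[dreq->item_hash]);
}

/* Mirrors __unhash_deferred_req: the one place that unlinks it again.
 * Note the asymmetry: unhash decrements the counter, but the caller
 * increments it, because cache_defer_req tests ++cache_defer_cnt. */
static void unhash_deferred_req(struct deferred_req *dreq)
{
        list_del_init(&dreq->recent);
        list_del_init(&dreq->hash);
        defer_cnt--;
}

int main(void)
{
        struct deferred_req a = { .item_hash = 3 }, b = { .item_hash = 3 };

        INIT_LIST_HEAD(&defer_list);    /* buckets stay zeroed until used */

        hash_deferred_req(&a); defer_cnt++;
        hash_deferred_req(&b); defer_cnt++;

        /* defer_list.prev is now the oldest entry (&a); evict it the way
         * cache_defer_req evicts once the count exceeds DFR_MAX. */
        unhash_deferred_req(&a);
        printf("deferred requests left: %d\n", defer_cnt);  /* prints 1 */
        return 0;
}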
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -520,10 +520,26 @@ static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
         complete(&dr->completion);
 }
 
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
+{
+        list_del_init(&dreq->recent);
+        list_del_init(&dreq->hash);
+        cache_defer_cnt--;
+}
+
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
+{
+        int hash = DFR_HASH(item);
+
+        list_add(&dreq->recent, &cache_defer_list);
+        if (cache_defer_hash[hash].next == NULL)
+                INIT_LIST_HEAD(&cache_defer_hash[hash]);
+        list_add(&dreq->hash, &cache_defer_hash[hash]);
+}
+
 static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
         struct cache_deferred_req *dreq, *discard;
-        int hash = DFR_HASH(item);
         struct thread_deferred_req sleeper;
 
         if (cache_defer_cnt >= DFR_MAX) {
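
Note the asymmetry between the helpers: __unhash_deferred_req decrements cache_defer_cnt itself, while __hash_deferred_req leaves the increment to the caller, which (in the next hunk) needs the post-increment value to test against DFR_MAX.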
@@ -549,20 +565,14 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 
         spin_lock(&cache_defer_lock);
 
-        list_add(&dreq->recent, &cache_defer_list);
-        if (cache_defer_hash[hash].next == NULL)
-                INIT_LIST_HEAD(&cache_defer_hash[hash]);
-        list_add(&dreq->hash, &cache_defer_hash[hash]);
+        __hash_deferred_req(dreq, item);
 
         /* it is in, now maybe clean up */
         discard = NULL;
         if (++cache_defer_cnt > DFR_MAX) {
                 discard = list_entry(cache_defer_list.prev,
                                      struct cache_deferred_req, recent);
-                list_del_init(&discard->recent);
-                list_del_init(&discard->hash);
-                cache_defer_cnt--;
+                __unhash_deferred_req(discard);
         }
         spin_unlock(&cache_defer_lock);
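
Because __hash_deferred_req adds new entries at the head of cache_defer_list, cache_defer_list.prev is the oldest deferred request; once the count passes DFR_MAX, that oldest entry is the one unhashed and discarded.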
@@ -584,9 +594,7 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
          */
         spin_lock(&cache_defer_lock);
         if (!list_empty(&sleeper.handle.hash)) {
-                list_del_init(&sleeper.handle.recent);
-                list_del_init(&sleeper.handle.hash);
-                cache_defer_cnt--;
+                __unhash_deferred_req(&sleeper.handle);
                 spin_unlock(&cache_defer_lock);
         } else {
                 /* cache_revisit_request already removed
@@ -632,9 +640,8 @@ static void cache_revisit_request(struct cache_head *item)
                         dreq = list_entry(lp, struct cache_deferred_req, hash);
                         lp = lp->next;
                         if (dreq->item == item) {
-                                list_del_init(&dreq->hash);
-                                list_move(&dreq->recent, &pending);
-                                cache_defer_cnt--;
+                                __unhash_deferred_req(dreq);
+                                list_add(&dreq->recent, &pending);
                         }
                 }
         }
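
Here cache_revisit_request unlinks every deferred request matching item while holding cache_defer_lock and collects them on a local pending list, which can then be revisited after the lock is dropped. With __unhash_deferred_req doing the list_del_init, the old list_move collapses into a plain list_add onto pending.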
@@ -657,11 +664,8 @@ void cache_clean_deferred(void *owner)
         spin_lock(&cache_defer_lock);
 
         list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
-                if (dreq->owner == owner) {
-                        list_del_init(&dreq->hash);
-                        list_move(&dreq->recent, &pending);
-                        cache_defer_cnt--;
-                }
+                if (dreq->owner == owner)
+                        __unhash_deferred_req(dreq);
         }
         spin_unlock(&cache_defer_lock);
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -665,7 +665,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
         atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 
         /* As there is a shortage of threads and this request
-         * had to be queue, don't allow the thread to wait so
+         * had to be queued, don't allow the thread to wait so
          * long for cache updates.
          */
         rqstp->rq_chandle.thread_wait = 1*HZ;
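
The svc_recv hunk only fixes the comment's grammar, but it sits next to the knob the deferral code honours: rq_chandle.thread_wait bounds how long a thread may block waiting for a cache fill, and under a thread shortage it is held to 1*HZ jiffies, i.e. about one second. As a hedged sketch (assumed from the sleeper/completion machinery visible in the cache.c hunks, not quoted verbatim from the kernel), the deferral path would consume it roughly like this:

        /* In cache_defer_req, once sleeper.handle has been hashed:
         * sleep for at most req->thread_wait jiffies until
         * cache_restart_thread() calls complete() on the sleeper;
         * callers would check the return value to tell a timeout
         * apart from a wakeup. */
        wait_for_completion_interruptible_timeout(&sleeper.completion,
                                                  req->thread_wait);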