Commit a2ec388b authored by NeilBrown, committed by Zheng Zengkai

SUNRPC: remove scheduling boost for "SWAPPER" tasks.

stable inclusion
from stable-v5.10.111
commit 4b6f122bdfdc048715d0ee630b3b20a1eea8094b
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I5GL1Z

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=4b6f122bdfdc048715d0ee630b3b20a1eea8094b

--------------------------------

[ Upstream commit a80a8461 ]

Currently, tasks marked as "swapper" tasks get put to the front of
non-priority rpc_queues, and are sorted earlier than non-swapper tasks on
the transport's ->xmit_queue.

This is pointless as currently *all* tasks for a mount that has swap
enabled on *any* file are marked as "swapper" tasks.  So the net result
is that the non-priority rpc_queues are reverse-ordered (LIFO).

This scheduling boost is not necessary to avoid deadlocks, and hurts
fairness, so remove it.  If there were a need to expedite some requests,
the tk_priority mechanism is a more appropriate tool.
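
As an aside, the LIFO effect described above is easy to reproduce outside the kernel. The following user-space sketch is illustrative only: the list_add()/list_add_tail() helpers below merely mimic the semantics of the kernel list helpers of the same names, and fake_task is a made-up stand-in for rpc_task. It shows that once every task takes the head-insertion path, tasks are woken in reverse order of arrival, while tail insertion preserves FIFO order.

/*
 * Minimal user-space illustration (not kernel code) of why head insertion
 * degenerates to LIFO when every task is treated as a "swapper" task.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void __list_insert(struct list_head *n, struct list_head *prev,
			  struct list_head *next)
{
	next->prev = n;
	n->next = next;
	n->prev = prev;
	prev->next = n;
}

/* insert at head -- what RPC_IS_SWAPPER() tasks used to get */
static void list_add(struct list_head *n, struct list_head *head)
{
	__list_insert(n, head, head->next);
}

/* insert at tail -- plain FIFO behaviour, the only path after this patch */
static void list_add_tail(struct list_head *n, struct list_head *head)
{
	__list_insert(n, head->prev, head);
}

struct fake_task {
	int id;
	struct list_head list;
};

static void run(const char *label, int use_head_insert)
{
	struct list_head queue;
	struct fake_task tasks[4];
	struct list_head *pos;
	int i;

	list_init(&queue);
	for (i = 0; i < 4; i++) {
		tasks[i].id = i + 1;
		if (use_head_insert)
			list_add(&tasks[i].list, &queue);	/* old swapper path */
		else
			list_add_tail(&tasks[i].list, &queue);	/* FIFO path */
	}

	printf("%s: wake-up order:", label);
	for (pos = queue.next; pos != &queue; pos = pos->next) {
		struct fake_task *t = (struct fake_task *)
			((char *)pos - offsetof(struct fake_task, list));
		printf(" %d", t->id);
	}
	printf("\n");
}

int main(void)
{
	run("all tasks 'swapper' (head insert)", 1);	/* prints 4 3 2 1 -> LIFO */
	run("plain tasks (tail insert)", 0);		/* prints 1 2 3 4 -> FIFO */
	return 0;
}

With the head-insertion branch removed by this patch, all non-priority tasks take the list_add_tail() path and are woken in arrival order.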
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Wei Li <liwei391@huawei.com>
Parent 7679d3c1
@@ -186,11 +186,6 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
 /*
  * Add new request to wait queue.
- *
- * Swapper tasks always get inserted at the head of the queue.
- * This should avoid many nasty memory deadlocks and hopefully
- * improve overall performance.
- * Everyone else gets appended to the queue to ensure proper FIFO behavior.
  */
 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 		struct rpc_task *task,
@@ -199,8 +194,6 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
 	INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
 	if (RPC_IS_PRIORITY(queue))
 		__rpc_add_wait_queue_priority(queue, task, queue_priority);
-	else if (RPC_IS_SWAPPER(task))
-		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
 	else
 		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
 	task->tk_waitqueue = queue;
...
@@ -1327,17 +1327,6 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
 			INIT_LIST_HEAD(&req->rq_xmit2);
 			goto out;
 		}
-	} else if (RPC_IS_SWAPPER(task)) {
-		list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
-			if (pos->rq_cong || pos->rq_bytes_sent)
-				continue;
-			if (RPC_IS_SWAPPER(pos->rq_task))
-				continue;
-			/* Note: req is added _before_ pos */
-			list_add_tail(&req->rq_xmit, &pos->rq_xmit);
-			INIT_LIST_HEAD(&req->rq_xmit2);
-			goto out;
-		}
 	} else if (!req->rq_seqno) {
 		list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
 			if (pos->rq_task->tk_owner != task->tk_owner)
...