Commit b6eac931 authored by Mike Marciniszyn, committed by Doug Ledford

IB/hfi1: Prevent kernel QP post send hard lockups

The driver progress routines can call cond_resched() when
a timeslice is exhausted and irqs are enabled.

If the ULP had been holding a spin lock without disabling irqs and
the post send directly called the progress routine, the cond_resched()
could yield allowing another thread from the same ULP to deadlock
on that same lock.

Correct by replacing the current hfi1_do_send() calldown with a unique
one for post send and adding an argument to hfi1_do_send() to indicate
that the send engine is running in a thread. If the routine is not
running in a thread, avoid calling cond_resched().
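
To make the failure mode concrete, here is a minimal, hypothetical ULP-side
sketch. The ulp_conn structure, tx_lock, and both functions are invented for
illustration only, and the snippet is written against the 4.x-era
ib_post_send() signature:

#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

struct ulp_conn {
	spinlock_t tx_lock;	/* taken with spin_lock(); IRQs stay enabled */
	struct ib_qp *qp;
};

/* Context A: posts a send while holding tx_lock. */
static int ulp_xmit(struct ulp_conn *conn, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	spin_lock(&conn->tx_lock);
	/*
	 * ib_post_send() may call straight down into the driver's send
	 * engine.  Before this fix, that engine could cond_resched() once
	 * its timeslice expired, yielding the CPU with tx_lock still held.
	 */
	ret = ib_post_send(conn->qp, wr, &bad_wr);
	spin_unlock(&conn->tx_lock);
	return ret;
}

/* Context B: another thread of the same ULP taking the same lock. */
static void ulp_other_path(struct ulp_conn *conn)
{
	/* Spins forever if it lands on the CPU that context A yielded. */
	spin_lock(&conn->tx_lock);
	/* ... */
	spin_unlock(&conn->tx_lock);
}

With this patch, the direct post-send path runs hfi1_do_send() with
in_thread = false and never calls cond_resched(), so the yield above can no
longer happen.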

CC: <stable@vger.kernel.org> # 4.7.x-
Fixes: 831464ce ("IB/hfi1: Don't call cond_resched in atomic mode when sending packets")
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Parent 3d591099
drivers/infiniband/hw/hfi1/ruc.c

 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -784,23 +784,29 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
 /* when sending, force a reschedule every one of these periods */
 #define SEND_RESCHED_TIMEOUT (5 * HZ)	/* 5s in jiffies */

+void hfi1_do_send_from_rvt(struct rvt_qp *qp)
+{
+	hfi1_do_send(qp, false);
+}
+
 void _hfi1_do_send(struct work_struct *work)
 {
 	struct iowait *wait = container_of(work, struct iowait, iowork);
 	struct rvt_qp *qp = iowait_to_qp(wait);

-	hfi1_do_send(qp);
+	hfi1_do_send(qp, true);
 }

 /**
  * hfi1_do_send - perform a send on a QP
  * @work: contains a pointer to the QP
+ * @in_thread: true if in a workqueue thread
  *
  * Process entries in the send work queue until credit or queue is
  * exhausted.  Only allow one CPU to send a packet per QP.
  * Otherwise, two threads could send packets out of order.
  */
-void hfi1_do_send(struct rvt_qp *qp)
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
 {
 	struct hfi1_pkt_state ps;
 	struct hfi1_qp_priv *priv = qp->priv;
@@ -868,8 +874,10 @@ void hfi1_do_send(struct rvt_qp *qp)
 			qp->s_hdrwords = 0;
 			/* allow other tasks to run */
 			if (unlikely(time_after(jiffies, timeout))) {
-				if (workqueue_congested(cpu,
-							ps.ppd->hfi1_wq)) {
+				if (!in_thread ||
+				    workqueue_congested(
+						cpu,
+						ps.ppd->hfi1_wq)) {
 					spin_lock_irqsave(
 						&qp->s_lock,
 						ps.flags);
@@ -882,11 +890,9 @@ void hfi1_do_send(struct rvt_qp *qp)
 						*ps.ppd->dd->send_schedule);
 					return;
 				}
-				if (!irqs_disabled()) {
-					cond_resched();
-					this_cpu_inc(
-					   *ps.ppd->dd->send_schedule);
-				}
+				cond_resched();
+				this_cpu_inc(
+					*ps.ppd->dd->send_schedule);
 				timeout = jiffies + (timeout_int) / 8;
 			}
 			spin_lock_irqsave(&qp->s_lock, ps.flags);
drivers/infiniband/hw/hfi1/verbs.c

 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -1820,7 +1820,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.qp_priv_free = qp_priv_free;
 	dd->verbs_dev.rdi.driver_f.free_all_qps = free_all_qps;
 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
-	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send;
+	dd->verbs_dev.rdi.driver_f.do_send = hfi1_do_send_from_rvt;
 	dd->verbs_dev.rdi.driver_f.schedule_send = hfi1_schedule_send;
 	dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _hfi1_schedule_send;
 	dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = get_pmtu_from_attr;
drivers/infiniband/hw/hfi1/verbs.h

 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -355,7 +355,9 @@ void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,

 void _hfi1_do_send(struct work_struct *work);

-void hfi1_do_send(struct rvt_qp *qp);
+void hfi1_do_send_from_rvt(struct rvt_qp *qp);
+
+void hfi1_do_send(struct rvt_qp *qp, bool in_thread);

 void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 			enum ib_wc_status status);