Commit adf90eb4, authored by Paul E. McKenney

drivers/infiniband: Remove now-redundant smp_read_barrier_depends()

The smp_read_barrier_depends() does nothing at all except on DEC Alpha,
and no current DEC Alpha systems use InfiniBand:

	lkml.kernel.org/r/20171023085921.jwbntptn6ictbnvj@tower

This commit therefore makes InfiniBand depend on !ALPHA and removes
the now-ineffective invocations of smp_read_barrier_depends() from
the InfiniBand driver.
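
For illustration, here is a minimal sketch (hypothetical names, not
driver code) of the pointer-publication pattern that
smp_read_barrier_depends() was designed to order; the drivers' ring-index
checks in the hunks below are variants of it.  With Alpha excluded,
READ_ONCE() alone suffices on every remaining architecture:

	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <asm/barrier.h>	/* smp_store_release() */

	struct msg {
		int payload;
	};

	struct msg *shared_ptr;	/* written by one CPU, read by another */

	/* Producer: initialize the message, then publish the pointer. */
	void publish(struct msg *m)
	{
		m->payload = 42;
		smp_store_release(&shared_ptr, m);
	}

	/* Consumer: load the pointer, then dereference it. */
	int consume(void)
	{
		struct msg *m = READ_ONCE(shared_ptr);

		if (!m)
			return -1;
		/*
		 * Historically, smp_read_barrier_depends() sat here for
		 * DEC Alpha's benefit.  It is a no-op everywhere else,
		 * so with "depends on !ALPHA" it is dead code.
		 */
		return m->payload;
	}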

Please note that this patch should not be construed as my saying that
InfiniBand's memory ordering is correct, but rather that this patch does
not in any way affect InfiniBand's correctness.  In other words, the
result of applying this patch is bug-for-bug compatible with the original.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Doug Ledford <dledford@redhat.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: <linux-rdma@vger.kernel.org>
Cc: <linux-alpha@vger.kernel.org>
[ paulmck: Removed drivers/dma/ioat/dma.c per Jason Gunthorpe's feedback. ]
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
Parent: d963007c
drivers/infiniband/Kconfig
@@ -4,6 +4,7 @@ menuconfig INFINIBAND
 	depends on NET
 	depends on INET
 	depends on m || IPV6 != m
+	depends on !ALPHA
 	select IRQ_POLL
 	---help---
 	  Core support for InfiniBand (IB). Make sure to also select
...@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -302,7 +302,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail; goto bail;
/* We are in the error state, flush the work request. */ /* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head)) if (qp->s_last == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
...@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -346,7 +345,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
newreq = 0; newreq = 0;
if (qp->s_cur == qp->s_tail) { if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */ /* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head)) { if (qp->s_tail == READ_ONCE(qp->s_head)) {
clear_ahg(qp); clear_ahg(qp);
goto bail; goto bail;
...@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd, ...@@ -900,7 +898,6 @@ void hfi1_send_rc_ack(struct hfi1_ctxtdata *rcd,
} }
/* Ensure s_rdma_ack_cnt changes are committed */ /* Ensure s_rdma_ack_cnt changes are committed */
smp_read_barrier_depends();
if (qp->s_rdma_ack_cnt) { if (qp->s_rdma_ack_cnt) {
hfi1_queue_rc_ack(qp, is_fecn); hfi1_queue_rc_ack(qp, is_fecn);
return; return;
...@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet) ...@@ -1562,7 +1559,6 @@ static void rc_rcv_resp(struct hfi1_packet *packet)
trace_hfi1_ack(qp, psn); trace_hfi1_ack(qp, psn);
/* Ignore invalid responses. */ /* Ignore invalid responses. */
smp_read_barrier_depends(); /* see post_one_send */
if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0) if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done; goto ack_done;
......
...@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp) ...@@ -362,7 +362,6 @@ static void ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY; sqp->s_flags |= RVT_S_BUSY;
again: again:
smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head)) if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy; goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
......
...@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque) ...@@ -553,7 +553,6 @@ static void sdma_hw_clean_up_task(unsigned long opaque)
static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde) static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{ {
smp_read_barrier_depends(); /* see sdma_update_tail() */
return sde->tx_ring[sde->tx_head & sde->sdma_mask]; return sde->tx_ring[sde->tx_head & sde->sdma_mask];
} }
......
...@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -79,7 +79,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail; goto bail;
/* We are in the error state, flush the work request. */ /* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head)) if (qp->s_last == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
...@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -119,7 +118,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
RVT_PROCESS_NEXT_SEND_OK)) RVT_PROCESS_NEXT_SEND_OK))
goto bail; goto bail;
/* Check if send work queue is empty. */ /* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head)) { if (qp->s_cur == READ_ONCE(qp->s_head)) {
clear_ahg(qp); clear_ahg(qp);
goto bail; goto bail;
......
...@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -486,7 +486,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail; goto bail;
/* We are in the error state, flush the work request. */ /* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head)) if (qp->s_last == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
...@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) ...@@ -500,7 +499,6 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
} }
/* see post_one_send() */ /* see post_one_send() */
smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head)) if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail; goto bail;
......
...@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) ...@@ -246,7 +246,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail; goto bail;
/* We are in the error state, flush the work request. */ /* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head)) if (qp->s_last == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
...@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags) ...@@ -293,7 +292,6 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
newreq = 0; newreq = 0;
if (qp->s_cur == qp->s_tail) { if (qp->s_cur == qp->s_tail) {
/* Check if send work queue is empty. */ /* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_tail == READ_ONCE(qp->s_head)) if (qp->s_tail == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* /*
...@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, ...@@ -1340,7 +1338,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
goto ack_done; goto ack_done;
/* Ignore invalid responses. */ /* Ignore invalid responses. */
smp_read_barrier_depends(); /* see post_one_send */
if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0) if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
goto ack_done; goto ack_done;
......
...@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) ...@@ -367,7 +367,6 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
sqp->s_flags |= RVT_S_BUSY; sqp->s_flags |= RVT_S_BUSY;
again: again:
smp_read_barrier_depends(); /* see post_one_send() */
if (sqp->s_last == READ_ONCE(sqp->s_head)) if (sqp->s_last == READ_ONCE(sqp->s_head))
goto clr_busy; goto clr_busy;
wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
......
...@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) ...@@ -60,7 +60,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail; goto bail;
/* We are in the error state, flush the work request. */ /* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_last == READ_ONCE(qp->s_head)) if (qp->s_last == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
...@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) ...@@ -90,7 +89,6 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
RVT_PROCESS_NEXT_SEND_OK)) RVT_PROCESS_NEXT_SEND_OK))
goto bail; goto bail;
/* Check if send work queue is empty. */ /* Check if send work queue is empty. */
smp_read_barrier_depends(); /* see post_one_send() */
if (qp->s_cur == READ_ONCE(qp->s_head)) if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* /*
......
...@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) ...@@ -252,7 +252,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail; goto bail;
/* We are in the error state, flush the work request. */ /* We are in the error state, flush the work request. */
smp_read_barrier_depends(); /* see post_one_send */
if (qp->s_last == READ_ONCE(qp->s_head)) if (qp->s_last == READ_ONCE(qp->s_head))
goto bail; goto bail;
/* If DMAs are in progress, we can't flush immediately. */ /* If DMAs are in progress, we can't flush immediately. */
...@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) ...@@ -266,7 +265,6 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
} }
/* see post_one_send() */ /* see post_one_send() */
smp_read_barrier_depends();
if (qp->s_cur == READ_ONCE(qp->s_head)) if (qp->s_cur == READ_ONCE(qp->s_head))
goto bail; goto bail;
......
...@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail( ...@@ -1684,7 +1684,6 @@ static inline int rvt_qp_is_avail(
/* non-reserved operations */ /* non-reserved operations */
if (likely(qp->s_avail)) if (likely(qp->s_avail))
return 0; return 0;
smp_read_barrier_depends(); /* see rc.c */
slast = READ_ONCE(qp->s_last); slast = READ_ONCE(qp->s_last);
if (qp->s_head >= slast) if (qp->s_head >= slast)
avail = qp->s_size - (qp->s_head - slast); avail = qp->s_size - (qp->s_head - slast);
......
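
As a side note on the hunk above: s_head and s_last index a ring of
qp->s_size send-queue slots, and rvt_qp_is_avail() computes the free
space with the standard wraparound arithmetic.  A standalone sketch
(illustrative only, not driver code):

	/* Free slots with producer at 'head' and consumer at 'last'. */
	static unsigned int ring_avail(unsigned int size, unsigned int head,
				       unsigned int last)
	{
		return (head >= last) ? size - (head - last) : last - head;
	}

	/* e.g. ring_avail(16, 3, 10) == 7; ring_avail(16, 12, 4) == 8 */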