Commit a2bd1140 authored by Dave Jiang, committed by Dan Williams

netdma: adding alignment check for NETDMA ops

This is the fallout from adding a memcpy alignment workaround for certain
IOATDMA hardware. NetDMA will now only use a DMA engine that can handle
byte-aligned ops.
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Parent f26df1a1
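The gatekeeping in this patch rests on `is_dma_copy_aligned(chan->device, 1, 1, 1)`: a dmaengine device advertises its required memcpy alignment as a power-of-two shift (`copy_align`), and the helper checks that both source/destination offsets and the length fit that alignment. Probing with 1/1/1 therefore accepts only engines with no alignment constraint at all. Below is a minimal userspace sketch of that mask test, assuming the `dmaengine_check_align()` semantics of this era; it is an illustration (with a hypothetical `check_align` name), not the kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Simplified model of dmaengine's alignment check: copy_align is a
 * power-of-two shift (0 means the engine handles byte-aligned ops).
 * A transfer is acceptable only if both offsets and the length have
 * no bits set inside the alignment mask.
 */
static bool check_align(unsigned int copy_align,
                        size_t off1, size_t off2, size_t len)
{
    size_t mask = ((size_t)1 << copy_align) - 1;

    return !(mask & (off1 | off2 | len));
}

int main(void)
{
    /* copy_align == 0: byte-capable engine, usable by NetDMA */
    printf("byte-capable engine: %d\n", check_align(0, 1, 1, 1));
    /* copy_align == 3: needs 8-byte alignment, so NetDMA skips it */
    printf("8-byte-only engine:  %d\n", check_align(3, 1, 1, 1));
    return 0;
}

Probing with the smallest possible offsets and length is a cheap way to ask "does this engine impose any alignment constraint?" without enumerating its capabilities.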
drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+    struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+    if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+        return NULL;
+
+    return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
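A note on the design: because the probe uses offsets and a length of 1, `net_dma_find_channel()` returns a channel only when the engine imposes no alignment constraint; a device whose `copy_align` was raised by the IOATDMA workaround makes the helper return NULL. As the hunks below show, the converted TCP call sites already treat a NULL `tp->ucopy.dma_chan` as "no offload" and take the ordinary CPU copy path, so no extra fallback logic is needed.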
include/linux/dmaengine.h
@@ -948,6 +948,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */
net/ipv4/tcp.c
@@ -1450,7 +1450,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         if ((available < target) &&
             (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
             !sysctl_tcp_low_latency &&
-            dma_find_channel(DMA_MEMCPY)) {
+            net_dma_find_channel()) {
             preempt_enable_no_resched();
             tp->ucopy.pinned_list =
                     dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1665,7 +1665,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
             if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
                 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-                    tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+                    tp->ucopy.dma_chan = net_dma_find_channel();
                 if (tp->ucopy.dma_chan) {
                     tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
net/ipv4/tcp_input.c
@@ -5190,7 +5190,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
         return 0;
 
     if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-        tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+        tp->ucopy.dma_chan = net_dma_find_channel();
 
     if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
net/ipv4/tcp_ipv4.c
@@ -1727,7 +1727,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_DMA
         struct tcp_sock *tp = tcp_sk(sk);
         if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-            tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+            tp->ucopy.dma_chan = net_dma_find_channel();
         if (tp->ucopy.dma_chan)
             ret = tcp_v4_do_rcv(sk, skb);
         else
net/ipv6/tcp_ipv6.c
@@ -1755,7 +1755,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_DMA
         struct tcp_sock *tp = tcp_sk(sk);
         if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-            tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+            tp->ucopy.dma_chan = net_dma_find_channel();
         if (tp->ucopy.dma_chan)
             ret = tcp_v6_do_rcv(sk, skb);
         else