Commit 924cc268 authored by David Daney; committed by Ralf Baechle

Staging: Octeon Ethernet: Enable scatter-gather.

Octeon ethernet hardware can handle NETIF_F_SG, so we enable it.

A gather list of up to six buffer pointers (the linear header plus up
to five page fragments) will fit in the SKB's CB structure, so no extra
memory is required.  If an SKB has more than five fragments, we must
linearize it.
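
A minimal compile-time sketch of that arithmetic, assuming the mainline
48-byte skb->cb (the helper name is illustrative, not part of the patch):

    #include <linux/kernel.h>   /* BUILD_BUG_ON() */
    #include <linux/skbuff.h>

    /* Six u64 gather entries (linear head + 5 frags) must fit in skb->cb. */
    static inline void check_gather_list_fits_cb(void)
    {
            BUILD_BUG_ON(sizeof(((struct sk_buff *)0)->cb) < 6 * sizeof(u64));
    }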
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
To: gregkh@suse.de
Patchwork: http://patchwork.linux-mips.org/patch/838/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent 3368c784
@@ -53,6 +53,8 @@
 #include "cvmx-gmxx-defs.h"
 
+#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))
+
 /*
  * You can define GET_SKBUFF_QOS() to override how the skbuff output
  * function determines which output queue is used. The default
@@ -121,6 +123,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	uint64_t old_scratch;
 	uint64_t old_scratch2;
 	int qos;
+	int i;
 	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	struct sk_buff *to_free_list;
@@ -170,6 +173,28 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 						   MAX_SKB_TO_FREE);
 	}
 
+	/*
+	 * We have space for 6 segment pointers.  If there will be more
+	 * than that, we must linearize.
+	 */
+	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
+		if (unlikely(__skb_linearize(skb))) {
+			queue_type = QUEUE_DROP;
+			if (USE_ASYNC_IOBDMA) {
+				/* Get the number of skbuffs in use by the hardware */
+				CVMX_SYNCIOBDMA;
+				skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+			} else {
+				/* Get the number of skbuffs in use by the hardware */
+				skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
+								       MAX_SKB_TO_FREE);
+			}
+			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
+			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+			goto skip_xmit;
+		}
+	}
+
 	/*
 	 * The CN3XXX series of parts has an errata (GMX-401) which
 	 * causes the GMX block to hang if a collision occurs towards
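
The fallback above leans on standard skbuff semantics: __skb_linearize()
pulls every page fragment into the linear data area and returns nonzero
only when the reallocation fails, at which point the packet can only be
dropped.  A minimal sketch of the pattern (hypothetical helper name):

    #include <linux/skbuff.h>

    /* Collapse an skb with too many fragments for a 6-entry gather list.
     * Returns 0 if the skb is now sendable, -ENOMEM if the reallocation
     * failed and the packet must be dropped. */
    static int flatten_if_too_fragmented(struct sk_buff *skb)
    {
            if (skb_shinfo(skb)->nr_frags <= 5)
                    return 0;       /* head plus frags fit the gather list */
            return __skb_linearize(skb);
    }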
@@ -198,13 +223,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	/* Build the PKO buffer pointer */
-	hw_buffer.u64 = 0;
-	hw_buffer.s.addr = cvmx_ptr_to_phys(skb->data);
-	hw_buffer.s.pool = 0;
-	hw_buffer.s.size =
-	    (unsigned long)skb_end_pointer(skb) - (unsigned long)skb->head;
-
 	/* Build the PKO command */
 	pko_command.u64 = 0;
 	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
@@ -215,6 +233,31 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	pko_command.s.dontfree = 1;
 	pko_command.s.reg0 = priv->fau + qos * 4;
 
+	/* Build the PKO buffer pointer */
+	hw_buffer.u64 = 0;
+	if (skb_shinfo(skb)->nr_frags == 0) {
+		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+		hw_buffer.s.pool = 0;
+		hw_buffer.s.size = skb->len;
+	} else {
+		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+		hw_buffer.s.pool = 0;
+		hw_buffer.s.size = skb_headlen(skb);
+		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+
+			hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
+			hw_buffer.s.size = fs->size;
+			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
+		}
+		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
+		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
+		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
+		pko_command.s.gather = 1;
+		goto dont_put_skbuff_in_hw;
+	}
+
 	/*
 	 * See if we can put this skb in the FPA pool. Any strange
 	 * behavior from the Linux networking stack will most likely
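
For reference, a sketch of the layout the gather branch above leaves in
skb->cb (a description of the patch's data structure, not additional
driver code): with pko_command.s.gather set, the PKO hardware reads the
submitted buffer pointer as the address of an array of segment
descriptors rather than of packet data.

    /*
     * Gather list built in skb->cb, viewed through CVM_OCT_SKB_CB():
     *
     *   CVM_OCT_SKB_CB(skb)[0]            linear header: skb->data,
     *                                     skb_headlen(skb) bytes
     *   CVM_OCT_SKB_CB(skb)[1..nr_frags]  one entry per page fragment
     *
     * The final hw_buffer word handed to the PKO points at this array,
     * with .size and pko_command.s.segs both set to nr_frags + 1.
     */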
...
@@ -484,8 +484,11 @@
 	    && (always_use_pow || strstr(pow_send_list, dev->name)))
 		priv->queue = -1;
 
-	if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
-		dev->features |= NETIF_F_IP_CSUM;
+	if (priv->queue != -1) {
+		dev->features |= NETIF_F_SG;
+		if (USE_HW_TCPUDP_CHECKSUM)
+			dev->features |= NETIF_F_IP_CSUM;
+	}
 
 	/* We do our own locking, Linux doesn't need to */
 	dev->features |= NETIF_F_LLTX;
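
Advertising NETIF_F_SG is what lets fragmented skbs reach
cvm_oct_xmit() at all: for devices without the flag, the core stack
linearizes packets before handing them to the driver.  A sketch of that
gate, paraphrasing the core-stack logic with an illustrative name:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Paraphrase of the core stack's check: a fragmented skb must be
     * linearized before transmit unless the device advertises NETIF_F_SG. */
    static bool sketch_needs_linearize(const struct sk_buff *skb,
                                       const struct net_device *dev)
    {
            return skb_shinfo(skb)->nr_frags != 0 &&
                   !(dev->features & NETIF_F_SG);
    }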
...