Commit a9380b0f authored by Iyappan Subramanian, committed by David S. Miller

drivers: net: xgene: Add support for Jumbo frame

This patch adds support for jumbo frames by allocating an additional
buffer (page) pool and configuring the hardware to use it.
Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
Signed-off-by: Quan Nguyen <qnguyen@apm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent d6d48969
@@ -679,6 +679,9 @@ static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
 	for (i = 0; i < pdata->rxq_cnt; i++) {
 		ring = pdata->rx_ring[i]->buf_pool;
 		pb |= BIT(xgene_enet_get_fpsel(ring->id));
+		ring = pdata->rx_ring[i]->page_pool;
+		if (ring)
+			pb |= BIT(xgene_enet_get_fpsel(ring->id));
 	}
 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
@@ -37,6 +37,9 @@ static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 	struct xgene_enet_raw_desc16 *raw_desc;
 	int i;
 
+	if (!buf_pool)
+		return;
+
 	for (i = 0; i < buf_pool->slots; i++) {
 		raw_desc = &buf_pool->raw_desc16[i];
@@ -47,6 +50,86 @@ static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 	}
 }
 
+static u16 xgene_enet_get_data_len(u64 bufdatalen)
+{
+	u16 hw_len, mask;
+
+	hw_len = GET_VAL(BUFDATALEN, bufdatalen);
+
+	if (unlikely(hw_len == 0x7800)) {
+		return 0;
+	} else if (!(hw_len & BIT(14))) {
+		mask = GENMASK(13, 0);
+		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
+	} else if (!(hw_len & GENMASK(13, 12))) {
+		mask = GENMASK(11, 0);
+		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
+	} else {
+		mask = GENMASK(11, 0);
+		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
+	}
+}
+
+static u16 xgene_enet_set_data_len(u32 size)
+{
+	u16 hw_len;
+
+	hw_len = (size == SIZE_4K) ? BIT(14) : 0;
+
+	return hw_len;
+}
+
+static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
+				      u32 nbuf)
+{
+	struct xgene_enet_raw_desc16 *raw_desc;
+	struct xgene_enet_pdata *pdata;
+	struct net_device *ndev;
+	dma_addr_t dma_addr;
+	struct device *dev;
+	struct page *page;
+	u32 slots, tail;
+	u16 hw_len;
+	int i;
+
+	if (unlikely(!buf_pool))
+		return 0;
+
+	ndev = buf_pool->ndev;
+	pdata = netdev_priv(ndev);
+	dev = ndev_to_dev(ndev);
+	slots = buf_pool->slots - 1;
+	tail = buf_pool->tail;
+
+	for (i = 0; i < nbuf; i++) {
+		raw_desc = &buf_pool->raw_desc16[tail];
+
+		page = dev_alloc_page();
+		if (unlikely(!page))
+			return -ENOMEM;
+
+		dma_addr = dma_map_page(dev, page, 0,
+					PAGE_SIZE, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev, dma_addr))) {
+			put_page(page);
+			return -ENOMEM;
+		}
+
+		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
+		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+					   SET_VAL(BUFDATALEN, hw_len) |
+					   SET_BIT(COHERENT));
+
+		buf_pool->frag_page[tail] = page;
+		tail = (tail + 1) & slots;
+	}
+
+	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
+	buf_pool->tail = tail;
+
+	return 0;
+}
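The two new helpers above define the hardware length encoding: bit 14 and bits 13:12 of BUFDATALEN select the buffer-size class (16K, 4K, or 2K), the low bits carry the byte count, and a count of zero means the buffer was filled completely. The following is a minimal userspace sketch of that decode logic for experimentation; BIT()/GENMASK() are reimplemented locally because <linux/bits.h> is kernel-only, and the sample values are made up, not read from hardware.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)        (1u << (n))
#define GENMASK(h, l) (((1u << ((h) - (l) + 1)) - 1) << (l))

enum { SIZE_2K = 2048, SIZE_4K = 4096, SIZE_16K = 16384 };

/* Userspace mirror of xgene_enet_get_data_len(): the size class comes
 * from bits 14:12, the low bits hold the byte count, zero count means
 * "buffer completely filled". */
static uint16_t decode_len(uint16_t hw_len)
{
	uint16_t mask;

	if (hw_len == 0x7800)              /* sentinel: no data */
		return 0;
	if (!(hw_len & BIT(14))) {         /* 16K class */
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	}
	if (!(hw_len & GENMASK(13, 12))) { /* 4K class */
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
	}
	mask = GENMASK(11, 0);             /* 2K class */
	return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
}

int main(void)
{
	/* xgene_enet_set_data_len(PAGE_SIZE) posts BIT(14) for a 4K page;
	 * on completion the hardware fills in the low bits. */
	printf("%u\n", decode_len(BIT(14)));        /* 4096: full page    */
	printf("%u\n", decode_len(BIT(14) | 1500)); /* 1500: partial fill */
	printf("%u\n", decode_len(0x7800));         /* 0: empty slot      */
	return 0;
}

Compiled with a plain cc invocation, it prints 4096, 1500, and 0, matching the three cases xgene_enet_get_data_len() distinguishes for a 4K page posted by xgene_enet_refill_pagepool().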
@@ -64,8 +147,9 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
 	ndev = buf_pool->ndev;
 	dev = ndev_to_dev(buf_pool->ndev);
 	pdata = netdev_priv(ndev);
+
 	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
-	len = XGENE_ENET_MAX_MTU;
+	len = XGENE_ENET_STD_MTU;
 
 	for (i = 0; i < nbuf; i++) {
 		raw_desc = &buf_pool->raw_desc16[tail];
@@ -122,6 +206,25 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
 	}
 }
 
+static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
+{
+	struct device *dev = ndev_to_dev(buf_pool->ndev);
+	dma_addr_t dma_addr;
+	struct page *page;
+	int i;
+
+	/* Free up the buffers held by hardware */
+	for (i = 0; i < buf_pool->slots; i++) {
+		page = buf_pool->frag_page[i];
+		if (page) {
+			dma_addr = buf_pool->frag_dma_addr[i];
+			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+			put_page(page);
+		}
+	}
+}
+
 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
 {
 	struct xgene_enet_desc_ring *rx_ring = data;
@@ -515,23 +618,66 @@ static void xgene_enet_skip_csum(struct sk_buff *skb)
 	}
 }
 
+static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
+				     struct xgene_enet_raw_desc *raw_desc,
+				     struct xgene_enet_raw_desc *exp_desc)
+{
+	__le64 *desc = (void *)exp_desc;
+	dma_addr_t dma_addr;
+	struct device *dev;
+	struct page *page;
+	u16 slots, head;
+	u32 frag_size;
+	int i;
+
+	if (!buf_pool || !raw_desc || !exp_desc ||
+	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
+		return;
+
+	dev = ndev_to_dev(buf_pool->ndev);
+	slots = buf_pool->slots - 1;
+	head = buf_pool->head;
+
+	for (i = 0; i < 4; i++) {
+		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+		if (!frag_size)
+			break;
+
+		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+		page = buf_pool->frag_page[head];
+		put_page(page);
+
+		buf_pool->frag_page[head] = NULL;
+		head = (head + 1) & slots;
+	}
+	buf_pool->head = head;
+}
 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
-			       struct xgene_enet_raw_desc *raw_desc)
+			       struct xgene_enet_raw_desc *raw_desc,
+			       struct xgene_enet_raw_desc *exp_desc)
 {
+	struct xgene_enet_desc_ring *buf_pool, *page_pool;
+	u32 datalen, frag_size, skb_index;
 	struct net_device *ndev;
-	struct device *dev;
-	struct xgene_enet_desc_ring *buf_pool;
-	u32 datalen, skb_index;
+	dma_addr_t dma_addr;
 	struct sk_buff *skb;
+	struct device *dev;
+	struct page *page;
+	u16 slots, head;
+	int i, ret = 0;
+	__le64 *desc;
 	u8 status;
-	int ret = 0;
+	bool nv;
 
 	ndev = rx_ring->ndev;
 	dev = ndev_to_dev(rx_ring->ndev);
 	buf_pool = rx_ring->buf_pool;
+	page_pool = rx_ring->page_pool;
 
 	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
-			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
+			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
 
 	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
 	skb = buf_pool->rx_skb[skb_index];
 	buf_pool->rx_skb[skb_index] = NULL;
@@ -541,6 +687,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
 	       GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
 	if (unlikely(status > 2)) {
 		dev_kfree_skb_any(skb);
+		xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
 		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
 				       status);
 		ret = -EIO;
@@ -548,11 +695,44 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
 	}
 
 	/* strip off CRC as HW isn't doing this */
-	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
-	datalen = (datalen & DATALEN_MASK) - 4;
-	prefetch(skb->data - NET_IP_ALIGN);
+	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+	if (!nv)
+		datalen -= 4;
+
 	skb_put(skb, datalen);
+	prefetch(skb->data - NET_IP_ALIGN);
+
+	if (!nv)
+		goto skip_jumbo;
+
+	slots = page_pool->slots - 1;
+	head = page_pool->head;
+	desc = (void *)exp_desc;
+
+	for (i = 0; i < 4; i++) {
+		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
+		if (!frag_size)
+			break;
+
+		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
+		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+		page = page_pool->frag_page[head];
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
+				frag_size, PAGE_SIZE);
+
+		datalen += frag_size;
+
+		page_pool->frag_page[head] = NULL;
+		head = (head + 1) & slots;
+	}
+
+	page_pool->head = head;
+	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;
+
+skip_jumbo:
 	skb_checksum_none_assert(skb);
 	skb->protocol = eth_type_trans(skb, ndev);
 	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
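Note that both this receive loop and xgene_enet_free_pagepool() index the expanded descriptor as desc[i ^ 1]: XOR-ing the low bit visits the four 64-bit words in the order 1, 0, 3, 2, i.e. it swaps each adjacent pair, which is consistent with the two 64-bit halves of each 128-bit descriptor word being stored exchanged in memory (an inference from the code, not stated in the commit). A standalone sketch of just the index pattern, with placeholder values rather than real descriptors:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* four 64-bit words of one expanded descriptor (placeholder data) */
	uint64_t desc[4] = { 0xAA, 0xBB, 0xCC, 0xDD };
	int i;

	for (i = 0; i < 4; i++)
		printf("iteration %d reads desc[%d] = 0x%llX\n",
		       i, i ^ 1, (unsigned long long)desc[i ^ 1]);
	/* order visited: desc[1], desc[0], desc[3], desc[2] */
	return 0;
}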
@@ -563,7 +743,15 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
 	rx_ring->rx_packets++;
 	rx_ring->rx_bytes += datalen;
 	napi_gro_receive(&rx_ring->napi, skb);
+
 out:
+	if (rx_ring->npagepool <= 0) {
+		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
+		rx_ring->npagepool = NUM_NXTBUFPOOL;
+		if (ret)
+			return ret;
+	}
+
 	if (--rx_ring->nbufpool == 0) {
 		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
 		rx_ring->nbufpool = NUM_BUFPOOL;
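The accounting at out: refills the pools in batches rather than per packet: nbufpool counts down from NUM_BUFPOOL (32) one head buffer per frame, while npagepool is decremented by the number of page fragments the frame consumed, so descriptors are replenished 32 skb buffers or 8 pages at a time. A minimal userspace sketch of the same counter pattern; refill_bufpool()/refill_pagepool() here are hypothetical stand-ins for the driver's refill calls:

#include <stdio.h>

#define NUM_BUFPOOL    32  /* values from xgene_enet_main.h */
#define NUM_NXTBUFPOOL  8

static int nbufpool = NUM_BUFPOOL;
static int npagepool = NUM_NXTBUFPOOL;

/* stand-ins for xgene_enet_refill_bufpool()/xgene_enet_refill_pagepool() */
static void refill_bufpool(int n)  { printf("refill %d skb buffers\n", n); }
static void refill_pagepool(int n) { printf("refill %d pages\n", n); }

static void rx_frame(int frags_used)
{
	npagepool -= frags_used;       /* pages consumed by this frame */
	if (npagepool <= 0) {
		refill_pagepool(NUM_NXTBUFPOOL);
		npagepool = NUM_NXTBUFPOOL;
	}

	if (--nbufpool == 0) {         /* one head buffer per frame */
		refill_bufpool(NUM_BUFPOOL);
		nbufpool = NUM_BUFPOOL;
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 64; i++)
		rx_frame(i % 3);       /* pretend frames use 0-2 fragments */
	return 0;
}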
@@ -611,7 +799,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 			desc_count++;
 		}
 		if (is_rx_desc(raw_desc)) {
-			ret = xgene_enet_rx_frame(ring, raw_desc);
+			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
 		} else {
 			ret = xgene_enet_tx_completion(ring, raw_desc);
 			is_completion = true;
@@ -854,7 +1042,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
 
 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
 {
-	struct xgene_enet_desc_ring *buf_pool;
+	struct xgene_enet_desc_ring *buf_pool, *page_pool;
 	struct xgene_enet_desc_ring *ring;
 	int i;
 
@@ -867,18 +1055,28 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
 			xgene_enet_delete_ring(ring->cp_ring);
 			pdata->tx_ring[i] = NULL;
 		}
 	}
 
 	for (i = 0; i < pdata->rxq_cnt; i++) {
 		ring = pdata->rx_ring[i];
 		if (ring) {
+			page_pool = ring->page_pool;
+			if (page_pool) {
+				xgene_enet_delete_pagepool(page_pool);
+				xgene_enet_delete_ring(page_pool);
+				pdata->port_ops->clear(pdata, page_pool);
+			}
+
 			buf_pool = ring->buf_pool;
 			xgene_enet_delete_bufpool(buf_pool);
 			xgene_enet_delete_ring(buf_pool);
 			pdata->port_ops->clear(pdata, buf_pool);
+
 			xgene_enet_delete_ring(ring);
 			pdata->rx_ring[i] = NULL;
 		}
 	}
 }
@@ -931,8 +1129,10 @@ static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
 
 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
 {
+	struct xgene_enet_desc_ring *page_pool;
 	struct device *dev = &pdata->pdev->dev;
 	struct xgene_enet_desc_ring *ring;
+	void *p;
 	int i;
 
 	for (i = 0; i < pdata->txq_cnt; i++) {
@@ -940,10 +1140,13 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
 		if (ring) {
 			if (ring->cp_ring && ring->cp_ring->cp_skb)
 				devm_kfree(dev, ring->cp_ring->cp_skb);
+
 			if (ring->cp_ring && pdata->cq_cnt)
 				xgene_enet_free_desc_ring(ring->cp_ring);
+
 			xgene_enet_free_desc_ring(ring);
 		}
 	}
 
 	for (i = 0; i < pdata->rxq_cnt; i++) {
@@ -952,8 +1155,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
 			if (ring->buf_pool) {
 				if (ring->buf_pool->rx_skb)
 					devm_kfree(dev, ring->buf_pool->rx_skb);
+
 				xgene_enet_free_desc_ring(ring->buf_pool);
 			}
+
+			page_pool = ring->page_pool;
+			if (page_pool) {
+				p = page_pool->frag_page;
+				if (p)
+					devm_kfree(dev, p);
+
+				p = page_pool->frag_dma_addr;
+				if (p)
+					devm_kfree(dev, p);
+			}
+
 			xgene_enet_free_desc_ring(ring);
 		}
 	}
@@ -1071,19 +1287,20 @@ static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
 
 static int xgene_enet_create_desc_rings(struct net_device *ndev)
 {
-	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
-	struct device *dev = ndev_to_dev(ndev);
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct xgene_enet_desc_ring *page_pool = NULL;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
-	enum xgene_ring_owner owner;
-	dma_addr_t dma_exp_bufs;
-	u8 cpu_bufnum;
+	struct device *dev = ndev_to_dev(ndev);
 	u8 eth_bufnum = pdata->eth_bufnum;
 	u8 bp_bufnum = pdata->bp_bufnum;
 	u16 ring_num = pdata->ring_num;
+	enum xgene_ring_owner owner;
+	dma_addr_t dma_exp_bufs;
+	u16 ring_id, slots;
 	__le64 *exp_bufs;
-	u16 ring_id;
 	int i, ret, size;
+	u8 cpu_bufnum;
 
 	cpu_bufnum = xgene_start_cpu_bufnum(pdata);
 
@@ -1103,7 +1320,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 		owner = xgene_derive_ring_owner(pdata);
 		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
 		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
-						       RING_CFGSIZE_2KB,
+						       RING_CFGSIZE_16KB,
 						       ring_id);
 		if (!buf_pool) {
 			ret = -ENOMEM;
@@ -1111,7 +1328,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 		}
 
 		rx_ring->nbufpool = NUM_BUFPOOL;
-		rx_ring->buf_pool = buf_pool;
+		rx_ring->npagepool = NUM_NXTBUFPOOL;
 		rx_ring->irq = pdata->irqs[i];
 		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
 						sizeof(struct sk_buff *),
@@ -1124,6 +1341,42 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
 		rx_ring->buf_pool = buf_pool;
 		pdata->rx_ring[i] = rx_ring;
+
+		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
+		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
+			break;
+		}
+
+		/* allocate next buffer pool for jumbo packets */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
+		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+							RING_CFGSIZE_16KB,
+							ring_id);
+		if (!page_pool) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		slots = page_pool->slots;
+		page_pool->frag_page = devm_kcalloc(dev, slots,
+						    sizeof(struct page *),
+						    GFP_KERNEL);
+		if (!page_pool->frag_page) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
+							sizeof(dma_addr_t),
+							GFP_KERNEL);
+		if (!page_pool->frag_dma_addr) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
+		rx_ring->page_pool = page_pool;
 	}
 
 	for (i = 0; i < pdata->txq_cnt; i++) {
@@ -1523,6 +1776,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	struct xgene_enet_desc_ring *buf_pool;
 	u16 dst_ring_num, ring_id;
 	int i, ret;
+	u32 count;
 
 	ret = pdata->port_ops->reset(pdata);
 	if (ret)
@@ -1538,9 +1792,18 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	for (i = 0; i < pdata->rxq_cnt; i++) {
 		buf_pool = pdata->rx_ring[i]->buf_pool;
+		page_pool = pdata->rx_ring[i]->page_pool;
 
 		xgene_enet_init_bufpool(buf_pool);
-		ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
+		xgene_enet_init_bufpool(page_pool);
+
+		count = pdata->rx_buff_cnt;
+		ret = xgene_enet_refill_bufpool(buf_pool, count);
 		if (ret)
 			goto err;
+
+		ret = xgene_enet_refill_pagepool(page_pool, count);
+		if (ret)
+			goto err;
 	}
 
 	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
@@ -41,11 +41,14 @@
 #include "../../../phy/mdio-xgene.h"
 
 #define XGENE_DRV_VERSION	"v1.0"
-#define XGENE_ENET_MAX_MTU	1536
-#define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define XGENE_ENET_STD_MTU	1536
+#define XGENE_ENET_MAX_MTU	9600
+#define SKB_BUFFER_SIZE		(XGENE_ENET_STD_MTU - NET_IP_ALIGN)
+
 #define BUFLEN_16K	(16 * 1024)
-#define NUM_PKT_BUF	64
+#define NUM_PKT_BUF	1024
 #define NUM_BUFPOOL	32
+#define NUM_NXTBUFPOOL	8
 #define MAX_EXP_BUFFS	256
 #define NUM_MSS_REG	4
 #define XGENE_MIN_ENET_FRAME_SIZE	60
@@ -88,6 +91,12 @@ enum xgene_enet_id {
 	XGENE_ENET2
 };
 
+enum xgene_enet_buf_len {
+	SIZE_2K = 2048,
+	SIZE_4K = 4096,
+	SIZE_16K = 16384
+};
+
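For a sense of scale: with XGENE_ENET_MAX_MTU raised to 9600, a maximal jumbo frame arrives as one head buffer from the 2K-class skb pool plus page fragments from the new next buffer pool, and the receive path walks at most four fragments per frame. A quick standalone check that four pages always suffice; the 4 KiB PAGE_SIZE and NET_IP_ALIGN == 2 used here are assumptions about the platform, not values defined in this header:

#include <stdio.h>

int main(void)
{
	const unsigned int head  = 1536 - 2; /* SKB_BUFFER_SIZE: STD_MTU - NET_IP_ALIGN */
	const unsigned int page  = 4096;     /* assumed PAGE_SIZE */
	const unsigned int jumbo = 9600;     /* XGENE_ENET_MAX_MTU */
	unsigned int rest  = jumbo > head ? jumbo - head : 0;
	unsigned int frags = (rest + page - 1) / page;  /* ceil(rest / page) */

	/* prints 2, comfortably within the four exp_desc fragments */
	printf("fragments needed for a %u-byte frame: %u\n", jumbo, frags);
	return 0;
}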
@@ -107,11 +116,14 @@ struct xgene_enet_desc_ring {
 	dma_addr_t irq_mbox_dma;
 	void *irq_mbox_addr;
 	u16 dst_ring_num;
-	u8 nbufpool;
+	u16 nbufpool;
+	int npagepool;
 	u8 index;
+	u32 flags;
 	struct sk_buff *(*rx_skb);
 	struct sk_buff *(*cp_skb);
 	dma_addr_t *frag_dma_addr;
+	struct page *(*frag_page);
 	enum xgene_enet_ring_cfgsize cfgsize;
 	struct xgene_enet_desc_ring *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool;
@@ -119,6 +119,7 @@ static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
 	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
 	ring_id_buf |= PREFETCH_BUF_EN;
+
 	if (is_bufpool)
 		ring_id_buf |= IS_BUFFER_POOL;
@@ -536,6 +536,9 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
 	for (i = 0; i < p->rxq_cnt; i++) {
 		ring = p->rx_ring[i]->buf_pool;
 		pb |= BIT(xgene_enet_get_fpsel(ring->id));
+		ring = p->rx_ring[i]->page_pool;
+		if (ring)
+			pb |= BIT(xgene_enet_get_fpsel(ring->id));
 	}
 	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
@@ -367,6 +367,7 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
 	CFG_CLE_FPSEL0_SET(&cb, fpsel);
 	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
 	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
+	pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
 }
 
 static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
@@ -380,6 +381,9 @@ static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
 	for (i = 0; i < pdata->rxq_cnt; i++) {
 		ring = pdata->rx_ring[i]->buf_pool;
 		pb |= BIT(xgene_enet_get_fpsel(ring->id));
+		ring = pdata->rx_ring[i]->page_pool;
+		if (ring)
+			pb |= BIT(xgene_enet_get_fpsel(ring->id));
 	}
 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);