Commit 107dec27 authored by Iyappan Subramanian, committed by David S. Miller

drivers: net: xgene: Add support for multiple queues
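
This patch moves the driver from a single Rx/Tx ring pair to per-channel ring arrays: pdata->rx_ring and pdata->tx_ring become arrays sized by XGENE_NUM_RX_RING/XGENE_NUM_TX_RING, the Tx path selects a ring from skb->queue_mapping, RSS indirection-table entries are spread across the Rx queues, each ring gets its own IRQ via pdata->irqs[], interrupt coalescing is programmed through a new ring_ops->coalesce hook, and the netdev is allocated with alloc_etherdev_mqs().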

Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
Signed-off-by: Khuong Dinh <kdinh@apm.com>
Signed-off-by: Tanmay Inamdar <tinamdar@apm.com>
Tested-by: Toan Le <toanle@apm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent fc4262d2
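For orientation, here is a minimal, self-contained sketch (not the driver source; the types and names below are simplified stand-ins) of the two per-queue lookups this patch introduces: Tx ring selection from the skb's queue mapping, and RSS indirection entries distributed round-robin across the Rx rings.

/* Minimal sketch, assuming simplified stand-in types for the
 * xgene_enet_pdata fields this patch turns into per-queue arrays. */
#include <stdint.h>

#define NUM_RX_RING 4			/* mirrors XGENE_NUM_RX_RING */
#define NUM_TX_RING 4			/* mirrors XGENE_NUM_TX_RING */

struct ring { uint16_t id; };

struct pdata {
	struct ring *rx_ring[NUM_RX_RING];	/* was a single rx_ring pointer */
	struct ring *tx_ring[NUM_TX_RING];	/* was a single tx_ring pointer */
	uint16_t tx_level[NUM_TX_RING];		/* per-queue Tx fill accounting */
	uint16_t txc_level[NUM_TX_RING];	/* per-queue Tx completion accounting */
	uint8_t rxq_cnt, txq_cnt;
};

/* Tx path: the queue chosen by the stack (skb->queue_mapping) picks the
 * ring, which is what xgene_enet_start_xmit() does after this change. */
static struct ring *pick_tx_ring(struct pdata *p, int queue_mapping)
{
	return p->tx_ring[queue_mapping];
}

/* RSS: indirection-table entry i targets Rx queue (i % rxq_cnt), the
 * distribution xgene_cle_set_rss_idt() programs below. */
static struct ring *rss_target(struct pdata *p, int idt_entry)
{
	return p->rx_ring[idt_entry % p->rxq_cnt];
}

The diff hunks follow.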
@@ -331,14 +331,15 @@ static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
 static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
 {
-	u32 fpsel, dstqid, nfpsel, idt_reg;
+	u32 fpsel, dstqid, nfpsel, idt_reg, idx;
 	int i, ret = 0;
 	u16 pool_id;
 
 	for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
-		pool_id = pdata->rx_ring->buf_pool->id;
+		idx = i % pdata->rxq_cnt;
+		pool_id = pdata->rx_ring[idx]->buf_pool->id;
 		fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
-		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring);
+		dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
 		nfpsel = 0;
 		idt_reg = 0;
@@ -695,8 +696,8 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
 			br->mask = 0xffff;
 		}
 
-	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring);
-	pool_id = pdata->rx_ring->buf_pool->id;
+	def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
+	pool_id = pdata->rx_ring[0]->buf_pool->id;
 	def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20;
 
 	memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS);
......
@@ -204,6 +204,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
 	return num_msgs;
 }
 
+static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
+{
+	u32 data = 0x7777;
+
+	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+}
+
 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
 			    struct xgene_enet_pdata *pdata,
 			    enum xgene_enet_err_code status)
@@ -892,4 +903,5 @@ struct xgene_ring_ops xgene_ring1_ops = {
 	.clear = xgene_enet_clear_ring,
 	.wr_cmd = xgene_enet_wr_cmd,
 	.len = xgene_enet_ring_len,
+	.coalesce = xgene_enet_setup_coalescing,
 };
@@ -54,6 +54,11 @@ enum xgene_enet_rm {
 #define IS_BUFFER_POOL		BIT(20)
 #define PREFETCH_BUF_EN		BIT(21)
 #define CSR_RING_ID_BUF		0x000c
+#define CSR_PBM_COAL		0x0014
+#define CSR_PBM_CTICK1		0x001c
+#define CSR_PBM_CTICK2		0x0020
+#define CSR_THRESHOLD0_SET1	0x0030
+#define CSR_THRESHOLD1_SET1	0x0034
 #define CSR_RING_NE_INT_MODE	0x017c
 #define CSR_RING_CONFIG		0x006c
 #define CSR_RING_WR_BASE	0x0070
......
@@ -182,7 +182,6 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
 static u64 xgene_enet_work_msg(struct sk_buff *skb)
 {
 	struct net_device *ndev = skb->dev;
-	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct iphdr *iph;
 	u8 l3hlen = 0, l4hlen = 0;
 	u8 ethhdr, proto = 0, csum_enable = 0;
@@ -228,10 +227,6 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb)
 			if (!mss || ((skb->len - hdr_len) <= mss))
 				goto out;
 
-			if (mss != pdata->mss) {
-				pdata->mss = mss;
-				pdata->mac_ops->set_mss(pdata);
-			}
 			hopinfo |= SET_BIT(ET);
 		}
 	} else if (iph->protocol == IPPROTO_UDP) {
@@ -413,7 +408,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
 	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
 				   SET_VAL(USERINFO, tx_ring->tail));
 	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
-	pdata->tx_level += count;
+	pdata->tx_level[tx_ring->cp_ring->index] += count;
 	tx_ring->tail = tail;
 
 	return count;
@@ -423,15 +418,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 					 struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
-	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
-	u32 tx_level = pdata->tx_level;
+	struct xgene_enet_desc_ring *tx_ring;
+	int index = skb->queue_mapping;
+	u32 tx_level = pdata->tx_level[index];
 	int count;
 
-	if (tx_level < pdata->txc_level)
-		tx_level += ((typeof(pdata->tx_level))~0U);
+	tx_ring = pdata->tx_ring[index];
+	if (tx_level < pdata->txc_level[index])
+		tx_level += ((typeof(pdata->tx_level[index]))~0U);
 
-	if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
-		netif_stop_queue(ndev);
+	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
+		netif_stop_subqueue(ndev, index);
 		return NETDEV_TX_BUSY;
 	}
@@ -529,7 +526,8 @@ static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 				   int budget)
 {
-	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+	struct net_device *ndev = ring->ndev;
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
 	u16 head = ring->head;
 	u16 slots = ring->slots - 1;
@@ -573,7 +571,7 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 		desc_count++;
 		processed++;
 		if (is_completion)
-			pdata->txc_level += desc_count;
+			pdata->txc_level[ring->index] += desc_count;
 
 		if (ret)
 			break;
@@ -583,8 +581,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
 		pdata->ring_ops->wr_cmd(ring, -count);
 		ring->head = head;
 
-		if (netif_queue_stopped(ring->ndev))
-			netif_start_queue(ring->ndev);
+		if (__netif_subqueue_stopped(ndev, ring->index))
+			netif_start_subqueue(ndev, ring->index);
 	}
 
 	return processed;
@@ -609,8 +607,16 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
 static void xgene_enet_timeout(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+	struct netdev_queue *txq;
+	int i;
 
 	pdata->mac_ops->reset(pdata);
+
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		txq = netdev_get_tx_queue(ndev, i);
+		txq->trans_start = jiffies;
+		netif_tx_start_queue(txq);
+	}
 }
 
 static int xgene_enet_register_irq(struct net_device *ndev)
@@ -618,17 +624,21 @@ static int xgene_enet_register_irq(struct net_device *ndev)
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct device *dev = ndev_to_dev(ndev);
 	struct xgene_enet_desc_ring *ring;
-	int ret;
+	int ret = 0, i;
 
-	ring = pdata->rx_ring;
-	irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
-	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-			       IRQF_SHARED, ring->irq_name, ring);
-	if (ret)
-		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
+				       IRQF_SHARED, ring->irq_name, ring);
+		if (ret) {
+			netdev_err(ndev, "Failed to request irq %s\n",
+				   ring->irq_name);
+		}
+	}
 
-	if (pdata->cq_cnt) {
-		ring = pdata->tx_ring->cp_ring;
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		ring = pdata->tx_ring[i]->cp_ring;
 		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
 				       IRQF_SHARED, ring->irq_name, ring);
@@ -646,15 +656,19 @@ static void xgene_enet_free_irq(struct net_device *ndev)
 	struct xgene_enet_pdata *pdata;
 	struct xgene_enet_desc_ring *ring;
 	struct device *dev;
+	int i;
 
 	pdata = netdev_priv(ndev);
 	dev = ndev_to_dev(ndev);
-	ring = pdata->rx_ring;
-	irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
-	devm_free_irq(dev, ring->irq, ring);
 
-	if (pdata->cq_cnt) {
-		ring = pdata->tx_ring->cp_ring;
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+		devm_free_irq(dev, ring->irq, ring);
+	}
+
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		ring = pdata->tx_ring[i]->cp_ring;
 		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
 		devm_free_irq(dev, ring->irq, ring);
 	}
@@ -663,12 +677,15 @@ static void xgene_enet_free_irq(struct net_device *ndev)
 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
 {
 	struct napi_struct *napi;
+	int i;
 
-	napi = &pdata->rx_ring->napi;
-	napi_enable(napi);
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		napi = &pdata->rx_ring[i]->napi;
+		napi_enable(napi);
+	}
 
-	if (pdata->cq_cnt) {
-		napi = &pdata->tx_ring->cp_ring->napi;
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		napi = &pdata->tx_ring[i]->cp_ring->napi;
 		napi_enable(napi);
 	}
 }
@@ -676,12 +693,15 @@ static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
 {
 	struct napi_struct *napi;
+	int i;
 
-	napi = &pdata->rx_ring->napi;
-	napi_disable(napi);
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		napi = &pdata->rx_ring[i]->napi;
+		napi_disable(napi);
+	}
 
-	if (pdata->cq_cnt) {
-		napi = &pdata->tx_ring->cp_ring->napi;
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		napi = &pdata->tx_ring[i]->cp_ring->napi;
 		napi_disable(napi);
 	}
 }
@@ -692,6 +712,14 @@ static int xgene_enet_open(struct net_device *ndev)
 	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
 	int ret;
 
+	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
+	if (ret)
+		return ret;
+
+	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
+	if (ret)
+		return ret;
+
 	mac_ops->tx_enable(pdata);
 	mac_ops->rx_enable(pdata);
@@ -714,6 +742,7 @@ static int xgene_enet_close(struct net_device *ndev)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
+	int i;
 
 	netif_stop_queue(ndev);
@@ -727,7 +756,8 @@ static int xgene_enet_close(struct net_device *ndev)
 	xgene_enet_free_irq(ndev);
 	xgene_enet_napi_disable(pdata);
-	xgene_enet_process_ring(pdata->rx_ring, -1);
+	for (i = 0; i < pdata->rxq_cnt; i++)
+		xgene_enet_process_ring(pdata->rx_ring[i], -1);
 
 	return 0;
 }
@@ -747,18 +777,26 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
 {
 	struct xgene_enet_desc_ring *buf_pool;
+	struct xgene_enet_desc_ring *ring;
+	int i;
 
-	if (pdata->tx_ring) {
-		xgene_enet_delete_ring(pdata->tx_ring);
-		pdata->tx_ring = NULL;
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		ring = pdata->tx_ring[i];
+		if (ring) {
+			xgene_enet_delete_ring(ring);
+			pdata->tx_ring[i] = NULL;
+		}
 	}
 
-	if (pdata->rx_ring) {
-		buf_pool = pdata->rx_ring->buf_pool;
-		xgene_enet_delete_bufpool(buf_pool);
-		xgene_enet_delete_ring(buf_pool);
-		xgene_enet_delete_ring(pdata->rx_ring);
-		pdata->rx_ring = NULL;
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (ring) {
+			buf_pool = ring->buf_pool;
+			xgene_enet_delete_bufpool(buf_pool);
+			xgene_enet_delete_ring(buf_pool);
+			xgene_enet_delete_ring(ring);
+			pdata->rx_ring[i] = NULL;
+		}
 	}
 }
@@ -813,24 +851,29 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
 {
 	struct device *dev = &pdata->pdev->dev;
 	struct xgene_enet_desc_ring *ring;
+	int i;
 
-	ring = pdata->tx_ring;
-	if (ring) {
-		if (ring->cp_ring && ring->cp_ring->cp_skb)
-			devm_kfree(dev, ring->cp_ring->cp_skb);
-		if (ring->cp_ring && pdata->cq_cnt)
-			xgene_enet_free_desc_ring(ring->cp_ring);
-		xgene_enet_free_desc_ring(ring);
-	}
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		ring = pdata->tx_ring[i];
+		if (ring) {
+			if (ring->cp_ring && ring->cp_ring->cp_skb)
+				devm_kfree(dev, ring->cp_ring->cp_skb);
+			if (ring->cp_ring && pdata->cq_cnt)
+				xgene_enet_free_desc_ring(ring->cp_ring);
+			xgene_enet_free_desc_ring(ring);
+		}
+	}
 
-	ring = pdata->rx_ring;
-	if (ring) {
-		if (ring->buf_pool) {
-			if (ring->buf_pool->rx_skb)
-				devm_kfree(dev, ring->buf_pool->rx_skb);
-			xgene_enet_free_desc_ring(ring->buf_pool);
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		ring = pdata->rx_ring[i];
+		if (ring) {
+			if (ring->buf_pool) {
+				if (ring->buf_pool->rx_skb)
+					devm_kfree(dev, ring->buf_pool->rx_skb);
+				xgene_enet_free_desc_ring(ring->buf_pool);
+			}
+			xgene_enet_free_desc_ring(ring);
 		}
-		xgene_enet_free_desc_ring(ring);
 	}
 }
@@ -943,104 +986,120 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	u8 bp_bufnum = pdata->bp_bufnum;
 	u16 ring_num = pdata->ring_num;
 	u16 ring_id;
-	int ret, size;
-
-	/* allocate rx descriptor ring */
-	owner = xgene_derive_ring_owner(pdata);
-	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
-	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
-					      RING_CFGSIZE_16KB, ring_id);
-	if (!rx_ring) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	/* allocate buffer pool for receiving packets */
-	owner = xgene_derive_ring_owner(pdata);
-	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
-	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
-					       RING_CFGSIZE_2KB, ring_id);
-	if (!buf_pool) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	int i, ret, size;
 
-	rx_ring->nbufpool = NUM_BUFPOOL;
-	rx_ring->buf_pool = buf_pool;
-	rx_ring->irq = pdata->rx_irq;
-	if (!pdata->cq_cnt) {
-		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
-			 ndev->name);
-	} else {
-		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
-	}
-	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
-					sizeof(struct sk_buff *), GFP_KERNEL);
-	if (!buf_pool->rx_skb) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		/* allocate rx descriptor ring */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
+		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+						      RING_CFGSIZE_16KB,
+						      ring_id);
+		if (!rx_ring) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
-	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
-	rx_ring->buf_pool = buf_pool;
-	pdata->rx_ring = rx_ring;
+		/* allocate buffer pool for receiving packets */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
+		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
+						       RING_CFGSIZE_2KB,
+						       ring_id);
+		if (!buf_pool) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
-	/* allocate tx descriptor ring */
-	owner = xgene_derive_ring_owner(pdata);
-	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
-	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
-					      RING_CFGSIZE_16KB, ring_id);
-	if (!tx_ring) {
-		ret = -ENOMEM;
-		goto err;
-	}
+		rx_ring->nbufpool = NUM_BUFPOOL;
+		rx_ring->buf_pool = buf_pool;
+		rx_ring->irq = pdata->irqs[i];
+		if (!pdata->cq_cnt) {
+			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+				 ndev->name);
+		} else {
+			snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
+				 ndev->name, i);
+		}
+		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
+						sizeof(struct sk_buff *),
+						GFP_KERNEL);
+		if (!buf_pool->rx_skb) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
-	size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
-	tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
-						GFP_KERNEL);
-	if (!tx_ring->exp_bufs) {
-		ret = -ENOMEM;
-		goto err;
-	}
+		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
+		rx_ring->buf_pool = buf_pool;
+		pdata->rx_ring[i] = rx_ring;
+	}
 
-	pdata->tx_ring = tx_ring;
+	for (i = 0; i < pdata->txq_cnt; i++) {
+		/* allocate tx descriptor ring */
+		owner = xgene_derive_ring_owner(pdata);
+		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
+		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+						      RING_CFGSIZE_16KB,
+						      ring_id);
+		if (!tx_ring) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
-	if (!pdata->cq_cnt) {
-		cp_ring = pdata->rx_ring;
-	} else {
-		/* allocate tx completion descriptor ring */
-		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
-		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
-						      RING_CFGSIZE_16KB,
-						      ring_id);
-		if (!cp_ring) {
-			ret = -ENOMEM;
-			goto err;
-		}
-		cp_ring->irq = pdata->txc_irq;
-		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
-	}
+		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+		tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
+							&dma_exp_bufs,
+							GFP_KERNEL);
+		if (!tx_ring->exp_bufs) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
-	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
-				       sizeof(struct sk_buff *), GFP_KERNEL);
-	if (!cp_ring->cp_skb) {
-		ret = -ENOMEM;
-		goto err;
-	}
+		pdata->tx_ring[i] = tx_ring;
 
-	size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
-	cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
-					      size, GFP_KERNEL);
-	if (!cp_ring->frag_dma_addr) {
-		devm_kfree(dev, cp_ring->cp_skb);
-		ret = -ENOMEM;
-		goto err;
-	}
+		if (!pdata->cq_cnt) {
+			cp_ring = pdata->rx_ring[i];
+		} else {
+			/* allocate tx completion descriptor ring */
+			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
+							 cpu_bufnum++);
+			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
+							      RING_CFGSIZE_16KB,
+							      ring_id);
+			if (!cp_ring) {
+				ret = -ENOMEM;
+				goto err;
+			}
+
+			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
+			cp_ring->index = i;
+			snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
+				 ndev->name, i);
+		}
+
+		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
+					       sizeof(struct sk_buff *),
+					       GFP_KERNEL);
+		if (!cp_ring->cp_skb) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
-	pdata->tx_ring->cp_ring = cp_ring;
-	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
+		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+						      size, GFP_KERNEL);
+		if (!cp_ring->frag_dma_addr) {
+			devm_kfree(dev, cp_ring->cp_skb);
+			ret = -ENOMEM;
+			goto err;
+		}
 
-	pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
+		tx_ring->cp_ring = cp_ring;
+		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
+	}
+
+	pdata->ring_ops->coalesce(pdata->tx_ring[0]);
+	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
 
 	return 0;
@@ -1159,6 +1218,32 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
 	return 0;
 }
 
+static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
+{
+	struct platform_device *pdev = pdata->pdev;
+	struct device *dev = &pdev->dev;
+	int i, ret, max_irqs;
+
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		max_irqs = 1;
+	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
+		max_irqs = 2;
+	else
+		max_irqs = XGENE_MAX_ENET_IRQ;
+
+	for (i = 0; i < max_irqs; i++) {
+		ret = platform_get_irq(pdev, i);
+		if (ret <= 0) {
+			dev_err(dev, "Unable to get ENET IRQ\n");
+			ret = ret ? : -ENXIO;
+			return ret;
+		}
+		pdata->irqs[i] = ret;
+	}
+
+	return 0;
+}
+
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
 	struct platform_device *pdev;
@@ -1240,25 +1325,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 	if (ret)
 		return ret;
 
-	ret = platform_get_irq(pdev, 0);
-	if (ret <= 0) {
-		dev_err(dev, "Unable to get ENET Rx IRQ\n");
-		ret = ret ? : -ENXIO;
+	ret = xgene_enet_get_irqs(pdata);
+	if (ret)
 		return ret;
-	}
-	pdata->rx_irq = ret;
-
-	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
-		ret = platform_get_irq(pdev, 1);
-		if (ret <= 0) {
-			pdata->cq_cnt = 0;
-			dev_info(dev, "Unable to get Tx completion IRQ,"
-				 "using Rx IRQ instead\n");
-		} else {
-			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
-			pdata->txc_irq = ret;
-		}
-	}
 
 	pdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pdata->clk)) {
@@ -1296,7 +1365,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	struct net_device *ndev = pdata->ndev;
 	struct xgene_enet_desc_ring *buf_pool;
 	u16 dst_ring_num;
-	int ret;
+	int i, ret;
 
 	ret = pdata->port_ops->reset(pdata);
 	if (ret)
} }
/* setup buffer pool */ /* setup buffer pool */
buf_pool = pdata->rx_ring->buf_pool; for (i = 0; i < pdata->rxq_cnt; i++) {
xgene_enet_init_bufpool(buf_pool); buf_pool = pdata->rx_ring[i]->buf_pool;
ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); xgene_enet_init_bufpool(buf_pool);
if (ret) { ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
xgene_enet_delete_desc_rings(pdata); if (ret) {
return ret; xgene_enet_delete_desc_rings(pdata);
return ret;
}
} }
dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
buf_pool = pdata->rx_ring[0]->buf_pool;
if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
/* Initialize and Enable PreClassifier Tree */ /* Initialize and Enable PreClassifier Tree */
enet_cle->max_nodes = 512; enet_cle->max_nodes = 512;
@@ -1348,17 +1420,26 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
 		pdata->mac_ops = &xgene_gmac_ops;
 		pdata->port_ops = &xgene_gport_ops;
 		pdata->rm = RM3;
+		pdata->rxq_cnt = 1;
+		pdata->txq_cnt = 1;
+		pdata->cq_cnt = 0;
 		break;
 	case PHY_INTERFACE_MODE_SGMII:
 		pdata->mac_ops = &xgene_sgmac_ops;
 		pdata->port_ops = &xgene_sgport_ops;
 		pdata->rm = RM1;
+		pdata->rxq_cnt = 1;
+		pdata->txq_cnt = 1;
+		pdata->cq_cnt = 1;
 		break;
 	default:
 		pdata->mac_ops = &xgene_xgmac_ops;
 		pdata->port_ops = &xgene_xgport_ops;
 		pdata->cle_ops = &xgene_cle3in_ops;
 		pdata->rm = RM0;
+		pdata->rxq_cnt = XGENE_NUM_RX_RING;
+		pdata->txq_cnt = XGENE_NUM_TX_RING;
+		pdata->cq_cnt = XGENE_NUM_TXC_RING;
 		break;
 	}
@@ -1412,12 +1493,16 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
 {
 	struct napi_struct *napi;
+	int i;
 
-	napi = &pdata->rx_ring->napi;
-	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		napi = &pdata->rx_ring[i]->napi;
+		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
+			       NAPI_POLL_WEIGHT);
+	}
 
-	if (pdata->cq_cnt) {
-		napi = &pdata->tx_ring->cp_ring->napi;
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		napi = &pdata->tx_ring[i]->cp_ring->napi;
 		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
 			       NAPI_POLL_WEIGHT);
 	}
@@ -1426,12 +1511,15 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
 static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
 {
 	struct napi_struct *napi;
+	int i;
 
-	napi = &pdata->rx_ring->napi;
-	netif_napi_del(napi);
+	for (i = 0; i < pdata->rxq_cnt; i++) {
+		napi = &pdata->rx_ring[i]->napi;
+		netif_napi_del(napi);
+	}
 
-	if (pdata->cq_cnt) {
-		napi = &pdata->tx_ring->cp_ring->napi;
+	for (i = 0; i < pdata->cq_cnt; i++) {
+		napi = &pdata->tx_ring[i]->cp_ring->napi;
 		netif_napi_del(napi);
 	}
 }
@@ -1445,7 +1533,8 @@ static int xgene_enet_probe(struct platform_device *pdev)
 	const struct of_device_id *of_id;
 	int ret;
 
-	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
+	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
+				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
 	if (!ndev)
 		return -ENOMEM;
......
@@ -49,6 +49,11 @@
 #define XGENE_ENET_MSS	1448
 #define XGENE_MIN_ENET_FRAME_SIZE	60
 
+#define XGENE_MAX_ENET_IRQ	8
+#define XGENE_NUM_RX_RING	4
+#define XGENE_NUM_TX_RING	4
+#define XGENE_NUM_TXC_RING	4
+
 #define START_CPU_BUFNUM_0	0
 #define START_ETH_BUFNUM_0	2
 #define START_BP_BUFNUM_0	0x22
@@ -73,7 +78,6 @@
 #define X2_START_RING_NUM_1	256
 
 #define IRQ_ID_SIZE		16
-#define XGENE_MAX_TXC_RINGS	1
 
 #define PHY_POLL_LINK_ON	(10 * HZ)
 #define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
@@ -103,6 +107,7 @@ struct xgene_enet_desc_ring {
 	void *irq_mbox_addr;
 	u16 dst_ring_num;
 	u8 nbufpool;
+	u8 index;
 	struct sk_buff *(*rx_skb);
 	struct sk_buff *(*cp_skb);
 	dma_addr_t *frag_dma_addr;
@@ -144,6 +149,7 @@ struct xgene_ring_ops {
 	void (*clear)(struct xgene_enet_desc_ring *);
 	void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
 	u32 (*len)(struct xgene_enet_desc_ring *);
+	void (*coalesce)(struct xgene_enet_desc_ring *);
 };
 
 struct xgene_cle_ops {
@@ -159,15 +165,16 @@ struct xgene_enet_pdata {
 	struct clk *clk;
 	struct platform_device *pdev;
 	enum xgene_enet_id enet_id;
-	struct xgene_enet_desc_ring *tx_ring;
-	struct xgene_enet_desc_ring *rx_ring;
-	u16 tx_level;
-	u16 txc_level;
+	struct xgene_enet_desc_ring *tx_ring[XGENE_NUM_TX_RING];
+	struct xgene_enet_desc_ring *rx_ring[XGENE_NUM_RX_RING];
+	u16 tx_level[XGENE_NUM_TX_RING];
+	u16 txc_level[XGENE_NUM_TX_RING];
 	char *dev_name;
 	u32 rx_buff_cnt;
 	u32 tx_qcnt_hi;
-	u32 rx_irq;
-	u32 txc_irq;
+	u32 irqs[XGENE_MAX_ENET_IRQ];
+	u8 rxq_cnt;
+	u8 txq_cnt;
 	u8 cq_cnt;
 	void __iomem *eth_csr_addr;
 	void __iomem *eth_ring_if_addr;
......
@@ -190,6 +190,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
 	return num_msgs;
 }
 
+static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
+{
+	u32 data = 0x7777;
+
+	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
+	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
+	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
+}
+
 struct xgene_ring_ops xgene_ring2_ops = {
 	.num_ring_config = X2_NUM_RING_CONFIG,
 	.num_ring_id_shift = 13,
@@ -197,4 +208,5 @@ struct xgene_ring_ops xgene_ring2_ops = {
 	.clear = xgene_enet_clear_ring,
 	.wr_cmd = xgene_enet_wr_cmd,
 	.len = xgene_enet_ring_len,
+	.coalesce = xgene_enet_setup_coalescing,
 };