Commit acbddb59 authored by Jan-Bernd Themann, committed by Jeff Garzik

ehea: removing unused functionality

This patch includes:
- removal of unused fields in structs
- ethtool statistics cleanup
- removal of unused functionality from the send path
Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Parent 144213d7
@@ -39,7 +39,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0054"
+#define DRV_VERSION	"EHEA_0055"
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -79,7 +79,6 @@
 #define EHEA_L_PKT_SIZE		256	/* low latency */
 
 /* Send completion signaling */
-#define EHEA_SIG_IV_LONG	1
 
 /* Protection Domain Identifier */
 #define EHEA_PD_ID		0xaabcdeff
@@ -106,11 +105,7 @@
 #define EHEA_CACHE_LINE		128
 
 /* Memory Regions */
-#define EHEA_MR_MAX_TX_PAGES	20
-#define EHEA_MR_TX_DATA_PN	3
 #define EHEA_MR_ACC_CTRL	0x00800000
-#define EHEA_RWQES_PER_MR_RQ2	10
-#define EHEA_RWQES_PER_MR_RQ3	10
 
 #define EHEA_WATCH_DOG_TIMEOUT	10*HZ
@@ -318,17 +313,12 @@ struct ehea_mr {
 /*
  * Port state information
  */
-struct port_state {
-	int poll_max_processed;
+struct port_stats {
 	int poll_receive_errors;
-	int ehea_poll;
 	int queue_stopped;
-	int min_swqe_avail;
-	u64 sqc_stop_sum;
-	int pkt_send;
-	int pkt_xmit;
-	int send_tasklet;
-	int nwqe;
+	int err_tcp_cksum;
+	int err_ip_cksum;
+	int err_frame_crc;
 };
 
 #define EHEA_IRQ_NAME_SIZE	20
@@ -347,6 +337,7 @@ struct ehea_q_skb_arr {
 /*
  * Port resources
  */
 struct ehea_port_res {
+	struct port_stats p_stats;
 	struct ehea_mr send_mr;		/* send memory region */
 	struct ehea_mr recv_mr;		/* receive memory region */
 	spinlock_t xmit_lock;
@@ -358,7 +349,6 @@ struct ehea_port_res {
 	struct ehea_cq *recv_cq;
 	struct ehea_eq *eq;
 	struct net_device *d_netdev;
-	spinlock_t send_lock;
 	struct ehea_q_skb_arr rq1_skba;
 	struct ehea_q_skb_arr rq2_skba;
 	struct ehea_q_skb_arr rq3_skba;
@@ -368,11 +358,8 @@ struct ehea_port_res {
 	int swqe_refill_th;
 	atomic_t swqe_avail;
 	int swqe_ll_count;
-	int swqe_count;
 	u32 swqe_id_counter;
 	u64 tx_packets;
-	spinlock_t recv_lock;
-	struct port_state p_state;
 	u64 rx_packets;
 	u32 poll_counter;
 };
......
@@ -166,33 +166,23 @@ static u32 ehea_get_rx_csum(struct net_device *dev)
 }
 
 static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
-	{"poll_max_processed"},
-	{"queue_stopped"},
-	{"min_swqe_avail"},
-	{"poll_receive_err"},
-	{"pkt_send"},
-	{"pkt_xmit"},
-	{"send_tasklet"},
-	{"ehea_poll"},
-	{"nwqe"},
-	{"swqe_available_0"},
 	{"sig_comp_iv"},
 	{"swqe_refill_th"},
 	{"port resets"},
-	{"rxo"},
-	{"rx64"},
-	{"rx65"},
-	{"rx128"},
-	{"rx256"},
-	{"rx512"},
-	{"rx1024"},
-	{"txo"},
-	{"tx64"},
-	{"tx65"},
-	{"tx128"},
-	{"tx256"},
-	{"tx512"},
-	{"tx1024"},
+	{"Receive errors"},
+	{"TCP cksum errors"},
+	{"IP cksum errors"},
+	{"Frame cksum errors"},
+	{"num SQ stopped"},
+	{"SQ stopped"},
+	{"PR0 free_swqes"},
+	{"PR1 free_swqes"},
+	{"PR2 free_swqes"},
+	{"PR3 free_swqes"},
+	{"PR4 free_swqes"},
+	{"PR5 free_swqes"},
+	{"PR6 free_swqes"},
+	{"PR7 free_swqes"},
 };
 
 static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -211,63 +201,44 @@ static int ehea_get_stats_count(struct net_device *dev)
 static void ehea_get_ethtool_stats(struct net_device *dev,
 				   struct ethtool_stats *stats, u64 *data)
 {
-	u64 hret;
-	int i;
+	int i, k, tmp;
 	struct ehea_port *port = netdev_priv(dev);
-	struct ehea_adapter *adapter = port->adapter;
-	struct ehea_port_res *pr = &port->port_res[0];
-	struct port_state *p_state = &pr->p_state;
-	struct hcp_ehea_port_cb6 *cb6;
 
 	for (i = 0; i < ehea_get_stats_count(dev); i++)
 		data[i] = 0;
 	i = 0;
 
-	data[i++] = p_state->poll_max_processed;
-	data[i++] = p_state->queue_stopped;
-	data[i++] = p_state->min_swqe_avail;
-	data[i++] = p_state->poll_receive_errors;
-	data[i++] = p_state->pkt_send;
-	data[i++] = p_state->pkt_xmit;
-	data[i++] = p_state->send_tasklet;
-	data[i++] = p_state->ehea_poll;
-	data[i++] = p_state->nwqe;
-	data[i++] = atomic_read(&port->port_res[0].swqe_avail);
 	data[i++] = port->sig_comp_iv;
 	data[i++] = port->port_res[0].swqe_refill_th;
 	data[i++] = port->resets;
 
-	cb6 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!cb6) {
-		ehea_error("no mem for cb6");
-		return;
-	}
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.poll_receive_errors;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.err_tcp_cksum;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.err_ip_cksum;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.err_frame_crc;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.queue_stopped;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp |= port->port_res[k].queue_stopped;
+	data[i++] = tmp;
+
+	for (k = 0; k < 8; k++)
+		data[i++] = atomic_read(&port->port_res[k].swqe_avail);
 
-	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
-				      H_PORT_CB6, H_PORT_CB6_ALL, cb6);
-
-	if (netif_msg_hw(port))
-		ehea_dump(cb6, sizeof(*cb6), "ehea_get_ethtool_stats");
-
-	if (hret == H_SUCCESS) {
-		data[i++] = cb6->rxo;
-		data[i++] = cb6->rx64;
-		data[i++] = cb6->rx65;
-		data[i++] = cb6->rx128;
-		data[i++] = cb6->rx256;
-		data[i++] = cb6->rx512;
-		data[i++] = cb6->rx1024;
-		data[i++] = cb6->txo;
-		data[i++] = cb6->tx64;
-		data[i++] = cb6->tx65;
-		data[i++] = cb6->tx128;
-		data[i++] = cb6->tx256;
-		data[i++] = cb6->tx512;
-		data[i++] = cb6->tx1024;
-	} else
-		ehea_error("query_ehea_port failed");
-
-	kfree(cb6);
 }
 
 const struct ethtool_ops ehea_ethtool_ops = {
......
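[Review note] The rework above moves the receive-error and queue-stop counters out of the single port_state hanging off port_res[0] and into a per-port-resource port_stats, so ehea_get_ethtool_stats() now sums each counter across all port resources only when the statistics are read. Below is a minimal userspace sketch of that aggregation pattern, not driver code: the structures are trimmed stand-ins for the driver's types and the EHEA_MAX_PORT_RES value here is an assumption made for the sketch.

/*
 * Minimal sketch of the aggregation pattern used by the reworked
 * ehea_get_ethtool_stats(): per-queue counters live in each port resource
 * and are summed only when ethtool asks for them.  All types below are
 * trimmed stand-ins; EHEA_MAX_PORT_RES is an assumed value.
 */
#include <stdio.h>

#define EHEA_MAX_PORT_RES 16	/* assumption for this sketch */

struct port_stats {
	int poll_receive_errors;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
	int queue_stopped;
};

struct ehea_port_res {
	struct port_stats p_stats;
};

struct ehea_port {
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
};

/* Same shape as the per-statistic loops in the patched ethtool handler. */
static long sum_ip_cksum_errors(const struct ehea_port *port)
{
	long tmp = 0;
	int k;

	for (k = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp += port->port_res[k].p_stats.err_ip_cksum;
	return tmp;
}

int main(void)
{
	struct ehea_port port = { 0 };

	/* Pretend two queues saw IP checksum errors on their RX paths. */
	port.port_res[0].p_stats.err_ip_cksum = 3;
	port.port_res[5].p_stats.err_ip_cksum = 1;

	printf("IP cksum errors: %ld\n", sum_ip_cksum_errors(&port));
	return 0;
}

The per-queue counters are plain fields bumped on each queue's own poll or xmit path, so the only added cost of this layout appears to be the short summation loop when ethtool -S is run.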
@@ -327,6 +327,13 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 {
 	struct sk_buff *skb;
 
+	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
+		pr->p_stats.err_tcp_cksum++;
+	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
+		pr->p_stats.err_ip_cksum++;
+	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
+		pr->p_stats.err_frame_crc++;
+
 	if (netif_msg_rx_err(pr->port)) {
 		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
 		ehea_dump(cqe, sizeof(*cqe), "CQE");
@@ -428,7 +435,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 			else
 				netif_receive_skb(skb);
 		} else {
-			pr->p_state.poll_receive_errors++;
+			pr->p_stats.poll_receive_errors++;
 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
 							   &processed_rq2,
 							   &processed_rq3);
@@ -449,34 +456,15 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 	return cqe;
 }
 
-static void ehea_free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
-{
-	struct sk_buff *skb;
-	int index, max_index_mask, i;
-
-	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
-	max_index_mask = pr->sq_skba.len - 1;
-	for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
-		skb = pr->sq_skba.arr[index];
-		if (likely(skb)) {
-			dev_kfree_skb(skb);
-			pr->sq_skba.arr[index] = NULL;
-		} else {
-			ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
-				   cqe->wr_id, i, index);
-		}
-		index--;
-		index &= max_index_mask;
-	}
-}
-
 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 {
+	struct sk_buff *skb;
 	struct ehea_cq *send_cq = pr->send_cq;
 	struct ehea_cqe *cqe;
 	int quota = my_quota;
 	int cqe_counter = 0;
 	int swqe_av = 0;
+	int index;
 	unsigned long flags;
 
 	cqe = ehea_poll_cq(send_cq);
@@ -498,8 +486,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 			ehea_dump(cqe, sizeof(*cqe), "CQE");
 
 		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
-			   == EHEA_SWQE2_TYPE))
-			ehea_free_sent_skbs(cqe, pr);
+			   == EHEA_SWQE2_TYPE)) {
+
+			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
+			skb = pr->sq_skba.arr[index];
+			dev_kfree_skb(skb);
+			pr->sq_skba.arr[index] = NULL;
+		}
 
 		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
 		quota--;
@@ -1092,8 +1085,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	memset(pr, 0, sizeof(struct ehea_port_res));
 
 	pr->port = port;
-	spin_lock_init(&pr->send_lock);
-	spin_lock_init(&pr->recv_lock);
 	spin_lock_init(&pr->xmit_lock);
 	spin_lock_init(&pr->netif_queue);
@@ -1811,7 +1802,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
 
-
 	if (!spin_trylock(&pr->xmit_lock))
 		return NETDEV_TX_BUSY;
@@ -1841,6 +1831,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		swqe->wr_id =
 			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
+		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
 		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
 		pr->sq_skba.arr[pr->sq_skba.index] = skb;
lkey = pr->send_mr.lkey; lkey = pr->send_mr.lkey;
ehea_xmit2(skb, dev, swqe, lkey); ehea_xmit2(skb, dev, swqe, lkey);
swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
EHEA_SIG_IV_LONG);
swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
pr->swqe_count = 0;
} else
pr->swqe_count += 1;
} }
pr->swqe_id_counter += 1; pr->swqe_id_counter += 1;
@@ -1876,6 +1860,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);
 		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
+			pr->p_stats.queue_stopped++;
 			netif_stop_queue(dev);
 			pr->queue_stopped = 1;
 		}
......
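[Review note] In the send path, the old code requested a signalled completion only every EHEA_SIG_IV_LONG work requests and had ehea_free_sent_skbs() walk backwards from WR_ID_INDEX freeing up to WR_ID_REFILL skbs. After this patch every SWQE2 carries EHEA_SWQE_SIGNALLED_COMPLETION and WR_ID_REFILL is fixed at 1, so each send CQE frees exactly one skb, the one stored at WR_ID_INDEX. The sketch below illustrates that bookkeeping; the bit layout and the WRID_* macros are illustrative stand-ins, not the driver's EHEA_BMASK_* definitions.

/*
 * Sketch of the wr_id bookkeeping after this patch: one signalled
 * completion per SWQE2, refill fixed at 1, one skb freed per CQE at the
 * recorded index.  Field layout and macros here are invented for the
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define WRID_TYPE_SHIFT   60
#define WRID_COUNT_SHIFT  40
#define WRID_REFILL_SHIFT 32
#define WRID_INDEX_SHIFT   0

#define WRID_PACK(type, count, refill, index)			\
	(((uint64_t)(type)   << WRID_TYPE_SHIFT)   |		\
	 ((uint64_t)(count)  << WRID_COUNT_SHIFT)  |		\
	 ((uint64_t)(refill) << WRID_REFILL_SHIFT) |		\
	 ((uint64_t)(index)  << WRID_INDEX_SHIFT))

#define WRID_INDEX(wr_id)  ((unsigned)((wr_id) >> WRID_INDEX_SHIFT) & 0xffffffffu)
#define WRID_REFILL(wr_id) ((unsigned)((wr_id) >> WRID_REFILL_SHIFT) & 0xffu)

int main(void)
{
	/* xmit side: remember where the skb was stored in sq_skba.arr[]. */
	uint64_t wr_id = WRID_PACK(2 /* SWQE2 */, 7 /* id counter */,
				   1 /* refill: one completion per WQE */,
				   42 /* sq_skba.index */);

	/* completion side: one CQE -> free the single skb at that index. */
	printf("free sq_skba.arr[%u], refill=%u\n",
	       WRID_INDEX(wr_id), WRID_REFILL(wr_id));
	return 0;
}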
@@ -142,6 +142,8 @@ struct ehea_rwqe {
 #define EHEA_CQE_STAT_ERR_MASK		0x721F
 #define EHEA_CQE_STAT_FAT_ERR_MASK	0x1F
 #define EHEA_CQE_STAT_ERR_TCP		0x4000
+#define EHEA_CQE_STAT_ERR_IP		0x2000
+#define EHEA_CQE_STAT_ERR_CRC		0x1000
 
 struct ehea_cqe {
 	u64 wr_id;		/* work request ID from WQE */
......
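[Review note] The two new status bits above are what ehea_treat_poll_error() tests to classify receive errors into the new err_tcp_cksum, err_ip_cksum and err_frame_crc counters. The short sketch below ties the mask values from this hunk to that classification; the structs and counter holder around them are trimmed stand-ins for illustration.

/*
 * Sketch of how the new CQE status bits are consumed: the masks match this
 * hunk, everything else is a stand-in for the driver's types.
 */
#include <stdint.h>
#include <stdio.h>

#define EHEA_CQE_STAT_ERR_TCP 0x4000
#define EHEA_CQE_STAT_ERR_IP  0x2000
#define EHEA_CQE_STAT_ERR_CRC 0x1000

struct cqe_sketch {
	uint16_t status;
};

struct stats_sketch {
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

static void classify(const struct cqe_sketch *cqe, struct stats_sketch *st)
{
	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		st->err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		st->err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		st->err_frame_crc++;
}

int main(void)
{
	struct cqe_sketch cqe = { .status = EHEA_CQE_STAT_ERR_IP | EHEA_CQE_STAT_ERR_CRC };
	struct stats_sketch st = { 0 };

	classify(&cqe, &st);
	printf("tcp=%d ip=%d crc=%d\n",
	       st.err_tcp_cksum, st.err_ip_cksum, st.err_frame_crc);
	return 0;
}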