Commit b0abc4f5 authored by Julian Wiedmann, committed by David S. Miller

s390/qeth: overhaul ethtool statistics

Accumulate per-TX queue statistics, and increase their size to 64 bit.
Don't bother with enabling/disabling the statistics; the overhead is
negligible.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent d896ac62
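As a minimal sketch of the scheme this commit describes (not the actual qeth code; all demo_* names below are made up for illustration), each TX queue owns its own 64-bit counters, the hot path bumps them unconditionally, and the device-level readout sums across all queues:

/*
 * Userspace sketch of per-queue 64-bit TX statistics. The hot path
 * increments counters without any "statistics enabled?" check, and
 * the readout aggregates all queues into one device-wide view.
 * Names are illustrative only, not the qeth driver's symbols.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_txq_stats {
	uint64_t tx_packets;
	uint64_t tx_bytes;
};

struct demo_txq {
	struct demo_txq_stats stats;
};

/* hot path: plain increments, overhead is a couple of adds per packet */
static void demo_txq_count(struct demo_txq *q, unsigned int bytes)
{
	q->stats.tx_packets++;
	q->stats.tx_bytes += bytes;
}

/* readout: sum the per-queue counters into one 64-bit total */
static void demo_sum_stats(const struct demo_txq *queues, unsigned int n,
			   uint64_t *packets, uint64_t *bytes)
{
	unsigned int i;

	*packets = 0;
	*bytes = 0;
	for (i = 0; i < n; i++) {
		*packets += queues[i].stats.tx_packets;
		*bytes += queues[i].stats.tx_bytes;
	}
}

int main(void)
{
	struct demo_txq queues[4] = { 0 };
	uint64_t pkts, bytes;

	demo_txq_count(&queues[0], 1500);
	demo_txq_count(&queues[2], 60);

	demo_sum_stats(queues, 4, &pkts, &bytes);
	printf("tx_packets=%llu tx_bytes=%llu\n",
	       (unsigned long long)pkts, (unsigned long long)bytes);
	return 0;
}

The diff below implements this idea with the QETH_TXQ_STAT_INC/QETH_TXQ_STAT_ADD macros on the per-queue qeth_out_q_stats structure and aggregates the totals in qeth_get_stats64().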
......@@ -34,6 +34,8 @@
#include <asm/ccwgroup.h>
#include <asm/sysinfo.h>
#include <uapi/linux/if_link.h>
#include "qeth_core_mpc.h"
/**
......@@ -112,35 +114,6 @@ static inline u32 qeth_get_device_id(struct ccw_device *cdev)
#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
/**
* card stuff
*/
struct qeth_perf_stats {
unsigned int bufs_rec;
unsigned int bufs_sent;
unsigned int buf_elements_sent;
unsigned int skbs_sent_pack;
unsigned int bufs_sent_pack;
unsigned int sc_dp_p;
unsigned int sc_p_dp;
unsigned int large_send_bytes;
unsigned int large_send_cnt;
unsigned int sg_skbs_sent;
/* initial values when measuring starts */
unsigned long initial_rx_packets;
unsigned long initial_tx_packets;
/* inbound scatter gather data */
unsigned int sg_skbs_rx;
unsigned int sg_frags_rx;
unsigned int sg_alloc_page_rx;
unsigned int tx_csum;
unsigned int tx_lin;
unsigned int tx_linfail;
unsigned int rx_csum;
};
/* Routing stuff */
struct qeth_routing_info {
enum qeth_routing_types type;
......@@ -470,10 +443,54 @@ enum qeth_out_q_states {
QETH_OUT_Q_LOCKED_FLUSH,
};
#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val))
#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1)
#define QETH_TXQ_STAT_ADD(_q, _stat, _val) ((_q)->stats._stat += (_val))
#define QETH_TXQ_STAT_INC(_q, _stat) QETH_TXQ_STAT_ADD(_q, _stat, 1)
struct qeth_card_stats {
u64 rx_bufs;
u64 rx_skb_csum;
u64 rx_sg_skbs;
u64 rx_sg_frags;
u64 rx_sg_alloc_page;
/* rtnl_link_stats64 */
u64 rx_packets;
u64 rx_bytes;
u64 rx_errors;
u64 rx_dropped;
u64 rx_multicast;
u64 tx_errors;
};
struct qeth_out_q_stats {
u64 bufs;
u64 bufs_pack;
u64 buf_elements;
u64 skbs_pack;
u64 skbs_sg;
u64 skbs_csum;
u64 skbs_tso;
u64 skbs_linearized;
u64 skbs_linearized_fail;
u64 tso_bytes;
u64 packing_mode_switch;
/* rtnl_link_stats64 */
u64 tx_packets;
u64 tx_bytes;
u64 tx_errors;
u64 tx_dropped;
u64 tx_carrier_errors;
};
struct qeth_qdio_out_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
struct qeth_out_q_stats stats;
int queue_no;
struct qeth_card *card;
atomic_t state;
......@@ -677,7 +694,6 @@ struct qeth_card_options {
struct qeth_vnicc_info vnicc; /* VNICC options */
int fake_broadcast;
enum qeth_discipline_id layer;
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
enum qeth_ipa_isolation_modes prev_isolation;
......@@ -753,8 +769,7 @@ struct qeth_card {
struct qeth_channel data;
struct net_device *dev;
struct net_device_stats stats;
struct qeth_card_stats stats;
struct qeth_card_info info;
struct qeth_token token;
struct qeth_seqno seqno;
......@@ -777,7 +792,6 @@ struct qeth_card {
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
struct qeth_perf_stats perf_stats;
int read_or_write_problem;
struct qeth_osn_info osn_info;
struct qeth_discipline *discipline;
......@@ -858,8 +872,7 @@ static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
if ((card->dev->features & NETIF_F_RXCSUM) &&
(flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (card->options.performance_stats)
card->perf_stats.rx_csum++;
QETH_CARD_STAT_INC(card, rx_skb_csum);
} else {
skb->ip_summed = CHECKSUM_NONE;
}
......@@ -966,7 +979,6 @@ void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
struct net_device_stats *qeth_get_stats(struct net_device *);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
......@@ -1002,14 +1014,16 @@ netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
int qeth_open(struct net_device *dev);
int qeth_stop(struct net_device *dev);
int qeth_vm_request_mac(struct qeth_card *card);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, int cast_type,
unsigned int data_len));
/* exports for OSN */
......
......@@ -2703,8 +2703,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
} else {
free_page((unsigned long)entry->elements[i]);
entry->elements[i] = page_address(page);
if (card->options.performance_stats)
card->perf_stats.sg_alloc_page_rx++;
QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
}
}
}
......@@ -3180,7 +3179,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card,
buf->element[14].sflags);
QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
if ((buf->element[15].sflags) == 0x12) {
card->stats.rx_dropped++;
QETH_CARD_STAT_INC(card, rx_dropped);
return 0;
} else
return 1;
......@@ -3322,8 +3321,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
>= QETH_HIGH_WATERMARK_PACK){
/* switch non-PACKING -> PACKING */
QETH_CARD_TEXT(queue->card, 6, "np->pack");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_dp_p++;
QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 1;
}
}
......@@ -3342,8 +3340,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
<= QETH_LOW_WATERMARK_PACK) {
/* switch PACKING -> non-PACKING */
QETH_CARD_TEXT(queue->card, 6, "pack->np");
if (queue->card->options.performance_stats)
queue->card->perf_stats.sc_p_dp++;
QETH_TXQ_STAT_INC(queue, packing_mode_switch);
queue->do_pack = 0;
return qeth_prep_flush_pack_buffer(queue);
}
......@@ -3397,6 +3394,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
QETH_TXQ_STAT_ADD(queue, bufs, count);
netif_trans_update(queue->card->dev);
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
......@@ -3405,7 +3403,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
if (rc) {
queue->card->stats.tx_errors += count;
QETH_TXQ_STAT_ADD(queue, tx_errors, count);
/* ignore temporary SIGA errors without busy condition */
if (rc == -ENOBUFS)
return;
......@@ -3420,8 +3418,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
qeth_schedule_recovery(queue->card);
return;
}
if (queue->card->options.performance_stats)
queue->card->perf_stats.bufs_sent += count;
}
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
......@@ -3452,10 +3448,8 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
if (!flush_cnt &&
!atomic_read(&queue->set_pci_flags_count))
flush_cnt += qeth_prep_flush_pack_buffer(queue);
if (queue->card->options.performance_stats &&
q_was_packing)
queue->card->perf_stats.bufs_sent_pack +=
flush_cnt;
if (q_was_packing)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
if (flush_cnt)
qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
......@@ -3748,11 +3742,12 @@ EXPORT_SYMBOL_GPL(qeth_count_elements);
* The number of needed buffer elements is returned in @elements.
* Error to create the hdr is indicated by returning with < 0.
*/
static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
struct qeth_hdr **hdr, unsigned int hdr_len,
unsigned int proto_len, unsigned int *elements)
static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr **hdr,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card);
const unsigned int contiguous = proto_len ? proto_len : 1;
unsigned int __elements;
addr_t start, end;
......@@ -3791,15 +3786,12 @@ static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
}
rc = skb_linearize(skb);
if (card->options.performance_stats) {
if (rc)
card->perf_stats.tx_linfail++;
else
card->perf_stats.tx_lin++;
}
if (rc)
if (rc) {
QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
return rc;
}
QETH_TXQ_STAT_INC(queue, skbs_linearized);
/* Linearization changed the layout, re-evaluate: */
goto check_layout;
}
......@@ -3923,9 +3915,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
} else {
QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
if (queue->card->options.performance_stats)
queue->card->perf_stats.skbs_sent_pack++;
QETH_TXQ_STAT_INC(queue, skbs_pack);
/* If the buffer still has free elements, keep using it. */
if (buf->next_element_to_fill <
QETH_MAX_BUFFER_ELEMENTS(queue->card))
......@@ -4039,8 +4030,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
out:
/* at this point the queue is UNLOCKED again */
if (queue->card->options.performance_stats && do_pack)
queue->card->perf_stats.bufs_sent_pack += flush_count;
if (do_pack)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
return rc;
}
......@@ -4064,8 +4055,9 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
void (*fill_header)(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, int cast_type,
unsigned int data_len))
{
unsigned int proto_len, hw_hdr_len;
......@@ -4090,7 +4082,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
if (rc)
return rc;
push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
&elements);
if (push_len < 0)
return push_len;
......@@ -4100,7 +4092,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
data_offset = push_len + proto_len;
}
memset(hdr, 0, hw_hdr_len);
fill_header(card, hdr, skb, ipv, cast_type, frame_len);
fill_header(queue, hdr, skb, ipv, cast_type, frame_len);
if (is_tso)
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
......@@ -4117,14 +4109,12 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
}
if (!rc) {
if (card->options.performance_stats) {
card->perf_stats.buf_elements_sent += elements;
if (is_sg)
card->perf_stats.sg_skbs_sent++;
if (is_tso) {
card->perf_stats.large_send_bytes += frame_len;
card->perf_stats.large_send_cnt++;
}
QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
if (is_sg)
QETH_TXQ_STAT_INC(queue, skbs_sg);
if (is_tso) {
QETH_TXQ_STAT_INC(queue, skbs_tso);
QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
}
} else {
if (!push_len)
......@@ -4183,18 +4173,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
struct qeth_card *card;
card = dev->ml_priv;
QETH_CARD_TEXT(card, 5, "getstat");
return &card->stats;
}
EXPORT_SYMBOL_GPL(qeth_get_stats);
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
......@@ -4388,7 +4366,7 @@ void qeth_tx_timeout(struct net_device *dev)
card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "txtimeo");
card->stats.tx_errors++;
QETH_CARD_STAT_INC(card, tx_errors);
qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
......@@ -5246,7 +5224,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "unexeob");
QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
dev_kfree_skb_any(skb);
card->stats.rx_errors++;
QETH_CARD_STAT_INC(card, rx_errors);
return NULL;
}
element++;
......@@ -5258,16 +5236,17 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
}
*__element = element;
*__offset = offset;
if (use_rx_sg && card->options.performance_stats) {
card->perf_stats.sg_skbs_rx++;
card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
if (use_rx_sg) {
QETH_CARD_STAT_INC(card, rx_sg_skbs);
QETH_CARD_STAT_ADD(card, rx_sg_frags,
skb_shinfo(skb)->nr_frags);
}
return skb;
no_mem:
if (net_ratelimit()) {
QETH_CARD_TEXT(card, 2, "noskbmem");
}
card->stats.rx_dropped++;
QETH_CARD_STAT_INC(card, rx_dropped);
return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
......@@ -5308,8 +5287,7 @@ int qeth_poll(struct napi_struct *napi, int budget)
done = 1;
if (done) {
if (card->options.performance_stats)
card->perf_stats.bufs_rec++;
QETH_CARD_STAT_INC(card, rx_bufs);
qeth_put_buffer_pool_entry(card,
buffer->pool_entry);
qeth_queue_input_buffer(card, card->rx.b_index);
......@@ -6223,6 +6201,33 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(qeth_features_check);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_qdio_out_q *queue;
unsigned int i;
QETH_CARD_TEXT(card, 5, "getstat");
stats->rx_packets = card->stats.rx_packets;
stats->rx_bytes = card->stats.rx_bytes;
stats->rx_errors = card->stats.rx_errors;
stats->rx_dropped = card->stats.rx_dropped;
stats->multicast = card->stats.rx_multicast;
stats->tx_errors = card->stats.tx_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
stats->tx_packets += queue->stats.tx_packets;
stats->tx_bytes += queue->stats.tx_bytes;
stats->tx_errors += queue->stats.tx_errors;
stats->tx_dropped += queue->stats.tx_dropped;
stats->tx_carrier_errors += queue->stats.tx_carrier_errors;
}
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
......
......@@ -336,35 +336,36 @@ static ssize_t qeth_dev_performance_stats_show(struct device *dev,
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
return sprintf(buf, "1\n");
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i, rc = 0;
struct qeth_qdio_out_q *queue;
unsigned int i;
bool reset;
int rc;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1)) {
if (i == card->options.performance_stats)
goto out;
card->options.performance_stats = i;
if (i == 0)
memset(&card->perf_stats, 0,
sizeof(struct qeth_perf_stats));
card->perf_stats.initial_rx_packets = card->stats.rx_packets;
card->perf_stats.initial_tx_packets = card->stats.tx_packets;
} else
rc = -EINVAL;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
rc = kstrtobool(buf, &reset);
if (rc)
return rc;
if (reset) {
memset(&card->stats, 0, sizeof(card->stats));
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
if (!queue)
break;
memset(&queue->stats, 0, sizeof(queue->stats));
}
}
return count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
......
......@@ -9,43 +9,82 @@
#include <linux/ethtool.h>
#include "qeth_core.h"
static struct {
const char str[ETH_GSTRING_LEN];
} qeth_ethtool_stats_keys[] = {
/* 0 */{"rx skbs"},
{"rx buffers"},
{"tx skbs"},
{"tx buffers"},
{"tx skbs no packing"},
{"tx buffers no packing"},
{"tx skbs packing"},
{"tx buffers packing"},
{"tx sg skbs"},
{"tx buffer elements"},
/* 10 */{"rx sg skbs"},
{"rx sg frags"},
{"rx sg page allocs"},
{"tx large kbytes"},
{"tx large count"},
{"tx pk state ch n->p"},
{"tx pk state ch p->n"},
{"tx pk watermark low"},
{"tx pk watermark high"},
{"queue 0 buffer usage"},
/* 20 */{"queue 1 buffer usage"},
{"queue 2 buffer usage"},
{"queue 3 buffer usage"},
{"tx csum"},
{"tx lin"},
{"tx linfail"},
{"rx csum"}
#define QETH_TXQ_STAT(_name, _stat) { \
.name = _name, \
.offset = offsetof(struct qeth_out_q_stats, _stat) \
}
#define QETH_CARD_STAT(_name, _stat) { \
.name = _name, \
.offset = offsetof(struct qeth_card_stats, _stat) \
}
struct qeth_stats {
char name[ETH_GSTRING_LEN];
unsigned int offset;
};
static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("IO buffers", bufs),
QETH_TXQ_STAT("IO buffer elements", buf_elements),
QETH_TXQ_STAT("packed IO buffers", bufs_pack),
QETH_TXQ_STAT("skbs", tx_packets),
QETH_TXQ_STAT("packed skbs", skbs_pack),
QETH_TXQ_STAT("SG skbs", skbs_sg),
QETH_TXQ_STAT("HW csum skbs", skbs_csum),
QETH_TXQ_STAT("TSO skbs", skbs_tso),
QETH_TXQ_STAT("linearized skbs", skbs_linearized),
QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
};
static const struct qeth_stats card_stats[] = {
QETH_CARD_STAT("rx0 IO buffers", rx_bufs),
QETH_CARD_STAT("rx0 HW csum skbs", rx_skb_csum),
QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs),
QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags),
QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page),
};
#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats)
#define CARD_STATS_LEN ARRAY_SIZE(card_stats)
static void qeth_add_stat_data(u64 **dst, void *src,
const struct qeth_stats stats[],
unsigned int size)
{
unsigned int i;
char *stat;
for (i = 0; i < size; i++) {
stat = (char *)src + stats[i].offset;
**dst = *(u64 *)stat;
(*dst)++;
}
}
static void qeth_add_stat_strings(u8 **data, const char *prefix,
const struct qeth_stats stats[],
unsigned int size)
{
unsigned int i;
for (i = 0; i < size; i++) {
snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name);
*data += ETH_GSTRING_LEN;
}
}
static int qeth_get_sset_count(struct net_device *dev, int stringset)
{
struct qeth_card *card = dev->ml_priv;
switch (stringset) {
case ETH_SS_STATS:
return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
return CARD_STATS_LEN +
card->qdio.no_out_queues * TXQ_STATS_LEN;
default:
return -EINVAL;
}
......@@ -55,48 +94,29 @@ static void qeth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct qeth_card *card = dev->ml_priv;
unsigned int i;
data[0] = card->stats.rx_packets -
card->perf_stats.initial_rx_packets;
data[1] = card->perf_stats.bufs_rec;
data[2] = card->stats.tx_packets -
card->perf_stats.initial_tx_packets;
data[3] = card->perf_stats.bufs_sent;
data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
- card->perf_stats.skbs_sent_pack;
data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
data[6] = card->perf_stats.skbs_sent_pack;
data[7] = card->perf_stats.bufs_sent_pack;
data[8] = card->perf_stats.sg_skbs_sent;
data[9] = card->perf_stats.buf_elements_sent;
data[10] = card->perf_stats.sg_skbs_rx;
data[11] = card->perf_stats.sg_frags_rx;
data[12] = card->perf_stats.sg_alloc_page_rx;
data[13] = (card->perf_stats.large_send_bytes >> 10);
data[14] = card->perf_stats.large_send_cnt;
data[15] = card->perf_stats.sc_dp_p;
data[16] = card->perf_stats.sc_p_dp;
data[17] = QETH_LOW_WATERMARK_PACK;
data[18] = QETH_HIGH_WATERMARK_PACK;
data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
data[20] = (card->qdio.no_out_queues > 1) ?
atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
data[21] = (card->qdio.no_out_queues > 2) ?
atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
data[22] = (card->qdio.no_out_queues > 3) ?
atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
data[23] = card->perf_stats.tx_csum;
data[24] = card->perf_stats.tx_lin;
data[25] = card->perf_stats.tx_linfail;
data[26] = card->perf_stats.rx_csum;
qeth_add_stat_data(&data, &card->stats, card_stats, CARD_STATS_LEN);
for (i = 0; i < card->qdio.no_out_queues; i++)
qeth_add_stat_data(&data, &card->qdio.out_qs[i]->stats,
txq_stats, TXQ_STATS_LEN);
}
static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qeth_card *card = dev->ml_priv;
char prefix[ETH_GSTRING_LEN] = "";
unsigned int i;
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, &qeth_ethtool_stats_keys,
sizeof(qeth_ethtool_stats_keys));
qeth_add_stat_strings(&data, prefix, card_stats,
CARD_STATS_LEN);
for (i = 0; i < card->qdio.no_out_queues; i++) {
snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
qeth_add_stat_strings(&data, prefix, txq_stats,
TXQ_STATS_LEN);
}
break;
default:
WARN_ON(1);
......
......@@ -174,9 +174,9 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
return RTN_UNICAST;
}
static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len)
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, int cast_type, unsigned int data_len)
{
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
......@@ -188,8 +188,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
QETH_TXQ_STAT_INC(queue, skbs_csum);
}
}
......@@ -369,8 +368,8 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
}
work_done++;
budget--;
card->stats.rx_packets++;
card->stats.rx_bytes += len;
QETH_CARD_STAT_INC(card, rx_packets);
QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
......@@ -626,12 +625,13 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
if (card->state != CARD_STATE_UP) {
card->stats.tx_carrier_errors++;
QETH_TXQ_STAT_INC(queue, tx_carrier_errors);
goto tx_drop;
}
queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
netif_stop_queue(dev);
if (IS_OSN(card))
......@@ -641,8 +641,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
qeth_l2_fill_header);
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
......@@ -650,8 +650,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
} /* else fall through */
tx_drop:
card->stats.tx_dropped++;
card->stats.tx_errors++;
QETH_TXQ_STAT_INC(queue, tx_dropped);
QETH_TXQ_STAT_INC(queue, tx_errors);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
......@@ -699,7 +699,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
.ndo_validate_addr = qeth_l2_validate_addr,
......
......@@ -1315,12 +1315,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
else
ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
card->stats.multicast++;
QETH_CARD_STAT_INC(card, rx_multicast);
break;
case QETH_CAST_BROADCAST:
ether_addr_copy(tg_addr, card->dev->broadcast);
card->stats.multicast++;
QETH_CARD_STAT_INC(card, rx_multicast);
break;
default:
if (card->options.sniffer)
......@@ -1401,8 +1400,8 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
}
work_done++;
budget--;
card->stats.rx_packets++;
card->stats.rx_bytes += len;
QETH_CARD_STAT_INC(card, rx_packets);
QETH_CARD_STAT_ADD(card, rx_bytes, len);
}
return work_done;
}
......@@ -1945,12 +1944,13 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type)
return QETH_CAST_UNICAST;
}
static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len)
static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr *hdr, struct sk_buff *skb,
int ipv, int cast_type, unsigned int data_len)
{
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
struct qeth_card *card = queue->card;
hdr->hdr.l3.length = data_len;
......@@ -1972,8 +1972,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
/* some HW requires combined L3+L4 csum offload: */
if (ipv == 4)
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
if (card->options.performance_stats)
card->perf_stats.tx_csum++;
QETH_TXQ_STAT_INC(queue, skbs_csum);
}
}
......@@ -2074,6 +2073,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
int tx_bytes = skb->len;
int rc;
queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
if (IS_IQD(card)) {
if (card->options.sniffer)
goto tx_drop;
......@@ -2084,14 +2085,13 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
}
if (card->state != CARD_STATE_UP) {
card->stats.tx_carrier_errors++;
QETH_TXQ_STAT_INC(queue, tx_carrier_errors);
goto tx_drop;
}
if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
goto tx_drop;
queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
netif_stop_queue(dev);
if (ipv == 4 || IS_IQD(card))
......@@ -2101,8 +2101,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
qeth_l3_fill_header);
if (!rc) {
card->stats.tx_packets++;
card->stats.tx_bytes += tx_bytes;
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
netif_wake_queue(dev);
return NETDEV_TX_OK;
} else if (rc == -EBUSY) {
......@@ -2110,8 +2110,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
} /* else fall through */
tx_drop:
card->stats.tx_dropped++;
card->stats.tx_errors++;
QETH_TXQ_STAT_INC(queue, tx_dropped);
QETH_TXQ_STAT_INC(queue, tx_errors);
dev_kfree_skb_any(skb);
netif_wake_queue(dev);
return NETDEV_TX_OK;
......@@ -2153,7 +2153,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
......@@ -2168,7 +2168,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats = qeth_get_stats,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
.ndo_validate_addr = eth_validate_addr,
......