diff --git a/drivers/net/ethernet/qualcomm/rmnet/Kconfig b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
index 6e2587af47a45de2a83ffafe154f12bbe7e8f9cc..9bb06d2846448a36e51f017305e420a67d8c0b09 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/Kconfig
+++ b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
@@ -5,6 +5,7 @@
 menuconfig RMNET
 	tristate "RmNet MAP driver"
 	default n
+	select GRO_CELLS
 	---help---
 	  If you select this, you will enable the RMNET module which is used
 	  for handling data in the multiplexing and aggregation protocol (MAP)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 60115e69e4158976c26c1e4e2a0534d5e3c5b350..c19259eea99e317a9881c27990c2e367dc7d6c1d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -14,6 +14,7 @@
  */
 
 #include <linux/skbuff.h>
+#include <net/gro_cells.h>
 
 #ifndef _RMNET_CONFIG_H_
 #define _RMNET_CONFIG_H_
@@ -41,9 +42,24 @@ struct rmnet_port {
 
 extern struct rtnl_link_ops rmnet_link_ops;
 
+struct rmnet_vnd_stats {
+	u64 rx_pkts;
+	u64 rx_bytes;
+	u64 tx_pkts;
+	u64 tx_bytes;
+	u32 tx_drops;
+};
+
+struct rmnet_pcpu_stats {
+	struct rmnet_vnd_stats stats;
+	struct u64_stats_sync syncp;
+};
+
 struct rmnet_priv {
 	u8 mux_id;
 	struct net_device *real_dev;
+	struct rmnet_pcpu_stats __percpu *pcpu_stats;
+	struct gro_cells gro_cells;
 };
 
 struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index df3d2d16ce550a348b65fe01bea47b9dc361e838..29842ccc91a9d35ff49a0e5ac1cb61ec61fb3b49 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -43,22 +43,23 @@ static void rmnet_set_skb_proto(struct sk_buff *skb)
 
 /* Generic handler */
 
-static rx_handler_result_t
+static void
 rmnet_deliver_skb(struct sk_buff *skb)
 {
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
 	rmnet_vnd_rx_fixup(skb, skb->dev);
 
 	skb->pkt_type = PACKET_HOST;
 	skb_set_mac_header(skb, 0);
-	netif_receive_skb(skb);
-	return RX_HANDLER_CONSUMED;
+	gro_cells_receive(&priv->gro_cells, skb);
 }
 
 /* MAP handler */
 
-static rx_handler_result_t
+static void
 __rmnet_map_ingress_handler(struct sk_buff *skb,
 			    struct rmnet_port *port)
 {
@@ -84,38 +85,33 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 	if (!ep)
 		goto free_skb;
 
-	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
-		skb->dev = ep->egress_dev;
+	skb->dev = ep->egress_dev;
 
 	/* Subtract MAP header */
 	skb_pull(skb, sizeof(struct rmnet_map_header));
 	skb_trim(skb, len);
 	rmnet_set_skb_proto(skb);
-	return rmnet_deliver_skb(skb);
+	rmnet_deliver_skb(skb);
+	return;
 
 free_skb:
 	kfree_skb(skb);
-	return RX_HANDLER_CONSUMED;
 }
 
-static rx_handler_result_t
+static void
 rmnet_map_ingress_handler(struct sk_buff *skb,
 			  struct rmnet_port *port)
 {
 	struct sk_buff *skbn;
-	int rc;
 
 	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
 		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
 			__rmnet_map_ingress_handler(skbn, port);
 
 		consume_skb(skb);
-		rc = RX_HANDLER_CONSUMED;
 	} else {
-		rc = __rmnet_map_ingress_handler(skb, port);
+		__rmnet_map_ingress_handler(skb, port);
 	}
-
-	return rc;
 }
 
 static int rmnet_map_egress_handler(struct sk_buff *skb,
@@ -149,15 +145,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 	return RMNET_MAP_SUCCESS;
 }
 
-static rx_handler_result_t
+static void
 rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
 {
 	if (bridge_dev) {
 		skb->dev = bridge_dev;
 		dev_queue_xmit(skb);
 	}
-
-	return RX_HANDLER_CONSUMED;
 }
 
 /* Ingress / Egress Entry Points */
@@ -168,13 +162,12 @@ rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
  */
 rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 {
-	int rc = RX_HANDLER_CONSUMED;
 	struct sk_buff *skb = *pskb;
 	struct rmnet_port *port;
 	struct net_device *dev;
 
 	if (!skb)
-		return RX_HANDLER_CONSUMED;
+		goto done;
 
 	dev = skb->dev;
 	port = rmnet_get_port(dev);
@@ -182,14 +175,15 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
 	switch (port->rmnet_mode) {
 	case RMNET_EPMODE_VND:
 		if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
-			rc = rmnet_map_ingress_handler(skb, port);
+			rmnet_map_ingress_handler(skb, port);
 		break;
 	case RMNET_EPMODE_BRIDGE:
-		rc = rmnet_bridge_handler(skb, port->bridge_ep);
+		rmnet_bridge_handler(skb, port->bridge_ep);
 		break;
 	}
 
-	return rc;
+done:
+	return RX_HANDLER_CONSUMED;
 }
 
 /* Modifies packet as per logical endpoint configuration and egress data format
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index ce2302c25b128dcf28dc73cd61865994460c9897..3af3fe7b54570e76b37ed8b43a19cbf2d674fdb6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -80,7 +80,6 @@ u8 rmnet_map_demultiplex(struct sk_buff *skb);
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 						  int hdrlen, int pad);
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
-				      struct rmnet_port *port);
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 
 #endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 74d362f71cce935d654d23f16a86a3cc230bdca5..51e604923ac19ab4bc4362be89273630cdc88626 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -76,8 +76,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
 /* Process MAP command frame and send N/ACK message as appropriate. Message cmd
  * name is decoded here and appropriate handler is called.
  */
-rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
-				      struct rmnet_port *port)
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
 {
 	struct rmnet_map_control_command *cmd;
 	unsigned char command_name;
@@ -102,5 +101,4 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
 	}
 	if (rc == RMNET_MAP_COMMAND_ACK)
 		rmnet_map_send_ack(skb, rc);
-	return RX_HANDLER_CONSUMED;
 }
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 12bd0bbd5235dfab800f612163c407d69947b244..9caa5e387450933ebb1c963da14dd5042b45f2f7 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -27,14 +27,28 @@
 
 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.rx_pkts++;
+	pcpu_ptr->stats.rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
 {
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_pcpu_stats *pcpu_ptr;
+
+	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+	u64_stats_update_begin(&pcpu_ptr->syncp);
+	pcpu_ptr->stats.tx_pkts++;
+	pcpu_ptr->stats.tx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
 /* Network Device Operations */
@@ -48,7 +62,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
 	if (priv->real_dev) {
 		rmnet_egress_handler(skb);
 	} else {
-		dev->stats.tx_dropped++;
+		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
 		kfree_skb(skb);
 	}
 	return NETDEV_TX_OK;
@@ -70,12 +84,72 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev)
 	return priv->real_dev->ifindex;
 }
 
+static int rmnet_vnd_init(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	int err;
+
+	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+	if (!priv->pcpu_stats)
+		return -ENOMEM;
+
+	err = gro_cells_init(&priv->gro_cells, dev);
+	if (err) {
+		free_percpu(priv->pcpu_stats);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+
+	gro_cells_destroy(&priv->gro_cells);
+	free_percpu(priv->pcpu_stats);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+			      struct rtnl_link_stats64 *s)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_vnd_stats total_stats;
+	struct rmnet_pcpu_stats *pcpu_ptr;
+	unsigned int cpu, start;
+
+	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
+
+	for_each_possible_cpu(cpu) {
+		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+		do {
+			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
+			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
+			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
+			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
+			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
+
+		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
+	}
+
+	s->rx_packets = total_stats.rx_pkts;
+	s->rx_bytes = total_stats.rx_bytes;
+	s->tx_packets = total_stats.tx_pkts;
+	s->tx_bytes = total_stats.tx_bytes;
+	s->tx_dropped = total_stats.tx_drops;
+}
+
 static const struct net_device_ops rmnet_vnd_ops = {
 	.ndo_start_xmit = rmnet_vnd_start_xmit,
 	.ndo_change_mtu = rmnet_vnd_change_mtu,
 	.ndo_get_iflink = rmnet_vnd_get_iflink,
 	.ndo_add_slave = rmnet_add_bridge,
 	.ndo_del_slave = rmnet_del_bridge,
+	.ndo_init = rmnet_vnd_init,
+	.ndo_uninit = rmnet_vnd_uninit,
+	.ndo_get_stats64 = rmnet_get_stats64,
 };
 
 /* Called by kernel whenever a new rmnet device is created. Sets MTU,