Commit b8278f2c authored by David S. Miller

Merge branch 'hv_netvsc-minor-optimizations'

Stephen Hemminger says:

====================
hv_netvsc: minor optimizations

These are a set of local optimizations for the Hyper-V networking driver.
A vmbus patch is also included in this set, because it depends on the
netvsc change that removed the last outside user of that function.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -140,6 +140,29 @@ static u32 hv_copyto_ringbuffer(
return start_write_offset;
}
/*
*
* hv_get_ringbuffer_availbytes()
*
* Get number of bytes available to read and to write to
* for the specified ring buffer
*/
static void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
u32 *read, u32 *write)
{
u32 read_loc, write_loc, dsize;
/* Capture the read/write indices before they changed */
read_loc = READ_ONCE(rbi->ring_buffer->read_index);
write_loc = READ_ONCE(rbi->ring_buffer->write_index);
dsize = rbi->ring_datasize;
*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
read_loc - write_loc;
*read = dsize - *write;
}
/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info)
......
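For reference, the helper moved into the ring-buffer code above snapshots the read and write indices (now wrapped in READ_ONCE) and derives the writable and readable byte counts from them. Below is a minimal userspace sketch of that arithmetic with a made-up descriptor type and values; nothing in it is taken from the driver itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of a ring buffer descriptor; field names only loosely mirror the driver. */
struct toy_ring {
	uint32_t read_index;   /* next byte the consumer will read */
	uint32_t write_index;  /* next byte the producer will write */
	uint32_t datasize;     /* total capacity in bytes */
};

/* Same formula as hv_get_ringbuffer_availbytes(): bytes writable and bytes readable. */
static void toy_availbytes(const struct toy_ring *rb, uint32_t *read, uint32_t *write)
{
	uint32_t r = rb->read_index, w = rb->write_index, size = rb->datasize;

	*write = w >= r ? size - (w - r) : r - w;
	*read = size - *write;
}

int main(void)
{
	struct toy_ring rb = { .read_index = 100, .write_index = 300, .datasize = 4096 };
	uint32_t to_read, to_write;

	toy_availbytes(&rb, &to_read, &to_write);
	printf("writable=%u readable=%u\n", to_write, to_read);	/* 3896 and 200 */
	assert(to_write == 4096 - 200 && to_read == 200);
	return 0;
}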
......@@ -146,7 +146,6 @@ struct hv_netvsc_packet {
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
int ring_size;
u32 num_chn;
u32 send_sections;
u32 recv_sections;
......@@ -188,6 +187,9 @@ struct rndis_message;
struct netvsc_device;
struct net_device_context;
extern u32 netvsc_ring_bytes;
extern struct reciprocal_value netvsc_ring_reciprocal;
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
......@@ -804,8 +806,6 @@ struct netvsc_device {
struct rndis_device *extension;
int ring_size;
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
......@@ -1425,32 +1425,6 @@ struct rndis_message {
(sizeof(msg) + (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container)))
/* get pointer to info buffer with message pointer */
#define MESSAGE_TO_INFO_BUFFER(msg) \
(((unsigned char *)(msg)) + msg->info_buf_offset)
/* get pointer to status buffer with message pointer */
#define MESSAGE_TO_STATUS_BUFFER(msg) \
(((unsigned char *)(msg)) + msg->status_buf_offset)
/* get pointer to OOBD buffer with message pointer */
#define MESSAGE_TO_OOBD_BUFFER(msg) \
(((unsigned char *)(msg)) + msg->oob_data_offset)
/* get pointer to data buffer with message pointer */
#define MESSAGE_TO_DATA_BUFFER(msg) \
(((unsigned char *)(msg)) + msg->per_pkt_info_offset)
/* get pointer to contained message from NDIS_MESSAGE pointer */
#define RNDIS_MESSAGE_PTR_TO_MESSAGE_PTR(rndis_msg) \
((void *) &rndis_msg->msg)
/* get pointer to contained message from NDIS_MESSAGE pointer */
#define RNDIS_MESSAGE_RAW_PTR_TO_MESSAGE_PTR(rndis_msg) \
((void *) rndis_msg)
#define RNDIS_HEADER_SIZE (sizeof(struct rndis_message) - \
sizeof(union rndis_message_container))
......
......@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <linux/reciprocal_div.h>
#include <asm/sync_bitops.h>
......@@ -588,14 +589,11 @@ void netvsc_device_remove(struct hv_device *device)
* Get the percentage of available bytes to write in the ring.
* The return value is in range from 0 to 100.
*/
static inline u32 hv_ringbuf_avail_percent(
struct hv_ring_buffer_info *ring_info)
static u32 hv_ringbuf_avail_percent(const struct hv_ring_buffer_info *ring_info)
{
u32 avail_read, avail_write;
u32 avail_write = hv_get_bytes_to_write(ring_info);
hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
return avail_write * 100 / ring_info->ring_datasize;
return reciprocal_divide(avail_write * 100, netvsc_ring_reciprocal);
}
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
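The hv_ringbuf_avail_percent() change above trades a per-packet division for a multiply: the divisor (the ring size in bytes) is fixed at module load, so netvsc_ring_reciprocal is computed once with reciprocal_value() and the hot path calls reciprocal_divide(). Below is a rough userspace illustration of the multiply-and-shift idea; it is a simplification, not the algorithm in linux/reciprocal_div.h, and is only guaranteed exact for power-of-two divisors.

#include <stdint.h>
#include <stdio.h>

struct toy_reciprocal {
	uint64_t m;	/* ceil(2^32 / divisor), precomputed once */
};

static struct toy_reciprocal toy_reciprocal_value(uint32_t d)
{
	struct toy_reciprocal r = { .m = ((1ULL << 32) + d - 1) / d };
	return r;
}

/* One 64-bit multiply and a shift instead of a divide.  For non-power-of-two
 * divisors this simplified form can be off by one; the kernel helper adds
 * correction shifts to make it exact for any divisor. */
static uint32_t toy_reciprocal_divide(uint32_t a, struct toy_reciprocal r)
{
	return (uint32_t)(((uint64_t)a * r.m) >> 32);
}

int main(void)
{
	uint32_t ring_bytes = 128 * 4096;	/* e.g. a 128-page ring with 4 KiB pages */
	struct toy_reciprocal r = toy_reciprocal_value(ring_bytes);
	uint32_t avail_write = 200000;

	printf("exact=%u recip=%u\n",
	       avail_write * 100 / ring_bytes,
	       toy_reciprocal_divide(avail_write * 100, r));
	return 0;
}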
......@@ -712,11 +710,12 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
int i;
u32 msg_size = 0;
u32 padding = 0;
u32 remain = packet->total_data_buflen % net_device->pkt_align;
u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
packet->page_buf_cnt;
u32 remain;
/* Add padding */
remain = packet->total_data_buflen & (net_device->pkt_align - 1);
if (skb->xmit_more && remain && !packet->cp_partial) {
padding = net_device->pkt_align - remain;
rndis_msg->msg_len += padding;
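The padding computation above swaps a modulo for a bitwise AND: pkt_align is a power of two, so x % align equals x & (align - 1) while avoiding a division. A quick standalone check of that identity (the alignment value of 8 is only an example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t align = 8;	/* must be a power of two for the identity to hold */

	for (uint32_t len = 0; len < 10000; len++)
		assert(len % align == (len & (align - 1)));
	return 0;
}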
......@@ -848,7 +847,6 @@ int netvsc_send(struct net_device_context *ndev_ctx,
struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
struct sk_buff *msd_skb = NULL;
bool try_batch;
bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
/* If device is rescinded, return error and packet will get dropped. */
if (unlikely(!net_device || net_device->destroy))
......@@ -922,7 +920,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
if (msdp->skb)
dev_consume_skb_any(msdp->skb);
if (xmit_more && !packet->cp_partial) {
if (skb->xmit_more && !packet->cp_partial) {
msdp->skb = skb;
msdp->pkt = packet;
msdp->count++;
......@@ -1249,7 +1247,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *device_info)
{
int i, ret = 0;
int ring_size = device_info->ring_size;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
......@@ -1261,8 +1258,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
net_device_ctx->tx_table[i] = 0;
net_device->ring_size = ring_size;
/* Because the device uses NAPI, all the interrupt batching and
* control is done via Net softirq, not the channel handling
*/
......@@ -1289,10 +1284,9 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_poll, NAPI_POLL_WEIGHT);
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
netvsc_channel_cb,
net_device->chan_table);
ret = vmbus_open(device->channel, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, net_device->chan_table);
if (ret != 0) {
netif_napi_del(&net_device->chan_table[0].napi);
......
......@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/reciprocal_div.h>
#include <net/arp.h>
#include <net/route.h>
......@@ -54,9 +55,11 @@
#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;
struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
......@@ -174,17 +177,15 @@ static int netvsc_close(struct net_device *net)
return ret;
}
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
int pkt_type)
static inline void *init_ppi_data(struct rndis_message *msg,
u32 ppi_size, u32 pkt_type)
{
struct rndis_packet *rndis_pkt;
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
struct rndis_per_packet_info *ppi;
rndis_pkt = &msg->msg.pkt;
rndis_pkt->data_offset += ppi_size;
ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);
ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
+ rndis_pkt->per_pkt_info_len;
ppi->size = ppi_size;
ppi->type = pkt_type;
......@@ -192,7 +193,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
rndis_pkt->per_pkt_info_len += ppi_size;
return ppi;
return ppi + 1;
}
/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
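With the change above, init_ppi_data() returns ppi + 1, a pointer just past the per-packet-info header where the per-packet data begins, so the callers in netvsc_start_xmit() below no longer need the (void *)ppi + ppi->ppi_offset arithmetic. A small standalone illustration of the hdr + 1 pattern, using a made-up header type (names and layout are assumptions for illustration only):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Hypothetical header that is immediately followed by its payload in one buffer. */
struct toy_ppi {
	uint32_t size;	/* bytes of payload that follow this header */
	uint32_t type;
};

int main(void)
{
	unsigned char *buf = malloc(sizeof(struct toy_ppi) + sizeof(uint32_t));
	struct toy_ppi *ppi = (struct toy_ppi *)buf;
	uint32_t *data;

	if (!buf)
		return 1;

	ppi->size = sizeof(uint32_t);
	ppi->type = 1;

	/* "ppi + 1" advances by sizeof(*ppi), landing on the payload area. */
	data = (uint32_t *)(ppi + 1);
	*data = 0xdeadbeef;

	printf("payload sits %zu bytes into the buffer and holds %#x\n",
	       sizeof(struct toy_ppi), *data);
	free(buf);
	return 0;
}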
......@@ -469,10 +470,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
int ret;
unsigned int num_data_pgs;
struct rndis_message *rndis_msg;
struct rndis_packet *rndis_pkt;
struct net_device *vf_netdev;
u32 rndis_msg_size;
struct rndis_per_packet_info *ppi;
u32 hash;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
......@@ -527,34 +526,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg = (struct rndis_message *)skb->head;
memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);
/* Add the rndis header */
rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
rndis_msg->msg_len = packet->total_data_buflen;
rndis_pkt = &rndis_msg->msg.pkt;
rndis_pkt->data_offset = sizeof(struct rndis_packet);
rndis_pkt->data_len = packet->total_data_buflen;
rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);
rndis_msg->msg.pkt = (struct rndis_packet) {
.data_offset = sizeof(struct rndis_packet),
.data_len = packet->total_data_buflen,
.per_pkt_info_offset = sizeof(struct rndis_packet),
};
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
hash = skb_get_hash_raw(skb);
if (hash != 0 && net->real_num_tx_queues > 1) {
u32 *hash_info;
rndis_msg_size += NDIS_HASH_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
NBL_HASH_VALUE);
*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
NBL_HASH_VALUE);
*hash_info = hash;
}
if (skb_vlan_tag_present(skb)) {
struct ndis_pkt_8021q_info *vlan;
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
IEEE_8021Q_INFO);
vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
IEEE_8021Q_INFO);
vlan = (void *)ppi + ppi->ppi_offset;
vlan->value = 0;
vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
VLAN_PRIO_SHIFT;
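The RNDIS header setup above also switches from field-by-field assignment to a single C99 compound-literal assignment; members not named in the initializer are implicitly zeroed. A small sketch of that idiom with a made-up struct (not the real rndis_packet layout):

#include <stdio.h>
#include <stdint.h>

/* Made-up stand-in for an RNDIS-style packet header. */
struct toy_pkt {
	uint32_t data_offset;
	uint32_t data_len;
	uint32_t oob_data_offset;	/* not named below, so zero-initialized */
	uint32_t per_pkt_info_offset;
};

int main(void)
{
	struct toy_pkt pkt;

	/* Compound-literal assignment: named members get their values,
	 * every other member is implicitly zeroed. */
	pkt = (struct toy_pkt) {
		.data_offset = sizeof(struct toy_pkt),
		.data_len = 1500,
		.per_pkt_info_offset = sizeof(struct toy_pkt),
	};

	printf("data_offset=%u data_len=%u oob=%u ppi=%u\n",
	       pkt.data_offset, pkt.data_len,
	       pkt.oob_data_offset, pkt.per_pkt_info_offset);
	return 0;
}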
......@@ -564,11 +565,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO);
lso_info = (void *)ppi + ppi->ppi_offset;
lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
TCP_LARGESEND_PKTINFO);
lso_info->value = 0;
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
......@@ -593,12 +593,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct ndis_tcp_ip_checksum_info *csum_info;
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
ppi->ppi_offset);
csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
csum_info->value = 0;
csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
......@@ -860,7 +858,6 @@ static int netvsc_set_channels(struct net_device *net,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = count;
device_info.ring_size = ring_size;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
......@@ -975,7 +972,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
rndis_filter_close(nvdev);
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
device_info.send_section_size = nvdev->send_section_size;
......@@ -1539,7 +1535,6 @@ static int netvsc_set_ringparam(struct net_device *ndev,
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn;
device_info.ring_size = ring_size;
device_info.send_sections = new_tx;
device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = new_rx;
......@@ -1995,7 +1990,6 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
......@@ -2158,11 +2152,13 @@ static int __init netvsc_drv_init(void)
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
pr_info("Increased ring_size to %d (min allowed)\n",
pr_info("Increased ring_size to %u (min allowed)\n",
ring_size);
}
ret = vmbus_driver_register(&netvsc_drv);
netvsc_ring_bytes = ring_size * PAGE_SIZE;
netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
ret = vmbus_driver_register(&netvsc_drv);
if (ret)
return ret;
......
......@@ -1040,8 +1040,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
/* Set the channel before opening.*/
nvchan->channel = new_sc;
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
ret = vmbus_open(new_sc, netvsc_ring_bytes,
netvsc_ring_bytes, NULL, 0,
netvsc_channel_cb, nvchan);
if (ret == 0)
napi_enable(&nvchan->napi);
......
......@@ -127,28 +127,6 @@ struct hv_ring_buffer_info {
u32 priv_read_index;
};
/*
*
* hv_get_ringbuffer_availbytes()
*
* Get number of bytes available to read and to write to
* for the specified ring buffer
*/
static inline void
hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
u32 *read, u32 *write)
{
u32 read_loc, write_loc, dsize;
/* Capture the read/write indices before they changed */
read_loc = rbi->ring_buffer->read_index;
write_loc = rbi->ring_buffer->write_index;
dsize = rbi->ring_datasize;
*write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
read_loc - write_loc;
*read = dsize - *write;
}
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
......