Commit fe21b269 authored by David S. Miller

Merge branch 'netvsc-minor-fixes-and-optimization'

Stephen Hemminger says:

====================
netvsc: minor fixes and optimization

This is a subset of an earlier submission, with a few more fixes
found during testing. There are two small optimizations: one is to
better manage the receive completion ring, and the other removes
one unneeded level of indirection.

Will submit the improved VF support and buffer sizing in a later
patch so they get more review.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
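Before the diff itself, a minimal standalone sketch of the second optimization, the removed indirection: netvsc_send() and its helpers used to take struct hv_page_buffer ** and dereference twice per access, and after this series take a plain pointer to the array. The struct below only mimics hv_page_buffer; this is a toy model, compiled with any C99 compiler, not the driver code.

#include <stdio.h>

struct hv_page_buffer {
	unsigned int len;
	unsigned int offset;
	unsigned long long pfn;
};

/* old style: pointer to pointer, two dereferences per access */
static unsigned int total_len_indirect(struct hv_page_buffer **pb, int n)
{
	unsigned int sum = 0;

	for (int i = 0; i < n; i++)
		sum += (*pb)[i].len;
	return sum;
}

/* new style: plain pointer to the first element, indexed directly */
static unsigned int total_len_direct(const struct hv_page_buffer *pb, int n)
{
	unsigned int sum = 0;

	for (int i = 0; i < n; i++)
		sum += pb[i].len;
	return sum;
}

int main(void)
{
	struct hv_page_buffer bufs[2] = { { .len = 100 }, { .len = 24 } };
	struct hv_page_buffer *pb = bufs;

	printf("%u %u\n", total_len_indirect(&pb, 2),
	       total_len_direct(bufs, 2));
	return 0;
}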
......@@ -186,11 +186,12 @@ struct net_device_context;
struct netvsc_device *netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct net_device_context *ndc,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer **page_buffer,
struct hv_page_buffer *page_buffer,
struct sk_buff *skb);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_message *resp);
......@@ -217,7 +218,8 @@ int rndis_filter_receive(struct net_device *ndev,
struct vmbus_channel *channel,
void *data, u32 buflen);
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
int rndis_filter_set_device_mac(struct netvsc_device *ndev,
const char *mac);
void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
......@@ -656,13 +658,10 @@ struct recv_comp_data {
u32 status;
};
/* Netvsc Receive Slots Max */
#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1)
struct multi_recv_comp {
void *buf; /* queued receive completions */
u32 first; /* first data entry */
u32 next; /* next entry for writing */
struct recv_comp_data *slots;
u32 first; /* first data entry */
u32 next; /* next entry for writing */
};
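The slots array that replaces the old buf blob behaves as a classic circular buffer. A minimal sketch of its invariants, with simplified field types (in the driver the array is vzalloc'ed with recv_completion_cnt entries):

/* Entries are produced at 'next' and consumed at 'first'; one slot is
 * always left unused so that first == next can unambiguously mean empty.
 */
struct recv_comp_data { unsigned long long tid; unsigned int status; };

struct multi_recv_comp {
	struct recv_comp_data *slots;	/* recv_completion_cnt entries */
	unsigned int first;		/* oldest queued completion */
	unsigned int next;		/* where the next one is written */
};

static inline int mrc_empty(const struct multi_recv_comp *mrc)
{
	return mrc->first == mrc->next;
}

static inline int mrc_full(const struct multi_recv_comp *mrc, unsigned int cnt)
{
	return (mrc->next + 1) % cnt == mrc->first;
}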
struct netvsc_stats {
......@@ -749,7 +748,7 @@ struct netvsc_device {
u32 recv_buf_size;
u32 recv_buf_gpadl_handle;
u32 recv_section_cnt;
struct nvsp_1_receive_buffer_section *recv_section;
u32 recv_completion_cnt;
/* Send buffer allocated by us */
void *send_buf;
......@@ -777,8 +776,6 @@ struct netvsc_device {
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
atomic_t num_outstanding_recvs;
atomic_t open_cnt;
struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
......
......@@ -72,9 +72,6 @@ static struct netvsc_device *alloc_net_device(void)
if (!net_device)
return NULL;
net_device->chan_table[0].mrc.buf
= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
atomic_set(&net_device->open_cnt, 0);
......@@ -92,7 +89,7 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
vfree(nvdev->chan_table[i].mrc.buf);
vfree(nvdev->chan_table[i].mrc.slots);
kfree(nvdev);
}
......@@ -171,12 +168,6 @@ static void netvsc_destroy_buf(struct hv_device *device)
net_device->recv_buf = NULL;
}
if (net_device->recv_section) {
net_device->recv_section_cnt = 0;
kfree(net_device->recv_section);
net_device->recv_section = NULL;
}
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
* NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
......@@ -239,11 +230,26 @@ static void netvsc_destroy_buf(struct hv_device *device)
kfree(net_device->send_section_map);
}
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
int node = cpu_to_node(nvchan->channel->target_cpu);
size_t size;
size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
nvchan->mrc.slots = vzalloc_node(size, node);
if (!nvchan->mrc.slots)
nvchan->mrc.slots = vzalloc(size);
return nvchan->mrc.slots ? 0 : -ENOMEM;
}
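The allocation above prefers memory on the NUMA node of the channel's target CPU and falls back to any node. The same try-local-then-anywhere shape as a standalone sketch; zalloc_on_node() and zalloc_any_node() are hypothetical stand-ins for vzalloc_node() and vzalloc():

#include <stdlib.h>

static void *zalloc_on_node(size_t size, int node)
{
	/* Pretend node-local memory is exhausted for odd nodes. */
	if (node & 1)
		return NULL;
	return calloc(1, size);
}

static void *zalloc_any_node(size_t size)
{
	return calloc(1, size);
}

/* Same shape as netvsc_alloc_recv_comp_ring(): try local, then fall back. */
static void *alloc_ring(size_t entries, size_t entry_size, int node)
{
	size_t size = entries * entry_size;
	void *slots = zalloc_on_node(size, node);

	if (!slots)
		slots = zalloc_any_node(size);
	return slots;	/* NULL here maps to -ENOMEM in the driver */
}

int main(void)
{
	void *r = alloc_ring(6656, 16, 3);	/* odd node exercises the fallback */

	free(r);
	return 0;
}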
static int netvsc_init_buf(struct hv_device *device,
struct netvsc_device *net_device)
{
int ret = 0;
struct nvsp_message *init_packet;
struct nvsp_1_message_send_receive_buffer_complete *resp;
struct net_device *ndev;
size_t map_words;
int node;
......@@ -300,43 +306,41 @@ static int netvsc_init_buf(struct hv_device *device,
wait_for_completion(&net_device->channel_init_wait);
/* Check the response */
if (init_packet->msg.v1_msg.
send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
netdev_err(ndev, "Unable to complete receive buffer "
"initialization with NetVsp - status %d\n",
init_packet->msg.v1_msg.
send_recv_buf_complete.status);
resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
if (resp->status != NVSP_STAT_SUCCESS) {
netdev_err(ndev,
"Unable to complete receive buffer initialization with NetVsp - status %d\n",
resp->status);
ret = -EINVAL;
goto cleanup;
}
/* Parse the response */
netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
resp->num_sections, resp->sections[0].sub_alloc_size,
resp->sections[0].num_sub_allocs);
net_device->recv_section_cnt = init_packet->msg.
v1_msg.send_recv_buf_complete.num_sections;
net_device->recv_section = kmemdup(
init_packet->msg.v1_msg.send_recv_buf_complete.sections,
net_device->recv_section_cnt *
sizeof(struct nvsp_1_receive_buffer_section),
GFP_KERNEL);
if (net_device->recv_section == NULL) {
ret = -EINVAL;
goto cleanup;
}
net_device->recv_section_cnt = resp->num_sections;
/*
* For 1st release, there should only be 1 section that represents the
* entire receive buffer
*/
if (net_device->recv_section_cnt != 1 ||
net_device->recv_section->offset != 0) {
resp->sections[0].offset != 0) {
ret = -EINVAL;
goto cleanup;
}
/* Now setup the send buffer.
*/
/* Setup receive completion ring */
net_device->recv_completion_cnt
= round_up(resp->sections[0].num_sub_allocs + 1,
PAGE_SIZE / sizeof(u64));
ret = netvsc_alloc_recv_comp_ring(net_device, 0);
if (ret)
goto cleanup;
/* Now setup the send buffer. */
net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
if (!net_device->send_buf)
net_device->send_buf = vzalloc(net_device->send_buf_size);
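The completion ring set up above is sized to one entry per receive sub-allocation, plus one (presumably for the slot the ring always keeps empty, see the count - filled - 1 math later in this patch), rounded up to a multiple of PAGE_SIZE / sizeof(u64), which is 512 entries on 4 KiB pages. A worked example with a hypothetical section count of 6388:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Same result as the kernel's round_up() for a power-of-two 'align'. */
#define round_up(x, align) ((((x) - 1) | ((align) - 1)) + 1)

int main(void)
{
	unsigned long num_sub_allocs = 6388;	/* hypothetical value */
	unsigned long step = PAGE_SIZE / sizeof(unsigned long long); /* 512 */
	unsigned long ring = round_up(num_sub_allocs + 1, step);

	printf("ring entries: %lu\n", ring);	/* prints 6656 */
	return 0;
}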
......@@ -697,7 +701,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
u32 pend_size,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer **pb,
struct hv_page_buffer *pb,
struct sk_buff *skb)
{
char *start = net_device->send_buf;
......@@ -718,9 +722,9 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
}
for (i = 0; i < page_count; i++) {
char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT);
u32 offset = (*pb)[i].offset;
u32 len = (*pb)[i].len;
char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT);
u32 offset = pb[i].offset;
u32 len = pb[i].len;
memcpy(dest, (src + offset), len);
msg_size += len;
......@@ -739,7 +743,7 @@ static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
struct hv_page_buffer **pb,
struct hv_page_buffer *pb,
struct sk_buff *skb)
{
struct nvsp_message nvmsg;
......@@ -750,7 +754,6 @@ static inline int netvsc_send_pkt(
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
......@@ -776,11 +779,11 @@ static inline int netvsc_send_pkt(
return -ENODEV;
if (packet->page_buf_cnt) {
pgbuf = packet->cp_partial ? (*pb) +
packet->rmsg_pgcnt : (*pb);
if (packet->cp_partial)
pb += packet->rmsg_pgcnt;
ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
pgbuf,
packet->page_buf_cnt,
pb, packet->page_buf_cnt,
&nvmsg,
sizeof(struct nvsp_message),
req_id,
......@@ -805,8 +808,10 @@ static inline int netvsc_send_pkt(
ret = -ENOSPC;
}
} else {
netdev_err(ndev, "Unable to send packet %p ret %d\n",
packet, ret);
netdev_err(ndev,
"Unable to send packet pages %u len %u, ret %d\n",
packet->page_buf_cnt, packet->total_data_buflen,
ret);
}
return ret;
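With the double indirection gone, the cp_partial case above becomes plain pointer arithmetic: the first rmsg_pgcnt entries describe RNDIS message bytes already copied into the send buffer, so the code simply advances pb past them. A toy model; in the driver the count passed down excludes the skipped entries because netvsc_send() subtracts rmsg_pgcnt in the cp_partial case:

#include <stdio.h>

struct hv_page_buffer { unsigned int len; };

static void send_pages(const struct hv_page_buffer *pb, unsigned int cnt)
{
	for (unsigned int i = 0; i < cnt; i++)
		printf("send %u bytes\n", pb[i].len);
}

int main(void)
{
	struct hv_page_buffer pb[4] = { {10}, {20}, {30}, {40} };
	unsigned int page_buf_cnt = 4, rmsg_pgcnt = 2;
	int cp_partial = 1;
	const struct hv_page_buffer *p = pb;

	if (cp_partial)
		p += rmsg_pgcnt;	/* was: pgbuf = (*pb) + rmsg_pgcnt */

	send_pages(p, page_buf_cnt - rmsg_pgcnt);	/* prints 30, 40 */
	return 0;
}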
......@@ -828,11 +833,11 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
int netvsc_send(struct net_device_context *ndev_ctx,
struct hv_netvsc_packet *packet,
struct rndis_message *rndis_msg,
struct hv_page_buffer **pb,
struct hv_page_buffer *pb,
struct sk_buff *skb)
{
struct netvsc_device *net_device
= rcu_dereference_rtnl(ndev_ctx->nvdev);
= rcu_dereference_bh(ndev_ctx->nvdev);
struct hv_device *device = ndev_ctx->device_ctx;
int ret = 0;
struct netvsc_channel *nvchan;
......@@ -950,130 +955,94 @@ int netvsc_send(struct net_device_context *ndev_ctx,
return ret;
}
static int netvsc_send_recv_completion(struct vmbus_channel *channel,
u64 transaction_id, u32 status)
/* Send pending recv completions */
static int send_recv_completions(struct netvsc_channel *nvchan)
{
struct nvsp_message recvcompMessage;
struct netvsc_device *nvdev = nvchan->net_device;
struct multi_recv_comp *mrc = &nvchan->mrc;
struct recv_comp_msg {
struct nvsp_message_header hdr;
u32 status;
} __packed;
struct recv_comp_msg msg = {
.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
};
int ret;
recvcompMessage.hdr.msg_type =
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;
/* Send the completion */
ret = vmbus_sendpacket(channel, &recvcompMessage,
sizeof(struct nvsp_message_header) + sizeof(u32),
transaction_id, VM_PKT_COMP, 0);
return ret;
}
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
u32 *filled, u32 *avail)
{
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 first = mrc->first;
u32 next = mrc->next;
while (mrc->first != mrc->next) {
const struct recv_comp_data *rcd
= mrc->slots + mrc->first;
*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
next - first;
msg.status = rcd->status;
ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
rcd->tid, VM_PKT_COMP, 0);
if (unlikely(ret))
return ret;
*avail = NETVSC_RECVSLOT_MAX - *filled - 1;
}
/* Read the first filled slot, no change to index */
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
*nvdev, u16 q_idx)
{
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail;
if (unlikely(!mrc->buf))
return NULL;
if (++mrc->first == nvdev->recv_completion_cnt)
mrc->first = 0;
}
count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
if (!filled)
return NULL;
/* receive completion ring has been emptied */
if (unlikely(nvdev->destroy))
wake_up(&nvdev->wait_drain);
return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
return 0;
}
/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
const struct multi_recv_comp *mrc,
u32 *filled, u32 *avail)
{
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
int num_recv;
u32 count = nvdev->recv_completion_cnt;
mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
if (mrc->next >= mrc->first)
*filled = mrc->next - mrc->first;
else
*filled = (count - mrc->first) + mrc->next;
if (nvdev->destroy && num_recv == 0)
wake_up(&nvdev->wait_drain);
*avail = count - *filled - 1;
}
/* Check and send pending recv completions */
static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
struct vmbus_channel *channel, u16 q_idx)
/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
struct netvsc_device *nvdev, u16 q_idx,
u64 tid, u32 status)
{
struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
struct multi_recv_comp *mrc = &nvchan->mrc;
struct recv_comp_data *rcd;
int ret;
while (true) {
rcd = read_recv_comp_slot(nvdev, q_idx);
if (!rcd)
break;
u32 filled, avail;
ret = netvsc_send_recv_completion(channel, rcd->tid,
rcd->status);
if (ret)
break;
recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
put_recv_comp_slot(nvdev, q_idx);
if (unlikely(filled > NAPI_POLL_WEIGHT)) {
send_recv_completions(nvchan);
recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
}
}
#define NETVSC_RCD_WATERMARK 80
/* Get next available slot */
static inline struct recv_comp_data *get_recv_comp_slot(
struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail, next;
struct recv_comp_data *rcd;
if (unlikely(!nvdev->recv_section))
return NULL;
if (unlikely(!mrc->buf))
return NULL;
if (atomic_read(&nvdev->num_outstanding_recvs) >
nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100)
netvsc_chk_recv_comp(nvdev, channel, q_idx);
count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
if (!avail)
return NULL;
next = mrc->next;
rcd = mrc->buf + next * sizeof(struct recv_comp_data);
mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
if (unlikely(!avail)) {
netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
q_idx, tid);
return;
}
atomic_inc(&nvdev->num_outstanding_recvs);
rcd = mrc->slots + mrc->next;
rcd->tid = tid;
rcd->status = status;
return rcd;
if (++mrc->next == nvdev->recv_completion_cnt)
mrc->next = 0;
}
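Putting the new helpers together: a compressed userspace model of the per-channel completion ring, with hypothetical sizes (RING_CNT stands in for recv_completion_cnt, BUDGET for NAPI_POLL_WEIGHT). As in enq_receive_complete() and send_recv_completions(), completions are queued at next, drained from first, and a flush is forced once more than a budget's worth are pending:

#include <stdio.h>

#define RING_CNT 8
#define BUDGET   3

struct recv_comp_data { unsigned long long tid; unsigned int status; };

static struct recv_comp_data slots[RING_CNT];
static unsigned int first, next;	/* first == next means empty */

static unsigned int filled(void)
{
	return next >= first ? next - first : RING_CNT - first + next;
}

/* Model of send_recv_completions(): drain everything queued so far. */
static void drain(void)
{
	while (first != next) {
		printf("complete tid=%llu\n", slots[first].tid);
		if (++first == RING_CNT)
			first = 0;
	}
}

/* Model of enq_receive_complete(): queue one, flush above the budget. */
static void enqueue(unsigned long long tid, unsigned int status)
{
	if (filled() > BUDGET)
		drain();

	if (RING_CNT - filled() - 1 == 0) {	/* one slot kept empty */
		fprintf(stderr, "ring full, completion dropped\n");
		return;
	}
	slots[next].tid = tid;
	slots[next].status = status;
	if (++next == RING_CNT)
		next = 0;
}

int main(void)
{
	for (unsigned long long tid = 1; tid <= 10; tid++)
		enqueue(tid, 0);
	drain();
	return 0;
}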
static int netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
struct net_device_context *net_device_ctx,
struct hv_device *device,
struct vmbus_channel *channel,
const struct vmpacket_descriptor *desc,
struct nvsp_message *nvsp)
struct netvsc_device *net_device,
struct net_device_context *net_device_ctx,
struct hv_device *device,
struct vmbus_channel *channel,
const struct vmpacket_descriptor *desc,
struct nvsp_message *nvsp)
{
const struct vmtransfer_page_packet_header *vmxferpage_packet
= container_of(desc, const struct vmtransfer_page_packet_header, d);
......@@ -1082,7 +1051,6 @@ static int netvsc_receive(struct net_device *ndev,
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
int ret;
/* Make sure this is a valid nvsp packet */
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
......@@ -1113,25 +1081,9 @@ static int netvsc_receive(struct net_device *ndev,
channel, data, buflen);
}
if (net_device->chan_table[q_idx].mrc.buf) {
struct recv_comp_data *rcd;
enq_receive_complete(ndev, net_device, q_idx,
vmxferpage_packet->d.trans_id, status);
rcd = get_recv_comp_slot(net_device, channel, q_idx);
if (rcd) {
rcd->tid = vmxferpage_packet->d.trans_id;
rcd->status = status;
} else {
netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
q_idx, vmxferpage_packet->d.trans_id);
}
} else {
ret = netvsc_send_recv_completion(channel,
vmxferpage_packet->d.trans_id,
status);
if (ret)
netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
q_idx, vmxferpage_packet->d.trans_id, ret);
}
return count;
}
......@@ -1230,7 +1182,6 @@ int netvsc_poll(struct napi_struct *napi, int budget)
struct netvsc_device *net_device = nvchan->net_device;
struct vmbus_channel *channel = nvchan->channel;
struct hv_device *device = netvsc_channel_to_device(channel);
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct net_device *ndev = hv_get_drvdata(device);
int work_done = 0;
......@@ -1244,17 +1195,23 @@ int netvsc_poll(struct napi_struct *napi, int budget)
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
/* If receive ring was exhausted
* and not doing busy poll
/* if ring is empty, signal host */
if (!nvchan->desc)
hv_pkt_iter_close(channel);
/* If send of pending receive completions succeeded
* and did not exhaust NAPI budget this time
* and not doing busy poll
* then re-enable host interrupts
* and reschedule if ring is not empty.
* and reschedule if ring is not empty.
*/
if (work_done < budget &&
if (send_recv_completions(nvchan) == 0 &&
work_done < budget &&
napi_complete_done(napi, work_done) &&
hv_end_read(&channel->inbound) != 0)
hv_end_read(&channel->inbound)) {
hv_begin_read(&channel->inbound);
napi_reschedule(napi);
netvsc_chk_recv_comp(net_device, channel, q_idx);
}
/* Driver may overshoot since multiple packets per descriptor */
return min(work_done, budget);
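The reworked poll exit guards a classic re-arm race: interrupts are re-enabled only if the pending receive completions were all sent and the budget was not exhausted, and after napi_complete_done() the ring must be re-checked, because the host can post data in the window before the interrupt is re-armed. A toy model of just that ordering; every name here is a stand-in, not a Hyper-V API:

#include <stdbool.h>
#include <stdio.h>

static int ring_pending;	/* packets the "host" has queued */
static bool irq_enabled;

static bool napi_complete_done_model(void)
{
	irq_enabled = true;	/* interrupts back on */
	return true;
}

static int end_read_model(void)
{
	return ring_pending;	/* nonzero: data raced in, keep polling */
}

static void poll_exit_model(int work_done, int budget, bool sent_all)
{
	if (sent_all && work_done < budget &&
	    napi_complete_done_model() && end_read_model()) {
		irq_enabled = false;	/* hv_begin_read() equivalent */
		printf("reschedule: data arrived during re-arm\n");
	}
}

int main(void)
{
	ring_pending = 1;	/* host queues a packet at the worst moment */
	poll_exit_model(2, 64, true);
	return 0;
}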
......@@ -1355,7 +1312,8 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
return net_device;
close:
netif_napi_del(&net_device->chan_table[0].napi);
RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
napi_disable(&net_device->chan_table[0].napi);
/* Now, we can close the channel safely */
vmbus_close(device->channel);
......
......@@ -282,9 +282,8 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
struct hv_netvsc_packet *packet,
struct hv_page_buffer **page_buf)
struct hv_page_buffer *pb)
{
struct hv_page_buffer *pb = *page_buf;
u32 slots_used = 0;
char *data = skb->data;
int frags = skb_shinfo(skb)->nr_frags;
......@@ -359,8 +358,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 rndis_msg_size;
struct rndis_per_packet_info *ppi;
u32 hash;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
/* We can only transmit MAX_PAGE_BUFFER_COUNT number
* of pages in a single packet. If skb is scattered around
......@@ -503,12 +501,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
rndis_msg->msg_len += rndis_msg_size;
packet->total_data_buflen = rndis_msg->msg_len;
packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
skb, packet, &pb);
skb, packet, pb);
/* timestamp packet in software */
skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx, packet, rndis_msg, &pb, skb);
ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
if (likely(ret == 0))
return NETDEV_TX_OK;
......@@ -758,8 +756,8 @@ static int netvsc_set_channels(struct net_device *net,
if (!IS_ERR(nvdev)) {
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
ret = PTR_ERR(nvdev);
} else {
ret = PTR_ERR(nvdev);
device_info.num_chn = orig;
rndis_filter_device_add(dev, &device_info);
}
......@@ -923,6 +921,8 @@ static void netvsc_get_stats64(struct net_device *net,
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
struct net_device_context *ndc = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
struct sockaddr *addr = p;
char save_adr[ETH_ALEN];
unsigned char save_aatype;
......@@ -935,7 +935,10 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
if (err != 0)
return err;
err = rndis_filter_set_device_mac(ndev, addr->sa_data);
if (!nvdev)
return -ENODEV;
err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
if (err != 0) {
/* roll back to saved MAC */
memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
......@@ -981,7 +984,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
const void *nds = &ndc->eth_stats;
const struct netvsc_stats *qstats;
unsigned int start;
......@@ -1019,7 +1022,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
u8 *p = data;
int i;
......@@ -1077,7 +1080,7 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev);
struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
if (!nvdev)
return -ENODEV;
......@@ -1127,7 +1130,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = rcu_dereference(ndc->nvdev);
struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
struct rndis_device *rndis_dev;
int i;
......
......@@ -85,14 +85,6 @@ static struct rndis_device *get_rndis_device(void)
return device;
}
static struct netvsc_device *
net_device_to_netvsc_device(struct net_device *ndev)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
return rtnl_dereference(net_device_ctx->nvdev);
}
static struct rndis_request *get_rndis_request(struct rndis_device *dev,
u32 msg_type,
u32 msg_len)
......@@ -222,11 +214,11 @@ static void dump_rndis_message(struct hv_device *hv_dev,
static int rndis_filter_send_request(struct rndis_device *dev,
struct rndis_request *req)
{
int ret;
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
int ret;
/* Setup the packet to send it */
packet = &req->pkt;
......@@ -252,7 +244,10 @@ static int rndis_filter_send_request(struct rndis_device *dev,
pb[0].len;
}
ret = netvsc_send(net_device_ctx, packet, NULL, &pb, NULL);
rcu_read_lock_bh();
ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
rcu_read_unlock_bh();
return ret;
}
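A sketch of why the rcu_read_lock_bh() pair appears here (kernel-context pseudocode, not standalone-compilable): netvsc_send() now uses rcu_dereference_bh(), which is only valid while bottom halves are disabled. The regular transmit path already runs in softirq context, but this RNDIS control path can be called from process context, so it must create that environment itself:

/* Illustrative only; types come from the driver's headers. */
static int send_from_process_context(struct net_device_context *ndc,
				     struct hv_netvsc_packet *packet,
				     struct hv_page_buffer *pb)
{
	int ret;

	rcu_read_lock_bh();	/* disables BHs, opens the RCU section */
	ret = netvsc_send(ndc, packet, NULL, pb, NULL);
	rcu_read_unlock_bh();

	return ret;
}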
......@@ -452,8 +447,9 @@ int rndis_filter_receive(struct net_device *ndev,
return 0;
}
static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
void *result, u32 *result_size)
static int rndis_filter_query_device(struct rndis_device *dev,
struct netvsc_device *nvdev,
u32 oid, void *result, u32 *result_size)
{
struct rndis_request *request;
u32 inresult_size = *result_size;
......@@ -480,8 +476,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
query->dev_vc_handle = 0;
if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
struct net_device_context *ndevctx = netdev_priv(dev->ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
struct ndis_offload *hwcaps;
u32 nvsp_version = nvdev->nvsp_version;
u8 ndis_rev;
......@@ -550,14 +544,15 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
/* Get the hardware offload capabilities */
static int
rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
struct ndis_offload *caps)
{
u32 caps_len = sizeof(*caps);
int ret;
memset(caps, 0, sizeof(*caps));
ret = rndis_filter_query_device(dev,
ret = rndis_filter_query_device(dev, net_device,
OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
caps, &caps_len);
if (ret)
......@@ -586,11 +581,12 @@ rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
return 0;
}
static int rndis_filter_query_device_mac(struct rndis_device *dev)
static int rndis_filter_query_device_mac(struct rndis_device *dev,
struct netvsc_device *net_device)
{
u32 size = ETH_ALEN;
return rndis_filter_query_device(dev,
return rndis_filter_query_device(dev, net_device,
RNDIS_OID_802_3_PERMANENT_ADDRESS,
dev->hw_mac_adr, &size);
}
......@@ -598,9 +594,9 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
const char *mac)
{
struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev);
struct rndis_device *rdev = nvdev->extension;
struct rndis_request *request;
struct rndis_set_request *set;
......@@ -654,11 +650,8 @@ int rndis_filter_set_device_mac(struct net_device *ndev, char *mac)
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
if (set_complete->status != RNDIS_STATUS_SUCCESS) {
netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
set_complete->status);
ret = -EINVAL;
}
if (set_complete->status != RNDIS_STATUS_SUCCESS)
ret = -EIO;
cleanup:
put_rndis_request(rdev, request);
......@@ -791,27 +784,27 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
return ret;
}
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
static int rndis_filter_query_device_link_status(struct rndis_device *dev,
struct netvsc_device *net_device)
{
u32 size = sizeof(u32);
u32 link_status;
int ret;
ret = rndis_filter_query_device(dev,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
&link_status, &size);
return ret;
return rndis_filter_query_device(dev, net_device,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
&link_status, &size);
}
static int rndis_filter_query_link_speed(struct rndis_device *dev)
static int rndis_filter_query_link_speed(struct rndis_device *dev,
struct netvsc_device *net_device)
{
u32 size = sizeof(u32);
u32 link_speed;
struct net_device_context *ndc;
int ret;
ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED,
ret = rndis_filter_query_device(dev, net_device,
RNDIS_OID_GEN_LINK_SPEED,
&link_speed, &size);
if (!ret) {
......@@ -880,14 +873,14 @@ void rndis_filter_update(struct netvsc_device *nvdev)
schedule_work(&rdev->mcast_work);
}
static int rndis_filter_init_device(struct rndis_device *dev)
static int rndis_filter_init_device(struct rndis_device *dev,
struct netvsc_device *nvdev)
{
struct rndis_request *request;
struct rndis_initialize_request *init;
struct rndis_initialize_complete *init_complete;
u32 status;
int ret;
struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev);
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
......@@ -935,12 +928,12 @@ static bool netvsc_device_idle(const struct netvsc_device *nvdev)
{
int i;
if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
return false;
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
if (nvchan->mrc.first != nvchan->mrc.next)
return false;
if (atomic_read(&nvchan->queue_sends) > 0)
return false;
}
......@@ -1024,20 +1017,20 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
struct net_device *ndev =
hv_get_drvdata(new_sc->primary_channel->device_obj);
struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev);
struct net_device_context *ndev_ctx = netdev_priv(ndev);
struct netvsc_device *nvscdev;
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
struct netvsc_channel *nvchan;
int ret;
if (chn_index >= nvscdev->num_chn)
/* This is safe because this callback only happens when
* new device is being set up and waiting on the channel_init_wait.
*/
nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
if (!nvscdev || chn_index >= nvscdev->num_chn)
return;
nvchan = nvscdev->chan_table + chn_index;
nvchan->mrc.buf
= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
if (!nvchan->mrc.buf)
return;
/* Because the device uses NAPI, all the interrupt batching and
* control is done via Net softirq, not the channel handling
......@@ -1104,27 +1097,27 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
rndis_device->ndev = net;
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device);
ret = rndis_filter_init_device(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
/* Get the MTU from the host */
size = sizeof(u32);
ret = rndis_filter_query_device(rndis_device,
ret = rndis_filter_query_device(rndis_device, net_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
net->mtu = mtu;
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
ret = rndis_filter_query_device_mac(rndis_device, net_device);
if (ret != 0)
goto err_dev_remv;
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, &hwcaps);
ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps);
if (ret != 0)
goto err_dev_remv;
......@@ -1185,7 +1178,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
if (ret)
goto err_dev_remv;
rndis_filter_query_device_link_status(rndis_device);
rndis_filter_query_device_link_status(rndis_device, net_device);
netdev_dbg(net, "Device MAC %pM link state %s\n",
rndis_device->hw_mac_adr,
......@@ -1194,11 +1187,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return net_device;
rndis_filter_query_link_speed(rndis_device);
rndis_filter_query_link_speed(rndis_device, net_device);
/* vRSS setup */
memset(&rsscap, 0, rsscap_size);
ret = rndis_filter_query_device(rndis_device,
ret = rndis_filter_query_device(rndis_device, net_device,
OID_GEN_RECEIVE_SCALE_CAPABILITIES,
&rsscap, &rsscap_size);
if (ret || rsscap.num_recv_que < 2)
......@@ -1227,6 +1220,15 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
if (num_rss_qs == 0)
return net_device;
for (i = 1; i < net_device->num_chn; i++) {
ret = netvsc_alloc_recv_comp_ring(net_device, i);
if (ret) {
while (--i != 0)
vfree(net_device->chan_table[i].mrc.slots);
goto out;
}
}
refcount_set(&net_device->sc_offered, num_rss_qs);
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
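The per-subchannel allocation loop above unwinds on partial failure. Note that while (--i != 0) deliberately stops before index 0: queue 0's ring was already allocated in netvsc_init_buf() and is freed with the rest of the device state. The same pattern as a standalone sketch:

#include <stdlib.h>

#define NCHAN 8

static void *rings[NCHAN];

/* Ring 0 is owned by init code; rings 1..num_chn-1 are allocated here
 * and must be unwound if any one of them fails.
 */
static int alloc_subchannel_rings(int num_chn, size_t size)
{
	int i;

	for (i = 1; i < num_chn; i++) {
		rings[i] = calloc(1, size);
		if (!rings[i]) {
			while (--i != 0) {	/* frees i-1 .. 1, skips 0 */
				free(rings[i]);
				rings[i] = NULL;
			}
			return -1;	/* -ENOMEM in the driver */
		}
	}
	return 0;
}

int main(void)
{
	return alloc_subchannel_rings(NCHAN, 4096) ? 1 : 0;
}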
......