Commit a8545b60 authored by David S. Miller

Merge branch 'hv_netvsc-VF-removal-fixes'

Vitaly Kuznetsov says:

====================
hv_netvsc: fixes for VF removal path

A kernel crash is reported after a VF is removed and detached from the netvsc
device. It turns out we have multiple different (but related) issues on the
VF removal path, which I'm trying to address with PATCHes 2-5 of this
series. PATCH1 is required to support the change.

Changes since v1:
- Re-arrange patches in the series to not introduce new issues [David Miller]
- Add PATCH5 which fixes a new issue I discovered while testing.
- Add Haiyang's Acked-by tags to PATCH1-4

With regard to Stephen's suggestion: I believe that switching to RCU and
eliminating vf_use_cnt/vf_inject is the right thing to do long-term; we
can either put this on top of this series or do it later in net-next.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
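
As background for Stephen's suggestion quoted above, the following is a minimal sketch of what an RCU-based scheme could look like; it is illustrative only and not part of this series. The idea is that vf_netdev becomes an __rcu pointer in net_device_context, the receive path pins it under rcu_read_lock()/rcu_dereference(), and the teardown path clears the pointer and calls synchronize_net(), making vf_inject and the vf_use_cnt busy-wait unnecessary. The helper names netvsc_recv_inject() and netvsc_vf_detach() are hypothetical.

struct net_device_context {
	/* ... existing fields ... */
	struct net_device __rcu *vf_netdev;	/* would replace vf_netdev + vf_inject + vf_use_cnt */
};

/* Receive path: pin the VF only for the duration of the injection. */
static int netvsc_recv_inject(struct net_device_context *ndc,
			      struct sk_buff *skb)
{
	struct net_device *vf_netdev;
	int ret = 0;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		skb->dev = vf_netdev;		/* deliver via the VF interface */
		netif_receive_skb(skb);
	} else {
		ret = -ENODEV;			/* caller falls back to the synthetic path */
	}
	rcu_read_unlock();
	return ret;
}

/* Teardown path: unpublish the pointer, then wait for in-flight readers. */
static void netvsc_vf_detach(struct net_device_context *ndc)
{
	RCU_INIT_POINTER(ndc->vf_netdev, NULL);
	synchronize_net();	/* replaces the vf_use_cnt/udelay() drain loop */
}

Registration would then publish the VF with rcu_assign_pointer() once it is fully set up.
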
@@ -644,12 +644,6 @@ struct netvsc_reconfig {
u32 event;
};
struct garp_wrk {
struct work_struct dwrk;
struct net_device *netdev;
struct netvsc_device *netvsc_dev;
};
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
@@ -667,7 +661,6 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
struct garp_wrk gwrk;
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
@@ -678,6 +671,15 @@ struct net_device_context {
/* the device is going away */
bool start_remove;
/* State to manage the associated VF interface. */
struct net_device *vf_netdev;
bool vf_inject;
atomic_t vf_use_cnt;
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
};
/* Per netvsc device */
@@ -733,15 +735,7 @@ struct netvsc_device {
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
atomic_t open_cnt;
/* State to manage the associated VF interface. */
bool vf_inject;
struct net_device *vf_netdev;
atomic_t vf_use_cnt;
};
static inline struct netvsc_device *
......
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
atomic_set(&net_device->open_cnt, 0);
atomic_set(&net_device->vf_use_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
net_device->vf_netdev = NULL;
net_device->vf_inject = false;
return net_device;
}
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev,
nvscdev->send_table[i] = tab[i];
}
static void netvsc_send_vf(struct netvsc_device *nvdev,
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
struct nvsp_message *nvmsg)
{
nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
}
static inline void netvsc_receive_inband(struct hv_device *hdev,
struct netvsc_device *nvdev,
struct nvsp_message *nvmsg)
struct net_device_context *net_device_ctx,
struct nvsp_message *nvmsg)
{
switch (nvmsg->hdr.msg_type) {
case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
break;
case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
netvsc_send_vf(nvdev, nvmsg);
netvsc_send_vf(net_device_ctx, nvmsg);
break;
}
}
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
struct vmpacket_descriptor *desc)
{
struct nvsp_message *nvmsg;
struct net_device_context *net_device_ctx = netdev_priv(ndev);
nvmsg = (struct nvsp_message *)((unsigned long)
desc + (desc->offset8 << 3));
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_INBAND:
netvsc_receive_inband(device, net_device, nvmsg);
netvsc_receive_inband(device, net_device_ctx, nvmsg);
break;
default:
......
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
struct sk_buff *vf_skb;
struct netvsc_stats *rx_stats;
struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
u32 bytes_recvd = packet->total_data_buflen;
int ret = 0;
if (!net || net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
if (READ_ONCE(netvsc_dev->vf_inject)) {
atomic_inc(&netvsc_dev->vf_use_cnt);
if (!READ_ONCE(netvsc_dev->vf_inject)) {
if (READ_ONCE(net_device_ctx->vf_inject)) {
atomic_inc(&net_device_ctx->vf_use_cnt);
if (!READ_ONCE(net_device_ctx->vf_inject)) {
/*
* We raced; just move on.
*/
atomic_dec(&netvsc_dev->vf_use_cnt);
atomic_dec(&net_device_ctx->vf_use_cnt);
goto vf_injection_done;
}
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* the host). Deliver these via the VF interface
* in the guest.
*/
vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
csum_info, *data, vlan_tci);
vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev,
packet, csum_info, *data,
vlan_tci);
if (vf_skb != NULL) {
++netvsc_dev->vf_netdev->stats.rx_packets;
netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
++net_device_ctx->vf_netdev->stats.rx_packets;
net_device_ctx->vf_netdev->stats.rx_bytes +=
bytes_recvd;
netif_receive_skb(vf_skb);
} else {
++net->stats.rx_dropped;
ret = NVSP_STAT_FAIL;
}
atomic_dec(&netvsc_dev->vf_use_cnt);
atomic_dec(&net_device_ctx->vf_use_cnt);
return ret;
}
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
static void netvsc_notify_peers(struct work_struct *wrk)
{
struct garp_wrk *gwrk;
gwrk = container_of(wrk, struct garp_wrk, dwrk);
netdev_notify_peers(gwrk->netdev);
atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
}
static struct net_device *get_netvsc_net_device(char *mac)
{
struct net_device *dev, *found = NULL;
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if (netvsc_dev == NULL)
if (!netvsc_dev || net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
* Take a reference on the module.
*/
try_module_get(THIS_MODULE);
netvsc_dev->vf_netdev = vf_netdev;
net_device_ctx->vf_netdev = vf_netdev;
return NOTIFY_OK;
}
static void netvsc_inject_enable(struct net_device_context *net_device_ctx)
{
net_device_ctx->vf_inject = true;
}
static void netvsc_inject_disable(struct net_device_context *net_device_ctx)
{
net_device_ctx->vf_inject = false;
/* Wait for currently active users to drain out. */
while (atomic_read(&net_device_ctx->vf_use_cnt) != 0)
udelay(50);
}
static int netvsc_vf_up(struct net_device *vf_netdev)
{
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
netvsc_dev->vf_inject = true;
netvsc_inject_enable(net_device_ctx);
/*
* Open the device before switching data path.
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev)
netif_carrier_off(ndev);
/*
* Now notify peers. We are scheduling work to
* notify peers; take a reference to prevent
* the VF interface from vanishing.
*/
atomic_inc(&netvsc_dev->vf_use_cnt);
net_device_ctx->gwrk.netdev = vf_netdev;
net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
schedule_work(&net_device_ctx->gwrk.dwrk);
/* Now notify peers through VF device. */
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
return NOTIFY_OK;
}
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
netvsc_dev->vf_inject = false;
/*
* Wait for currently active users to
* drain out.
*/
while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
udelay(50);
netvsc_inject_disable(net_device_ctx);
netvsc_switch_datapath(ndev, false);
netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
rndis_filter_close(netvsc_dev);
netif_carrier_on(ndev);
/*
* Notify peers.
*/
atomic_inc(&netvsc_dev->vf_use_cnt);
net_device_ctx->gwrk.netdev = ndev;
net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
schedule_work(&net_device_ctx->gwrk.dwrk);
/* Now notify peers through netvsc device. */
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
return NOTIFY_OK;
}
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
if (netvsc_dev == NULL)
if (!netvsc_dev || !net_device_ctx->vf_netdev)
return NOTIFY_DONE;
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
netvsc_dev->vf_netdev = NULL;
netvsc_inject_disable(net_device_ctx);
net_device_ctx->vf_netdev = NULL;
module_put(THIS_MODULE);
return NOTIFY_OK;
}
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev,
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
atomic_set(&net_device_ctx->vf_use_cnt, 0);
net_device_ctx->vf_netdev = NULL;
net_device_ctx->vf_inject = false;
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this,
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
/* Avoid Vlan, Bonding dev with same MAC registering as VF */
if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING))
/* Avoid Vlan dev with same MAC registering as VF */
if (event_dev->priv_flags & IFF_802_1Q_VLAN)
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
if (event_dev->priv_flags & IFF_BONDING &&
event_dev->flags & IFF_MASTER)
return NOTIFY_DONE;
switch (event) {
......