Commit 36a857e4 authored by Jakub Kicinski, committed by David S. Miller

nfp: convert .ndo_change_mtu() to prepare/commit paradigm

When changing the MTU on a running device, first allocate the new rings
and buffers, and only once that succeeds proceed with changing the MTU.

Allocating new rings is not strictly necessary for this operation -
it's done to keep the code simple and because the size of the extra
ring memory is quite small compared to the size of the buffers.

The operation can still fail midway through if FW communication
times out.  In that case we retry with the old MTU (and old rings).
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 30d21171
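
For readers skimming the diff, the sketch below is a minimal, self-contained model (not part of the commit) of the prepare/swap/commit-or-rollback flow described in the commit message. The struct and helper names here (mtu_rings, rings_prepare, rings_swap, rings_free, hw_enable, change_mtu) are hypothetical stand-ins for the driver's nfp_net_shadow_rx_rings_*() helpers and firmware reconfiguration calls, compiled as ordinary user-space C.

/* Illustrative sketch only: model of the prepare/commit MTU change flow. */
#include <stdio.h>
#include <stdlib.h>

struct mtu_rings {
	unsigned int buf_sz;	/* free-list buffer size derived from the MTU */
};

/* "Prepare": allocate the new resources before touching the device. */
static struct mtu_rings *rings_prepare(unsigned int buf_sz)
{
	struct mtu_rings *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->buf_sz = buf_sz;
	return r;
}

/* "Swap": install the new rings, hand back the old ones for later freeing. */
static struct mtu_rings *rings_swap(struct mtu_rings **active,
				    struct mtu_rings *new_rings)
{
	struct mtu_rings *old = *active;

	*active = new_rings;
	return old;
}

static void rings_free(struct mtu_rings *r)
{
	free(r);
}

/* Stand-in for firmware reconfiguration; returns 0 on success. */
static int hw_enable(const struct mtu_rings *r)
{
	return r->buf_sz ? 0 : -1;
}

/* "Commit": only fall back to the old rings if the device rejects the new ones. */
static int change_mtu(struct mtu_rings **active, unsigned int new_buf_sz)
{
	struct mtu_rings *tmp = rings_prepare(new_buf_sz);
	int err;

	if (!tmp)
		return -1;

	tmp = rings_swap(active, tmp);		/* tmp now holds the old rings */
	err = hw_enable(*active);
	if (err)
		tmp = rings_swap(active, tmp);	/* retry with the old rings */

	rings_free(tmp);			/* free whichever set lost */
	return err;
}

int main(void)
{
	struct mtu_rings *active = rings_prepare(1500);

	printf("change_mtu -> %d\n", change_mtu(&active, 9000));
	rings_free(active);
	return 0;
}
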
@@ -1506,6 +1506,64 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz)
 	return -ENOMEM;
 }
 
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz)
+{
+	struct nfp_net_rx_ring *rings;
+	unsigned int r;
+
+	rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+	if (!rings)
+		return NULL;
+
+	for (r = 0; r < nn->num_rx_rings; r++) {
+		nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
+
+		if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz))
+			goto err_free_prev;
+
+		if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+			goto err_free_ring;
+	}
+
+	return rings;
+
+err_free_prev:
+	while (r--) {
+		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+err_free_ring:
+		nfp_net_rx_ring_free(&rings[r]);
+	}
+	kfree(rings);
+	return NULL;
+}
+
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+{
+	struct nfp_net_rx_ring *old = nn->rx_rings;
+	unsigned int r;
+
+	for (r = 0; r < nn->num_rx_rings; r++)
+		old[r].r_vec->rx_ring = &rings[r];
+
+	nn->rx_rings = rings;
+	return old;
+}
+
+static void
+nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
+{
+	unsigned int r;
+
+	for (r = 0; r < nn->num_r_vecs; r++) {
+		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+		nfp_net_rx_ring_free(&rings[r]);
+	}
+
+	kfree(rings);
+}
+
 static int
 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 		       int idx)
@@ -1984,23 +2042,61 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
 
 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
 {
+	unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
 	struct nfp_net *nn = netdev_priv(netdev);
+	struct nfp_net_rx_ring *tmp_rings;
+	int err;
 
 	if (new_mtu < 68 || new_mtu > nn->max_mtu) {
 		nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
 		return -EINVAL;
 	}
 
+	old_mtu = netdev->mtu;
+	old_fl_bufsz = nn->fl_bufsz;
+	new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
+
+	if (!netif_running(netdev)) {
+		netdev->mtu = new_mtu;
+		nn->fl_bufsz = new_fl_bufsz;
+		return 0;
+	}
+
+	/* Prepare new rings */
+	tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz);
+	if (!tmp_rings)
+		return -ENOMEM;
+
+	/* Stop device, swap in new rings, try to start the firmware */
+	nfp_net_close_stack(nn);
+	nfp_net_clear_config_and_disable(nn);
+
+	tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
 	netdev->mtu = new_mtu;
-	nn->fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
+	nn->fl_bufsz = new_fl_bufsz;
+	err = nfp_net_set_config_and_enable(nn);
+	if (err) {
+		const int err_new = err;
+
+		/* Try with old configuration and old rings */
+		tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
+		netdev->mtu = old_mtu;
+		nn->fl_bufsz = old_fl_bufsz;
 
-	/* restart if running */
-	if (netif_running(netdev)) {
-		nfp_net_netdev_close(netdev);
-		nfp_net_netdev_open(netdev);
+		err = __nfp_net_set_config_and_enable(nn);
+		if (err)
+			nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
+			       err_new, err);
 	}
 
-	return 0;
+	nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+
+	nfp_net_open_stack(nn);
+
+	return err;
 }
 
 static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,