Commit bcc93a23 authored by Jakub Kicinski, committed by David S. Miller

nfp: register devlink after app is created

Devlink used to have two global locks: the devlink lock and the port lock;
our lock ordering looked like this:

  devlink lock -> driver's pf->lock -> devlink port lock

After recent changes the port lock was replaced with a per-instance
lock.  Unfortunately, the new per-instance lock is now taken on most
operations.  This means we can only grab the pf->lock from the port
split/unsplit ops.  The lock ordering now looks like this:

  devlink lock -> driver's pf->lock -> devlink instance lock

Since we can't take pf->lock from most devlink ops, make sure
nfp_apps are prepared to service them as soon as devlink is
registered.  Locking of the pf must be pushed down to after the
nfp_app_init() callback.

The init order looks like this:
 nfp_app_init
 devlink_register
 nfp_app_start
 netdev/port_register

As soon as app_init is done, nfp_apps must be ready to service
devlink-related callbacks.  Apps can only register their own
devlink objects from nfp_app_start.

Fixes: 2406e7e5 ("devlink: Add per devlink instance lock")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e1a2599d
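For illustration, the probe-time ordering described in the message ends up looking roughly like the sketch below. This is a simplified illustration of the patch, not the literal driver code; the helper name nfp_probe_order_sketch is made up, and error handling plus unrelated setup are omitted.

/* Simplified sketch of the probe ordering after this patch; illustrative
 * only, not the literal nfp driver code. */
static int nfp_probe_order_sketch(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	int err;

	/* nfp_app_init() runs first, under pf->lock, so the app can service
	 * devlink ops as soon as the devlink instance becomes visible. */
	mutex_lock(&pf->lock);
	err = nfp_app_init(pf->app);
	mutex_unlock(&pf->lock);
	if (err)
		return err;

	/* Only now is the devlink instance registered; its ops no longer
	 * take pf->lock and simply rely on pf->app being valid. */
	err = devlink_register(devlink, &pf->pdev->dev);
	if (err)
		return err;

	/* pf->lock is taken only after devlink_register(), preserving the
	 * ordering devlink lock -> pf->lock -> devlink instance lock.
	 * Apps may register their own devlink objects from nfp_app_start()
	 * onwards, followed by netdev/port registration. */
	mutex_lock(&pf->lock);
	/* ... vNIC setup, nfp_app_start(), netdev/port registration ... */
	mutex_unlock(&pf->lock);

	return 0;
}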
@@ -152,18 +152,8 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
 static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
 	struct nfp_pf *pf = devlink_priv(devlink);
-	int ret;
-
-	mutex_lock(&pf->lock);
-	if (!pf->app) {
-		ret = -EBUSY;
-		goto out;
-	}
-	ret = nfp_app_eswitch_mode_get(pf->app, mode);
-out:
-	mutex_unlock(&pf->lock);
 
-	return ret;
+	return nfp_app_eswitch_mode_get(pf->app, mode);
 }
 
 const struct devlink_ops nfp_devlink_ops = {
@@ -499,13 +499,9 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_hwinfo_free;
 
-	err = devlink_register(devlink, &pdev->dev);
-	if (err)
-		goto err_hwinfo_free;
-
 	err = nfp_nsp_init(pdev, pf);
 	if (err)
-		goto err_devlink_unreg;
+		goto err_hwinfo_free;
 
 	pf->mip = nfp_mip_open(pf->cpp);
 	pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);
@@ -549,8 +545,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 	kfree(pf->eth_tbl);
 	kfree(pf->nspi);
 	vfree(pf->dumpspec);
-err_devlink_unreg:
-	devlink_unregister(devlink);
 err_hwinfo_free:
 	kfree(pf->hwinfo);
 	nfp_cpp_free(pf->cpp);
@@ -571,18 +565,13 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 static void nfp_pci_remove(struct pci_dev *pdev)
 {
 	struct nfp_pf *pf = pci_get_drvdata(pdev);
-	struct devlink *devlink;
 
 	nfp_hwmon_unregister(pf);
 
-	devlink = priv_to_devlink(pf);
-
-	nfp_net_pci_remove(pf);
-
 	nfp_pcie_sriov_disable(pdev);
 	pci_sriov_set_totalvfs(pf->pdev, 0);
 
-	devlink_unregister(devlink);
+	nfp_net_pci_remove(pf);
 
 	vfree(pf->dumpspec);
 	kfree(pf->rtbl);
@@ -598,7 +587,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
 	kfree(pf->eth_tbl);
 	kfree(pf->nspi);
 	mutex_destroy(&pf->lock);
-	devlink_free(devlink);
+	devlink_free(priv_to_devlink(pf));
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 }
@@ -373,7 +373,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
 	if (IS_ERR(pf->app))
 		return PTR_ERR(pf->app);
 
+	mutex_lock(&pf->lock);
 	err = nfp_app_init(pf->app);
+	mutex_unlock(&pf->lock);
 	if (err)
 		goto err_free;
 
@@ -401,7 +403,9 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
 err_unmap:
 	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
 err_app_clean:
+	mutex_lock(&pf->lock);
 	nfp_app_clean(pf->app);
+	mutex_unlock(&pf->lock);
 err_free:
 	nfp_app_free(pf->app);
 	pf->app = NULL;
@@ -414,7 +418,11 @@ static void nfp_net_pf_app_clean(struct nfp_pf *pf)
 		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
 		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
 	}
+
+	mutex_lock(&pf->lock);
 	nfp_app_clean(pf->app);
+	mutex_unlock(&pf->lock);
+
 	nfp_app_free(pf->app);
 	pf->app = NULL;
 }
@@ -693,6 +701,7 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
  */
 int nfp_net_pci_probe(struct nfp_pf *pf)
 {
+	struct devlink *devlink = priv_to_devlink(pf);
 	struct nfp_net_fw_version fw_ver;
 	u8 __iomem *ctrl_bar, *qc_bar;
 	int stride;
@@ -706,16 +715,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 		return -EINVAL;
 	}
 
-	mutex_lock(&pf->lock);
 	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
-	if ((int)pf->max_data_vnics < 0) {
-		err = pf->max_data_vnics;
-		goto err_unlock;
-	}
+	if ((int)pf->max_data_vnics < 0)
+		return pf->max_data_vnics;
 
 	err = nfp_net_pci_map_mem(pf);
 	if (err)
-		goto err_unlock;
+		return err;
 
 	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
 	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
@@ -754,6 +760,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	if (err)
 		goto err_unmap;
 
+	err = devlink_register(devlink, &pf->pdev->dev);
+	if (err)
+		goto err_app_clean;
+
+	mutex_lock(&pf->lock);
 	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
 
 	/* Allocate the vnics and do basic init */
@@ -785,12 +796,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	nfp_net_pf_free_vnics(pf);
 err_clean_ddir:
 	nfp_net_debugfs_dir_clean(&pf->ddir);
+	mutex_unlock(&pf->lock);
+	cancel_work_sync(&pf->port_refresh_work);
+	devlink_unregister(devlink);
+err_app_clean:
 	nfp_net_pf_app_clean(pf);
 err_unmap:
 	nfp_net_pci_unmap_mem(pf);
-err_unlock:
-	mutex_unlock(&pf->lock);
-	cancel_work_sync(&pf->port_refresh_work);
 	return err;
 }
 
@@ -810,11 +822,13 @@ static void nfp_pci_remove(struct pci_dev *pdev)
 	/* stop app first, to avoid double free of ctrl vNIC's ddir */
 	nfp_net_debugfs_dir_clean(&pf->ddir);
+	mutex_unlock(&pf->lock);
+
+	devlink_unregister(priv_to_devlink(pf));
 
 	nfp_net_pf_free_irqs(pf);
 
 	nfp_net_pf_app_clean(pf);
 	nfp_net_pci_unmap_mem(pf);
 
-	mutex_unlock(&pf->lock);
 	cancel_work_sync(&pf->port_refresh_work);
 }