Commit d78f8d83, authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
net: implement devlink reload in ice

Michal Swiatkowski says:

This is part of the changes done in patchset [0]. Resource management is
the more controversial part, so I split the work into two patchsets.

This is the first one, covering the refactor and the implementation of
the devlink reload API call. The refactor will unblock some of the
patches needed by SIOV or subfunction support.

Most of this patchset is about implementing the driver reload
mechanism. Parts of the probe and rebuild code are reused so that logic
is not duplicated. To allow this reuse, the probe and rebuild paths are
split into smaller functions, as sketched below.
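
As a rough illustration (a sketch based only on the function names and
signatures introduced in the ice_lib.c/ice_lib.h hunks below; the exact
call sites live in the diff), probe and rebuild now share one
configure/deconfigure pair:

    /* shared by first-time setup (probe) and rebuild after reset */
    int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vf *vf,
                    struct ice_channel *ch, int init_vsi);

    /* mirror of ice_vsi_cfg(): tears down what it configured */
    void ice_vsi_decfg(struct ice_vsi *vsi);

ice_vsi_setup() calls ice_vsi_cfg() with ICE_VSI_FLAG_INIT, while
ice_vsi_rebuild() calls ice_vsi_decfg() and then runs the same
configure steps again with the caller-supplied flag.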

Patch "ice: split ice_vsi_setup into smaller functions" changes
boolean variable in function call to integer and adds define
for it. Instead of having the function called with true/false now it
can be called with readable defines ICE_VSI_FLAG_INIT or
ICE_VSI_FLAG_NO_INIT. It was suggested by Jacob Keller and probably this
mechanism will be implemented across ice driver in follow up patchset.
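
For illustration, the defines (added in ice_lib.h below) turn an opaque
boolean at the call site into something readable:

    #define ICE_VSI_FLAG_INIT       BIT(0)
    #define ICE_VSI_FLAG_NO_INIT    0

    /* before: what does 'true' mean here? */
    ret = ice_vsi_init(vsi, true);

    /* after: the intent is visible at the call site */
    ret = ice_vsi_init(vsi, ICE_VSI_FLAG_INIT);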

The code was previously reviewed here [0].

[0] https://lore.kernel.org/netdev/Y3ckRWtAtZU1BdXm@unreal/T/#m3bb8feba0a62f9b4cd54cd94917b7e2143fc2ecd
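
For reference, once reload is implemented the two actions can be
exercised with the standard devlink tool; the commands below are
illustrative and the PCI address is a placeholder:

    # full driver re-initialization (ice_unload() + ice_load())
    devlink dev reload pci/0000:01:00.0 action driver_reinit

    # activate new firmware via EMP reset (supported before this series)
    devlink dev reload pci/0000:01:00.0 action fw_activate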

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -353,7 +353,6 @@ struct ice_vsi {
 	struct ice_vf *vf;		/* VF associated with this VSI */
-	u16 ethtype;			/* Ethernet protocol for pause frame */
 	u16 num_gfltr;
 	u16 num_bfltr;
@@ -889,7 +888,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
 int ice_up(struct ice_vsi *vsi);
 int ice_down(struct ice_vsi *vsi);
 int ice_down_up(struct ice_vsi *vsi);
-int ice_vsi_cfg(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
@@ -907,6 +906,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 int ice_plug_aux_dev(struct ice_pf *pf);
 void ice_unplug_aux_dev(struct ice_pf *pf);
 int ice_init_rdma(struct ice_pf *pf);
+void ice_deinit_rdma(struct ice_pf *pf);
 const char *ice_aq_str(enum ice_aq_err aq_err);
 bool ice_is_wol_supported(struct ice_hw *hw);
 void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
@@ -931,6 +931,8 @@ int ice_open(struct net_device *netdev);
 int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
 void ice_service_task_schedule(struct ice_pf *pf);
+int ice_load(struct ice_pf *pf);
+void ice_unload(struct ice_pf *pf);
 
 /**
  * ice_set_rdma_cap - enable RDMA support
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1088,8 +1088,10 @@ int ice_init_hw(struct ice_hw *hw)
 	if (status)
 		goto err_unroll_cqinit;
 
-	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
-				     sizeof(*hw->port_info), GFP_KERNEL);
+	if (!hw->port_info)
+		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
+					     sizeof(*hw->port_info),
+					     GFP_KERNEL);
 	if (!hw->port_info) {
 		status = -ENOMEM;
 		goto err_unroll_cqinit;
@@ -1217,11 +1219,6 @@ void ice_deinit_hw(struct ice_hw *hw)
 	ice_free_hw_tbls(hw);
 	mutex_destroy(&hw->tnl_lock);
 
-	if (hw->port_info) {
-		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
-		hw->port_info = NULL;
-	}
-
 	/* Attempt to disable FW logging before shutting down control queues */
 	ice_cfg_fw_log(hw, false);
 	ice_destroy_all_ctrlq(hw);
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -371,10 +371,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
 /**
  * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
- * @devlink: pointer to the devlink instance to reload
- * @netns_change: if true, the network namespace is changing
- * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
- * @limit: limits on what reload should do, such as not resetting
+ * @pf: pointer to the pf instance
  * @extack: netlink extended ACK structure
  *
  * Allow user to activate new Embedded Management Processor firmware by
@@ -387,12 +384,9 @@ static int ice_devlink_info_get(struct devlink *devlink,
  * any source.
  */
 static int
-ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
-			      enum devlink_reload_action action,
-			      enum devlink_reload_limit limit,
+ice_devlink_reload_empr_start(struct ice_pf *pf,
 			      struct netlink_ext_ack *extack)
 {
-	struct ice_pf *pf = devlink_priv(devlink);
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_hw *hw = &pf->hw;
 	u8 pending;
@@ -430,12 +424,52 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
 	return 0;
 }
 
+/**
+ * ice_devlink_reload_down - prepare for reload
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
+			enum devlink_reload_action action,
+			enum devlink_reload_limit limit,
+			struct netlink_ext_ack *extack)
+{
+	struct ice_pf *pf = devlink_priv(devlink);
+
+	switch (action) {
+	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+		if (ice_is_eswitch_mode_switchdev(pf)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Go to legacy mode before doing reinit\n");
+			return -EOPNOTSUPP;
+		}
+		if (ice_is_adq_active(pf)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Turn off ADQ before doing reinit\n");
+			return -EOPNOTSUPP;
+		}
+		if (ice_has_vfs(pf)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Remove all VFs before doing reinit\n");
+			return -EOPNOTSUPP;
+		}
+		ice_unload(pf);
+		return 0;
+	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+		return ice_devlink_reload_empr_start(pf, extack);
+	default:
+		WARN_ON(1);
+		return -EOPNOTSUPP;
+	}
+}
+
 /**
  * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
- * @devlink: pointer to the devlink instance reloading
- * @action: the action requested
- * @limit: limits imposed by userspace, such as not resetting
- * @actions_performed: on return, indicate what actions actually performed
+ * @pf: pointer to the pf instance
  * @extack: netlink extended ACK structure
  *
  * Wait for driver to finish rebuilding after EMP reset is completed. This
@@ -443,17 +477,11 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
  * for the driver's rebuild to complete.
  */
 static int
-ice_devlink_reload_empr_finish(struct devlink *devlink,
-			       enum devlink_reload_action action,
-			       enum devlink_reload_limit limit,
-			       u32 *actions_performed,
+ice_devlink_reload_empr_finish(struct ice_pf *pf,
 			       struct netlink_ext_ack *extack)
 {
-	struct ice_pf *pf = devlink_priv(devlink);
 	int err;
 
-	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
-
 	err = ice_wait_for_reset(pf, 60 * HZ);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
@@ -1192,12 +1220,43 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
 	return status;
 }
 
+/**
+ * ice_devlink_reload_up - do reload up after reinit
+ * @devlink: pointer to the devlink instance reloading
+ * @action: the action requested
+ * @limit: limits imposed by userspace, such as not resetting
+ * @actions_performed: on return, indicate what actions actually performed
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_up(struct devlink *devlink,
+		      enum devlink_reload_action action,
+		      enum devlink_reload_limit limit,
+		      u32 *actions_performed,
+		      struct netlink_ext_ack *extack)
+{
+	struct ice_pf *pf = devlink_priv(devlink);
+
+	switch (action) {
+	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+		return ice_load(pf);
+	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+		*actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+		return ice_devlink_reload_empr_finish(pf, extack);
+	default:
+		WARN_ON(1);
+		return -EOPNOTSUPP;
+	}
+}
+
 static const struct devlink_ops ice_devlink_ops = {
 	.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
-	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
-	/* The ice driver currently does not support driver reinit */
-	.reload_down = ice_devlink_reload_empr_start,
-	.reload_up = ice_devlink_reload_empr_finish,
+	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+	.reload_down = ice_devlink_reload_down,
+	.reload_up = ice_devlink_reload_up,
 	.port_split = ice_devlink_port_split,
 	.port_unsplit = ice_devlink_port_unsplit,
 	.eswitch_mode_get = ice_eswitch_mode_get,
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -656,7 +656,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
 	if (status)
 		goto err_setup_rx_ring;
 
-	status = ice_vsi_cfg(vsi);
+	status = ice_vsi_cfg_lan(vsi);
 	if (status)
 		goto err_setup_rx_ring;
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -208,6 +208,11 @@ static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
 void ice_fltr_remove_all(struct ice_vsi *vsi)
 {
 	ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+	/* sync netdev filters if exist */
+	if (vsi->netdev) {
+		__dev_uc_unsync(vsi->netdev, NULL);
+		__dev_mc_unsync(vsi->netdev, NULL);
+	}
 }
 
 /**
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -6,6 +6,8 @@
 #include "ice_lib.h"
 #include "ice_dcb_lib.h"
 
+static DEFINE_XARRAY_ALLOC1(ice_aux_id);
+
 /**
  * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
  * @pf: pointer to PF struct
@@ -245,6 +247,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
 	return 0;
 }
 
+/**
+ * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
+ * @pf: board private structure to initialize
+ */
+static void ice_free_rdma_qvector(struct ice_pf *pf)
+{
+	pf->num_avail_sw_msix -= pf->num_rdma_msix;
+	ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
+		     ICE_RES_RDMA_VEC_ID);
+}
+
 /**
  * ice_adev_release - function to be mapped to AUX dev's release op
  * @dev: pointer to device to free
@@ -331,12 +344,48 @@ int ice_init_rdma(struct ice_pf *pf)
 	struct device *dev = &pf->pdev->dev;
 	int ret;
 
+	if (!ice_is_rdma_ena(pf)) {
+		dev_warn(dev, "RDMA is not supported on this device\n");
+		return 0;
+	}
+
+	ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
+		       GFP_KERNEL);
+	if (ret) {
+		dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+		return -ENOMEM;
+	}
+
 	/* Reserve vector resources */
 	ret = ice_reserve_rdma_qvector(pf);
 	if (ret < 0) {
 		dev_err(dev, "failed to reserve vectors for RDMA\n");
-		return ret;
+		goto err_reserve_rdma_qvector;
 	}
 	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
-	return ice_plug_aux_dev(pf);
+	ret = ice_plug_aux_dev(pf);
+	if (ret)
+		goto err_plug_aux_dev;
+	return 0;
+
+err_plug_aux_dev:
+	ice_free_rdma_qvector(pf);
+err_reserve_rdma_qvector:
+	pf->adev = NULL;
+	xa_erase(&ice_aux_id, pf->aux_idx);
+	return ret;
+}
+
+/**
+ * ice_deinit_rdma - deinitialize RDMA on PF
+ * @pf: ptr to ice_pf
+ */
+void ice_deinit_rdma(struct ice_pf *pf)
+{
+	if (!ice_is_rdma_ena(pf))
+		return;
+
+	ice_unplug_aux_dev(pf);
+	ice_free_rdma_qvector(pf);
+	xa_erase(&ice_aux_id, pf->aux_idx);
 }
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -282,10 +282,10 @@ static int ice_get_free_slot(void *array, int size, int curr)
 }
 
 /**
- * ice_vsi_delete - delete a VSI from the switch
+ * ice_vsi_delete_from_hw - delete a VSI from the switch
  * @vsi: pointer to VSI being removed
  */
-void ice_vsi_delete(struct ice_vsi *vsi)
+static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_vsi_ctx *ctxt;
@@ -348,47 +348,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+	struct ice_vsi_stats *vsi_stat;
+	struct ice_pf *pf = vsi->back;
+	int i;
+
+	if (vsi->type == ICE_VSI_CHNL)
+		return;
+	if (!pf->vsi_stats)
+		return;
+
+	vsi_stat = pf->vsi_stats[vsi->idx];
+	if (!vsi_stat)
+		return;
+
+	ice_for_each_alloc_txq(vsi, i) {
+		if (vsi_stat->tx_ring_stats[i]) {
+			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+		}
+	}
+
+	ice_for_each_alloc_rxq(vsi, i) {
+		if (vsi_stat->rx_ring_stats[i]) {
+			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+		}
+	}
+
+	kfree(vsi_stat->tx_ring_stats);
+	kfree(vsi_stat->rx_ring_stats);
+	kfree(vsi_stat);
+	pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI which is having stats allocated
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+	struct ice_ring_stats **tx_ring_stats;
+	struct ice_ring_stats **rx_ring_stats;
+	struct ice_vsi_stats *vsi_stats;
+	struct ice_pf *pf = vsi->back;
+	u16 i;
+
+	vsi_stats = pf->vsi_stats[vsi->idx];
+	tx_ring_stats = vsi_stats->tx_ring_stats;
+	rx_ring_stats = vsi_stats->rx_ring_stats;
+
+	/* Allocate Tx ring stats */
+	ice_for_each_alloc_txq(vsi, i) {
+		struct ice_ring_stats *ring_stats;
+		struct ice_tx_ring *ring;
+
+		ring = vsi->tx_rings[i];
+		ring_stats = tx_ring_stats[i];
+
+		if (!ring_stats) {
+			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+			if (!ring_stats)
+				goto err_out;
+
+			WRITE_ONCE(tx_ring_stats[i], ring_stats);
+		}
+
+		ring->ring_stats = ring_stats;
+	}
+
+	/* Allocate Rx ring stats */
+	ice_for_each_alloc_rxq(vsi, i) {
+		struct ice_ring_stats *ring_stats;
+		struct ice_rx_ring *ring;
+
+		ring = vsi->rx_rings[i];
+		ring_stats = rx_ring_stats[i];
+
+		if (!ring_stats) {
+			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+			if (!ring_stats)
+				goto err_out;
+
+			WRITE_ONCE(rx_ring_stats[i], ring_stats);
+		}
+
+		ring->ring_stats = ring_stats;
+	}
+
+	return 0;
+
+err_out:
+	ice_vsi_free_stats(vsi);
+	return -ENOMEM;
+}
+
+/**
- * ice_vsi_clear - clean up and deallocate the provided VSI
+ * ice_vsi_free - clean up and deallocate the provided VSI
  * @vsi: pointer to VSI being cleared
  *
  * This deallocates the VSI's queue resources, removes it from the PF's
  * VSI array if necessary, and deallocates the VSI
- *
- * Returns 0 on success, negative on failure
  */
-int ice_vsi_clear(struct ice_vsi *vsi)
+static void ice_vsi_free(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = NULL;
 	struct device *dev;
 
-	if (!vsi)
-		return 0;
-
-	if (!vsi->back)
-		return -EINVAL;
+	if (!vsi || !vsi->back)
+		return;
 
 	pf = vsi->back;
 	dev = ice_pf_to_dev(pf);
 
 	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
 		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
-		return -EINVAL;
+		return;
 	}
 
 	mutex_lock(&pf->sw_mutex);
 	/* updates the PF for this cleared VSI */
 
 	pf->vsi[vsi->idx] = NULL;
-	if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
-		pf->next_vsi = vsi->idx;
-	if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
-		pf->next_vsi = vsi->idx;
+	pf->next_vsi = vsi->idx;
 
+	ice_vsi_free_stats(vsi);
 	ice_vsi_free_arrays(vsi);
 	mutex_unlock(&pf->sw_mutex);
 	devm_kfree(dev, vsi);
+}
 
-	return 0;
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+	ice_vsi_delete_from_hw(vsi);
+	ice_vsi_free(vsi);
 }
 
 /**
@@ -461,6 +558,10 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
 	if (!pf->vsi_stats)
 		return -ENOENT;
 
+	if (pf->vsi_stats[vsi->idx])
+	/* realloc will happen in rebuild path */
+		return 0;
+
 	vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
 	if (!vsi_stat)
 		return -ENOMEM;
@@ -490,9 +591,57 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
 	return -ENOMEM;
 }
 
+/**
+ * ice_vsi_alloc_def - set default values for already allocated VSI
+ * @vsi: ptr to VSI
+ * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
+ * @ch: ptr to channel
+ */
+static int
+ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_vf *vf,
+		  struct ice_channel *ch)
+{
+	if (vsi->type != ICE_VSI_CHNL) {
+		ice_vsi_set_num_qs(vsi, vf);
+		if (ice_vsi_alloc_arrays(vsi))
+			return -ENOMEM;
+	}
+
+	switch (vsi->type) {
+	case ICE_VSI_SWITCHDEV_CTRL:
+		/* Setup eswitch MSIX irq handler for VSI */
+		vsi->irq_handler = ice_eswitch_msix_clean_rings;
+		break;
+	case ICE_VSI_PF:
+		/* Setup default MSIX irq handler for VSI */
+		vsi->irq_handler = ice_msix_clean_rings;
+		break;
+	case ICE_VSI_CTRL:
+		/* Setup ctrl VSI MSIX irq handler */
+		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
+		break;
+	case ICE_VSI_CHNL:
+		if (!ch)
+			return -EINVAL;
+
+		vsi->num_rxq = ch->num_rxq;
+		vsi->num_txq = ch->num_txq;
+		vsi->next_base_q = ch->base_q;
+		break;
+	case ICE_VSI_VF:
+		break;
+	default:
+		ice_vsi_free_arrays(vsi);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
  * @pf: board private structure
+ * @pi: pointer to the port_info instance
  * @vsi_type: type of VSI
  * @ch: ptr to channel
  * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
@@ -504,8 +653,9 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
  * returns a pointer to a VSI on success, NULL on failure.
  */
 static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
-	      struct ice_channel *ch, struct ice_vf *vf)
+ice_vsi_alloc(struct ice_pf *pf, struct ice_port_info *pi,
+	      enum ice_vsi_type vsi_type, struct ice_channel *ch,
+	      struct ice_vf *vf)
 {
 	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_vsi *vsi = NULL;
@@ -531,88 +681,28 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
 	vsi->type = vsi_type;
 	vsi->back = pf;
+	vsi->port_info = pi;
+	/* For VSIs which don't have a connected VF, this will be NULL */
+	vsi->vf = vf;
 	set_bit(ICE_VSI_DOWN, vsi->state);
 
-	if (vsi_type == ICE_VSI_VF)
-		ice_vsi_set_num_qs(vsi, vf);
-	else if (vsi_type != ICE_VSI_CHNL)
-		ice_vsi_set_num_qs(vsi, NULL);
-
-	switch (vsi->type) {
-	case ICE_VSI_SWITCHDEV_CTRL:
-		if (ice_vsi_alloc_arrays(vsi))
-			goto err_rings;
-
-		/* Setup eswitch MSIX irq handler for VSI */
-		vsi->irq_handler = ice_eswitch_msix_clean_rings;
-		break;
-	case ICE_VSI_PF:
-		if (ice_vsi_alloc_arrays(vsi))
-			goto err_rings;
-
-		/* Setup default MSIX irq handler for VSI */
-		vsi->irq_handler = ice_msix_clean_rings;
-		break;
-	case ICE_VSI_CTRL:
-		if (ice_vsi_alloc_arrays(vsi))
-			goto err_rings;
-
-		/* Setup ctrl VSI MSIX irq handler */
-		vsi->irq_handler = ice_msix_clean_ctrl_vsi;
-
-		/* For the PF control VSI this is NULL, for the VF control VSI
-		 * this will be the first VF to allocate it.
-		 */
-		vsi->vf = vf;
-		break;
-	case ICE_VSI_VF:
-		if (ice_vsi_alloc_arrays(vsi))
-			goto err_rings;
-		vsi->vf = vf;
-		break;
-	case ICE_VSI_CHNL:
-		if (!ch)
-			goto err_rings;
-		vsi->num_rxq = ch->num_rxq;
-		vsi->num_txq = ch->num_txq;
-		vsi->next_base_q = ch->base_q;
-		break;
-	case ICE_VSI_LB:
-		if (ice_vsi_alloc_arrays(vsi))
-			goto err_rings;
-		break;
-	default:
-		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
-		goto unlock_pf;
-	}
-
-	if (vsi->type == ICE_VSI_CTRL && !vf) {
-		/* Use the last VSI slot as the index for PF control VSI */
-		vsi->idx = pf->num_alloc_vsi - 1;
-		pf->ctrl_vsi_idx = vsi->idx;
-		pf->vsi[vsi->idx] = vsi;
-	} else {
-		/* fill slot and make note of the index */
-		vsi->idx = pf->next_vsi;
-		pf->vsi[pf->next_vsi] = vsi;
-
-		/* prepare pf->next_vsi for next use */
-		pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
-						 pf->next_vsi);
-	}
-
-	if (vsi->type == ICE_VSI_CTRL && vf)
-		vf->ctrl_vsi_idx = vsi->idx;
+	/* fill slot and make note of the index */
+	vsi->idx = pf->next_vsi;
+	pf->vsi[pf->next_vsi] = vsi;
 
-	/* allocate memory for Tx/Rx ring stat pointers */
-	if (ice_vsi_alloc_stat_arrays(vsi))
-		goto err_rings;
+	/* prepare pf->next_vsi for next use */
+	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+					 pf->next_vsi);
 
-	goto unlock_pf;
+	if (vsi->type == ICE_VSI_CTRL) {
+		if (vf) {
+			vf->ctrl_vsi_idx = vsi->idx;
+		} else {
+			WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
+			pf->ctrl_vsi_idx = vsi->idx;
+		}
+	}
 
-err_rings:
-	devm_kfree(dev, vsi);
-	vsi = NULL;
 unlock_pf:
 	mutex_unlock(&pf->sw_mutex);
 	return vsi;
@@ -1177,12 +1267,12 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 /**
  * ice_vsi_init - Create and initialize a VSI
  * @vsi: the VSI being configured
- * @init_vsi: is this call creating a VSI
+ * @init_vsi: flag, tell if VSI need to be initialized
  *
  * This initializes a VSI context depending on the VSI type to be added and
  * passes it down to the add_vsi aq command to create a new VSI.
  */
-static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
+static int ice_vsi_init(struct ice_vsi *vsi, int init_vsi)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
@@ -1244,7 +1334,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
 	/* if updating VSI context, make sure to set valid_section:
 	 * to indicate which section of VSI context being updated
 	 */
-	if (!init_vsi)
+	if (!(init_vsi & ICE_VSI_FLAG_INIT))
 		ctxt->info.valid_sections |=
 			cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
 }
@@ -1257,7 +1347,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
 	if (ret)
 		goto out;
 
-	if (!init_vsi) /* means VSI being updated */
+	if (!(init_vsi & ICE_VSI_FLAG_INIT))
+		/* means VSI being updated */
 		/* must to indicate which section of VSI context are
 		 * being modified
 		 */
@@ -1272,7 +1363,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
 	}
 
-	if (init_vsi) {
+	if (init_vsi & ICE_VSI_FLAG_INIT) {
 		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
 		if (ret) {
 			dev_err(dev, "Add VSI failed, err %d\n", ret);
@@ -1584,133 +1675,33 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_free_stats - Free the ring statistics structures
- * @vsi: VSI pointer
- */
-static void ice_vsi_free_stats(struct ice_vsi *vsi)
-{
-	struct ice_vsi_stats *vsi_stat;
-	struct ice_pf *pf = vsi->back;
-	int i;
-
-	if (vsi->type == ICE_VSI_CHNL)
-		return;
-	if (!pf->vsi_stats)
-		return;
-
-	vsi_stat = pf->vsi_stats[vsi->idx];
-	if (!vsi_stat)
-		return;
-
-	ice_for_each_alloc_txq(vsi, i) {
-		if (vsi_stat->tx_ring_stats[i]) {
-			kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
-			WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
-		}
-	}
-
-	ice_for_each_alloc_rxq(vsi, i) {
-		if (vsi_stat->rx_ring_stats[i]) {
-			kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
-			WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
-		}
-	}
-
-	kfree(vsi_stat->tx_ring_stats);
-	kfree(vsi_stat->rx_ring_stats);
-	kfree(vsi_stat);
-	pf->vsi_stats[vsi->idx] = NULL;
-}
-
-/**
- * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
- * @vsi: VSI which is having stats allocated
- */
-static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
-{
-	struct ice_ring_stats **tx_ring_stats;
-	struct ice_ring_stats **rx_ring_stats;
-	struct ice_vsi_stats *vsi_stats;
-	struct ice_pf *pf = vsi->back;
-	u16 i;
-
-	vsi_stats = pf->vsi_stats[vsi->idx];
-	tx_ring_stats = vsi_stats->tx_ring_stats;
-	rx_ring_stats = vsi_stats->rx_ring_stats;
-
-	/* Allocate Tx ring stats */
-	ice_for_each_alloc_txq(vsi, i) {
-		struct ice_ring_stats *ring_stats;
-		struct ice_tx_ring *ring;
-
-		ring = vsi->tx_rings[i];
-		ring_stats = tx_ring_stats[i];
-
-		if (!ring_stats) {
-			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
-			if (!ring_stats)
-				goto err_out;
-
-			WRITE_ONCE(tx_ring_stats[i], ring_stats);
-		}
-
-		ring->ring_stats = ring_stats;
-	}
-
-	/* Allocate Rx ring stats */
-	ice_for_each_alloc_rxq(vsi, i) {
-		struct ice_ring_stats *ring_stats;
-		struct ice_rx_ring *ring;
-
-		ring = vsi->rx_rings[i];
-		ring_stats = rx_ring_stats[i];
-
-		if (!ring_stats) {
-			ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
-			if (!ring_stats)
-				goto err_out;
-
-			WRITE_ONCE(rx_ring_stats[i], ring_stats);
-		}
-
-		ring->ring_stats = ring_stats;
-	}
-
-	return 0;
-
-err_out:
-	ice_vsi_free_stats(vsi);
-	return -ENOMEM;
-}
-
-/**
  * ice_vsi_manage_rss_lut - disable/enable RSS
  * @vsi: the VSI being changed
  * @ena: boolean value indicating if this is an enable or disable request
  *
  * In the event of disable request for RSS, this function will zero out RSS
  * LUT, while in the event of enable request for RSS, it will reconfigure RSS
  * LUT.
  */
 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
 {
 	u8 *lut;
 
 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
 	if (!lut)
 		return;
 
 	if (ena) {
 		if (vsi->rss_lut_user)
 			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
 		else
 			ice_fill_rss_lut(lut, vsi->rss_table_size,
 					 vsi->rss_size);
 	}
 
 	ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
 	kfree(lut);
 }
 
 /**
  * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
@@ -2645,54 +2636,101 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_setup - Set up a VSI by a given type
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vsi_type: VSI type
- * @vf: pointer to VF to which this VSI connects. This field is used primarily
- * for the ICE_VSI_VF type. Other VSI types should pass NULL.
- * @ch: ptr to channel
- *
- * This allocates the sw VSI structure and its queue resources.
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
  *
- * Returns pointer to the successfully allocated and configured VSI sw struct on
- * success, NULL on failure.
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
  */
-struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
-	      enum ice_vsi_type vsi_type, struct ice_vf *vf,
-	      struct ice_channel *ch)
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+	struct ice_vf *vf;
+	unsigned int bkt;
+
+	rcu_read_lock();
+	ice_for_each_vf_rcu(pf, bkt, vf) {
+		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+			rcu_read_unlock();
+			return;
+		}
+	}
+	rcu_read_unlock();
+
+	/* No other VFs left that have control VSI. It is now safe to reclaim
+	 * SW interrupts back to the common pool.
+	 */
+	ice_free_res(pf->irq_tracker, vsi->base_vector,
+		     ICE_RES_VF_CTRL_VEC_ID);
+	pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct device *dev = ice_pf_to_dev(pf);
-	struct ice_vsi *vsi;
 	int ret, i;
 
-	if (vsi_type == ICE_VSI_CHNL)
-		vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
-	else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
-		vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
-	else
-		vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
-
-	if (!vsi) {
-		dev_err(dev, "could not allocate VSI\n");
-		return NULL;
+	/* configure VSI nodes based on number of queues and TC's */
+	ice_for_each_traffic_class(i) {
+		if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+			continue;
+
+		if (vsi->type == ICE_VSI_CHNL) {
+			if (!vsi->alloc_txq && vsi->num_txq)
+				max_txqs[i] = vsi->num_txq;
+			else
+				max_txqs[i] = pf->num_lan_tx;
+		} else {
+			max_txqs[i] = vsi->alloc_txq;
+		}
 	}
 
-	vsi->port_info = pi;
+	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
+	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+			      max_txqs);
+	if (ret) {
+		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+			vsi->vsi_num, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_vsi_cfg_def - configure default VSI based on the type
+ * @vsi: pointer to VSI
+ * @vf: pointer to VF to which this VSI connects. This field is used primarily
+ * for the ICE_VSI_VF type. Other VSI types should pass NULL.
+ * @ch: ptr to channel
+ * @init_vsi: is this an initialization or a reconfigure of the VSI
+ */
+static int
+ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vf *vf, struct ice_channel *ch,
+		int init_vsi)
+{
+	struct device *dev = ice_pf_to_dev(vsi->back);
+	struct ice_pf *pf = vsi->back;
+	int ret;
+
 	vsi->vsw = pf->first_sw;
-	if (vsi->type == ICE_VSI_PF)
-		vsi->ethtype = ETH_P_PAUSE;
+
+	ret = ice_vsi_alloc_def(vsi, vf, ch);
+	if (ret)
+		return ret;
+
+	/* allocate memory for Tx/Rx ring stat pointers */
+	if (ice_vsi_alloc_stat_arrays(vsi))
+		goto unroll_vsi_alloc;
 
 	ice_alloc_fd_res(vsi);
 
-	if (vsi_type != ICE_VSI_CHNL) {
-		if (ice_vsi_get_qs(vsi)) {
-			dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
-				vsi->idx);
-			goto unroll_vsi_alloc;
-		}
+	if (ice_vsi_get_qs(vsi)) {
+		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+			vsi->idx);
+		goto unroll_vsi_alloc_stat;
 	}
 
 	/* set RSS capabilities */
@@ -2702,7 +2740,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 	ice_vsi_set_tc_cfg(vsi);
 
 	/* create the VSI */
-	ret = ice_vsi_init(vsi, true);
+	ret = ice_vsi_init(vsi, init_vsi);
 	if (ret)
 		goto unroll_get_qs;
@@ -2733,6 +2771,14 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		goto unroll_vector_base;
 
 	ice_vsi_map_rings_to_vectors(vsi);
+	if (ice_is_xdp_ena_vsi(vsi)) {
+		ret = ice_vsi_determine_xdp_res(vsi);
+		if (ret)
+			goto unroll_vector_base;
+		ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+		if (ret)
+			goto unroll_vector_base;
+	}
 
 	/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
 	if (vsi->type != ICE_VSI_CTRL)
@@ -2797,52 +2843,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		goto unroll_vsi_init;
 	}
 
-	/* configure VSI nodes based on number of queues and TC's */
-	ice_for_each_traffic_class(i) {
-		if (!(vsi->tc_cfg.ena_tc & BIT(i)))
-			continue;
-
-		if (vsi->type == ICE_VSI_CHNL) {
-			if (!vsi->alloc_txq && vsi->num_txq)
-				max_txqs[i] = vsi->num_txq;
-			else
-				max_txqs[i] = pf->num_lan_tx;
-		} else {
-			max_txqs[i] = vsi->alloc_txq;
-		}
-	}
-
-	dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
-	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
-			      max_txqs);
-	if (ret) {
-		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
-			vsi->vsi_num, ret);
-		goto unroll_clear_rings;
-	}
-
-	/* Add switch rule to drop all Tx Flow Control Frames, of look up
-	 * type ETHERTYPE from VSIs, and restrict malicious VF from sending
-	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
-	 * The rule is added once for PF VSI in order to create appropriate
-	 * recipe, since VSI/VSI list is ignored with drop action...
-	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
-	 * be dropped so that VFs cannot send LLDP packets to reconfig DCB
-	 * settings in the HW.
-	 */
-	if (!ice_is_safe_mode(pf))
-		if (vsi->type == ICE_VSI_PF) {
-			ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
-					 ICE_DROP_PACKET);
-			ice_cfg_sw_lldp(vsi, true, true);
-		}
-
-	if (!vsi->agg_node)
-		ice_set_agg_vsi(vsi);
-
-	return vsi;
+	return 0;
 
-unroll_clear_rings:
-	ice_vsi_clear_rings(vsi);
 unroll_vector_base:
 	/* reclaim SW interrupts back to the common pool */
 	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
@@ -2850,31 +2852,171 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 unroll_alloc_q_vector:
 	ice_vsi_free_q_vectors(vsi);
 unroll_vsi_init:
-	ice_vsi_free_stats(vsi);
-	ice_vsi_delete(vsi);
+	ice_vsi_delete_from_hw(vsi);
 unroll_get_qs:
 	ice_vsi_put_qs(vsi);
+unroll_vsi_alloc_stat:
+	ice_vsi_free_stats(vsi);
 unroll_vsi_alloc:
-	if (vsi_type == ICE_VSI_VF)
-		ice_enable_lag(pf->lag);
-	ice_vsi_clear(vsi);
-	return NULL;
+	ice_vsi_free_arrays(vsi);
+	return ret;
 }
 
+/**
+ * ice_vsi_cfg - configure VSI and tc on it
+ * @vsi: pointer to VSI
+ * @vf: pointer to VF to which this VSI connects. This field is used primarily
+ * for the ICE_VSI_VF type. Other VSI types should pass NULL.
+ * @ch: ptr to channel
+ * @init_vsi: is this an initialization or a reconfigure of the VSI
+ */
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vf *vf, struct ice_channel *ch,
+		int init_vsi)
+{
+	int ret;
+
+	ret = ice_vsi_cfg_def(vsi, vf, ch, init_vsi);
+	if (ret)
+		return ret;
+
+	ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
+	if (ret)
+		ice_vsi_decfg(vsi);
+
+	return ret;
+}
+
+/**
+ * ice_vsi_decfg - remove all VSI configuration
+ * @vsi: pointer to VSI
+ */
+void ice_vsi_decfg(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int err;
+
+	/* The Rx rule will only exist to remove if the LLDP FW
+	 * engine is currently stopped
+	 */
+	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+	    !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+		ice_cfg_sw_lldp(vsi, false, false);
+
+	ice_fltr_remove_all(vsi);
+	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+	if (err)
+		dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+			vsi->vsi_num, err);
+
+	if (ice_is_xdp_ena_vsi(vsi))
+		/* return value check can be skipped here, it always returns
+		 * 0 if reset is in progress
+		 */
+		ice_destroy_xdp_rings(vsi);
+
+	ice_vsi_clear_rings(vsi);
+	ice_vsi_free_q_vectors(vsi);
+	ice_vsi_put_qs(vsi);
+	ice_vsi_free_arrays(vsi);
+
+	/* SR-IOV determines needed MSIX resources all at once instead of per
+	 * VSI since when VFs are spawned we know how many VFs there are and how
+	 * many interrupts each VF needs. SR-IOV MSIX resources are also
+	 * cleared in the same manner.
+	 */
+	if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+		ice_free_vf_ctrl_res(pf, vsi);
+	} else if (vsi->type != ICE_VSI_VF) {
+		/* reclaim SW interrupts back to the common pool */
+		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+		pf->num_avail_sw_msix += vsi->num_q_vectors;
+		vsi->base_vector = 0;
+	}
+
+	if (vsi->type == ICE_VSI_VF &&
+	    vsi->agg_node && vsi->agg_node->valid)
+		vsi->agg_node->num_vsis--;
+	if (vsi->agg_node) {
+		vsi->agg_node->valid = false;
+		vsi->agg_node->agg_id = 0;
+	}
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @pi: pointer to the port_info instance
+ * @vsi_type: VSI type
+ * @vf: pointer to VF to which this VSI connects. This field is used primarily
+ * for the ICE_VSI_VF type. Other VSI types should pass NULL.
+ * @ch: ptr to channel
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+	      enum ice_vsi_type vsi_type, struct ice_vf *vf,
+	      struct ice_channel *ch)
+{
+	struct device *dev = ice_pf_to_dev(pf);
+	struct ice_vsi *vsi;
+	int ret;
+
+	vsi = ice_vsi_alloc(pf, pi, vsi_type, ch, vf);
+	if (!vsi) {
+		dev_err(dev, "could not allocate VSI\n");
+		return NULL;
+	}
+
+	ret = ice_vsi_cfg(vsi, vf, ch, ICE_VSI_FLAG_INIT);
+	if (ret)
+		goto err_vsi_cfg;
+
+	/* Add switch rule to drop all Tx Flow Control Frames, of look up
+	 * type ETHERTYPE from VSIs, and restrict malicious VF from sending
+	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
+	 * The rule is added once for PF VSI in order to create appropriate
+	 * recipe, since VSI/VSI list is ignored with drop action...
+	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
+	 * be dropped so that VFs cannot send LLDP packets to reconfig DCB
+	 * settings in the HW.
+	 */
+	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
+		ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+				 ICE_DROP_PACKET);
+		ice_cfg_sw_lldp(vsi, true, true);
+	}
+
+	if (!vsi->agg_node)
+		ice_set_agg_vsi(vsi);
+
+	return vsi;
+
+err_vsi_cfg:
+	if (vsi_type == ICE_VSI_VF)
+		ice_enable_lag(pf->lag);
+	ice_vsi_free(vsi);
+
+	return NULL;
+}
+
 /**
  * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
  * @vsi: the VSI being cleaned up
  */
 static void ice_vsi_release_msix(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
 	u32 txq = 0;
 	u32 rxq = 0;
 	int i, q;
 
 	ice_for_each_q_vector(vsi, i) {
 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
 
 		ice_write_intrl(q_vector, 0);
@@ -3120,37 +3262,6 @@ void ice_napi_del(struct ice_vsi *vsi)
 		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
 }
 
-/**
- * ice_free_vf_ctrl_res - Free the VF control VSI resource
- * @pf: pointer to PF structure
- * @vsi: the VSI to free resources for
- *
- * Check if the VF control VSI resource is still in use. If no VF is using it
- * any more, release the VSI resource. Otherwise, leave it to be cleaned up
- * once no other VF uses it.
- */
-static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
-{
-	struct ice_vf *vf;
-	unsigned int bkt;
-
-	rcu_read_lock();
-	ice_for_each_vf_rcu(pf, bkt, vf) {
-		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
-			rcu_read_unlock();
-			return;
-		}
-	}
-	rcu_read_unlock();
-
-	/* No other VFs left that have control VSI. It is now safe to reclaim
-	 * SW interrupts back to the common pool.
-	 */
-	ice_free_res(pf->irq_tracker, vsi->base_vector,
-		     ICE_RES_VF_CTRL_VEC_ID);
-	pf->num_avail_sw_msix += vsi->num_q_vectors;
-}
-
 /**
  * ice_vsi_release - Delete a VSI and free its resources
  * @vsi: the VSI being removed
@@ -3160,7 +3271,6 @@ static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
 int ice_vsi_release(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf;
-	int err;
 
 	if (!vsi->back)
 		return -ENODEV;
@@ -3178,50 +3288,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
 		clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
 	}
 
-	if (vsi->type == ICE_VSI_PF)
-		ice_devlink_destroy_pf_port(pf);
-
 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 		ice_rss_clean(vsi);
 
-	/* Disable VSI and free resources */
-	if (vsi->type != ICE_VSI_LB)
-		ice_vsi_dis_irq(vsi);
 	ice_vsi_close(vsi);
-
-	/* SR-IOV determines needed MSIX resources all at once instead of per
-	 * VSI since when VFs are spawned we know how many VFs there are and how
-	 * many interrupts each VF needs. SR-IOV MSIX resources are also
-	 * cleared in the same manner.
-	 */
-	if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
-		ice_free_vf_ctrl_res(pf, vsi);
-	} else if (vsi->type != ICE_VSI_VF) {
-		/* reclaim SW interrupts back to the common pool */
-		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
-		pf->num_avail_sw_msix += vsi->num_q_vectors;
-	}
-
-	if (!ice_is_safe_mode(pf)) {
-		if (vsi->type == ICE_VSI_PF) {
-			ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
-					    ICE_DROP_PACKET);
-			ice_cfg_sw_lldp(vsi, true, false);
-			/* The Rx rule will only exist to remove if the LLDP FW
-			 * engine is currently stopped
-			 */
-			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
-				ice_cfg_sw_lldp(vsi, false, false);
-		}
-	}
-
-	if (ice_is_vsi_dflt_vsi(vsi))
-		ice_clear_dflt_vsi(vsi);
-	ice_fltr_remove_all(vsi);
-	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
-	err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
-	if (err)
-		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
-			vsi->vsi_num, err);
-	ice_vsi_delete(vsi);
-	ice_vsi_free_q_vectors(vsi);
+	ice_vsi_decfg(vsi);
 
 	if (vsi->netdev) {
 		if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
@@ -3235,19 +3309,12 @@ int ice_vsi_release(struct ice_vsi *vsi)
 		}
 	}
 
-	if (vsi->type == ICE_VSI_VF &&
-	    vsi->agg_node && vsi->agg_node->valid)
-		vsi->agg_node->num_vsis--;
-	ice_vsi_clear_rings(vsi);
-	ice_vsi_free_stats(vsi);
-	ice_vsi_put_qs(vsi);
-
 	/* retain SW VSI data structure since it is needed to unregister and
 	 * free VSI netdev when PF is not in reset recovery pending state,\
 	 * for ex: during rmmod.
 	 */
 	if (!ice_is_reset_in_progress(pf->state))
-		ice_vsi_clear(vsi);
+		ice_vsi_delete(vsi);
 
 	return 0;
 }
@@ -3410,29 +3477,24 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
 /**
  * ice_vsi_rebuild - Rebuild VSI after reset
  * @vsi: VSI to be rebuild
- * @init_vsi: is this an initialization or a reconfigure of the VSI
+ * @init_vsi: flag, tell if VSI need to be initialized
  *
  * Returns 0 on success and negative value on failure
  */
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
+int ice_vsi_rebuild(struct ice_vsi *vsi, int init_vsi)
 {
-	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_coalesce_stored *coalesce;
-	int ret, i, prev_txq, prev_rxq;
+	int ret, prev_txq, prev_rxq;
 	int prev_num_q_vectors = 0;
-	enum ice_vsi_type vtype;
 	struct ice_pf *pf;
 
 	if (!vsi)
 		return -EINVAL;
 
 	pf = vsi->back;
-	vtype = vsi->type;
-	if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+	if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
 		return -EINVAL;
 
-	ice_vsi_init_vlan_ops(vsi);
-
 	coalesce = kcalloc(vsi->num_q_vectors,
 			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
 	if (!coalesce)
@@ -3443,188 +3505,33 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 	prev_txq = vsi->num_txq;
 	prev_rxq = vsi->num_rxq;
 
-	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
-	ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
-	if (ret)
-		dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
-			vsi->vsi_num, ret);
-
-	ice_vsi_free_q_vectors(vsi);
-
-	/* SR-IOV determines needed MSIX resources all at once instead of per
-	 * VSI since when VFs are spawned we know how many VFs there are and how
-	 * many interrupts each VF needs. SR-IOV MSIX resources are also
-	 * cleared in the same manner.
-	 */
-	if (vtype != ICE_VSI_VF) {
-		/* reclaim SW interrupts back to the common pool */
-		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
-		pf->num_avail_sw_msix += vsi->num_q_vectors;
-		vsi->base_vector = 0;
-	}
-
-	if (ice_is_xdp_ena_vsi(vsi))
-		/* return value check can be skipped here, it always returns
-		 * 0 if reset is in progress
-		 */
-		ice_destroy_xdp_rings(vsi);
-	ice_vsi_put_qs(vsi);
-	ice_vsi_clear_rings(vsi);
-	ice_vsi_free_arrays(vsi);
-	if (vtype == ICE_VSI_VF)
-		ice_vsi_set_num_qs(vsi, vsi->vf);
-	else
-		ice_vsi_set_num_qs(vsi, NULL);
-
-	ret = ice_vsi_alloc_arrays(vsi);
-	if (ret < 0)
-		goto err_vsi;
-
-	ice_vsi_get_qs(vsi);
-
-	ice_alloc_fd_res(vsi);
-	ice_vsi_set_tc_cfg(vsi);
-
-	/* Initialize VSI struct elements and create VSI in FW */
-	ret = ice_vsi_init(vsi, init_vsi);
-	if (ret < 0)
-		goto err_vsi;
-
-	switch (vtype) {
-	case ICE_VSI_CTRL:
-	case ICE_VSI_SWITCHDEV_CTRL:
-	case ICE_VSI_PF:
-		ret = ice_vsi_alloc_q_vectors(vsi);
-		if (ret)
-			goto err_rings;
-
-		ret = ice_vsi_setup_vector_base(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ret = ice_vsi_alloc_rings(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ret = ice_vsi_alloc_ring_stats(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ice_vsi_map_rings_to_vectors(vsi);
-
-		vsi->stat_offsets_loaded = false;
-		if (ice_is_xdp_ena_vsi(vsi)) {
-			ret = ice_vsi_determine_xdp_res(vsi);
-			if (ret)
-				goto err_vectors;
-			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
-			if (ret)
-				goto err_vectors;
-		}
-		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
-		if (vtype != ICE_VSI_CTRL)
-			/* Do not exit if configuring RSS had an issue, at
-			 * least receive traffic on first queue. Hence no
-			 * need to capture return value
-			 */
-			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
-				ice_vsi_cfg_rss_lut_key(vsi);
-
-		/* disable or enable CRC stripping */
-		if (vsi->netdev)
-			ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
-					      NETIF_F_RXFCS));
-
-		break;
-	case ICE_VSI_VF:
-		ret = ice_vsi_alloc_q_vectors(vsi);
-		if (ret)
-			goto err_rings;
-
-		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ret = ice_vsi_alloc_rings(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ret = ice_vsi_alloc_ring_stats(vsi);
-		if (ret)
-			goto err_vectors;
-
-		vsi->stat_offsets_loaded = false;
-		break;
-	case ICE_VSI_CHNL:
-		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
-			ice_vsi_cfg_rss_lut_key(vsi);
-			ice_vsi_set_rss_flow_fld(vsi);
-		}
-		break;
-	default:
-		break;
-	}
-
-	/* configure VSI nodes based on number of queues and TC's */
-	for (i = 0; i < vsi->tc_cfg.numtc; i++) {
-		/* configure VSI nodes based on number of queues and TC's.
-		 * ADQ creates VSIs for each TC/Channel but doesn't
-		 * allocate queues instead it reconfigures the PF queues
-		 * as per the TC command. So max_txqs should point to the
-		 * PF Tx queues.
-		 */
-		if (vtype == ICE_VSI_CHNL)
-			max_txqs[i] = pf->num_lan_tx;
-		else
-			max_txqs[i] = vsi->alloc_txq;
-
-		if (ice_is_xdp_ena_vsi(vsi))
-			max_txqs[i] += vsi->num_xdp_txq;
-	}
-
-	if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
-		/* If MQPRIO is set, means channel code path, hence for main
-		 * VSI's, use TC as 1
-		 */
-		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
-	else
-		ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
-				      vsi->tc_cfg.ena_tc, max_txqs);
+	ice_vsi_decfg(vsi);
+	ret = ice_vsi_cfg_def(vsi, vsi->vf, vsi->ch, init_vsi);
+	if (ret)
+		goto err_vsi_cfg;
 
+	ret = ice_vsi_cfg_tc_lan(pf, vsi);
 	if (ret) {
-		dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
-			vsi->vsi_num, ret);
-		if (init_vsi) {
+		if (init_vsi & ICE_VSI_FLAG_INIT) {
 			ret = -EIO;
-			goto err_vectors;
+			goto err_vsi_cfg_tc_lan;
 		} else {
+			kfree(coalesce);
 			return ice_schedule_reset(pf, ICE_RESET_PFR);
 		}
 	}
 
 	if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq))
-		goto err_vectors;
+		goto err_vsi_cfg_tc_lan;
 
 	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
 	kfree(coalesce);
 
 	return 0;
 
-err_vectors:
-	ice_vsi_free_q_vectors(vsi);
-err_rings:
-	if (vsi->netdev) {
-		vsi->current_netdev_flags = 0;
-		unregister_netdev(vsi->netdev);
-		free_netdev(vsi->netdev);
-		vsi->netdev = NULL;
-	}
-err_vsi:
-	ice_vsi_clear(vsi);
-	set_bit(ICE_RESET_FAILED, pf->state);
+err_vsi_cfg_tc_lan:
+	ice_vsi_decfg(vsi);
+err_vsi_cfg:
 	kfree(coalesce);
 	return ret;
 }
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -42,7 +42,6 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
 int ice_set_link(struct ice_vsi *vsi, bool ena);
 
 void ice_vsi_delete(struct ice_vsi *vsi);
-int ice_vsi_clear(struct ice_vsi *vsi);
 
 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
@@ -63,6 +62,7 @@ void ice_vsi_close(struct ice_vsi *vsi);
 int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
 
+void ice_vsi_decfg(struct ice_vsi *vsi);
 void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
 
 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
@@ -70,7 +70,11 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
 int
 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
 
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
+#define ICE_VSI_FLAG_INIT	BIT(0)
+#define ICE_VSI_FLAG_NO_INIT	0
+int ice_vsi_rebuild(struct ice_vsi *vsi, int init_vsi);
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vf *vf,
+		struct ice_channel *ch, int init_vsi);
 
 bool ice_is_reset_in_progress(unsigned long *state);
 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
...@@ -44,7 +44,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX ...@@ -44,7 +44,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)"); MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */ #endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key); DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key); EXPORT_SYMBOL(ice_xdp_locking_key);
...@@ -3423,53 +3422,6 @@ static void ice_set_netdev_features(struct net_device *netdev) ...@@ -3423,53 +3422,6 @@ static void ice_set_netdev_features(struct net_device *netdev)
netdev->hw_features |= NETIF_F_RXFCS; netdev->hw_features |= NETIF_F_RXFCS;
} }
-/**
- * ice_cfg_netdev - Allocate, configure and register a netdev
- * @vsi: the VSI associated with the new netdev
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_cfg_netdev(struct ice_vsi *vsi)
-{
-	struct ice_netdev_priv *np;
-	struct net_device *netdev;
-	u8 mac_addr[ETH_ALEN];
-
-	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
-				    vsi->alloc_rxq);
-	if (!netdev)
-		return -ENOMEM;
-
-	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
-	vsi->netdev = netdev;
-	np = netdev_priv(netdev);
-	np->vsi = vsi;
-
-	ice_set_netdev_features(netdev);
-	ice_set_ops(netdev);
-
-	if (vsi->type == ICE_VSI_PF) {
-		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
-		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
-		eth_hw_addr_set(netdev, mac_addr);
-		ether_addr_copy(netdev->perm_addr, mac_addr);
-	}
-
-	netdev->priv_flags |= IFF_UNICAST_FLT;
-
-	/* Setup netdev TC information */
-	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
-
-	/* setup watchdog timeout value to be 5 second */
-	netdev->watchdog_timeo = 5 * HZ;
-
-	netdev->min_mtu = ETH_MIN_MTU;
-	netdev->max_mtu = ICE_MAX_MTU;
-
-	return 0;
-}
 /**
  * ice_fill_rss_lut - Fill the RSS lookup table with default values
  * @lut: Lookup table
@@ -3689,20 +3641,6 @@ static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
 				 ice_rep_indr_tc_block_unbind);
 }
-/**
- * ice_tc_indir_block_remove - clean indirect TC block notifications
- * @pf: PF structure
- */
-static void ice_tc_indir_block_remove(struct ice_pf *pf)
-{
-	struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
-	if (!pf_vsi)
-		return;
-
-	ice_tc_indir_block_unregister(pf_vsi);
-}
 /**
  * ice_tc_indir_block_register - Register TC indirect block notifications
  * @vsi: VSI struct which has the netdev
@@ -3722,76 +3660,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
 	return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
 }
-/**
- * ice_setup_pf_sw - Setup the HW switch on startup or after reset
- * @pf: board private structure
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_setup_pf_sw(struct ice_pf *pf)
-{
-	struct device *dev = ice_pf_to_dev(pf);
-	bool dvm = ice_is_dvm_ena(&pf->hw);
-	struct ice_vsi *vsi;
-	int status;
-
-	if (ice_is_reset_in_progress(pf->state))
-		return -EBUSY;
-
-	status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
-	if (status)
-		return -EIO;
-
-	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
-	if (!vsi)
-		return -ENOMEM;
-
-	/* init channel list */
-	INIT_LIST_HEAD(&vsi->ch_list);
-
-	status = ice_cfg_netdev(vsi);
-	if (status)
-		goto unroll_vsi_setup;
-	/* netdev has to be configured before setting frame size */
-	ice_vsi_cfg_frame_size(vsi);
-
-	/* init indirect block notifications */
-	status = ice_tc_indir_block_register(vsi);
-	if (status) {
-		dev_err(dev, "Failed to register netdev notifier\n");
-		goto unroll_cfg_netdev;
-	}
-
-	/* Setup DCB netlink interface */
-	ice_dcbnl_setup(vsi);
-
-	/* registering the NAPI handler requires both the queues and
-	 * netdev to be created, which are done in ice_pf_vsi_setup()
-	 * and ice_cfg_netdev() respectively
-	 */
-	ice_napi_add(vsi);
-
-	status = ice_init_mac_fltr(pf);
-	if (status)
-		goto unroll_napi_add;
-
-	return 0;
-
-unroll_napi_add:
-	ice_tc_indir_block_unregister(vsi);
-unroll_cfg_netdev:
-	ice_napi_del(vsi);
-	if (vsi->netdev) {
-		clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
-		free_netdev(vsi->netdev);
-		vsi->netdev = NULL;
-	}
-
-unroll_vsi_setup:
-	ice_vsi_release(vsi);
-	return status;
-}
 /**
  * ice_get_avail_q_count - Get count of queues in use
  * @pf_qmap: bitmap to get queue use count from
@@ -4221,13 +4089,13 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
 	/* set for the next time the netdev is started */
 	if (!netif_running(vsi->netdev)) {
-		ice_vsi_rebuild(vsi, false);
+		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
 		dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
 		goto done;
 	}
 
 	ice_vsi_close(vsi);
-	ice_vsi_rebuild(vsi, false);
+	ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
 	ice_pf_dcb_recfg(pf, locked);
 	ice_vsi_open(vsi);
 done:
@@ -4490,6 +4358,23 @@ static int ice_init_fdir(struct ice_pf *pf)
 	return err;
 }
+
+static void ice_deinit_fdir(struct ice_pf *pf)
+{
+	struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
+
+	if (!vsi)
+		return;
+
+	ice_vsi_manage_fdir(vsi, false);
+	ice_vsi_release(vsi);
+	if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+		pf->vsi[pf->ctrl_vsi_idx] = NULL;
+		pf->ctrl_vsi_idx = ICE_NO_VSI;
+	}
+
+	mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+}
 /**
  * ice_get_opt_fw_name - return optional firmware file name or NULL
  * @pf: pointer to the PF instance
@@ -4590,33 +4475,586 @@ static void ice_print_wake_reason(struct ice_pf *pf)
 /**
  * ice_register_netdev - register netdev
- * @pf: pointer to the PF struct
+ * @vsi: pointer to the VSI struct
  */
-static int ice_register_netdev(struct ice_pf *pf)
+static int ice_register_netdev(struct ice_vsi *vsi)
 {
-	struct ice_vsi *vsi;
-	int err = 0;
+	int err;
 
-	vsi = ice_get_main_vsi(pf);
 	if (!vsi || !vsi->netdev)
 		return -EIO;
 
 	err = register_netdev(vsi->netdev);
 	if (err)
-		goto err_register_netdev;
+		return err;
 
 	set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
 	netif_carrier_off(vsi->netdev);
 	netif_tx_stop_all_queues(vsi->netdev);
 
 	return 0;
-err_register_netdev:
-	free_netdev(vsi->netdev);
-	vsi->netdev = NULL;
-	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
-	return err;
 }
+
+static void ice_unregister_netdev(struct ice_vsi *vsi)
+{
+	if (!vsi || !vsi->netdev)
+		return;
+
+	unregister_netdev(vsi->netdev);
+	clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
+}
+
+/**
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_cfg_netdev(struct ice_vsi *vsi)
+{
+	struct ice_netdev_priv *np;
+	struct net_device *netdev;
+	u8 mac_addr[ETH_ALEN];
+
+	netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+				    vsi->alloc_rxq);
+	if (!netdev)
+		return -ENOMEM;
+
+	set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+	vsi->netdev = netdev;
+	np = netdev_priv(netdev);
+	np->vsi = vsi;
+
+	ice_set_netdev_features(netdev);
+	ice_set_ops(netdev);
+
+	if (vsi->type == ICE_VSI_PF) {
+		SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
+		ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+		eth_hw_addr_set(netdev, mac_addr);
+	}
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	/* Setup netdev TC information */
+	ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
+	netdev->max_mtu = ICE_MAX_MTU;
+
+	return 0;
+}
+
+static void ice_decfg_netdev(struct ice_vsi *vsi)
+{
+	clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+	free_netdev(vsi->netdev);
+	vsi->netdev = NULL;
+}
+
+static int ice_start_eth(struct ice_vsi *vsi)
+{
+	int err;
+
+	err = ice_init_mac_fltr(vsi->back);
+	if (err)
+		return err;
+
+	rtnl_lock();
+	err = ice_vsi_open(vsi);
+	rtnl_unlock();
+
+	return err;
+}
+
+static int ice_init_eth(struct ice_pf *pf)
+{
+	struct ice_vsi *vsi = ice_get_main_vsi(pf);
+	int err;
+
+	if (!vsi)
+		return -EINVAL;
+
+	/* init channel list */
+	INIT_LIST_HEAD(&vsi->ch_list);
+
+	err = ice_cfg_netdev(vsi);
+	if (err)
+		return err;
+	/* Setup DCB netlink interface */
+	ice_dcbnl_setup(vsi);
+
+	err = ice_init_mac_fltr(pf);
+	if (err)
+		goto err_init_mac_fltr;
+
+	err = ice_devlink_create_pf_port(pf);
+	if (err)
+		goto err_devlink_create_pf_port;
+
+	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+
+	err = ice_register_netdev(vsi);
+	if (err)
+		goto err_register_netdev;
+
+	err = ice_tc_indir_block_register(vsi);
+	if (err)
+		goto err_tc_indir_block_register;
+
+	ice_napi_add(vsi);
+
+	return 0;
+
+err_tc_indir_block_register:
+	ice_unregister_netdev(vsi);
+err_register_netdev:
+	ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+	ice_decfg_netdev(vsi);
+	return err;
+}
+
+static void ice_deinit_eth(struct ice_pf *pf)
+{
+	struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+	if (!vsi)
+		return;
+
+	ice_vsi_close(vsi);
+	ice_unregister_netdev(vsi);
+	ice_devlink_destroy_pf_port(pf);
+	ice_tc_indir_block_unregister(vsi);
+	ice_decfg_netdev(vsi);
+}
+
+static int ice_init_dev(struct ice_pf *pf)
+{
+	struct device *dev = ice_pf_to_dev(pf);
+	struct ice_hw *hw = &pf->hw;
+	int err;
+
+	err = ice_init_hw(hw);
+	if (err) {
+		dev_err(dev, "ice_init_hw failed: %d\n", err);
+		return err;
+	}
+
+	ice_init_feature_support(pf);
+
+	ice_request_fw(pf);
+
+	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
+	 * set in pf->state, which will cause ice_is_safe_mode to return
+	 * true
+	 */
+	if (ice_is_safe_mode(pf)) {
+		/* we already got function/device capabilities but these don't
+		 * reflect what the driver needs to do in safe mode. Instead of
+		 * adding conditional logic everywhere to ignore these
+		 * device/function capabilities, override them.
+		 */
+		ice_set_safe_mode_caps(hw);
+	}
+
+	err = ice_init_pf(pf);
+	if (err) {
+		dev_err(dev, "ice_init_pf failed: %d\n", err);
+		goto err_init_pf;
+	}
+
+	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
+	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
+	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
+	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
+		pf->hw.udp_tunnel_nic.tables[0].n_entries =
+			pf->hw.tnl.valid_count[TNL_VXLAN];
+		pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
+			UDP_TUNNEL_TYPE_VXLAN;
+	}
+	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
+		pf->hw.udp_tunnel_nic.tables[1].n_entries =
+			pf->hw.tnl.valid_count[TNL_GENEVE];
+		pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
+			UDP_TUNNEL_TYPE_GENEVE;
+	}
+
+	err = ice_init_interrupt_scheme(pf);
+	if (err) {
+		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
+		err = -EIO;
+		goto err_init_interrupt_scheme;
+	}
+
+	/* In case of MSIX we are going to setup the misc vector right here
+	 * to handle admin queue events etc. In case of legacy and MSI
+	 * the misc functionality and queue processing is combined in
+	 * the same vector and that gets setup at open.
+	 */
+	err = ice_req_irq_msix_misc(pf);
+	if (err) {
+		dev_err(dev, "setup of misc vector failed: %d\n", err);
+		goto err_req_irq_msix_misc;
+	}
+
+	return 0;
+
+err_req_irq_msix_misc:
+	ice_clear_interrupt_scheme(pf);
+err_init_interrupt_scheme:
+	ice_deinit_pf(pf);
+err_init_pf:
+	ice_deinit_hw(hw);
+	return err;
+}
+
+static void ice_deinit_dev(struct ice_pf *pf)
+{
+	ice_free_irq_msix_misc(pf);
+	ice_clear_interrupt_scheme(pf);
+	ice_deinit_pf(pf);
+	ice_deinit_hw(&pf->hw);
+}
+
+static void ice_init_features(struct ice_pf *pf)
+{
+	struct device *dev = ice_pf_to_dev(pf);
+
+	if (ice_is_safe_mode(pf))
+		return;
+
+	/* initialize DDP driven features */
+	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+		ice_ptp_init(pf);
+
+	if (ice_is_feature_supported(pf, ICE_F_GNSS))
+		ice_gnss_init(pf);
+
+	/* Note: Flow director init failure is non-fatal to load */
+	if (ice_init_fdir(pf))
+		dev_err(dev, "could not initialize flow director\n");
+
+	/* Note: DCB init failure is non-fatal to load */
+	if (ice_init_pf_dcb(pf, false)) {
+		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+	} else {
+		ice_cfg_lldp_mib_change(&pf->hw, true);
+	}
+
+	if (ice_init_lag(pf))
+		dev_warn(dev, "Failed to init link aggregation support\n");
+}
+
+static void ice_deinit_features(struct ice_pf *pf)
+{
+	ice_deinit_lag(pf);
+	if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+		ice_cfg_lldp_mib_change(&pf->hw, false);
+	ice_deinit_fdir(pf);
+	if (ice_is_feature_supported(pf, ICE_F_GNSS))
+		ice_gnss_exit(pf);
+	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+		ice_ptp_release(pf);
+}
+
+static void ice_init_wakeup(struct ice_pf *pf)
+{
+	/* Save wakeup reason register for later use */
+	pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
+	/* check for a power management event */
+	ice_print_wake_reason(pf);
+	/* clear wake status, all bits */
+	wr32(&pf->hw, PFPM_WUS, U32_MAX);
+	/* Disable WoL at init, wait for user to enable */
+	device_set_wakeup_enable(ice_pf_to_dev(pf), false);
+}
+
+static int ice_init_link(struct ice_pf *pf)
+{
+	struct device *dev = ice_pf_to_dev(pf);
+	int err;
+
+	err = ice_init_link_events(pf->hw.port_info);
+	if (err) {
+		dev_err(dev, "ice_init_link_events failed: %d\n", err);
+		return err;
+	}
+
+	/* not a fatal error if this fails */
+	err = ice_init_nvm_phy_type(pf->hw.port_info);
+	if (err)
+		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
+
+	/* not a fatal error if this fails */
+	err = ice_update_link_info(pf->hw.port_info);
+	if (err)
+		dev_err(dev, "ice_update_link_info failed: %d\n", err);
+
+	ice_init_link_dflt_override(pf->hw.port_info);
+
+	ice_check_link_cfg_err(pf,
+			       pf->hw.port_info->phy.link_info.link_cfg_err);
+
+	/* if media available, initialize PHY settings */
+	if (pf->hw.port_info->phy.link_info.link_info &
+	    ICE_AQ_MEDIA_AVAILABLE) {
+		/* not a fatal error if this fails */
+		err = ice_init_phy_user_cfg(pf->hw.port_info);
+		if (err)
+			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
+
+		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
+			struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+			if (vsi)
+				ice_configure_phy(vsi);
+		}
+	} else {
+		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+	}
+
+	return err;
+}
+
+static int ice_init_pf_sw(struct ice_pf *pf)
+{
+	bool dvm = ice_is_dvm_ena(&pf->hw);
+	struct ice_vsi *vsi;
+	int err;
+
+	/* create switch struct for the switch element created by FW on boot */
+	pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
+	if (!pf->first_sw)
+		return -ENOMEM;
+
+	if (pf->hw.evb_veb)
+		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+	else
+		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+
+	pf->first_sw->pf = pf;
+
+	/* record the sw_id available for later use */
+	pf->first_sw->sw_id = pf->hw.port_info->sw_id;
+
+	err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+	if (err)
+		goto err_aq_set_port_params;
+
+	vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+	if (!vsi) {
+		err = -ENOMEM;
+		goto err_pf_vsi_setup;
+	}
+
+	return 0;
+
+err_pf_vsi_setup:
+err_aq_set_port_params:
+	kfree(pf->first_sw);
+	return err;
+}
+
+static void ice_deinit_pf_sw(struct ice_pf *pf)
+{
+	struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+	if (!vsi)
+		return;
+
+	ice_vsi_release(vsi);
+	kfree(pf->first_sw);
+}
+
+static int ice_alloc_vsis(struct ice_pf *pf)
+{
+	struct device *dev = ice_pf_to_dev(pf);
+
+	pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
+	if (!pf->num_alloc_vsi)
+		return -EIO;
+
+	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+		dev_warn(dev,
+			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+	}
+
+	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+			       GFP_KERNEL);
+	if (!pf->vsi)
+		return -ENOMEM;
+
+	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+				     sizeof(*pf->vsi_stats), GFP_KERNEL);
+	if (!pf->vsi_stats) {
+		devm_kfree(dev, pf->vsi);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void ice_dealloc_vsis(struct ice_pf *pf)
+{
+	devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
+	pf->vsi_stats = NULL;
+
+	pf->num_alloc_vsi = 0;
+	devm_kfree(ice_pf_to_dev(pf), pf->vsi);
+	pf->vsi = NULL;
+}
+
+static int ice_init_devlink(struct ice_pf *pf)
+{
+	int err;
+
+	err = ice_devlink_register_params(pf);
+	if (err)
+		return err;
+
+	ice_devlink_init_regions(pf);
+	ice_devlink_register(pf);
+
+	return 0;
+}
+
+static void ice_deinit_devlink(struct ice_pf *pf)
+{
+	ice_devlink_unregister(pf);
+	ice_devlink_destroy_regions(pf);
+	ice_devlink_unregister_params(pf);
+}
+
+static int ice_init(struct ice_pf *pf)
+{
+	int err;
+
+	err = ice_init_dev(pf);
+	if (err)
+		return err;
+
+	err = ice_alloc_vsis(pf);
+	if (err)
+		goto err_alloc_vsis;
+
+	err = ice_init_pf_sw(pf);
+	if (err)
+		goto err_init_pf_sw;
+
+	ice_init_wakeup(pf);
+
+	err = ice_init_link(pf);
+	if (err)
+		goto err_init_link;
+
+	err = ice_send_version(pf);
+	if (err)
+		goto err_init_link;
+
+	ice_verify_cacheline_size(pf);
+
+	if (ice_is_safe_mode(pf))
+		ice_set_safe_mode_vlan_cfg(pf);
+	else
+		/* print PCI link speed and width */
+		pcie_print_link_status(pf->pdev);
+
+	/* ready to go, so clear down state bit */
+	clear_bit(ICE_DOWN, pf->state);
+	clear_bit(ICE_SERVICE_DIS, pf->state);
+
+	/* since everything is good, start the service timer */
+	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+
+	return 0;
+
+err_init_link:
+	ice_deinit_pf_sw(pf);
+err_init_pf_sw:
+	ice_dealloc_vsis(pf);
+err_alloc_vsis:
+	ice_deinit_dev(pf);
+	return err;
+}
+
+static void ice_deinit(struct ice_pf *pf)
+{
+	set_bit(ICE_SERVICE_DIS, pf->state);
+	set_bit(ICE_DOWN, pf->state);
+
+	ice_deinit_pf_sw(pf);
+	ice_dealloc_vsis(pf);
+	ice_deinit_dev(pf);
+}
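Note: ice_init()/ice_deinit() compose the smaller init/deinit pairs above, which is what lets probe, remove, and the ice_load()/ice_unload() pair below share the same building blocks. The shape is the classic staged-init pattern: run stages in order, unwind with gotos in reverse on failure, and make deinit the mirror image of init. A generic standalone sketch of that shape (not driver code):

```c
#include <stdio.h>

static int  init_dev(void)    { puts("init dev");      return 0; }
static void deinit_dev(void)  { puts("deinit dev");    }
static int  init_vsis(void)   { puts("alloc vsis");    return 0; }
static void deinit_vsis(void) { puts("free vsis");     }
static int  init_sw(void)     { puts("init pf sw");    return 0; }
static void deinit_sw(void)   { puts("deinit pf sw");  }

/* Same shape as ice_init(): each failure unwinds only the stages
 * that already succeeded, in reverse order. */
static int big_init(void)
{
	int err;

	err = init_dev();
	if (err)
		return err;
	err = init_vsis();
	if (err)
		goto err_vsis;
	err = init_sw();
	if (err)
		goto err_sw;
	return 0;

err_sw:
	deinit_vsis();
err_vsis:
	deinit_dev();
	return err;
}

/* Same shape as ice_deinit(): the exact mirror of big_init(). */
static void big_deinit(void)
{
	deinit_sw();
	deinit_vsis();
	deinit_dev();
}

int main(void)
{
	if (!big_init())
		big_deinit();
	return 0;
}
```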
+
+/**
+ * ice_load - load pf by init hw and starting VSI
+ * @pf: pointer to the pf instance
+ */
+int ice_load(struct ice_pf *pf)
+{
+	struct ice_vsi *vsi;
+	int err;
+
+	err = ice_reset(&pf->hw, ICE_RESET_PFR);
+	if (err)
+		return err;
+
+	err = ice_init_dev(pf);
+	if (err)
+		return err;
+
+	vsi = ice_get_main_vsi(pf);
+	err = ice_vsi_cfg(vsi, NULL, NULL, ICE_VSI_FLAG_INIT);
+	if (err)
+		goto err_vsi_cfg;
+
+	err = ice_start_eth(ice_get_main_vsi(pf));
+	if (err)
+		goto err_start_eth;
+
+	err = ice_init_rdma(pf);
+	if (err)
+		goto err_init_rdma;
+
+	ice_init_features(pf);
+	ice_service_task_restart(pf);
+
+	clear_bit(ICE_DOWN, pf->state);
+
+	return 0;
+
+err_init_rdma:
+	ice_vsi_close(ice_get_main_vsi(pf));
+err_start_eth:
+	ice_vsi_decfg(ice_get_main_vsi(pf));
+err_vsi_cfg:
+	ice_deinit_dev(pf);
+	return err;
+}
+
+/**
+ * ice_unload - unload pf by stopping VSI and deinit hw
+ * @pf: pointer to the pf instance
+ */
+void ice_unload(struct ice_pf *pf)
+{
+	ice_deinit_features(pf);
+	ice_deinit_rdma(pf);
+	ice_vsi_close(ice_get_main_vsi(pf));
+	ice_vsi_decfg(ice_get_main_vsi(pf));
+	ice_deinit_dev(pf);
+}
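Note: ice_load()/ice_unload() are the reload halves this series is building toward. The devlink .reload_down/.reload_up callbacks themselves live in ice_devlink.c and are not part of this file's diff; once wired up, `devlink dev reload pci/<bdf> action driver_reinit` exercises the pair. A hedged sketch of how such wrappers would look against the upstream devlink_ops signatures (the callback names here are illustrative, not the merged ones):

```c
/* Illustrative only: the real callbacks are registered in ice_devlink.c.
 * This just shows how ice_unload()/ice_load() map onto devlink reload. */
static int
ice_devlink_reload_down_sketch(struct devlink *devlink, bool netns_change,
			       enum devlink_reload_action action,
			       enum devlink_reload_limit limit,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ice_unload(pf);		/* features, RDMA, main VSI, then dev */
	return 0;
}

static int
ice_devlink_reload_up_sketch(struct devlink *devlink,
			     enum devlink_reload_action action,
			     enum devlink_reload_limit limit,
			     u32 *actions_performed,
			     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
	return ice_load(pf);	/* PFR, init dev, cfg + start main VSI */
}
```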
 /**
  * ice_probe - Device initialization routine
  * @pdev: PCI device information struct
@@ -4628,10 +5066,9 @@ static int
 ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 {
 	struct device *dev = &pdev->dev;
-	struct ice_vsi *vsi;
 	struct ice_pf *pf;
 	struct ice_hw *hw;
-	int i, err;
+	int err;
 
 	if (pdev->is_virtfn) {
 		dev_err(dev, "can't probe a virtual function\n");
@@ -4678,6 +5115,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	pci_save_state(pdev);
 
 	hw->back = pf;
+	hw->port_info = NULL;
 	hw->vendor_id = pdev->vendor;
 	hw->device_id = pdev->device;
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
@@ -4694,293 +5132,33 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 	hw->debug_mask = debug;
 #endif
-	err = ice_init_hw(hw);
-	if (err) {
-		dev_err(dev, "ice_init_hw failed: %d\n", err);
-		err = -EIO;
-		goto err_exit_unroll;
-	}
-
-	ice_init_feature_support(pf);
-
-	ice_request_fw(pf);
-
-	/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
-	 * set in pf->state, which will cause ice_is_safe_mode to return
-	 * true
-	 */
-	if (ice_is_safe_mode(pf)) {
-		/* we already got function/device capabilities but these don't
-		 * reflect what the driver needs to do in safe mode. Instead of
-		 * adding conditional logic everywhere to ignore these
-		 * device/function capabilities, override them.
-		 */
-		ice_set_safe_mode_caps(hw);
-	}
-
-	err = ice_init_pf(pf);
-	if (err) {
-		dev_err(dev, "ice_init_pf failed: %d\n", err);
-		goto err_init_pf_unroll;
-	}
-
-	ice_devlink_init_regions(pf);
-
-	pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
-	pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
-	pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
-	pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
-	i = 0;
-	if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
-		pf->hw.udp_tunnel_nic.tables[i].n_entries =
-			pf->hw.tnl.valid_count[TNL_VXLAN];
-		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
-			UDP_TUNNEL_TYPE_VXLAN;
-		i++;
-	}
-	if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
-		pf->hw.udp_tunnel_nic.tables[i].n_entries =
-			pf->hw.tnl.valid_count[TNL_GENEVE];
-		pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
-			UDP_TUNNEL_TYPE_GENEVE;
-		i++;
-	}
-
-	pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
-	if (!pf->num_alloc_vsi) {
-		err = -EIO;
-		goto err_init_pf_unroll;
-	}
-	if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
-		dev_warn(&pf->pdev->dev,
-			 "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
-			 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
-		pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
-	}
-
-	pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
-			       GFP_KERNEL);
-	if (!pf->vsi) {
-		err = -ENOMEM;
-		goto err_init_pf_unroll;
-	}
-
-	pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
-				     sizeof(*pf->vsi_stats), GFP_KERNEL);
-	if (!pf->vsi_stats) {
-		err = -ENOMEM;
-		goto err_init_vsi_unroll;
-	}
-
-	err = ice_init_interrupt_scheme(pf);
-	if (err) {
-		dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
-		err = -EIO;
-		goto err_init_vsi_stats_unroll;
-	}
-
-	/* In case of MSIX we are going to setup the misc vector right here
-	 * to handle admin queue events etc. In case of legacy and MSI
-	 * the misc functionality and queue processing is combined in
-	 * the same vector and that gets setup at open.
-	 */
-	err = ice_req_irq_msix_misc(pf);
-	if (err) {
-		dev_err(dev, "setup of misc vector failed: %d\n", err);
-		goto err_init_interrupt_unroll;
-	}
-
-	/* create switch struct for the switch element created by FW on boot */
-	pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
-	if (!pf->first_sw) {
-		err = -ENOMEM;
-		goto err_msix_misc_unroll;
-	}
-
-	if (hw->evb_veb)
-		pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
-	else
-		pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
-
-	pf->first_sw->pf = pf;
-
-	/* record the sw_id available for later use */
-	pf->first_sw->sw_id = hw->port_info->sw_id;
-
-	err = ice_setup_pf_sw(pf);
-	if (err) {
-		dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
-		goto err_alloc_sw_unroll;
-	}
-
-	clear_bit(ICE_SERVICE_DIS, pf->state);
-
-	/* tell the firmware we are up */
-	err = ice_send_version(pf);
-	if (err) {
-		dev_err(dev, "probe failed sending driver version %s. error: %d\n",
-			UTS_RELEASE, err);
-		goto err_send_version_unroll;
-	}
-
-	/* since everything is good, start the service timer */
-	mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
-
-	err = ice_init_link_events(pf->hw.port_info);
-	if (err) {
-		dev_err(dev, "ice_init_link_events failed: %d\n", err);
-		goto err_send_version_unroll;
-	}
-
-	/* not a fatal error if this fails */
-	err = ice_init_nvm_phy_type(pf->hw.port_info);
-	if (err)
-		dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
-
-	/* not a fatal error if this fails */
-	err = ice_update_link_info(pf->hw.port_info);
-	if (err)
-		dev_err(dev, "ice_update_link_info failed: %d\n", err);
-
-	ice_init_link_dflt_override(pf->hw.port_info);
-
-	ice_check_link_cfg_err(pf,
-			       pf->hw.port_info->phy.link_info.link_cfg_err);
-
-	/* if media available, initialize PHY settings */
-	if (pf->hw.port_info->phy.link_info.link_info &
-	    ICE_AQ_MEDIA_AVAILABLE) {
-		/* not a fatal error if this fails */
-		err = ice_init_phy_user_cfg(pf->hw.port_info);
-		if (err)
-			dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
-
-		if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
-			struct ice_vsi *vsi = ice_get_main_vsi(pf);
-
-			if (vsi)
-				ice_configure_phy(vsi);
-		}
-	} else {
-		set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
-	}
-
-	ice_verify_cacheline_size(pf);
-
-	/* Save wakeup reason register for later use */
-	pf->wakeup_reason = rd32(hw, PFPM_WUS);
-	/* check for a power management event */
-	ice_print_wake_reason(pf);
-	/* clear wake status, all bits */
-	wr32(hw, PFPM_WUS, U32_MAX);
-	/* Disable WoL at init, wait for user to enable */
-	device_set_wakeup_enable(dev, false);
-
-	if (ice_is_safe_mode(pf)) {
-		ice_set_safe_mode_vlan_cfg(pf);
-		goto probe_done;
-	}
-
-	/* initialize DDP driven features */
-	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
-		ice_ptp_init(pf);
-
-	if (ice_is_feature_supported(pf, ICE_F_GNSS))
-		ice_gnss_init(pf);
-
-	/* Note: Flow director init failure is non-fatal to load */
-	if (ice_init_fdir(pf))
-		dev_err(dev, "could not initialize flow director\n");
-
-	/* Note: DCB init failure is non-fatal to load */
-	if (ice_init_pf_dcb(pf, false)) {
-		clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
-		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
-	} else {
-		ice_cfg_lldp_mib_change(&pf->hw, true);
-	}
-
-	if (ice_init_lag(pf))
-		dev_warn(dev, "Failed to init link aggregation support\n");
-
-	/* print PCI link speed and width */
-	pcie_print_link_status(pf->pdev);
-
-probe_done:
-	err = ice_devlink_create_pf_port(pf);
-	if (err)
-		goto err_create_pf_port;
-
-	vsi = ice_get_main_vsi(pf);
-	if (!vsi || !vsi->netdev) {
-		err = -EINVAL;
-		goto err_netdev_reg;
-	}
-
-	SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
-
-	err = ice_register_netdev(pf);
-	if (err)
-		goto err_netdev_reg;
-
-	err = ice_devlink_register_params(pf);
-	if (err)
-		goto err_netdev_reg;
-
-	/* ready to go, so clear down state bit */
-	clear_bit(ICE_DOWN, pf->state);
-	if (ice_is_rdma_ena(pf)) {
-		pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
-		if (pf->aux_idx < 0) {
-			dev_err(dev, "Failed to allocate device ID for AUX driver\n");
-			err = -ENOMEM;
-			goto err_devlink_reg_param;
-		}
-
-		err = ice_init_rdma(pf);
-		if (err) {
-			dev_err(dev, "Failed to initialize RDMA: %d\n", err);
-			err = -EIO;
-			goto err_init_aux_unroll;
-		}
-	} else {
-		dev_warn(dev, "RDMA is not supported on this device\n");
-	}
-
-	ice_devlink_register(pf);
-	return 0;
-
-err_init_aux_unroll:
-	pf->adev = NULL;
-	ida_free(&ice_aux_ida, pf->aux_idx);
-err_devlink_reg_param:
-	ice_devlink_unregister_params(pf);
-err_netdev_reg:
-	ice_devlink_destroy_pf_port(pf);
-err_create_pf_port:
-err_send_version_unroll:
-	ice_vsi_release_all(pf);
-err_alloc_sw_unroll:
-	set_bit(ICE_SERVICE_DIS, pf->state);
-	set_bit(ICE_DOWN, pf->state);
-	devm_kfree(dev, pf->first_sw);
-err_msix_misc_unroll:
-	ice_free_irq_msix_misc(pf);
-err_init_interrupt_unroll:
-	ice_clear_interrupt_scheme(pf);
-err_init_vsi_stats_unroll:
-	devm_kfree(dev, pf->vsi_stats);
-	pf->vsi_stats = NULL;
-err_init_vsi_unroll:
-	devm_kfree(dev, pf->vsi);
-err_init_pf_unroll:
-	ice_deinit_pf(pf);
-	ice_devlink_destroy_regions(pf);
-	ice_deinit_hw(hw);
-err_exit_unroll:
+	err = ice_init(pf);
+	if (err)
+		goto err_init;
+
+	err = ice_init_eth(pf);
+	if (err)
+		goto err_init_eth;
+
+	err = ice_init_rdma(pf);
+	if (err)
+		goto err_init_rdma;
+
+	err = ice_init_devlink(pf);
+	if (err)
+		goto err_init_devlink;
+
+	ice_init_features(pf);
+
+	return 0;
+
+err_init_devlink:
+	ice_deinit_rdma(pf);
+err_init_rdma:
+	ice_deinit_eth(pf);
+err_init_eth:
+	ice_deinit(pf);
+err_init:
 	pci_disable_device(pdev);
 	return err;
 }
@@ -5053,65 +5231,42 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
 static void ice_remove(struct pci_dev *pdev)
 {
 	struct ice_pf *pf = pci_get_drvdata(pdev);
-	struct ice_hw *hw;
 	int i;
 
-	hw = &pf->hw;
-
-	ice_devlink_unregister(pf);
 	for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
 		if (!ice_is_reset_in_progress(pf->state))
 			break;
 		msleep(100);
 	}
 
-	ice_tc_indir_block_remove(pf);
-
 	if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
 		set_bit(ICE_VF_RESETS_DISABLED, pf->state);
 		ice_free_vfs(pf);
 	}
 
 	ice_service_task_stop(pf);
	ice_aq_cancel_waiting_tasks(pf);
-	ice_unplug_aux_dev(pf);
-	if (pf->aux_idx >= 0)
-		ida_free(&ice_aux_ida, pf->aux_idx);
-	ice_devlink_unregister_params(pf);
 	set_bit(ICE_DOWN, pf->state);
 
-	ice_deinit_lag(pf);
-	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
-		ice_ptp_release(pf);
-	if (ice_is_feature_supported(pf, ICE_F_GNSS))
-		ice_gnss_exit(pf);
 	if (!ice_is_safe_mode(pf))
 		ice_remove_arfs(pf);
-	ice_setup_mc_magic_wake(pf);
+	ice_deinit_features(pf);
+	ice_deinit_devlink(pf);
+	ice_deinit_rdma(pf);
+	ice_deinit_eth(pf);
+	ice_deinit(pf);
+
 	ice_vsi_release_all(pf);
-	mutex_destroy(&hw->fdir_fltr_lock);
-	ice_devlink_destroy_pf_port(pf);
+
+	ice_setup_mc_magic_wake(pf);
 	ice_set_wake(pf);
-	ice_free_irq_msix_misc(pf);
-	ice_for_each_vsi(pf, i) {
-		if (!pf->vsi[i])
-			continue;
-		ice_vsi_free_q_vectors(pf->vsi[i]);
-	}
-	devm_kfree(&pdev->dev, pf->vsi_stats);
-	pf->vsi_stats = NULL;
-	ice_deinit_pf(pf);
-	ice_devlink_destroy_regions(pf);
-	ice_deinit_hw(hw);
 
 	/* Issue a PFR as part of the prescribed driver unload flow. Do not
 	 * do it via ice_schedule_reset() since there is no need to rebuild
 	 * and the service task is already stopped.
 	 */
-	ice_reset(hw, ICE_RESET_PFR);
+	ice_reset(&pf->hw, ICE_RESET_PFR);
 	pci_wait_for_pending_transaction(pdev);
-	ice_clear_interrupt_scheme(pf);
 	pci_disable_device(pdev);
 }
@@ -6145,12 +6300,12 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_cfg - Setup the VSI
+ * ice_vsi_cfg_lan - Setup the VSI lan related config
  * @vsi: the VSI being configured
  *
  * Return 0 on success and negative value on error
 */
-int ice_vsi_cfg(struct ice_vsi *vsi)
+int ice_vsi_cfg_lan(struct ice_vsi *vsi)
 {
 	int err;
@@ -6366,7 +6521,7 @@ int ice_up(struct ice_vsi *vsi)
 {
 	int err;
 
-	err = ice_vsi_cfg(vsi);
+	err = ice_vsi_cfg_lan(vsi);
 	if (!err)
 		err = ice_up_complete(vsi);
@@ -6934,7 +7089,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
 	if (err)
 		goto err_setup_rx;
 
-	err = ice_vsi_cfg(vsi);
+	err = ice_vsi_cfg_lan(vsi);
 	if (err)
 		goto err_setup_rx;
@@ -6988,7 +7143,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
 	if (err)
 		goto err_setup_rx;
 
-	err = ice_vsi_cfg(vsi);
+	err = ice_vsi_cfg_lan(vsi);
 	if (err)
 		goto err_setup_rx;
@@ -7073,7 +7228,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
 			continue;
 
 		/* rebuild the VSI */
-		err = ice_vsi_rebuild(vsi, true);
+		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
 		if (err) {
 			dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
 				err, vsi->idx, ice_vsi_type_str(type));
@@ -8418,12 +8573,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
 		/* clear the VSI from scheduler tree */
 		ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
 
-		/* Delete VSI from FW */
+		/* Delete VSI from FW, PF and HW VSI arrays */
 		ice_vsi_delete(ch->ch_vsi);
-		/* Delete VSI from PF and HW VSI arrays */
-		ice_vsi_clear(ch->ch_vsi);
 
 		/* free the channel */
 		kfree(ch);
@@ -8482,7 +8634,7 @@ static int ice_rebuild_channels(struct ice_pf *pf)
 		type = vsi->type;
 
 		/* rebuild ADQ VSI */
-		err = ice_vsi_rebuild(vsi, true);
+		err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
 		if (err) {
 			dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
 				ice_vsi_type_str(type), vsi->idx, err);
@@ -8714,14 +8866,14 @@ static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
 	cur_rxq = vsi->num_rxq;
 
 	/* proceed with rebuild main VSI using correct number of queues */
-	ret = ice_vsi_rebuild(vsi, false);
+	ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
 	if (ret) {
 		/* fallback to current number of queues */
 		dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
 		vsi->req_txq = cur_txq;
 		vsi->req_rxq = cur_rxq;
 		clear_bit(ICE_RESET_FAILED, pf->state);
-		if (ice_vsi_rebuild(vsi, false)) {
+		if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
 			dev_err(dev, "Rebuild of main VSI failed again\n");
 			return ret;
 		}
...
@@ -256,7 +256,7 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
 	if (WARN_ON(!vsi))
 		return -EINVAL;
 
-	if (ice_vsi_rebuild(vsi, true)) {
+	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
 		dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
 			vf->vf_id);
 		return -EIO;
...