Commit 4015d11e authored by Brett Creeley, committed by Jeff Kirsher

ice: Add ice_pf_to_dev(pf) macro

We use &pf->pdev->dev all over the code. Add a simple
macro to do this for us. When multiple de-references
like this are being done, add a local struct device
variable.
Signed-off-by: Brett Creeley <brett.creeley@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent 9efe35d0
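For reference, the conversion this patch applies everywhere looks like the minimal sketch below. Only the macro body is taken verbatim from the ice.h hunk; struct device, struct pci_dev and struct ice_pf here are simplified userspace stand-ins for the real kernel definitions, so the sketch only illustrates the pointer chain being wrapped, not the driver itself.

/* Minimal userspace sketch of the ice_pf_to_dev() conversion.
 * The three structs below are stand-ins for the kernel types;
 * only the macro body matches the patch.
 */
#include <stdio.h>

struct device { const char *name; };
struct pci_dev { struct device dev; };
struct ice_pf { struct pci_dev *pdev; };

/* Macro added to ice.h by this commit */
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

static void show(struct ice_pf *pf)
{
	/* Before: callers open-coded the dereference chain */
	struct device *old_style = &pf->pdev->dev;
	/* After: one macro; cached in a local when used repeatedly */
	struct device *dev = ice_pf_to_dev(pf);

	printf("same device? %s (%s)\n",
	       old_style == dev ? "yes" : "no", dev->name);
}

int main(void)
{
	struct pci_dev pdev = { .dev = { .name = "stub-pci-device" } };
	struct ice_pf pf = { .pdev = &pdev };

	show(&pf);
	return 0;
}

Call sites with a single use pass ice_pf_to_dev(pf) directly, while functions that reference the device several times cache it once in a local dev variable, which is what most of the hunks below do.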
@@ -130,6 +130,8 @@ extern const char ice_drv_ver[];
 				     ICE_PROMISC_VLAN_TX | \
 				     ICE_PROMISC_VLAN_RX)

+#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+
 struct ice_txq_meta {
 	u32 q_teid;	/* Tx-scheduler element identifier */
 	u16 q_id;	/* Entry in VSI's txq_map bitmap */
...
@@ -101,7 +101,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
 	struct ice_q_vector *q_vector;

 	/* allocate q_vector */
-	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
+				GFP_KERNEL);
 	if (!q_vector)
 		return -ENOMEM;
@@ -138,10 +139,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
 	struct ice_q_vector *q_vector;
 	struct ice_pf *pf = vsi->back;
 	struct ice_ring *ring;
+	struct device *dev;

+	dev = ice_pf_to_dev(pf);
 	if (!vsi->q_vectors[v_idx]) {
-		dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
-			v_idx);
+		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
 		return;
 	}
 	q_vector = vsi->q_vectors[v_idx];
@@ -155,7 +157,7 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
 	if (vsi->netdev)
 		netif_napi_del(&q_vector->napi);

-	devm_kfree(&pf->pdev->dev, q_vector);
+	devm_kfree(dev, q_vector);
 	vsi->q_vectors[v_idx] = NULL;
 }
@@ -482,7 +484,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
 	/* wait for the change to finish */
 	ret = ice_pf_rxq_wait(pf, pf_q, ena);
 	if (ret)
-		dev_err(&pf->pdev->dev,
+		dev_err(ice_pf_to_dev(pf),
 			"VSI idx %d Rx ring %d %sable timeout\n",
 			vsi->idx, pf_q, (ena ? "en" : "dis"));
@@ -500,11 +502,12 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
 	int v_idx = 0, num_q_vectors;
+	struct device *dev;
 	int err;

+	dev = ice_pf_to_dev(pf);
 	if (vsi->q_vectors[0]) {
-		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
-			vsi->vsi_num);
+		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
 		return -EEXIST;
 	}
@@ -522,8 +525,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 	while (v_idx--)
 		ice_free_q_vector(vsi, v_idx);

-	dev_err(&pf->pdev->dev,
-		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
 		vsi->num_q_vectors, vsi->vsi_num, err);
 	vsi->num_q_vectors = 0;
 	return err;
@@ -640,7 +642,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
 				 1, qg_buf, buf_len, NULL);
 	if (status) {
-		dev_err(&pf->pdev->dev,
+		dev_err(ice_pf_to_dev(pf),
 			"Failed to set LAN Tx queue context, error: %d\n",
 			status);
 		return -ENODEV;
...
@@ -160,6 +160,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 {
 	struct ice_aqc_port_ets_elem buf = { 0 };
 	struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+	struct device *dev = ice_pf_to_dev(pf);
 	int ret = ICE_DCB_NO_HW_CHG;
 	struct ice_vsi *pf_vsi;
@@ -171,15 +172,15 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 	/* Enable DCB tagging only when more than one TC */
 	if (ice_dcb_get_num_tc(new_cfg) > 1) {
-		dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
 	} else {
-		dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+		dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
 	}

 	if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
-		dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
+		dev_dbg(dev, "No change in DCB config required\n");
 		return ret;
 	}
@@ -188,10 +189,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 	if (!old_cfg)
 		return -ENOMEM;

-	dev_info(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n");
+	dev_info(dev, "Commit DCB Configuration to the hardware\n");
 	pf_vsi = ice_get_main_vsi(pf);
 	if (!pf_vsi) {
-		dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
+		dev_dbg(dev, "PF VSI doesn't exist\n");
 		ret = -EINVAL;
 		goto free_cfg;
 	}
@@ -213,7 +214,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 	if (pf->hw.port_info->is_sw_lldp) {
 		ret = ice_set_dcb_cfg(pf->hw.port_info);
 		if (ret) {
-			dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
+			dev_err(dev, "Set DCB Config failed\n");
 			/* Restore previous settings to local config */
 			memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
 			goto out;
@@ -222,7 +223,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {
-		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		dev_err(dev, "Query Port ETS failed\n");
 		goto out;
 	}
@@ -269,6 +270,7 @@ static bool
 ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
 		   struct ice_dcbx_cfg *new_cfg)
 {
+	struct device *dev = ice_pf_to_dev(pf);
 	bool need_reconfig = false;

 	/* Check if ETS configuration has changed */
@@ -279,33 +281,33 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
 			   &old_cfg->etscfg.prio_table,
 			   sizeof(new_cfg->etscfg.prio_table))) {
 			need_reconfig = true;
-			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+			dev_dbg(dev, "ETS UP2TC changed.\n");
 		}

 		if (memcmp(&new_cfg->etscfg.tcbwtable,
 			   &old_cfg->etscfg.tcbwtable,
 			   sizeof(new_cfg->etscfg.tcbwtable)))
-			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+			dev_dbg(dev, "ETS TC BW Table changed.\n");

 		if (memcmp(&new_cfg->etscfg.tsatable,
 			   &old_cfg->etscfg.tsatable,
 			   sizeof(new_cfg->etscfg.tsatable)))
-			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+			dev_dbg(dev, "ETS TSA Table changed.\n");
 	}

 	/* Check if PFC configuration has changed */
 	if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
 		need_reconfig = true;
-		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+		dev_dbg(dev, "PFC config change detected.\n");
 	}

 	/* Check if APP Table has changed */
 	if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
 		need_reconfig = true;
-		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+		dev_dbg(dev, "APP Table change detected.\n");
 	}

-	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+	dev_dbg(dev, "dcb need_reconfig=%d\n", need_reconfig);
 	return need_reconfig;
 }
@@ -317,11 +319,12 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 {
 	struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
 	struct ice_aqc_port_ets_elem buf = { 0 };
+	struct device *dev = ice_pf_to_dev(pf);
 	enum ice_status ret;

 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {
-		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		dev_err(dev, "Query Port ETS failed\n");
 		goto dcb_error;
 	}
@@ -340,16 +343,14 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 	ice_cfg_etsrec_defaults(pf->hw.port_info);
 	ret = ice_set_dcb_cfg(pf->hw.port_info);
 	if (ret) {
-		dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+		dev_err(dev, "Failed to set DCB to unwilling\n");
 		goto dcb_error;
 	}

 	/* Retrieve DCB config and ensure same as current in SW */
 	prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
-	if (!prev_cfg) {
-		dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
+	if (!prev_cfg)
 		goto dcb_error;
-	}

 	ice_init_dcb(&pf->hw, true);
 	if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
@@ -359,7 +360,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 	if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
 		/* difference in cfg detected - disable DCB till next MIB */
-		dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
+		dev_err(dev, "Set local MIB not accurate\n");
 		kfree(prev_cfg);
 		goto dcb_error;
 	}
@@ -375,20 +376,20 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 	ice_cfg_etsrec_defaults(pf->hw.port_info);
 	ret = ice_set_dcb_cfg(pf->hw.port_info);
 	if (ret) {
-		dev_err(&pf->pdev->dev, "Failed to set desired config\n");
+		dev_err(dev, "Failed to set desired config\n");
 		goto dcb_error;
 	}

-	dev_info(&pf->pdev->dev, "DCB restored after reset\n");
+	dev_info(dev, "DCB restored after reset\n");
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {
-		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		dev_err(dev, "Query Port ETS failed\n");
 		goto dcb_error;
 	}

 	return;

dcb_error:
-	dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
+	dev_err(dev, "Disabling DCB until new settings occur\n");
 	prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
 	if (!prev_cfg)
 		return;
@@ -419,7 +420,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
 	memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));

-	dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
+	dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
 	if (ice_pf_dcb_cfg(pf, newcfg, locked))
 		ret = -EINVAL;
@@ -507,13 +508,13 @@ static bool ice_dcb_tc_contig(u8 *prio_table)
 static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
 {
 	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+	struct device *dev = ice_pf_to_dev(pf);
 	int ret;

 	/* Configure SW DCB default with ETS non-willing */
 	ret = ice_dcb_sw_dflt_cfg(pf, false, true);
 	if (ret) {
-		dev_err(&pf->pdev->dev,
-			"Failed to set local DCB config %d\n", ret);
+		dev_err(dev, "Failed to set local DCB config %d\n", ret);
 		return ret;
 	}
@@ -521,7 +522,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
 	dcbcfg->etscfg.willing = 1;
 	ret = ice_set_dcb_cfg(pf->hw.port_info);
 	if (ret)
-		dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+		dev_err(dev, "Failed to set DCB to unwilling\n");

 	return ret;
 }
@@ -542,10 +543,12 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
 	/* Update each VSI */
 	ice_for_each_vsi(pf, v) {
-		if (!pf->vsi[v])
+		struct ice_vsi *vsi = pf->vsi[v];
+
+		if (!vsi)
 			continue;

-		if (pf->vsi[v]->type == ICE_VSI_PF) {
+		if (vsi->type == ICE_VSI_PF) {
 			tc_map = ice_dcb_get_ena_tc(dcbcfg);

 			/* If DCBX request non-contiguous TC, then configure
@@ -559,17 +562,16 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
 				tc_map = ICE_DFLT_TRAFFIC_CLASS;
 			}

-		ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
+		ret = ice_vsi_cfg_tc(vsi, tc_map);
 		if (ret) {
-			dev_err(&pf->pdev->dev,
-				"Failed to config TC for VSI index: %d\n",
-				pf->vsi[v]->idx);
+			dev_err(ice_pf_to_dev(pf), "Failed to config TC for VSI index: %d\n",
+				vsi->idx);
 			continue;
 		}

-		ice_vsi_map_rings_to_vectors(pf->vsi[v]);
-		if (pf->vsi[v]->type == ICE_VSI_PF)
-			ice_dcbnl_set_all(pf->vsi[v]);
+		ice_vsi_map_rings_to_vectors(vsi);
+		if (vsi->type == ICE_VSI_PF)
+			ice_dcbnl_set_all(vsi);
 	}
 }
@@ -580,7 +582,7 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
  */
 int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 {
-	struct device *dev = &pf->pdev->dev;
+	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_port_info *port_info;
 	struct ice_hw *hw = &pf->hw;
 	int err;
@@ -589,23 +591,22 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 	err = ice_init_dcb(hw, false);
 	if (err && !port_info->is_sw_lldp) {
-		dev_err(&pf->pdev->dev, "Error initializing DCB %d\n", err);
+		dev_err(dev, "Error initializing DCB %d\n", err);
 		goto dcb_init_err;
 	}

-	dev_info(&pf->pdev->dev,
+	dev_info(dev,
 		 "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
 		 pf->hw.func_caps.common_cap.maxtc);
 	if (err) {
 		struct ice_vsi *pf_vsi;

 		/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
-		dev_info(&pf->pdev->dev,
-			 "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
+		dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
 		clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
 		err = ice_dcb_sw_dflt_cfg(pf, true, locked);
 		if (err) {
-			dev_err(&pf->pdev->dev,
+			dev_err(dev,
 				"Failed to set local DCB config %d\n", err);
 			err = -EIO;
 			goto dcb_init_err;
@@ -616,8 +617,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 		 */
 		pf_vsi = ice_get_main_vsi(pf);
 		if (!pf_vsi) {
-			dev_err(&pf->pdev->dev,
-				"Failed to set local DCB config\n");
+			dev_err(dev, "Failed to set local DCB config\n");
 			err = -EIO;
 			goto dcb_init_err;
 		}
@@ -732,6 +732,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 				    struct ice_rq_event_info *event)
 {
 	struct ice_aqc_port_ets_elem buf = { 0 };
+	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_aqc_lldp_get_mib *mib;
 	struct ice_dcbx_cfg tmp_dcbx_cfg;
 	bool need_reconfig = false;
@@ -745,8 +746,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 		return;

 	if (pf->dcbx_cap & DCB_CAP_DCBX_HOST) {
-		dev_dbg(&pf->pdev->dev,
-			"MIB Change Event in HOST mode\n");
+		dev_dbg(dev, "MIB Change Event in HOST mode\n");
 		return;
 	}
@@ -755,21 +755,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	/* Ignore if event is not for Nearest Bridge */
 	type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
 		ICE_AQ_LLDP_BRID_TYPE_M);
-	dev_dbg(&pf->pdev->dev, "LLDP event MIB bridge type 0x%x\n", type);
+	dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", type);
 	if (type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
 		return;

 	/* Check MIB Type and return if event for Remote MIB update */
 	type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
-	dev_dbg(&pf->pdev->dev,
-		"LLDP event mib type %s\n", type ? "remote" : "local");
+	dev_dbg(dev, "LLDP event mib type %s\n", type ? "remote" : "local");
 	if (type == ICE_AQ_LLDP_MIB_REMOTE) {
 		/* Update the remote cached instance and return */
 		ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
 					 ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
 					 &pi->remote_dcbx_cfg);
 		if (ret) {
-			dev_err(&pf->pdev->dev, "Failed to get remote DCB config\n");
+			dev_err(dev, "Failed to get remote DCB config\n");
 			return;
 		}
 	}
@@ -783,14 +782,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	/* Get updated DCBX data from firmware */
 	ret = ice_get_dcb_cfg(pf->hw.port_info);
 	if (ret) {
-		dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
+		dev_err(dev, "Failed to get DCB config\n");
 		return;
 	}

 	/* No change detected in DCBX configs */
 	if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
-		dev_dbg(&pf->pdev->dev,
-			"No change detected in DCBX configuration.\n");
+		dev_dbg(dev, "No change detected in DCBX configuration.\n");
 		return;
 	}
@@ -802,16 +800,16 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	/* Enable DCB tagging only when more than one TC */
 	if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
-		dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+		dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
 	} else {
-		dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+		dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
 	}

 	pf_vsi = ice_get_main_vsi(pf);
 	if (!pf_vsi) {
-		dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
+		dev_dbg(dev, "PF VSI doesn't exist\n");
 		return;
 	}
@@ -820,7 +818,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {
-		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+		dev_err(dev, "Query Port ETS failed\n");
 		rtnl_unlock();
 		return;
 	}
...
@@ -58,7 +58,7 @@ ice_dcb_get_tc(struct ice_vsi __always_unused *vsi,
 static inline int
 ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
 {
-	dev_dbg(&pf->pdev->dev, "DCB not supported\n");
+	dev_dbg(ice_pf_to_dev(pf), "DCB not supported\n");
 	return -EOPNOTSUPP;
 }
...
@@ -179,7 +179,7 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
 	else
 		pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;

-	dev_info(&pf->pdev->dev, "DCBx mode = 0x%x\n", mode);
+	dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
 	return ICE_DCB_HW_CHG_RST;
 }
@@ -297,7 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
 		return;

 	*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
-	dev_dbg(&pf->pdev->dev,
+	dev_dbg(ice_pf_to_dev(pf),
 		"Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
 		prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
 }
@@ -328,7 +328,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
 	else
 		new_cfg->pfc.pfcena &= ~BIT(prio);

-	dev_dbg(&pf->pdev->dev, "Set PFC config UP:%d set:%d pfcena:0x%x\n",
+	dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
 		prio, set, new_cfg->pfc.pfcena);
 }
@@ -359,7 +359,7 @@ static u8 ice_dcbnl_getstate(struct net_device *netdev)
 	state = test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);

-	dev_dbg(&pf->pdev->dev, "DCB enabled state = %d\n", state);
+	dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
 	return state;
 }
@@ -418,7 +418,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
 		return;

 	*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
-	dev_dbg(&pf->pdev->dev,
+	dev_dbg(ice_pf_to_dev(pf),
 		"Get PG config prio=%d tc=%d\n", prio, *pgid);
 }
@@ -479,7 +479,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
 		return;

 	*bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
-	dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n",
+	dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
 		pgid, *bw_pct);
 }
@@ -597,7 +597,7 @@ static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
 		break;
 	}

-	dev_dbg(&pf->pdev->dev, "DCBX Get Capability cap=%d capval=0x%x\n",
+	dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
 		capid, *cap);
 	return 0;
 }
...
@@ -248,7 +248,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	int ret = 0;
 	u16 *buf;

-	dev = &pf->pdev->dev;
+	dev = ice_pf_to_dev(pf);

 	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
@@ -343,6 +343,7 @@ static u64 ice_eeprom_test(struct net_device *netdev)
 static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
 {
 	struct ice_pf *pf = (struct ice_pf *)hw->back;
+	struct device *dev = ice_pf_to_dev(pf);
 	static const u32 patterns[] = {
 		0x5A5A5A5A, 0xA5A5A5A5,
 		0x00000000, 0xFFFFFFFF
@@ -358,7 +359,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
 		val = rd32(hw, reg);
 		if (val == pattern)
 			continue;
-		dev_err(&pf->pdev->dev,
+		dev_err(dev,
 			"%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
 			, __func__, reg, pattern, val);
 		return 1;
@@ -367,7 +368,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
 	wr32(hw, reg, orig_val);
 	val = rd32(hw, reg);
 	if (val != orig_val) {
-		dev_err(&pf->pdev->dev,
+		dev_err(dev,
 			"%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
 			, __func__, reg, orig_val, val);
 		return 1;
@@ -507,7 +508,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size)
 	if (!pf)
 		return -EINVAL;

-	data = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL);
+	data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
@@ -649,9 +650,11 @@ static u64 ice_loopback_test(struct net_device *netdev)
 	u8 broadcast[ETH_ALEN], ret = 0;
 	int num_frames, valid_frames;
 	LIST_HEAD(tmp_list);
+	struct device *dev;
 	u8 *tx_frame;
 	int i;

+	dev = ice_pf_to_dev(pf);
 	netdev_info(netdev, "loopback test\n");

 	test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info);
@@ -712,12 +715,12 @@ static u64 ice_loopback_test(struct net_device *netdev)
 		ret = 10;

lbtest_free_frame:
-	devm_kfree(&pf->pdev->dev, tx_frame);
+	devm_kfree(dev, tx_frame);
remove_mac_filters:
 	if (ice_remove_mac(&pf->hw, &tmp_list))
 		netdev_err(netdev, "Could not remove MAC filter for the test VSI");
free_mac_list:
-	ice_free_fltr_list(&pf->pdev->dev, &tmp_list);
+	ice_free_fltr_list(dev, &tmp_list);
lbtest_mac_dis:
 	/* Disable MAC loopback after the test is completed. */
 	if (ice_aq_set_mac_loopback(&pf->hw, false, NULL))
@@ -774,6 +777,9 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	bool if_running = netif_running(netdev);
 	struct ice_pf *pf = np->vsi->back;
+	struct device *dev;
+
+	dev = ice_pf_to_dev(pf);

 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		netdev_info(netdev, "offline testing starting\n");
@@ -781,7 +787,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
 		set_bit(__ICE_TESTING, pf->state);

 		if (ice_active_vfs(pf)) {
-			dev_warn(&pf->pdev->dev,
+			dev_warn(dev,
 				 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
 			data[ICE_ETH_TEST_REG] = 1;
 			data[ICE_ETH_TEST_EEPROM] = 1;
@@ -816,8 +822,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
 			int status = ice_open(netdev);

 			if (status) {
-				dev_err(&pf->pdev->dev,
-					"Could not open device %s, err %d",
+				dev_err(dev, "Could not open device %s, err %d",
 					pf->int_name, status);
 			}
 		}
@@ -1155,12 +1160,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 	DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
+	struct device *dev;
 	int ret = 0;
 	u32 i;

 	if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
 		return -EINVAL;

+	dev = ice_pf_to_dev(pf);
 	set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);

 	bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
@@ -1189,7 +1196,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			 * events to respond to.
 			 */
 			if (status)
-				dev_info(&pf->pdev->dev,
+				dev_info(dev,
 					 "Failed to unreg for LLDP events\n");

 			/* The AQ call to stop the FW LLDP agent will generate
@@ -1197,15 +1204,14 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			 */
 			status = ice_aq_stop_lldp(&pf->hw, true, true, NULL);
 			if (status)
-				dev_warn(&pf->pdev->dev,
-					 "Fail to stop LLDP agent\n");
+				dev_warn(dev, "Fail to stop LLDP agent\n");

 			/* Use case for having the FW LLDP agent stopped
 			 * will likely not need DCB, so failure to init is
 			 * not a concern of ethtool
 			 */
 			status = ice_init_pf_dcb(pf, true);
 			if (status)
-				dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
+				dev_warn(dev, "Fail to init DCB\n");
 		} else {
 			enum ice_status status;
 			bool dcbx_agent_status;
@@ -1215,8 +1221,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			 */
 			status = ice_aq_start_lldp(&pf->hw, true, NULL);
 			if (status)
-				dev_warn(&pf->pdev->dev,
-					 "Fail to start LLDP Agent\n");
+				dev_warn(dev, "Fail to start LLDP Agent\n");

 			/* AQ command to start FW DCBX agent will fail if
 			 * the agent is already started
@@ -1225,10 +1230,9 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 							&dcbx_agent_status,
 							NULL);
 			if (status)
-				dev_dbg(&pf->pdev->dev,
-					"Failed to start FW DCBX\n");
+				dev_dbg(dev, "Failed to start FW DCBX\n");

-			dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
+			dev_info(dev, "FW DCBX agent is %s\n",
 				 dcbx_agent_status ? "ACTIVE" : "DISABLED");

 			/* Failure to configure MIB change or init DCB is not
@@ -1238,7 +1242,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			 */
 			status = ice_init_pf_dcb(pf, true);
 			if (status)
-				dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
+				dev_dbg(dev, "Fail to init DCB\n");

 			/* Remove rule to direct LLDP packets to default VSI.
 			 * The FW LLDP engine will now be consuming them.
@@ -1248,7 +1252,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			/* Register for MIB change events */
 			status = ice_cfg_lldp_mib_change(&pf->hw, true);
 			if (status)
-				dev_dbg(&pf->pdev->dev,
+				dev_dbg(dev,
 					"Fail to enable MIB change events\n");
 		}
 	}
@@ -3089,8 +3093,10 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
+	struct device *dev;
 	u8 *seed = NULL;

+	dev = ice_pf_to_dev(pf);
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
@@ -3103,8 +3109,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
 	if (key) {
 		if (!vsi->rss_hkey_user) {
 			vsi->rss_hkey_user =
-				devm_kzalloc(&pf->pdev->dev,
-					     ICE_VSIQF_HKEY_ARRAY_SIZE,
+				devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
 					     GFP_KERNEL);
 			if (!vsi->rss_hkey_user)
 				return -ENOMEM;
@@ -3114,8 +3119,7 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
 	}

 	if (!vsi->rss_lut_user) {
-		vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
-						 vsi->rss_table_size,
+		vsi->rss_lut_user = devm_kzalloc(dev, vsi->rss_table_size,
 						 GFP_KERNEL);
 		if (!vsi->rss_lut_user)
 			return -ENOMEM;
@@ -3177,7 +3181,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
 		ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
 		break;
 	default:
-		dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type);
+		dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
 		return -EINVAL;
 	}
@@ -3317,7 +3321,8 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 		break;
 	default:
-		dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type);
+		dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
+			c_type);
 		return -EINVAL;
 	}
...
@@ -52,26 +52,29 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
+	struct device *dev;

+	dev = ice_pf_to_dev(pf);

 	/* allocate memory for both Tx and Rx ring pointers */
-	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
 				     sizeof(*vsi->tx_rings), GFP_KERNEL);
 	if (!vsi->tx_rings)
 		return -ENOMEM;

-	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
 				     sizeof(*vsi->rx_rings), GFP_KERNEL);
 	if (!vsi->rx_rings)
 		goto err_rings;

 	/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
-	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, (2 * vsi->alloc_txq),
+	vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
 				    sizeof(*vsi->txq_map), GFP_KERNEL);

 	if (!vsi->txq_map)
 		goto err_txq_map;

-	vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
 				    sizeof(*vsi->rxq_map), GFP_KERNEL);
 	if (!vsi->rxq_map)
 		goto err_rxq_map;
@@ -81,7 +84,7 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 		return 0;

 	/* allocate memory for q_vector pointers */
-	vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
+	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
 				      sizeof(*vsi->q_vectors), GFP_KERNEL);
 	if (!vsi->q_vectors)
 		goto err_vectors;
@@ -89,13 +92,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	return 0;

err_vectors:
-	devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
-	devm_kfree(&pf->pdev->dev, vsi->txq_map);
+	devm_kfree(dev, vsi->txq_map);
err_txq_map:
-	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+	devm_kfree(dev, vsi->rx_rings);
err_rings:
-	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+	devm_kfree(dev, vsi->tx_rings);
 	return -ENOMEM;
 }
@@ -169,7 +172,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 		vsi->alloc_rxq = 1;
 		break;
 	default:
-		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
 		break;
 	}
@@ -227,8 +230,8 @@ void ice_vsi_delete(struct ice_vsi *vsi)
 	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
 	if (status)
-		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
-			vsi->vsi_num);
+		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
+			vsi->vsi_num, status);

 	kfree(ctxt);
 }
@@ -240,26 +243,29 @@ void ice_vsi_delete(struct ice_vsi *vsi)
 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
+	struct device *dev;

+	dev = ice_pf_to_dev(pf);

 	/* free the ring and vector containers */
 	if (vsi->q_vectors) {
-		devm_kfree(&pf->pdev->dev, vsi->q_vectors);
+		devm_kfree(dev, vsi->q_vectors);
 		vsi->q_vectors = NULL;
 	}
 	if (vsi->tx_rings) {
-		devm_kfree(&pf->pdev->dev, vsi->tx_rings);
+		devm_kfree(dev, vsi->tx_rings);
 		vsi->tx_rings = NULL;
 	}
 	if (vsi->rx_rings) {
-		devm_kfree(&pf->pdev->dev, vsi->rx_rings);
+		devm_kfree(dev, vsi->rx_rings);
 		vsi->rx_rings = NULL;
 	}
 	if (vsi->txq_map) {
-		devm_kfree(&pf->pdev->dev, vsi->txq_map);
+		devm_kfree(dev, vsi->txq_map);
 		vsi->txq_map = NULL;
 	}
 	if (vsi->rxq_map) {
-		devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+		devm_kfree(dev, vsi->rxq_map);
 		vsi->rxq_map = NULL;
 	}
 }
@@ -276,6 +282,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 int ice_vsi_clear(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = NULL;
+	struct device *dev;

 	if (!vsi)
 		return 0;
@@ -284,10 +291,10 @@ int ice_vsi_clear(struct ice_vsi *vsi)
 		return -EINVAL;

 	pf = vsi->back;
+	dev = ice_pf_to_dev(pf);

 	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
-		dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
-			vsi->idx);
+		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
 		return -EINVAL;
 	}
@@ -300,7 +307,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
 	ice_vsi_free_arrays(vsi);
 	mutex_unlock(&pf->sw_mutex);
-	devm_kfree(&pf->pdev->dev, vsi);
+	devm_kfree(dev, vsi);

 	return 0;
 }
@@ -333,6 +340,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
 static struct ice_vsi *
 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
 {
+	struct device *dev = ice_pf_to_dev(pf);
 	struct ice_vsi *vsi = NULL;

 	/* Need to protect the allocation of the VSIs at the PF level */
@@ -343,11 +351,11 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
 	 * is available to be populated
 	 */
 	if (pf->next_vsi == ICE_NO_VSI) {
-		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
+		dev_dbg(dev, "out of VSI slots!\n");
 		goto unlock_pf;
 	}

-	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
+	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
 	if (!vsi)
 		goto unlock_pf;
@@ -379,7 +387,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
 			goto err_rings;
 		break;
 	default:
-		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
 		goto unlock_pf;
 	}
@@ -392,7 +400,7 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
 	goto unlock_pf;

err_rings:
-	devm_kfree(&pf->pdev->dev, vsi);
+	devm_kfree(dev, vsi);
 	vsi = NULL;
unlock_pf:
 	mutex_unlock(&pf->sw_mutex);
@@ -481,14 +489,15 @@ bool ice_is_safe_mode(struct ice_pf *pf)
  */
 static void ice_rss_clean(struct ice_vsi *vsi)
 {
-	struct ice_pf *pf;
+	struct ice_pf *pf = vsi->back;
+	struct device *dev;

-	pf = vsi->back;
+	dev = ice_pf_to_dev(pf);

 	if (vsi->rss_hkey_user)
-		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+		devm_kfree(dev, vsi->rss_hkey_user);
 	if (vsi->rss_lut_user)
-		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+		devm_kfree(dev, vsi->rss_lut_user);
 }

 /**
@@ -526,7 +535,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
 	case ICE_VSI_LB:
 		break;
 	default:
-		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
+		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
 			 vsi->type);
 		break;
 	}
@@ -702,9 +711,11 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
 {
 	u8 lut_type, hash_type;
+	struct device *dev;
 	struct ice_pf *pf;

 	pf = vsi->back;
+	dev = ice_pf_to_dev(pf);

 	switch (vsi->type) {
 	case ICE_VSI_PF:
@@ -718,11 +729,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
 		break;
 	case ICE_VSI_LB:
-		dev_dbg(&pf->pdev->dev, "Unsupported VSI type %s\n",
+		dev_dbg(dev, "Unsupported VSI type %s\n",
 			ice_vsi_type_str(vsi->type));
 		return;
 	default:
-		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
+		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
 		return;
 	}
@@ -796,8 +807,7 @@ static int ice_vsi_init(struct ice_vsi *vsi)
 	ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
 	if (ret) {
-		dev_err(&pf->pdev->dev,
-			"Add VSI failed, err %d\n", ret);
+		dev_err(ice_pf_to_dev(pf), "Add VSI failed, err %d\n", ret);
 		ret = -EIO;
 		goto out;
 	}
@@ -826,14 +836,16 @@ static int ice_vsi_init(struct ice_vsi *vsi)
 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
+	struct device *dev;
 	u16 num_q_vectors;

+	dev = ice_pf_to_dev(pf);
 	/* SRIOV doesn't grab irq_tracker entries for each VSI */
 	if (vsi->type == ICE_VSI_VF)
 		return 0;

 	if (vsi->base_vector) {
-		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+		dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
 			vsi->vsi_num, vsi->base_vector);
 		return -EEXIST;
 	}
@@ -843,7 +855,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 	vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
 				       vsi->idx);
 	if (vsi->base_vector < 0) {
-		dev_err(&pf->pdev->dev,
+		dev_err(dev,
 			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
 			num_q_vectors, vsi->vsi_num, vsi->base_vector);
 		return -ENOENT;
@@ -886,8 +898,10 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 {
 	struct ice_pf *pf = vsi->back;
+	struct device *dev;
 	int i;

+	dev = ice_pf_to_dev(pf);
 	/* Allocate Tx rings */
 	for (i = 0; i < vsi->alloc_txq; i++) {
 		struct ice_ring *ring;
@@ -902,7 +916,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->reg_idx = vsi->txq_map[i];
 		ring->ring_active = false;
 		ring->vsi = vsi;
-		ring->dev = &pf->pdev->dev;
+		ring->dev = dev;
 		ring->count = vsi->num_tx_desc;
 		vsi->tx_rings[i] = ring;
 	}
@@ -921,7 +935,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		ring->ring_active = false;
 		ring->vsi = vsi;
 		ring->netdev = vsi->netdev;
-		ring->dev = &pf->pdev->dev;
+		ring->dev = dev;
 		ring->count = vsi->num_rx_desc;
 		vsi->rx_rings[i] = ring;
 	}
@@ -973,9 +987,11 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
 	struct ice_aqc_get_set_rss_keys *key;
 	struct ice_pf *pf = vsi->back;
 	enum ice_status status;
+	struct device *dev;
 	int err = 0;
 	u8 *lut;

+	dev = ice_pf_to_dev(pf);
 	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

 	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
@@ -991,8 +1007,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
 				    vsi->rss_table_size);
 	if (status) {
-		dev_err(&pf->pdev->dev,
-			"set_rss_lut failed, error %d\n", status);
+		dev_err(dev, "set_rss_lut failed, error %d\n", status);
 		err = -EIO;
 		goto ice_vsi_cfg_rss_exit;
 	}
@@ -1014,8 +1029,7 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
 	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
 	if (status) {
-		dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
-			status);
+		dev_err(dev, "set_rss_key failed, error %d\n", status);
 		err = -EIO;
 	}
@@ -1041,7 +1055,7 @@ int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
 	struct ice_fltr_list_entry *tmp;
 	struct ice_pf *pf = vsi->back;

-	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
+	tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
 	if (!tmp)
 		return -ENOMEM;
@@ -1133,9 +1147,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
 	struct ice_pf *pf = vsi->back;
 	LIST_HEAD(tmp_add_list);
 	enum ice_status status;
+	struct device *dev;
 	int err = 0;

-	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
+	dev = ice_pf_to_dev(pf);
+	tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
 	if (!tmp)
 		return -ENOMEM;
@@ -1152,11 +1168,11 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
 	status = ice_add_vlan(&pf->hw, &tmp_add_list);
 	if (status) {
 		err = -ENODEV;
-		dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
-			vid, vsi->vsi_num);
+		dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
+			vsi->vsi_num);
 	}

-	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+	ice_free_fltr_list(dev, &tmp_add_list);
 	return err;
 }
@@ -1173,9 +1189,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
 	struct ice_pf *pf = vsi->back;
 	LIST_HEAD(tmp_add_list);
 	enum ice_status status;
+	struct device *dev;
 	int err = 0;

-	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+	dev = ice_pf_to_dev(pf);
+	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
 	if (!list)
 		return -ENOMEM;
@@ -1191,17 +1209,17 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
 	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
 	if (status == ICE_ERR_DOES_NOT_EXIST) {
-		dev_dbg(&pf->pdev->dev,
+		dev_dbg(dev,
 			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
 			vid, vsi->vsi_num, status);
 	} else if (status) {
-		dev_err(&pf->pdev->dev,
+		dev_err(dev,
 			"Error removing VLAN %d on vsi %i error: %d\n",
 			vid, vsi->vsi_num, status);
 		err = -EIO;
 	}

-	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+	ice_free_fltr_list(dev, &tmp_add_list);
 	return err;
 }
@@ -1683,8 +1701,10 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
 	struct ice_pf *pf = vsi->back;
 	LIST_HEAD(tmp_add_list);
 	enum ice_status status;
+	struct device *dev;

-	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+	dev = ice_pf_to_dev(pf);
+	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
 	if (!list)
 		return;
@@ -1704,11 +1724,11 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
 		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

 	if (status)
-		dev_err(&pf->pdev->dev,
+		dev_err(dev,
 			"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
 			vsi->vsi_num, status);

-	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+	ice_free_fltr_list(dev, &tmp_add_list);
} }
/** /**
...@@ -1723,8 +1743,10 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) ...@@ -1723,8 +1743,10 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list); LIST_HEAD(tmp_add_list);
enum ice_status status; enum ice_status status;
struct device *dev;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); dev = ice_pf_to_dev(pf);
list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
if (!list) if (!list)
return; return;
...@@ -1751,12 +1773,11 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) ...@@ -1751,12 +1773,11 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list); status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status) if (status)
dev_err(&pf->pdev->dev, dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
"Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX", create ? "adding" : "removing", tx ? "TX" : "RX",
vsi->vsi_num, status); vsi->vsi_num, status);
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); ice_free_fltr_list(dev, &tmp_add_list);
} }
/** /**
...@@ -1778,7 +1799,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ...@@ -1778,7 +1799,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id) enum ice_vsi_type type, u16 vf_id)
{ {
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev; struct device *dev = ice_pf_to_dev(pf);
enum ice_status status; enum ice_status status;
struct ice_vsi *vsi; struct ice_vsi *vsi;
int ret, i; int ret, i;
...@@ -1887,8 +1908,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ...@@ -1887,8 +1908,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs); max_txqs);
if (status) { if (status) {
dev_err(&pf->pdev->dev, dev_err(dev, "VSI %d failed lan queue config, error %d\n",
"VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status); vsi->vsi_num, status);
goto unroll_vector_base; goto unroll_vector_base;
} }
...@@ -2000,8 +2020,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) ...@@ -2000,8 +2020,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
/* clear the affinity_mask in the IRQ descriptor */ /* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL); irq_set_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num); synchronize_irq(irq_num);
devm_free_irq(&pf->pdev->dev, irq_num, devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
vsi->q_vectors[i]);
} }
} }
...@@ -2187,7 +2206,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id) ...@@ -2187,7 +2206,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL; return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) { if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
dev_err(&pf->pdev->dev, dev_err(ice_pf_to_dev(pf),
"param err: needed=%d, num_entries = %d id=0x%04x\n", "param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id); needed, res->num_entries, id);
return -EINVAL; return -EINVAL;
...@@ -2469,7 +2488,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ...@@ -2469,7 +2488,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs); max_txqs);
if (status) { if (status) {
dev_err(&pf->pdev->dev, dev_err(ice_pf_to_dev(pf),
"VSI %d failed lan queue config, error %d\n", "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status); vsi->vsi_num, status);
goto err_vectors; goto err_vectors;
...@@ -2532,9 +2551,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) ...@@ -2532,9 +2551,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
struct ice_vsi_ctx *ctx; struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back; struct ice_pf *pf = vsi->back;
enum ice_status status; enum ice_status status;
struct device *dev;
int i, ret = 0; int i, ret = 0;
u8 num_tc = 0; u8 num_tc = 0;
dev = ice_pf_to_dev(pf);
ice_for_each_traffic_class(i) { ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */ /* build bitmap of enabled TCs */
if (ena_tc & BIT(i)) if (ena_tc & BIT(i))
...@@ -2559,7 +2581,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) ...@@ -2559,7 +2581,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) { if (status) {
dev_info(&pf->pdev->dev, "Failed VSI Update\n"); dev_info(dev, "Failed VSI Update\n");
ret = -EIO; ret = -EIO;
goto out; goto out;
} }
...@@ -2568,8 +2590,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) ...@@ -2568,8 +2590,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs); max_txqs);
if (status) { if (status) {
dev_err(&pf->pdev->dev, dev_err(dev, "VSI %d failed TC config, error %d\n",
"VSI %d failed TC config, error %d\n",
vsi->vsi_num, status); vsi->vsi_num, status);
ret = -EIO; ret = -EIO;
goto out; goto out;
......
...@@ -331,7 +331,7 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem) ...@@ -331,7 +331,7 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
struct device *dev; struct device *dev;
unsigned int i; unsigned int i;
dev = &pf->pdev->dev; dev = ice_pf_to_dev(pf);
for (i = 0; i < umem->npgs; i++) { for (i = 0; i < umem->npgs; i++) {
dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0, dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
PAGE_SIZE, PAGE_SIZE,
...@@ -369,7 +369,7 @@ static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem) ...@@ -369,7 +369,7 @@ static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
struct device *dev; struct device *dev;
unsigned int i; unsigned int i;
dev = &pf->pdev->dev; dev = ice_pf_to_dev(pf);
for (i = 0; i < umem->npgs; i++) { for (i = 0; i < umem->npgs; i++) {
dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR); DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
......
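For context, a minimal standalone sketch of the pattern these hunks apply: call ice_pf_to_dev() inline for a one-off use, and cache the result in a local "struct device *dev" when a function dereferences the device several times. This is not driver code; the struct layouts and the report_twice() helper below are simplified stand-ins so the example compiles on its own.

    #include <stdio.h>

    /* Stand-in types with only the fields the example needs. */
    struct device { const char *name; };
    struct pci_dev { struct device dev; };
    struct ice_pf { struct pci_dev *pdev; };

    /* Same shape as the driver macro: one place that knows the
     * pf -> pdev -> dev chain. */
    #define ice_pf_to_dev(pf) (&((pf)->pdev->dev))

    /* Multiple uses in one function: cache the device pointer once,
     * as the hunks that add a local "struct device *dev" do. */
    static void report_twice(struct ice_pf *pf)
    {
        struct device *dev = ice_pf_to_dev(pf);

        printf("dev %s: first message\n", dev->name);
        printf("dev %s: second message\n", dev->name);
    }

    int main(void)
    {
        struct pci_dev pdev = { .dev = { .name = "0000:3b:00.0" } };
        struct ice_pf pf = { .pdev = &pdev };

        /* Single use: the macro is called inline, matching the
         * one-line replacements in the diff. */
        printf("single use: %s\n", ice_pf_to_dev(&pf)->name);
        report_twice(&pf);
        return 0;
    }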