Commit 129cf89e · Author: Jesse Brandeburg · Committer: Jeff Kirsher

iavf: rename functions and structs to new name

This begins the internal portion of the rename from i40evf to iavf by renaming
many of the functions, structs, variables and defines.

Most of the changes were made mechanically, which introduces some
alignment issues.
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent ee61022a
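
For readers skimming the hunks below, the mechanical pattern of the rename is illustrated by the sketch that follows. The prototypes and define are copied from hunks in this patch; the i40e_* shared-code types they use are deliberately left untouched by this change, and the sketch is only a summary, not part of the diff itself.

/* before: i40evf-prefixed names */
i40e_status i40evf_init_adminq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
#define I40EVF_CLIENT_VERSION_MAJOR 0

/* after: iavf-prefixed names, same signatures and values */
i40e_status iavf_init_adminq(struct i40e_hw *hw);
bool iavf_check_asq_alive(struct i40e_hw *hw);
#define IAVF_CLIENT_VERSION_MAJOR 0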
......@@ -495,7 +495,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
}
/**
* i40evf_init_adminq - main initialization routine for Admin Queue
* iavf_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
* Prior to calling this function, drivers *MUST* set the following fields
......@@ -505,7 +505,7 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
i40e_status i40evf_init_adminq(struct i40e_hw *hw)
i40e_status iavf_init_adminq(struct i40e_hw *hw)
{
i40e_status ret_code;
......@@ -546,15 +546,15 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
}
/**
* i40evf_shutdown_adminq - shutdown routine for the Admin Queue
* iavf_shutdown_adminq - shutdown routine for the Admin Queue
* @hw: pointer to the hardware structure
**/
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
i40e_status iavf_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (i40evf_check_asq_alive(hw))
i40evf_aq_queue_shutdown(hw, true);
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
......@@ -604,13 +604,13 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
}
/**
* i40evf_asq_done - check if FW has processed the Admin Send Queue
* iavf_asq_done - check if FW has processed the Admin Send Queue
* @hw: pointer to the hw struct
*
* Returns true if the firmware has processed all descriptors on the
* admin send queue. Returns false if there are still requests pending.
**/
bool i40evf_asq_done(struct i40e_hw *hw)
bool iavf_asq_done(struct i40e_hw *hw)
{
/* AQ designers suggest use of head for better
* timing reliability than DD bit
......@@ -620,7 +620,7 @@ bool i40evf_asq_done(struct i40e_hw *hw)
}
/**
* i40evf_asq_send_command - send command to Admin Queue
* iavf_asq_send_command - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
......@@ -630,11 +630,11 @@ bool i40evf_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
i40e_status iavf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
......@@ -741,8 +741,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
hw->aq.asq.next_to_use = 0;
......@@ -759,7 +759,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
/* AQ designers suggest use of head for better
* timing reliability than DD bit
*/
if (i40evf_asq_done(hw))
if (iavf_asq_done(hw))
break;
udelay(50);
total_delay += 50;
......@@ -767,7 +767,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
}
/* if ready, copy the desc back to temp */
if (i40evf_asq_done(hw)) {
if (iavf_asq_done(hw)) {
*desc = *desc_on_ring;
if (buff != NULL)
memcpy(buff, dma_buff->va, buff_size);
......@@ -793,8 +793,8 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
buff_size);
iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
buff_size);
/* save writeback aq if requested */
if (details->wb_desc)
......@@ -820,13 +820,13 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
}
/**
* i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
* iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
* @opcode: the opcode can be used to decide which flags to turn off or on
*
* Fill the desc with default values
**/
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode)
{
/* zero out the desc */
......@@ -836,7 +836,7 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
}
/**
* i40evf_clean_arq_element
* iavf_clean_arq_element
* @hw: pointer to the hw struct
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
......@@ -845,9 +845,9 @@ void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *pending)
i40e_status iavf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *pending)
{
i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
......@@ -902,8 +902,8 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->msg_len);
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
iavf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
......
......@@ -130,7 +130,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
#define I40E_AQ_LARGE_BUF 512
#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);
#endif /* _I40E_ADMINQ_H_ */
......@@ -62,11 +62,11 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
}
/**
* i40evf_aq_str - convert AQ err code to a string
* iavf_aq_str - convert AQ err code to a string
* @hw: pointer to the HW structure
* @aq_err: the AQ error code to convert
**/
const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
const char *iavf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
switch (aq_err) {
case I40E_AQ_RC_OK:
......@@ -122,11 +122,11 @@ const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
}
/**
* i40evf_stat_str - convert status err code to a string
* iavf_stat_str - convert status err code to a string
* @hw: pointer to the HW structure
* @stat_err: the status error code to convert
**/
const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
switch (stat_err) {
case 0:
......@@ -270,7 +270,7 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
}
/**
* i40evf_debug_aq
* iavf_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
* @desc: pointer to admin queue descriptor
......@@ -279,7 +279,7 @@ const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
*
* Dumps debug log about adminq command with descriptor contents.
**/
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
......@@ -315,7 +315,7 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
char prefix[27];
snprintf(prefix, sizeof(prefix),
"i40evf %02x:%02x.%x: \t0x",
"iavf %02x:%02x.%x: \t0x",
hw->bus.bus_id,
hw->bus.device,
hw->bus.func);
......@@ -327,12 +327,12 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
}
/**
* i40evf_check_asq_alive
* iavf_check_asq_alive
* @hw: pointer to the hw struct
*
* Returns true if Queue is enabled else false.
**/
bool i40evf_check_asq_alive(struct i40e_hw *hw)
bool iavf_check_asq_alive(struct i40e_hw *hw)
{
if (hw->aq.asq.len)
return !!(rd32(hw, hw->aq.asq.len) &
......@@ -342,27 +342,27 @@ bool i40evf_check_asq_alive(struct i40e_hw *hw)
}
/**
* i40evf_aq_queue_shutdown
* iavf_aq_queue_shutdown
* @hw: pointer to the hw struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
iavf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40evf_asq_send_command(hw, &desc, NULL, 0, NULL);
status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
return status;
}
......@@ -389,11 +389,11 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
if (set)
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_rss_lut);
iavf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_rss_lut);
else
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_lut);
iavf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_lut);
/* Indirect command */
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
......@@ -416,13 +416,13 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);
return status;
}
/**
* i40evf_aq_get_rss_lut
* iavf_aq_get_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set true, for VSI table set false
......@@ -431,15 +431,15 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
*
* get the RSS lookup table, PF or VSI type
**/
i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size)
i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false);
}
/**
* i40evf_aq_set_rss_lut
* iavf_aq_set_rss_lut
* @hw: pointer to the hardware structure
* @vsi_id: vsi fw index
* @pf_lut: for PF table set true, for VSI table set false
......@@ -448,8 +448,8 @@ i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* set the RSS lookup table, PF or VSI type
**/
i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size)
i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
......@@ -463,8 +463,7 @@ i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
**/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
u16 vsi_id,
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key,
bool set)
{
......@@ -475,11 +474,11 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
if (set)
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_rss_key);
iavf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_rss_key);
else
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_key);
iavf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_rss_key);
/* Indirect command */
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
......@@ -491,41 +490,39 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);
return status;
}
/**
* i40evf_aq_get_rss_key
* iavf_aq_get_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
*
**/
i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key)
i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}
/**
* i40evf_aq_set_rss_key
* iavf_aq_set_rss_key
* @hw: pointer to the hw struct
* @vsi_id: vsi fw index
* @key: pointer to key info struct
*
* set the RSS key per VSI
**/
i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key)
i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
*
......@@ -538,10 +535,10 @@ i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
*
* Typical work flow:
*
* IF NOT i40evf_ptype_lookup[ptype].known
* IF NOT iavf_ptype_lookup[ptype].known
* THEN
* Packet is unknown
* ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
* ELSE IF iavf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
* Use the rest of the fields to look at the tunnels, inner protocols, etc
* ELSE
* Use the enum i40e_rx_l2_ptype to decode the packet type
......@@ -570,7 +567,7 @@ i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
/* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
struct i40e_rx_ptype_decoded iavf_ptype_lookup[] = {
/* L2 Packet types */
I40E_PTT_UNUSED_ENTRY(0),
I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
......@@ -891,7 +888,7 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
};
/**
* i40e_aq_send_msg_to_pf
* iavf_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
* @v_retval: return error code
......@@ -900,20 +897,19 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
* @cmd_details: pointer to command details
*
* Send message to PF driver using admin queue. By default, this message
* is sent asynchronously, i.e. i40evf_asq_send_command() does not wait for
* is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
* completion before returning.
**/
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
enum virtchnl_ops v_opcode,
i40e_status v_retval,
u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_asq_cmd_details details;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
iavf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
......@@ -929,19 +925,19 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
details.async = true;
cmd_details = &details;
}
status = i40evf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
return status;
}
/**
* i40e_vf_parse_hw_config
* iavf_vf_parse_hw_config
* @hw: pointer to the hardware structure
* @msg: pointer to the virtual channel VF resource structure
*
* Given a VF resource message from the PF, populate the hw struct
* with appropriate information.
**/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
void iavf_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg)
{
struct virtchnl_vsi_resource *vsi_res;
......@@ -968,15 +964,15 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
}
/**
* i40e_vf_reset
* iavf_vf_reset
* @hw: pointer to the hardware structure
*
* Send a VF_RESET message to the PF. Does not wait for response from PF
* as none will be forthcoming. Immediately after calling this function,
* the admin queue should be shut down and (optionally) reinitialized.
**/
i40e_status i40e_vf_reset(struct i40e_hw *hw)
i40e_status iavf_vf_reset(struct i40e_hw *hw)
{
return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
return iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF,
0, NULL, 0, NULL);
}
......@@ -34,18 +34,18 @@ struct i40e_dma_mem {
};
#define i40e_allocate_dma_mem(h, m, unused, s, a) \
i40evf_allocate_dma_mem_d(h, m, s, a)
#define i40e_free_dma_mem(h, m) i40evf_free_dma_mem_d(h, m)
iavf_allocate_dma_mem_d(h, m, s, a)
#define i40e_free_dma_mem(h, m) iavf_free_dma_mem_d(h, m)
struct i40e_virt_mem {
void *va;
u32 size;
};
#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
#define i40e_allocate_virt_mem(h, m, s) iavf_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) iavf_free_virt_mem_d(h, m)
#define i40e_debug(h, m, s, ...) i40evf_debug_d(h, m, s, ##__VA_ARGS__)
extern void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
#define i40e_debug(h, m, s, ...) iavf_debug_d(h, m, s, ##__VA_ARGS__)
extern void iavf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
__attribute__ ((format(gnu_printf, 3, 4)));
typedef enum i40e_status_code i40e_status;
......
......@@ -16,55 +16,53 @@
*/
/* adminq functions */
i40e_status i40evf_init_adminq(struct i40e_hw *hw);
i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw);
i40e_status iavf_init_adminq(struct i40e_hw *hw);
i40e_status iavf_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
bool i40evf_asq_done(struct i40e_hw *hw);
i40e_status iavf_clean_arq_element(struct i40e_hw *hw,
struct i40e_arq_event_info *e,
u16 *events_pending);
i40e_status iavf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
bool iavf_asq_done(struct i40e_hw *hw);
/* debug function for adminq */
void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void iavf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
void i40evf_resume_aq(struct i40e_hw *hw);
bool i40evf_check_asq_alive(struct i40e_hw *hw);
i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
void iavf_resume_aq(struct i40e_hw *hw);
bool iavf_check_asq_alive(struct i40e_hw *hw);
i40e_status iavf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *iavf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *iavf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status iavf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status iavf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
i40e_status iavf_aq_get_rss_key(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status iavf_aq_set_rss_key(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
i40e_status i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
extern struct i40e_rx_ptype_decoded iavf_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
{
return i40evf_ptype_lookup[ptype];
return iavf_ptype_lookup[ptype];
}
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
void iavf_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
i40e_status i40e_vf_reset(struct i40e_hw *hw);
i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
i40e_status iavf_vf_reset(struct i40e_hw *hw);
i40e_status iavf_aq_send_msg_to_pf(struct i40e_hw *hw,
enum virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen,
struct i40e_asq_cmd_details *cmd_details);
......
......@@ -3,7 +3,7 @@
/* Modeled on trace-events-sample.h */
/* The trace subsystem name for i40evf will be "i40evf".
/* The trace subsystem name for iavf will be "iavf".
*
* This file is named i40e_trace.h.
*
......@@ -12,7 +12,7 @@
* of this file.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i40evf
#define TRACE_SYSTEM iavf
/* See trace-events-sample.h for a detailed description of why this
* guard clause is different from most normal include files.
......@@ -42,7 +42,7 @@
* Similarly, i40e_trace_enabled(trace_name) wraps references to
* trace_i40e{,vf}_<trace_name>_enabled() functions.
*/
#define _I40E_TRACE_NAME(trace_name) (trace_ ## i40evf ## _ ## trace_name)
#define _I40E_TRACE_NAME(trace_name) (trace_ ## iavf ## _ ## trace_name)
#define I40E_TRACE_NAME(trace_name) _I40E_TRACE_NAME(trace_name)
#define i40e_trace(trace_name, args...) I40E_TRACE_NAME(trace_name)(args)
......@@ -50,14 +50,14 @@
#define i40e_trace_enabled(trace_name) I40E_TRACE_NAME(trace_name##_enabled)()
/* Events common to PF and VF. Corresponding versions will be defined
* for both, named trace_i40e_* and trace_i40evf_*. The i40e_trace()
* for both, named trace_i40e_* and trace_iavf_*. The i40e_trace()
* macro above will select the right trace point name for the driver
* being built from shared code.
*/
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(
i40evf_tx_template,
iavf_tx_template,
TP_PROTO(struct i40e_ring *ring,
struct i40e_tx_desc *desc,
......@@ -93,7 +93,7 @@ DECLARE_EVENT_CLASS(
);
DEFINE_EVENT(
i40evf_tx_template, i40evf_clean_tx_irq,
iavf_tx_template, iavf_clean_tx_irq,
TP_PROTO(struct i40e_ring *ring,
struct i40e_tx_desc *desc,
struct i40e_tx_buffer *buf),
......@@ -101,7 +101,7 @@ DEFINE_EVENT(
TP_ARGS(ring, desc, buf));
DEFINE_EVENT(
i40evf_tx_template, i40evf_clean_tx_irq_unmap,
iavf_tx_template, iavf_clean_tx_irq_unmap,
TP_PROTO(struct i40e_ring *ring,
struct i40e_tx_desc *desc,
struct i40e_tx_buffer *buf),
......@@ -109,7 +109,7 @@ DEFINE_EVENT(
TP_ARGS(ring, desc, buf));
DECLARE_EVENT_CLASS(
i40evf_rx_template,
iavf_rx_template,
TP_PROTO(struct i40e_ring *ring,
union i40e_32byte_rx_desc *desc,
......@@ -138,7 +138,7 @@ DECLARE_EVENT_CLASS(
);
DEFINE_EVENT(
i40evf_rx_template, i40evf_clean_rx_irq,
iavf_rx_template, iavf_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
union i40e_32byte_rx_desc *desc,
struct sk_buff *skb),
......@@ -146,7 +146,7 @@ DEFINE_EVENT(
TP_ARGS(ring, desc, skb));
DEFINE_EVENT(
i40evf_rx_template, i40evf_clean_rx_irq_rx,
iavf_rx_template, iavf_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
union i40e_32byte_rx_desc *desc,
struct sk_buff *skb),
......@@ -154,7 +154,7 @@ DEFINE_EVENT(
TP_ARGS(ring, desc, skb));
DECLARE_EVENT_CLASS(
i40evf_xmit_template,
iavf_xmit_template,
TP_PROTO(struct sk_buff *skb,
struct i40e_ring *ring),
......@@ -180,14 +180,14 @@ DECLARE_EVENT_CLASS(
);
DEFINE_EVENT(
i40evf_xmit_template, i40evf_xmit_frame_ring,
iavf_xmit_template, iavf_xmit_frame_ring,
TP_PROTO(struct sk_buff *skb,
struct i40e_ring *ring),
TP_ARGS(skb, ring));
DEFINE_EVENT(
i40evf_xmit_template, i40evf_xmit_frame_ring_drop,
iavf_xmit_template, iavf_xmit_frame_ring_drop,
TP_PROTO(struct sk_buff *skb,
struct i40e_ring *ring),
......
......@@ -52,10 +52,10 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
}
/**
* i40evf_clean_tx_ring - Free any empty Tx buffers
* iavf_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
**/
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
void iavf_clean_tx_ring(struct i40e_ring *tx_ring)
{
unsigned long bi_size;
u16 i;
......@@ -85,14 +85,14 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
}
/**
* i40evf_free_tx_resources - Free Tx resources per queue
* iavf_free_tx_resources - Free Tx resources per queue
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
**/
void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
void iavf_free_tx_resources(struct i40e_ring *tx_ring)
{
i40evf_clean_tx_ring(tx_ring);
iavf_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
......@@ -104,14 +104,14 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
}
/**
* i40evf_get_tx_pending - how many Tx descriptors not processed
* iavf_get_tx_pending - how many Tx descriptors not processed
* @ring: the ring of descriptors
* @in_sw: is tx_pending being checked in SW or HW
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
u32 head, tail;
......@@ -126,13 +126,13 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
}
/**
* i40evf_detect_recover_hung - Function to detect and recover hung_queues
* iavf_detect_recover_hung - Function to detect and recover hung_queues
* @vsi: pointer to vsi struct with tx queues
*
* VSI has netdev and netdev has TX queues. This function is to check each of
* those TX queues if they are hung, trigger recovery by issuing SW interrupt.
**/
void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
void iavf_detect_recover_hung(struct i40e_vsi *vsi)
{
struct i40e_ring *tx_ring = NULL;
struct net_device *netdev;
......@@ -164,16 +164,16 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
*/
packets = tx_ring->stats.packets & INT_MAX;
if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
i40evf_force_wb(vsi, tx_ring->q_vector);
iavf_force_wb(vsi, tx_ring->q_vector);
continue;
}
/* Memory barrier between read of packet count and call
* to i40evf_get_tx_pending()
* to iavf_get_tx_pending()
*/
smp_rmb();
tx_ring->tx_stats.prev_pkt_ctr =
i40evf_get_tx_pending(tx_ring, true) ? packets : -1;
iavf_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
}
......@@ -292,7 +292,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
unsigned int j = i40evf_get_tx_pending(tx_ring, false);
unsigned int j = iavf_get_tx_pending(tx_ring, false);
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
......@@ -325,7 +325,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
}
/**
* i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
* iavf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
* @vsi: the VSI we care about
* @q_vector: the vector on which to enable writeback
*
......@@ -351,12 +351,12 @@ static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
}
/**
* i40evf_force_wb - Issue SW Interrupt so HW does a wb
* iavf_force_wb - Issue SW Interrupt so HW does a wb
* @vsi: the VSI we care about
* @q_vector: the vector on which to force writeback
*
**/
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
......@@ -607,12 +607,12 @@ static void i40e_update_itr(struct i40e_q_vector *q_vector,
}
/**
* i40evf_setup_tx_descriptors - Allocate the Tx descriptors
* iavf_setup_tx_descriptors - Allocate the Tx descriptors
* @tx_ring: the tx ring to set up
*
* Return 0 on success, negative on error
**/
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
int bi_size;
......@@ -650,10 +650,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
}
/**
* i40evf_clean_rx_ring - Free Rx buffers
* iavf_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
**/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
void iavf_clean_rx_ring(struct i40e_ring *rx_ring)
{
unsigned long bi_size;
u16 i;
......@@ -707,14 +707,14 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
}
/**
* i40evf_free_rx_resources - Free Rx resources
* iavf_free_rx_resources - Free Rx resources
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
**/
void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
void iavf_free_rx_resources(struct i40e_ring *rx_ring)
{
i40evf_clean_rx_ring(rx_ring);
iavf_clean_rx_ring(rx_ring);
kfree(rx_ring->rx_bi);
rx_ring->rx_bi = NULL;
......@@ -726,12 +726,12 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
* i40evf_setup_rx_descriptors - Allocate Rx descriptors
* iavf_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
* Returns 0 on success, negative on failure
**/
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
int bi_size;
......@@ -871,13 +871,13 @@ static void i40e_receive_skb(struct i40e_ring *rx_ring,
}
/**
* i40evf_alloc_rx_buffers - Replace used receive buffers
* iavf_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
* Returns false if all allocations were successful, true if any fail
**/
bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
bool iavf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
......@@ -1069,7 +1069,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
* i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
* iavf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
......@@ -1080,9 +1080,9 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* other fields within the skb.
**/
static inline
void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb,
u8 rx_ptype)
void iavf_process_skb_fields(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc, struct sk_buff *skb,
u8 rx_ptype)
{
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
......@@ -1479,7 +1479,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
iavf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
......@@ -1551,7 +1551,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
I40E_RXD_QW1_PTYPE_SHIFT;
/* populate checksum, VLAN, and protocol */
i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
......@@ -1676,7 +1676,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
}
/**
* i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
* iavf_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
......@@ -1684,7 +1684,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
*
* Returns the amount of work done
**/
int i40evf_napi_poll(struct napi_struct *napi, int budget)
int iavf_napi_poll(struct napi_struct *napi, int budget)
{
struct i40e_q_vector *q_vector =
container_of(napi, struct i40e_q_vector, napi);
......@@ -1746,7 +1746,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
napi_complete_done(napi, work_done);
/* Force an interrupt */
i40evf_force_wb(vsi, q_vector);
iavf_force_wb(vsi, q_vector);
/* Return budget-1 so that polling stops */
return budget - 1;
......@@ -1771,7 +1771,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
}
/**
* i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* iavf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
* @tx_ring: ring to send buffer on
* @flags: the tx flags to be set
......@@ -1782,9 +1782,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* Returns error code indicate the frame should be dropped upon error and the
* otherwise returns 0 to indicate the flags has been set properly.
**/
static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
......@@ -2130,7 +2130,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
}
/**
* __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
* __iavf_chk_linearize - Check if there are more than 8 buffers per packet
* @skb: send buffer
*
* Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
......@@ -2142,7 +2142,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
* the segment payload in the first descriptor, and another 7 for the
* fragments.
**/
bool __i40evf_chk_linearize(struct sk_buff *skb)
bool __iavf_chk_linearize(struct sk_buff *skb)
{
const struct skb_frag_struct *frag, *stale;
int nr_frags, sum;
......@@ -2214,13 +2214,13 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
}
/**
* __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
* __iavf_maybe_stop_tx - 2nd level check for tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
* Returns -EBUSY if a stop is needed, else 0
**/
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */
......@@ -2237,7 +2237,7 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
}
/**
* i40evf_tx_map - Build the Tx descriptor
* iavf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @first: first buffer info buffer to use
......@@ -2246,9 +2246,9 @@ int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
static inline void iavf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
......@@ -2437,7 +2437,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
first->gso_segs = 1;
/* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* obtain protocol of skb */
......@@ -2470,8 +2470,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
td_cmd, td_offset);
iavf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
td_cmd, td_offset);
return NETDEV_TX_OK;
......@@ -2483,15 +2483,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
}
/**
* i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
* iavf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
**/
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
struct iavf_adapter *adapter = netdev_priv(netdev);
struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];
/* hardware can't handle really short frames, hardware padding works
......
......@@ -380,12 +380,12 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
struct sk_buff *skb; /* When i40evf_clean_rx_ring_irq() must
struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must
* return before it sees the EOP for
* the current packet, we save that skb
* here and resume receiving this
* packet the next time
* i40evf_clean_rx_ring_irq() is called
* iavf_clean_rx_ring_irq() is called
* for this ring.
*/
} ____cacheline_internodealigned_in_smp;
......@@ -437,20 +437,20 @@ static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))
bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40evf_detect_recover_hung(struct i40e_vsi *vsi);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
bool iavf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void iavf_clean_tx_ring(struct i40e_ring *tx_ring);
void iavf_clean_rx_ring(struct i40e_ring *rx_ring);
int iavf_setup_tx_descriptors(struct i40e_ring *tx_ring);
int iavf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void iavf_free_tx_resources(struct i40e_ring *tx_ring);
void iavf_free_rx_resources(struct i40e_ring *rx_ring);
int iavf_napi_poll(struct napi_struct *napi, int budget);
void iavf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 iavf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void iavf_detect_recover_hung(struct i40e_vsi *vsi);
int __iavf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __iavf_chk_linearize(struct sk_buff *skb);
/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
......@@ -490,7 +490,7 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
return 0;
return __i40evf_maybe_stop_tx(tx_ring, size);
return __iavf_maybe_stop_tx(tx_ring, size);
}
/**
......@@ -509,7 +509,7 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
return false;
if (skb_is_gso(skb))
return __i40evf_chk_linearize(skb);
return __iavf_chk_linearize(skb);
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
......
......@@ -9,31 +9,31 @@
#include "i40evf_client.h"
static
const char i40evf_client_interface_version_str[] = I40EVF_CLIENT_VERSION_STR;
const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR;
static struct i40e_client *vf_registered_client;
static LIST_HEAD(i40evf_devices);
static DEFINE_MUTEX(i40evf_device_mutex);
static LIST_HEAD(iavf_devices);
static DEFINE_MUTEX(iavf_device_mutex);
static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u8 *msg, u16 len);
static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u8 *msg, u16 len);
static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info);
static int iavf_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info);
static struct i40e_ops i40evf_lan_ops = {
.virtchnl_send = i40evf_client_virtchnl_send,
.setup_qvlist = i40evf_client_setup_qvlist,
static struct i40e_ops iavf_lan_ops = {
.virtchnl_send = iavf_client_virtchnl_send,
.setup_qvlist = iavf_client_setup_qvlist,
};
/**
* i40evf_client_get_params - retrieve relevant client parameters
* iavf_client_get_params - retrieve relevant client parameters
* @vsi: VSI with parameters
* @params: client param struct
**/
static
void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
void iavf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
{
int i;
......@@ -48,14 +48,14 @@ void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
}
/**
* i40evf_notify_client_message - call the client message receive callback
* iavf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
* @msg: message buffer
* @len: length of message
*
* If there is a client to this VSI, call the client
**/
void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
void iavf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
{
struct i40e_client_instance *cinst;
......@@ -74,12 +74,12 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len)
}
/**
* i40evf_notify_client_l2_params - call the client notify callback
* iavf_notify_client_l2_params - call the client notify callback
* @vsi: the VSI with l2 param changes
*
* If there is a client to this VSI, call the client
**/
void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
void iavf_notify_client_l2_params(struct i40e_vsi *vsi)
{
struct i40e_client_instance *cinst;
struct i40e_params params;
......@@ -95,21 +95,21 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi)
"Cannot locate client instance l2_param_change function\n");
return;
}
i40evf_client_get_params(vsi, &params);
iavf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
&params);
}
/**
* i40evf_notify_client_open - call the client open callback
* iavf_notify_client_open - call the client open callback
* @vsi: the VSI with netdev opened
*
* If there is a client to this netdev, call the client with open
**/
void i40evf_notify_client_open(struct i40e_vsi *vsi)
void iavf_notify_client_open(struct i40e_vsi *vsi)
{
struct i40evf_adapter *adapter = vsi->back;
struct iavf_adapter *adapter = vsi->back;
struct i40e_client_instance *cinst = adapter->cinst;
int ret;
......@@ -127,22 +127,22 @@ void i40evf_notify_client_open(struct i40e_vsi *vsi)
}
/**
* i40evf_client_release_qvlist - send a message to the PF to release iwarp qv map
* iavf_client_release_qvlist - send a message to the PF to release iwarp qv map
* @ldev: pointer to L2 context.
*
* Return 0 on success or < 0 on error
**/
static int i40evf_client_release_qvlist(struct i40e_info *ldev)
static int iavf_client_release_qvlist(struct i40e_info *ldev)
{
struct i40evf_adapter *adapter = ldev->vf;
struct iavf_adapter *adapter = ldev->vf;
i40e_status err;
if (adapter->aq_required)
return -EAGAIN;
err = i40e_aq_send_msg_to_pf(&adapter->hw,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
I40E_SUCCESS, NULL, 0, NULL);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
I40E_SUCCESS, NULL, 0, NULL);
if (err)
dev_err(&adapter->pdev->dev,
......@@ -153,15 +153,15 @@ static int i40evf_client_release_qvlist(struct i40e_info *ldev)
}
/**
* i40evf_notify_client_close - call the client close callback
* iavf_notify_client_close - call the client close callback
* @vsi: the VSI with netdev closed
* @reset: true when close called due to reset pending
*
* If there is a client to this netdev, call the client with close
**/
void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
void iavf_notify_client_close(struct i40e_vsi *vsi, bool reset)
{
struct i40evf_adapter *adapter = vsi->back;
struct iavf_adapter *adapter = vsi->back;
struct i40e_client_instance *cinst = adapter->cinst;
if (!cinst || !cinst->client || !cinst->client->ops ||
......@@ -171,18 +171,18 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset)
return;
}
cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
i40evf_client_release_qvlist(&cinst->lan_info);
iavf_client_release_qvlist(&cinst->lan_info);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
}
/**
* i40evf_client_add_instance - add a client instance to the instance list
* iavf_client_add_instance - add a client instance to the instance list
* @adapter: pointer to the board struct
*
* Returns cinst ptr on success, NULL on failure
**/
static struct i40e_client_instance *
i40evf_client_add_instance(struct i40evf_adapter *adapter)
iavf_client_add_instance(struct iavf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
struct i40e_vsi *vsi = &adapter->vsi;
......@@ -207,11 +207,11 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
cinst->lan_info.fid = 0;
cinst->lan_info.ftype = I40E_CLIENT_FTYPE_VF;
cinst->lan_info.hw_addr = adapter->hw.hw_addr;
cinst->lan_info.ops = &i40evf_lan_ops;
cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
i40evf_client_get_params(vsi, &params);
cinst->lan_info.ops = &iavf_lan_ops;
cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD;
iavf_client_get_params(vsi, &params);
cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
......@@ -233,28 +233,28 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter)
}
/**
* i40evf_client_del_instance - removes a client instance from the list
* iavf_client_del_instance - removes a client instance from the list
* @adapter: pointer to the board struct
*
**/
static
void i40evf_client_del_instance(struct i40evf_adapter *adapter)
void iavf_client_del_instance(struct iavf_adapter *adapter)
{
kfree(adapter->cinst);
adapter->cinst = NULL;
}
/**
* i40evf_client_subtask - client maintenance work
* iavf_client_subtask - client maintenance work
* @adapter: board private structure
**/
void i40evf_client_subtask(struct i40evf_adapter *adapter)
void iavf_client_subtask(struct iavf_adapter *adapter)
{
struct i40e_client *client = vf_registered_client;
struct i40e_client_instance *cinst;
int ret = 0;
if (adapter->state < __I40EVF_DOWN)
if (adapter->state < __IAVF_DOWN)
return;
/* first check client is registered */
......@@ -262,7 +262,7 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter)
return;
/* Add the client instance to the instance list */
cinst = i40evf_client_add_instance(adapter);
cinst = iavf_client_add_instance(adapter);
if (!cinst)
return;
......@@ -279,23 +279,23 @@ void i40evf_client_subtask(struct i40evf_adapter *adapter)
&cinst->state);
else
/* remove client instance */
i40evf_client_del_instance(adapter);
iavf_client_del_instance(adapter);
}
}
/**
* i40evf_lan_add_device - add a lan device struct to the list of lan devices
* iavf_lan_add_device - add a lan device struct to the list of lan devices
* @adapter: pointer to the board struct
*
* Returns 0 on success or none 0 on error
**/
int i40evf_lan_add_device(struct i40evf_adapter *adapter)
int iavf_lan_add_device(struct iavf_adapter *adapter)
{
struct i40e_device *ldev;
int ret = 0;
mutex_lock(&i40evf_device_mutex);
list_for_each_entry(ldev, &i40evf_devices, list) {
mutex_lock(&iavf_device_mutex);
list_for_each_entry(ldev, &iavf_devices, list) {
if (ldev->vf == adapter) {
ret = -EEXIST;
goto out;
......@@ -308,7 +308,7 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter)
}
ldev->vf = adapter;
INIT_LIST_HEAD(&ldev->list);
list_add(&ldev->list, &i40evf_devices);
list_add(&ldev->list, &iavf_devices);
dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
adapter->hw.bus.bus_id, adapter->hw.bus.device,
adapter->hw.bus.func);
......@@ -316,26 +316,26 @@ int i40evf_lan_add_device(struct i40evf_adapter *adapter)
/* Since in some cases register may have happened before a device gets
* added, we can schedule a subtask to go initiate the clients.
*/
adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
out:
mutex_unlock(&i40evf_device_mutex);
mutex_unlock(&iavf_device_mutex);
return ret;
}
/**
* i40evf_lan_del_device - removes a lan device from the device list
* iavf_lan_del_device - removes a lan device from the device list
* @adapter: pointer to the board struct
*
* Returns 0 on success or non-0 on error
**/
int i40evf_lan_del_device(struct i40evf_adapter *adapter)
int iavf_lan_del_device(struct iavf_adapter *adapter)
{
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
mutex_lock(&i40evf_device_mutex);
list_for_each_entry_safe(ldev, tmp, &i40evf_devices, list) {
mutex_lock(&iavf_device_mutex);
list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) {
if (ldev->vf == adapter) {
dev_info(&adapter->pdev->dev,
"Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
......@@ -348,23 +348,23 @@ int i40evf_lan_del_device(struct i40evf_adapter *adapter)
}
}
mutex_unlock(&i40evf_device_mutex);
mutex_unlock(&iavf_device_mutex);
return ret;
}
/**
* i40evf_client_release - release client specific resources
* iavf_client_release - release client specific resources
* @client: pointer to the registered client
*
**/
static void i40evf_client_release(struct i40e_client *client)
static void iavf_client_release(struct i40e_client *client)
{
struct i40e_client_instance *cinst;
struct i40e_device *ldev;
struct i40evf_adapter *adapter;
struct iavf_adapter *adapter;
mutex_lock(&i40evf_device_mutex);
list_for_each_entry(ldev, &i40evf_devices, list) {
mutex_lock(&iavf_device_mutex);
list_for_each_entry(ldev, &iavf_devices, list) {
adapter = ldev->vf;
cinst = adapter->cinst;
if (!cinst)
......@@ -373,41 +373,41 @@ static void i40evf_client_release(struct i40e_client *client)
if (client->ops && client->ops->close)
client->ops->close(&cinst->lan_info, client,
false);
i40evf_client_release_qvlist(&cinst->lan_info);
iavf_client_release_qvlist(&cinst->lan_info);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cinst->state);
dev_warn(&adapter->pdev->dev,
"Client %s instance closed\n", client->name);
}
/* delete the client instance */
i40evf_client_del_instance(adapter);
iavf_client_del_instance(adapter);
dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
client->name);
}
mutex_unlock(&i40evf_device_mutex);
mutex_unlock(&iavf_device_mutex);
}
/**
* i40evf_client_prepare - prepare client specific resources
* iavf_client_prepare - prepare client specific resources
* @client: pointer to the registered client
*
**/
static void i40evf_client_prepare(struct i40e_client *client)
static void iavf_client_prepare(struct i40e_client *client)
{
struct i40e_device *ldev;
struct i40evf_adapter *adapter;
struct iavf_adapter *adapter;
mutex_lock(&i40evf_device_mutex);
list_for_each_entry(ldev, &i40evf_devices, list) {
mutex_lock(&iavf_device_mutex);
list_for_each_entry(ldev, &iavf_devices, list) {
adapter = ldev->vf;
/* Signal the watchdog to service the client */
adapter->flags |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
mutex_unlock(&i40evf_device_mutex);
mutex_unlock(&iavf_device_mutex);
}
/**
* i40evf_client_virtchnl_send - send a message to the PF instance
* iavf_client_virtchnl_send - send a message to the PF instance
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @msg: pointer to message buffer
......@@ -415,17 +415,17 @@ static void i40evf_client_prepare(struct i40e_client *client)
*
* Return 0 on success or < 0 on error
**/
static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u8 *msg, u16 len)
static u32 iavf_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
u8 *msg, u16 len)
{
struct i40evf_adapter *adapter = ldev->vf;
struct iavf_adapter *adapter = ldev->vf;
i40e_status err;
if (adapter->aq_required)
return -EAGAIN;
err = i40e_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
I40E_SUCCESS, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
......@@ -435,19 +435,19 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev,
}
/**
* i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
* iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info)
static int iavf_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_client *client,
struct i40e_qvlist_info *qvlist_info)
{
struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
struct i40evf_adapter *adapter = ldev->vf;
struct iavf_adapter *adapter = ldev->vf;
struct i40e_qv_info *qv_info;
i40e_status err;
u32 v_idx, i;
......@@ -474,9 +474,9 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
(v_qvlist_info->num_vectors - 1));
adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
err = i40e_aq_send_msg_to_pf(&adapter->hw,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP,
I40E_SUCCESS, (u8 *)v_qvlist_info, msg_size, NULL);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, I40E_SUCCESS,
(u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
dev_err(&adapter->pdev->dev,
......@@ -499,12 +499,12 @@ static int i40evf_client_setup_qvlist(struct i40e_info *ldev,
}
/**
* i40evf_register_client - Register a i40e client driver with the L2 driver
* iavf_register_client - Register a i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
int i40evf_register_client(struct i40e_client *client)
int iavf_register_client(struct i40e_client *client)
{
int ret = 0;
......@@ -514,48 +514,48 @@ int i40evf_register_client(struct i40e_client *client)
}
if (strlen(client->name) == 0) {
pr_info("i40evf: Failed to register client with no name\n");
pr_info("iavf: Failed to register client with no name\n");
ret = -EIO;
goto out;
}
if (vf_registered_client) {
pr_info("i40evf: Client %s has already been registered!\n",
pr_info("iavf: Client %s has already been registered!\n",
client->name);
ret = -EEXIST;
goto out;
}
if ((client->version.major != I40EVF_CLIENT_VERSION_MAJOR) ||
(client->version.minor != I40EVF_CLIENT_VERSION_MINOR)) {
pr_info("i40evf: Failed to register client %s due to mismatched client interface version\n",
if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) ||
(client->version.minor != IAVF_CLIENT_VERSION_MINOR)) {
pr_info("iavf: Failed to register client %s due to mismatched client interface version\n",
client->name);
pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
client->version.major, client->version.minor,
client->version.build,
i40evf_client_interface_version_str);
iavf_client_interface_version_str);
ret = -EIO;
goto out;
}
vf_registered_client = client;
i40evf_client_prepare(client);
iavf_client_prepare(client);
pr_info("i40evf: Registered client %s with return code %d\n",
pr_info("iavf: Registered client %s with return code %d\n",
client->name, ret);
out:
return ret;
}
EXPORT_SYMBOL(i40evf_register_client);
EXPORT_SYMBOL(iavf_register_client);
/**
* i40evf_unregister_client - Unregister a i40e client driver with the L2 driver
* iavf_unregister_client - Unregister a i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
*
* Returns 0 on success or non-0 on error
**/
int i40evf_unregister_client(struct i40e_client *client)
int iavf_unregister_client(struct i40e_client *client)
{
int ret = 0;
......@@ -563,17 +563,17 @@ int i40evf_unregister_client(struct i40e_client *client)
* a close for each of the client instances that were opened.
* client_release function is called to handle this.
*/
i40evf_client_release(client);
iavf_client_release(client);
if (vf_registered_client != client) {
pr_info("i40evf: Client %s has not been registered\n",
pr_info("iavf: Client %s has not been registered\n",
client->name);
ret = -ENODEV;
goto out;
}
vf_registered_client = NULL;
pr_info("i40evf: Unregistered client %s\n", client->name);
pr_info("iavf: Unregistered client %s\n", client->name);
out:
return ret;
}
EXPORT_SYMBOL(i40evf_unregister_client);
EXPORT_SYMBOL(iavf_unregister_client);
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#ifndef _I40EVF_CLIENT_H_
#define _I40EVF_CLIENT_H_
#ifndef _IAVF_CLIENT_H_
#define _IAVF_CLIENT_H_
#define I40EVF_CLIENT_STR_LENGTH 10
#define IAVF_CLIENT_STR_LENGTH 10
/* Client interface version should be updated anytime there is a change in the
* existing APIs or data structures.
*/
#define I40EVF_CLIENT_VERSION_MAJOR 0
#define I40EVF_CLIENT_VERSION_MINOR 01
#define I40EVF_CLIENT_VERSION_BUILD 00
#define I40EVF_CLIENT_VERSION_STR \
__stringify(I40EVF_CLIENT_VERSION_MAJOR) "." \
__stringify(I40EVF_CLIENT_VERSION_MINOR) "." \
__stringify(I40EVF_CLIENT_VERSION_BUILD)
#define IAVF_CLIENT_VERSION_MAJOR 0
#define IAVF_CLIENT_VERSION_MINOR 01
#define IAVF_CLIENT_VERSION_BUILD 00
#define IAVF_CLIENT_VERSION_STR \
__stringify(IAVF_CLIENT_VERSION_MAJOR) "." \
__stringify(IAVF_CLIENT_VERSION_MINOR) "." \
__stringify(IAVF_CLIENT_VERSION_BUILD)
struct i40e_client_version {
u8 major;
......@@ -90,7 +90,7 @@ struct i40e_info {
#define I40E_CLIENT_FTYPE_PF 0
#define I40E_CLIENT_FTYPE_VF 1
u8 ftype; /* function type, PF or VF */
void *vf; /* cast to i40evf_adapter */
void *vf; /* cast to iavf_adapter */
/* All L2 params that could change during the life span of the device
* and needs to be communicated to the client when they change
......@@ -151,7 +151,7 @@ struct i40e_client_instance {
struct i40e_client {
struct list_head list; /* list of registered clients */
char name[I40EVF_CLIENT_STR_LENGTH];
char name[IAVF_CLIENT_STR_LENGTH];
struct i40e_client_version version;
unsigned long state; /* client state */
atomic_t ref_cnt; /* Count of all the client devices of this kind */
......@@ -164,6 +164,6 @@ struct i40e_client {
};
/* used by clients */
int i40evf_register_client(struct i40e_client *client);
int i40evf_unregister_client(struct i40e_client *client);
#endif /* _I40EVF_CLIENT_H_ */
int iavf_register_client(struct i40e_client *client);
int iavf_unregister_client(struct i40e_client *client);
#endif /* _IAVF_CLIENT_H_ */