提交 e27a8792 编写于 作者: D David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2017-07-25

This series contains updates to i40e and i40evf only.

Gustavo Silva fixes a variable assignment, where the incorrect variable
was being used to store the error parameter.

Carolyn provides a fix for a problem found in systems when entering S4
state, by ensuring that the misc vector's IRQ is disabled as well.

Jake removes the single-threaded restriction on the module workqueue,
which was causing issues with events such as CORER.  Does some future
proofing, by changing how the driver displays the UDP tunnel type.

Paul adds a retry in releasing resources if the admin queue times out
during the first attempt to release the resources.

Jesse fixes up references to 32-bit timespec, since there are a small set
of errors on 32 bit, so we need to be using the right calls for dealing
with timespec64 variables.  Cleaned up code indentation and corrected
an "if" conditional check, as well as making the code flow more clear.
Cast or changed the types to remove warnings for comparing signed and
unsigned types.  Adds missing includes in i40evf, which were being used
but were not being directly included.

Daniel Borkmann fixes i40e to fill the XDP prog_id with the id just like
other XDP enabled drivers, so that on dump we can retrieve the attached
program based on the id and dump BPF insns, opcodes, etc back to user
space.

Tushar Dave adds le32_to_cpu while evaluating the hardware descriptor
fields, since they are in little-endian format.  Also removed
unnecessary "__packed" to a couple of i40evf structures.

Stefan Assmann fixes an issue where, when an administratively set MAC
should be switched back to 00:00:00:00:00:00, the pf_set_mac flag was
not being toggled back to false.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
...@@ -1091,7 +1091,7 @@ static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, ...@@ -1091,7 +1091,7 @@ static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
struct i40e_pf *pf = np->vsi->back; struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
u32 *reg_buf = p; u32 *reg_buf = p;
int i, j, ri; unsigned int i, j, ri;
u32 reg; u32 reg;
/* Tell ethtool which driver-version-specific regs output we have. /* Tell ethtool which driver-version-specific regs output we have.
...@@ -1550,9 +1550,9 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, ...@@ -1550,9 +1550,9 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_ring *tx_ring, *rx_ring; struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
unsigned int j;
int i = 0; int i = 0;
char *p; char *p;
int j;
struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
unsigned int start; unsigned int start;
...@@ -1637,7 +1637,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, ...@@ -1637,7 +1637,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
char *p = (char *)data; char *p = (char *)data;
int i; unsigned int i;
switch (stringset) { switch (stringset) {
case ETH_SS_TEST: case ETH_SS_TEST:
......
...@@ -4773,7 +4773,7 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) ...@@ -4773,7 +4773,7 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf)
{ {
struct net_device *netdev; struct net_device *netdev;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
int i; unsigned int i;
/* Only for LAN VSI */ /* Only for LAN VSI */
vsi = pf->vsi[pf->lan_vsi]; vsi = pf->vsi[pf->lan_vsi];
...@@ -7520,6 +7520,18 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) ...@@ -7520,6 +7520,18 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw); i40e_flush(hw);
} }
/**
 * i40e_tunnel_name - map a UDP tunnel port type to a printable name
 * @port: UDP port configuration whose tunnel type is inspected
 *
 * Returns a constant string ("vxlan", "geneve", or "unknown") suitable
 * for use in driver log messages.
 **/
static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
{
	if (port->type == UDP_TUNNEL_TYPE_VXLAN)
		return "vxlan";
	if (port->type == UDP_TUNNEL_TYPE_GENEVE)
		return "geneve";
	return "unknown";
}
/** /**
* i40e_sync_udp_filters - Trigger a sync event for existing UDP filters * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
* @pf: board private structure * @pf: board private structure
...@@ -7565,14 +7577,14 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ...@@ -7565,14 +7577,14 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
ret = i40e_aq_del_udp_tunnel(hw, i, NULL); ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
if (ret) { if (ret) {
dev_dbg(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"%s %s port %d, index %d failed, err %s aq_err %s\n", "%s %s port %d, index %d failed, err %s aq_err %s\n",
pf->udp_ports[i].type ? "vxlan" : "geneve", i40e_tunnel_name(&pf->udp_ports[i]),
port ? "add" : "delete", port ? "add" : "delete",
port, i, port, i,
i40e_stat_str(&pf->hw, ret), i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status)); pf->hw.aq.asq_last_status));
pf->udp_ports[i].port = 0; pf->udp_ports[i].port = 0;
} }
} }
...@@ -9589,6 +9601,7 @@ static int i40e_xdp(struct net_device *dev, ...@@ -9589,6 +9601,7 @@ static int i40e_xdp(struct net_device *dev,
return i40e_xdp_setup(vsi, xdp->prog); return i40e_xdp_setup(vsi, xdp->prog);
case XDP_QUERY_PROG: case XDP_QUERY_PROG:
xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
return 0; return 0;
default: default:
return -EINVAL; return -EINVAL;
...@@ -12089,7 +12102,10 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) ...@@ -12089,7 +12102,10 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
i40e_stop_misc_vector(pf); i40e_stop_misc_vector(pf);
if (pf->msix_entries) {
synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
}
retval = pci_save_state(pdev); retval = pci_save_state(pdev);
if (retval) if (retval)
return retval; return retval;
...@@ -12129,6 +12145,15 @@ static int i40e_resume(struct pci_dev *pdev) ...@@ -12129,6 +12145,15 @@ static int i40e_resume(struct pci_dev *pdev)
/* handling the reset will rebuild the device state */ /* handling the reset will rebuild the device state */
if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
clear_bit(__I40E_DOWN, pf->state); clear_bit(__I40E_DOWN, pf->state);
if (pf->msix_entries) {
err = request_irq(pf->msix_entries[0].vector,
i40e_intr, 0, pf->int_name, pf);
if (err) {
dev_err(&pf->pdev->dev,
"request_irq for %s failed: %d\n",
pf->int_name, err);
}
}
i40e_reset_and_rebuild(pf, false, false); i40e_reset_and_rebuild(pf, false, false);
} }
...@@ -12168,12 +12193,14 @@ static int __init i40e_init_module(void) ...@@ -12168,12 +12193,14 @@ static int __init i40e_init_module(void)
i40e_driver_string, i40e_driver_version_str); i40e_driver_string, i40e_driver_version_str);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
/* we will see if single thread per module is enough for now, /* There is no need to throttle the number of active tasks because
* it can't be any worse than using the system workqueue which * each device limits its own task using a state bit for scheduling
* was already single threaded * the service task, and the device tasks do not interfere with each
* other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/ */
i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
i40e_driver_name);
if (!i40e_wq) { if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name); pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM; return -ENOMEM;
......
...@@ -134,8 +134,25 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw, ...@@ -134,8 +134,25 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
**/ **/
void i40e_release_nvm(struct i40e_hw *hw) void i40e_release_nvm(struct i40e_hw *hw)
{ {
if (!hw->nvm.blank_nvm_mode) i40e_status ret_code = I40E_SUCCESS;
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); u32 total_delay = 0;
if (hw->nvm.blank_nvm_mode)
return;
ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
/* there are some rare cases when trying to release the resource
* results in an admin Q timeout, so handle them correctly
*/
while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
(total_delay < hw->aq.asq_cmd_timeout)) {
usleep_range(1000, 2000);
ret_code = i40e_aq_release_resource(hw,
I40E_NVM_RESOURCE_ID,
0, NULL);
total_delay++;
}
} }
/** /**
......
...@@ -158,13 +158,12 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) ...@@ -158,13 +158,12 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{ {
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct timespec64 now, then; struct timespec64 now;
then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock); mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now); i40e_ptp_read(pf, &now);
now = timespec64_add(now, then); timespec64_add_ns(&now, delta);
i40e_ptp_write(pf, (const struct timespec64 *)&now); i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock); mutex_unlock(&pf->tmreg_lock);
......
...@@ -860,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -860,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
netdev_tx_completed_queue(txring_txq(tx_ring), netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes); total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this /* Make sure that anybody stopping the queue after this
...@@ -2063,7 +2063,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -2063,7 +2063,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false, xdp_xmit = false; bool failure = false, xdp_xmit = false;
while (likely(total_rx_packets < budget)) { while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer; struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
struct xdp_buff xdp; struct xdp_buff xdp;
...@@ -2196,7 +2196,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -2196,7 +2196,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
/* guarantee a trip back through this routine if there was a failure */ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets; return failure ? budget : (int)total_rx_packets;
} }
static u32 i40e_buildreg_itr(const int type, const u16 itr) static u32 i40e_buildreg_itr(const int type, const u16 itr)
...@@ -2451,9 +2451,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2451,9 +2451,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
hlen = (hdr.network[0] & 0x0F) << 2; hlen = (hdr.network[0] & 0x0F) << 2;
l4_proto = hdr.ipv4->protocol; l4_proto = hdr.ipv4->protocol;
} else { } else {
hlen = hdr.network - skb->data; /* find the start of the innermost ipv6 header */
l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); unsigned int inner_hlen = hdr.network - skb->data;
hlen -= hdr.network - skb->data; unsigned int h_offset = inner_hlen;
/* this function updates h_offset to the end of the header */
l4_proto =
ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
/* hlen will contain our best estimate of the tcp header */
hlen = h_offset - inner_hlen;
} }
if (l4_proto != IPPROTO_TCP) if (l4_proto != IPPROTO_TCP)
......
...@@ -1567,7 +1567,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) ...@@ -1567,7 +1567,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
vf->vf_id); vf->vf_id);
ret = I40E_ERR_PARAM; aq_ret = I40E_ERR_PARAM;
goto err; goto err;
} }
vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
...@@ -1741,16 +1741,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1741,16 +1741,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
NULL); NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
aq_ret = 0; if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { continue;
aq_ret = aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
vsi->seid, alluni,
alluni, f->vlan,
f->vlan, NULL);
NULL); aq_err = pf->hw.aq.asq_last_status;
aq_err = pf->hw.aq.asq_last_status;
}
if (aq_ret) if (aq_ret)
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
...@@ -2764,7 +2762,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2764,7 +2762,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
spin_unlock_bh(&vsi->mac_filter_hash_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */ /* program mac filter */
if (i40e_sync_vsi_filters(vsi)) { if (i40e_sync_vsi_filters(vsi)) {
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
...@@ -2772,7 +2769,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2772,7 +2769,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param; goto error_param;
} }
ether_addr_copy(vf->default_lan_addr.addr, mac); ether_addr_copy(vf->default_lan_addr.addr, mac);
vf->pf_set_mac = true;
if (is_zero_ether_addr(mac)) {
vf->pf_set_mac = false;
dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
} else {
vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
mac, vf_id);
}
/* Force the VF driver stop so it has to reload with new MAC address */ /* Force the VF driver stop so it has to reload with new MAC address */
i40e_vc_disable_vf(pf, vf); i40e_vc_disable_vf(pf, vf);
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
......
...@@ -54,7 +54,7 @@ struct i40e_dma_mem { ...@@ -54,7 +54,7 @@ struct i40e_dma_mem {
void *va; void *va;
dma_addr_t pa; dma_addr_t pa;
u32 size; u32 size;
} __packed; };
#define i40e_allocate_dma_mem(h, m, unused, s, a) \ #define i40e_allocate_dma_mem(h, m, unused, s, a) \
i40evf_allocate_dma_mem_d(h, m, s, a) i40evf_allocate_dma_mem_d(h, m, s, a)
...@@ -63,7 +63,7 @@ struct i40e_dma_mem { ...@@ -63,7 +63,7 @@ struct i40e_dma_mem {
struct i40e_virt_mem { struct i40e_virt_mem {
void *va; void *va;
u32 size; u32 size;
} __packed; };
#define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) #define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s)
#define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) #define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m)
......
...@@ -275,7 +275,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -275,7 +275,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
netdev_tx_completed_queue(txring_txq(tx_ring), netdev_tx_completed_queue(txring_txq(tx_ring),
total_packets, total_bytes); total_packets, total_bytes);
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
(I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this /* Make sure that anybody stopping the queue after this
...@@ -1299,7 +1299,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1299,7 +1299,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
bool failure = false; bool failure = false;
while (likely(total_rx_packets < budget)) { while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer; struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc; union i40e_rx_desc *rx_desc;
unsigned int size; unsigned int size;
...@@ -1406,7 +1406,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -1406,7 +1406,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_bytes += total_rx_bytes; rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
/* guarantee a trip back through this routine if there was a failure */ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets; return failure ? budget : (int)total_rx_packets;
} }
static u32 i40e_buildreg_itr(const int type, const u16 itr) static u32 i40e_buildreg_itr(const int type, const u16 itr)
......
...@@ -39,6 +39,17 @@ ...@@ -39,6 +39,17 @@
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/sctp.h> #include <linux/sctp.h>
#include <linux/ipv6.h> #include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/socket.h>
#include <linux/jiffies.h>
#include <net/ip6_checksum.h> #include <net/ip6_checksum.h>
#include <net/udp.h> #include <net/udp.h>
......
...@@ -165,7 +165,7 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, ...@@ -165,7 +165,7 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data) struct ethtool_stats *stats, u64 *data)
{ {
struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40evf_adapter *adapter = netdev_priv(netdev);
int i, j; unsigned int i, j;
char *p; char *p;
for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
...@@ -197,7 +197,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) ...@@ -197,7 +197,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
int i; int i;
if (sset == ETH_SS_STATS) { if (sset == ETH_SS_STATS) {
for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) {
memcpy(p, i40evf_gstrings_stats[i].stat_string, memcpy(p, i40evf_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN); ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN; p += ETH_GSTRING_LEN;
......
...@@ -1957,8 +1957,8 @@ static void i40evf_adminq_task(struct work_struct *work) ...@@ -1957,8 +1957,8 @@ static void i40evf_adminq_task(struct work_struct *work)
container_of(work, struct i40evf_adapter, adminq_task); container_of(work, struct i40evf_adapter, adminq_task);
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
struct i40e_arq_event_info event; struct i40e_arq_event_info event;
struct virtchnl_msg *v_msg; enum virtchnl_ops v_op;
i40e_status ret; i40e_status ret, v_ret;
u32 val, oldval; u32 val, oldval;
u16 pending; u16 pending;
...@@ -1970,15 +1970,15 @@ static void i40evf_adminq_task(struct work_struct *work) ...@@ -1970,15 +1970,15 @@ static void i40evf_adminq_task(struct work_struct *work)
if (!event.msg_buf) if (!event.msg_buf)
goto out; goto out;
v_msg = (struct virtchnl_msg *)&event.desc;
do { do {
ret = i40evf_clean_arq_element(hw, &event, &pending); ret = i40evf_clean_arq_element(hw, &event, &pending);
if (ret || !v_msg->v_opcode) v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low);
if (ret || !v_op)
break; /* No event to process or error cleaning ARQ */ break; /* No event to process or error cleaning ARQ */
i40evf_virtchnl_completion(adapter, v_msg->v_opcode, i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
(i40e_status)v_msg->v_retval,
event.msg_buf,
event.msg_len); event.msg_len);
if (pending != 0) if (pending != 0)
memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册