提交 c5870942 编写于 作者: D David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-10-31

This series contains updates to i40e and i40evf.

Colin Ian King fixes a minor issue with dev_err message where a new line
character was missing from the end of the message.

Jake provides most of the changes in the series, starting with
dropping the is_vf and is_netdev fields in the i40e_mac_filter structure
since they are not needed (along with the checks that used these fields).
Reason being that we use separate VSIs for SRIOV VFs and for netdev VSIs,
therefore a single VSI should only have one type of filter.  Then
simplifies our .set_rx_mode handler by using the kernel provided
__dev_uc_sync and __dev_mc_sync functions for notification of add and
deletion of filters.  Refactored the i40e_put_mac_in_vlan() to resolve
an issue where this function was arbitrarily modifying all filters to
have the same VLAN, which is incorrect because it could be modifying
active filters without putting them into the new state.  Refactored the
delete filter logic so that we can re-use the functionality, where
appropriate, without having to search for the filter twice.  Reduced the
latency of operations related to searching for specific MAC filters by
using a static hash table instead of a list.  Reduced code duplication
in the adminq command to add/delete for filters.  Fixed an issue where
TSYNVALID bit was not being checked as the true indicator of whether
the packet has an associated timestamp.  Cleaned up a second msleep()
call by simply re-ordering the code so that the extra wait is no longer
needed.

Alan provides additional fix to the work Jake has been doing to resolve
a bug where adding at least one VLAN and then removing all VLANs leaves
the MAC filters for the VSI with an incorrect value for the VID which
indicates the MAC filter's VLAN status.

Alex adds a common method for finding a VSI by type.  Also cleaned up
the logic for coalescing RS bits, which was convoluted and larger than
it needed to be.

Mitch fixes an issue with the failure to add filters when the VF driver
is reloaded by simply setting the number of filters to 0 when freeing
VF resources.

Maciej implements an I40E_NVMUPD_STATE_ERROR state for NVM update, so
that the driver has the ability to return NVM image write failure.

Filip removes unreachable code which was found using static analysis
where "if" statements were never in a "true/false" state, so clean up
unnecessary if statements.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/iommu.h> #include <linux/iommu.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/in.h> #include <linux/in.h>
#include <linux/ip.h> #include <linux/ip.h>
...@@ -428,11 +429,13 @@ struct i40e_pf { ...@@ -428,11 +429,13 @@ struct i40e_pf {
struct ptp_clock_info ptp_caps; struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb; struct sk_buff *ptp_tx_skb;
struct hwtstamp_config tstamp_config; struct hwtstamp_config tstamp_config;
unsigned long last_rx_ptp_check; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
u64 ptp_base_adj; u64 ptp_base_adj;
u32 tx_hwtstamp_timeouts; u32 tx_hwtstamp_timeouts;
u32 rx_hwtstamp_cleared; u32 rx_hwtstamp_cleared;
u32 latch_event_flags;
spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */
unsigned long latch_events[4];
bool ptp_tx; bool ptp_tx;
bool ptp_rx; bool ptp_rx;
u16 rss_table_size; /* HW RSS table size */ u16 rss_table_size; /* HW RSS table size */
...@@ -445,6 +448,20 @@ struct i40e_pf { ...@@ -445,6 +448,20 @@ struct i40e_pf {
u16 phy_led_val; u16 phy_led_val;
}; };
/**
 * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
* Simply copies the address and returns it as a u64 for hashing
**/
static inline u64 i40e_addr_to_hkey(const u8 *macaddr)
{
u64 key = 0;
ether_addr_copy((u8 *)&key, macaddr);
return key;
}
enum i40e_filter_state { enum i40e_filter_state {
I40E_FILTER_INVALID = 0, /* Invalid state */ I40E_FILTER_INVALID = 0, /* Invalid state */
I40E_FILTER_NEW, /* New, not sent to FW yet */ I40E_FILTER_NEW, /* New, not sent to FW yet */
...@@ -454,13 +471,10 @@ enum i40e_filter_state { ...@@ -454,13 +471,10 @@ enum i40e_filter_state {
/* There is no 'removed' state; the filter struct is freed */ /* There is no 'removed' state; the filter struct is freed */
}; };
struct i40e_mac_filter { struct i40e_mac_filter {
struct list_head list; struct hlist_node hlist;
u8 macaddr[ETH_ALEN]; u8 macaddr[ETH_ALEN];
#define I40E_VLAN_ANY -1 #define I40E_VLAN_ANY -1
s16 vlan; s16 vlan;
u8 counter; /* number of instances of this filter */
bool is_vf; /* filter belongs to a VF */
bool is_netdev; /* filter belongs to a netdev */
enum i40e_filter_state state; enum i40e_filter_state state;
}; };
...@@ -501,9 +515,11 @@ struct i40e_vsi { ...@@ -501,9 +515,11 @@ struct i40e_vsi {
#define I40E_VSI_FLAG_VEB_OWNER BIT(1) #define I40E_VSI_FLAG_VEB_OWNER BIT(1)
unsigned long flags; unsigned long flags;
/* Per VSI lock to protect elements/list (MAC filter) */ /* Per VSI lock to protect elements/hash (MAC filter) */
spinlock_t mac_filter_list_lock; spinlock_t mac_filter_hash_lock;
struct list_head mac_filter_list; /* Fixed size hash table with 2^8 buckets for MAC filters */
DECLARE_HASHTABLE(mac_filter_hash, 8);
bool has_vlan_filter;
/* VSI stats */ /* VSI stats */
struct rtnl_link_stats64 net_stats; struct rtnl_link_stats64 net_stats;
...@@ -707,6 +723,25 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); ...@@ -707,6 +723,25 @@ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
u16 rss_table_size, u16 rss_size); u16 rss_table_size, u16 rss_size);
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
/**
 * i40e_find_vsi_by_type - Find and return the first VSI of a given type
 * @pf: PF to search for VSI
 * @type: Value indicating type of VSI we are looking for
**/
static inline struct i40e_vsi *
i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
{
int i;
for (i = 0; i < pf->num_alloc_vsi; i++) {
struct i40e_vsi *vsi = pf->vsi[i];
if (vsi && vsi->type == type)
return vsi;
}
return NULL;
}
void i40e_update_stats(struct i40e_vsi *vsi); void i40e_update_stats(struct i40e_vsi *vsi);
void i40e_update_eth_stats(struct i40e_vsi *vsi); void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi); struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
...@@ -723,10 +758,8 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf); ...@@ -723,10 +758,8 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf);
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features); bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
void i40e_set_ethtool_ops(struct net_device *netdev); void i40e_set_ethtool_ops(struct net_device *netdev);
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan, const u8 *macaddr, s16 vlan);
bool is_vf, bool is_netdev); void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev);
int i40e_sync_vsi_filters(struct i40e_vsi *vsi); int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
u16 uplink, u32 param1); u16 uplink, u32 param1);
...@@ -740,7 +773,8 @@ void i40e_service_event_schedule(struct i40e_pf *pf); ...@@ -740,7 +773,8 @@ void i40e_service_event_schedule(struct i40e_pf *pf);
void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
u8 *msg, u16 len); u8 *msg, u16 len);
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable); int i40e_vsi_start_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc); u16 downlink_seid, u8 enabled_tc);
...@@ -816,14 +850,12 @@ int i40e_close(struct net_device *netdev); ...@@ -816,14 +850,12 @@ int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi); int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr, struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
bool is_vf, bool is_netdev); const u8 *macaddr);
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr);
bool is_vf, bool is_netdev);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE #ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc); struct tc_to_netdev *tc);
......
...@@ -964,11 +964,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, ...@@ -964,11 +964,11 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc; desc_idx = ntc;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags); flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) { if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw, i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE, I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n", "AQRX: Event received with error 0x%X.\n",
......
...@@ -3313,8 +3313,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, ...@@ -3313,8 +3313,10 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread /* partition id is 1-based, and functions are evenly spread
* across the ports as partitions * across the ports as partitions
*/ */
hw->partition_id = (hw->pf_id / hw->num_ports) + 1; if (hw->num_ports != 0) {
hw->num_partitions = num_functions / hw->num_ports; hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
hw->num_partitions = num_functions / hw->num_ports;
}
/* additional HW specific goodies that might /* additional HW specific goodies that might
* someday be HW version specific * someday be HW version specific
......
...@@ -134,7 +134,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ...@@ -134,7 +134,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct rtnl_link_stats64 *nstat; struct rtnl_link_stats64 *nstat;
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
int i; int i, bkt;
vsi = i40e_dbg_find_vsi(pf, seid); vsi = i40e_dbg_find_vsi(pf, seid);
if (!vsi) { if (!vsi) {
...@@ -166,11 +166,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ...@@ -166,11 +166,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
pf->hw.mac.addr, pf->hw.mac.addr,
pf->hw.mac.san_addr, pf->hw.mac.san_addr,
pf->hw.mac.port_addr); pf->hw.mac.port_addr);
list_for_each_entry(f, &vsi->mac_filter_list, list) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
" mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n", " mac_filter_hash: %pM vid=%d, state %s\n",
f->macaddr, f->vlan, f->is_netdev, f->is_vf, f->macaddr, f->vlan,
f->counter, i40e_filter_state_string[f->state]); i40e_filter_state_string[f->state]);
} }
dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n", dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n",
vsi->active_filters, vsi->promisc_threshold, vsi->active_filters, vsi->promisc_threshold,
...@@ -867,86 +867,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ...@@ -867,86 +867,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]); i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
struct i40e_mac_filter *f;
int vlan = 0;
u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
"%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
&vsi_seid,
&ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
&vlan);
if (cnt == 7) {
vlan = 0;
} else if (cnt != 8) {
dev_info(&pf->pdev->dev,
"add macaddr: bad command string, cnt=%d\n",
cnt);
goto command_write_done;
}
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
"add macaddr: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
spin_lock_bh(&vsi->mac_filter_list_lock);
f = i40e_add_filter(vsi, ma, vlan, false, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
ret = i40e_sync_vsi_filters(vsi);
if (f && !ret)
dev_info(&pf->pdev->dev,
"add macaddr: %pM vlan=%d added to VSI %d\n",
ma, vlan, vsi_seid);
else
dev_info(&pf->pdev->dev,
"add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
ma, vlan, vsi_seid, f, ret);
} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
int vlan = 0;
u8 ma[6];
int ret;
cnt = sscanf(&cmd_buf[11],
"%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
&vsi_seid,
&ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
&vlan);
if (cnt == 7) {
vlan = 0;
} else if (cnt != 8) {
dev_info(&pf->pdev->dev,
"del macaddr: bad command string, cnt=%d\n",
cnt);
goto command_write_done;
}
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
"del macaddr: VSI %d not found\n", vsi_seid);
goto command_write_done;
}
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_del_filter(vsi, ma, vlan, false, false);
spin_unlock_bh(&vsi->mac_filter_list_lock);
ret = i40e_sync_vsi_filters(vsi);
if (!ret)
dev_info(&pf->pdev->dev,
"del macaddr: %pM vlan=%d removed from VSI %d\n",
ma, vlan, vsi_seid);
else
dev_info(&pf->pdev->dev,
"del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
ma, vlan, vsi_seid, ret);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) { } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
i40e_status ret; i40e_status ret;
u16 vid; u16 vid;
...@@ -1615,8 +1535,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ...@@ -1615,8 +1535,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n"); dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n"); dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
dev_info(&pf->pdev->dev, " del relay <relay_seid>\n"); dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n"); dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n"); dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
dev_info(&pf->pdev->dev, " dump switch\n"); dev_info(&pf->pdev->dev, " dump switch\n");
......
...@@ -216,7 +216,6 @@ enum i40e_ethtool_test_id { ...@@ -216,7 +216,6 @@ enum i40e_ethtool_test_id {
I40E_ETH_TEST_REG = 0, I40E_ETH_TEST_REG = 0,
I40E_ETH_TEST_EEPROM, I40E_ETH_TEST_EEPROM,
I40E_ETH_TEST_INTR, I40E_ETH_TEST_INTR,
I40E_ETH_TEST_LOOPBACK,
I40E_ETH_TEST_LINK, I40E_ETH_TEST_LINK,
}; };
...@@ -224,7 +223,6 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { ...@@ -224,7 +223,6 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Register test (offline)",
"Eeprom test (offline)", "Eeprom test (offline)",
"Interrupt test (offline)", "Interrupt test (offline)",
"Loopback test (offline)",
"Link test (on/offline)" "Link test (on/offline)"
}; };
...@@ -1744,17 +1742,6 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data) ...@@ -1744,17 +1742,6 @@ static int i40e_intr_test(struct net_device *netdev, u64 *data)
return *data; return *data;
} }
static int i40e_loopback_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
netif_info(pf, hw, netdev, "loopback test not implemented\n");
*data = 0;
return *data;
}
static inline bool i40e_active_vfs(struct i40e_pf *pf) static inline bool i40e_active_vfs(struct i40e_pf *pf)
{ {
struct i40e_vf *vfs = pf->vf; struct i40e_vf *vfs = pf->vf;
...@@ -1768,17 +1755,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf) ...@@ -1768,17 +1755,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
static inline bool i40e_active_vmdqs(struct i40e_pf *pf) static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
{ {
struct i40e_vsi **vsi = pf->vsi; return !!i40e_find_vsi_by_type(pf, I40E_VSI_VMDQ2);
int i;
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!vsi[i])
continue;
if (vsi[i]->type == I40E_VSI_VMDQ2)
return true;
}
return false;
} }
static void i40e_diag_test(struct net_device *netdev, static void i40e_diag_test(struct net_device *netdev,
...@@ -1800,7 +1777,6 @@ static void i40e_diag_test(struct net_device *netdev, ...@@ -1800,7 +1777,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 1; data[I40E_ETH_TEST_REG] = 1;
data[I40E_ETH_TEST_EEPROM] = 1; data[I40E_ETH_TEST_EEPROM] = 1;
data[I40E_ETH_TEST_INTR] = 1; data[I40E_ETH_TEST_INTR] = 1;
data[I40E_ETH_TEST_LOOPBACK] = 1;
data[I40E_ETH_TEST_LINK] = 1; data[I40E_ETH_TEST_LINK] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__I40E_TESTING, &pf->state); clear_bit(__I40E_TESTING, &pf->state);
...@@ -1828,9 +1804,6 @@ static void i40e_diag_test(struct net_device *netdev, ...@@ -1828,9 +1804,6 @@ static void i40e_diag_test(struct net_device *netdev,
if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR])) if (i40e_intr_test(netdev, &data[I40E_ETH_TEST_INTR]))
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;
if (i40e_loopback_test(netdev, &data[I40E_ETH_TEST_LOOPBACK]))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* run reg test last, a reset is required after it */ /* run reg test last, a reset is required after it */
if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG])) if (i40e_reg_test(netdev, &data[I40E_ETH_TEST_REG]))
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;
...@@ -1851,7 +1824,6 @@ static void i40e_diag_test(struct net_device *netdev, ...@@ -1851,7 +1824,6 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_REG] = 0; data[I40E_ETH_TEST_REG] = 0;
data[I40E_ETH_TEST_EEPROM] = 0; data[I40E_ETH_TEST_EEPROM] = 0;
data[I40E_ETH_TEST_INTR] = 0; data[I40E_ETH_TEST_INTR] = 0;
data[I40E_ETH_TEST_LOOPBACK] = 0;
} }
skip_ol_tests: skip_ol_tests:
......
...@@ -1522,12 +1522,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi) ...@@ -1522,12 +1522,12 @@ void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
* same PCI function. * same PCI function.
*/ */
netdev->dev_port = 1; netdev->dev_port = 1;
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false); i40e_add_filter(vsi, hw->mac.san_addr, 0);
i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false); i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0);
i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0);
i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false); i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* use san mac */ /* use san mac */
ether_addr_copy(netdev->dev_addr, hw->mac.san_addr); ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
......
...@@ -1145,25 +1145,22 @@ void i40e_update_stats(struct i40e_vsi *vsi) ...@@ -1145,25 +1145,22 @@ void i40e_update_stats(struct i40e_vsi *vsi)
* @vsi: the VSI to be searched * @vsi: the VSI to be searched
* @macaddr: the MAC address * @macaddr: the MAC address
* @vlan: the vlan * @vlan: the vlan
* @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
* *
* Returns ptr to the filter object or NULL * Returns ptr to the filter object or NULL
**/ **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan, const u8 *macaddr, s16 vlan)
bool is_vf, bool is_netdev)
{ {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
u64 key;
if (!vsi || !macaddr) if (!vsi || !macaddr)
return NULL; return NULL;
list_for_each_entry(f, &vsi->mac_filter_list, list) { key = i40e_addr_to_hkey(macaddr);
hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
if ((ether_addr_equal(macaddr, f->macaddr)) && if ((ether_addr_equal(macaddr, f->macaddr)) &&
(vlan == f->vlan) && (vlan == f->vlan))
(!is_vf || f->is_vf) &&
(!is_netdev || f->is_netdev))
return f; return f;
} }
return NULL; return NULL;
...@@ -1173,24 +1170,21 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi, ...@@ -1173,24 +1170,21 @@ static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
* i40e_find_mac - Find a mac addr in the macvlan filters list * i40e_find_mac - Find a mac addr in the macvlan filters list
* @vsi: the VSI to be searched * @vsi: the VSI to be searched
* @macaddr: the MAC address we are searching for * @macaddr: the MAC address we are searching for
* @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
* *
* Returns the first filter with the provided MAC address or NULL if * Returns the first filter with the provided MAC address or NULL if
* MAC address was not found * MAC address was not found
**/ **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
bool is_vf, bool is_netdev)
{ {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
u64 key;
if (!vsi || !macaddr) if (!vsi || !macaddr)
return NULL; return NULL;
list_for_each_entry(f, &vsi->mac_filter_list, list) { key = i40e_addr_to_hkey(macaddr);
if ((ether_addr_equal(macaddr, f->macaddr)) && hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
(!is_vf || f->is_vf) && if ((ether_addr_equal(macaddr, f->macaddr)))
(!is_netdev || f->is_netdev))
return f; return f;
} }
return NULL; return NULL;
...@@ -1204,86 +1198,31 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, ...@@ -1204,86 +1198,31 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
**/ **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi) bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{ {
struct i40e_mac_filter *f; /* If we have a PVID, always operate in VLAN mode */
if (vsi->info.pvid)
return true;
/* Only -1 for all the filters denotes not in vlan mode /* We need to operate in VLAN mode whenever we have any filters with
* so we have to go through all the list in order to make sure * a VLAN other than I40E_VLAN_ALL. We could check the table each
* time, incurring search cost repeatedly. However, we can notice two
* things:
*
* 1) the only place where we can gain a VLAN filter is in
* i40e_add_filter.
*
* 2) the only place where filters are actually removed is in
* i40e_vsi_sync_filters_subtask.
*
* Thus, we can simply use a boolean value, has_vlan_filters which we
* will set to true when we add a VLAN filter in i40e_add_filter. Then
* we have to perform the full search after deleting filters in
* i40e_vsi_sync_filters_subtask, but we already have to search
* filters here and can perform the check at the same time. This
* results in avoiding embedding a loop for VLAN mode inside another
* loop over all the filters, and should maintain correctness as noted
* above.
*/ */
list_for_each_entry(f, &vsi->mac_filter_list, list) { return vsi->has_vlan_filter;
if (f->vlan >= 0 || vsi->info.pvid)
return true;
}
return false;
}
/**
* i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
* @vsi: the VSI to be searched
* @macaddr: the mac address to be filtered
* @is_vf: true if it is a VF
* @is_netdev: true if it is a netdev
*
* Goes through all the macvlan filters and adds a
* macvlan filter for each unique vlan that already exists
*
* Returns first filter found on success, else NULL
**/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev)
{
struct i40e_mac_filter *f;
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if (vsi->info.pvid)
f->vlan = le16_to_cpu(vsi->info.pvid);
if (!i40e_find_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev)) {
if (!i40e_add_filter(vsi, macaddr, f->vlan,
is_vf, is_netdev))
return NULL;
}
}
return list_first_entry_or_null(&vsi->mac_filter_list,
struct i40e_mac_filter, list);
}
/**
* i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
* @vsi: the VSI to be searched
* @macaddr: the mac address to be removed
* @is_vf: true if it is a VF
* @is_netdev: true if it is a netdev
*
* Removes a given MAC address from a VSI, regardless of VLAN
*
* Returns 0 for success, or error
**/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev)
{
struct i40e_mac_filter *f = NULL;
int changed = 0;
WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
"Missing mac_filter_list_lock\n");
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if ((ether_addr_equal(macaddr, f->macaddr)) &&
(is_vf == f->is_vf) &&
(is_netdev == f->is_netdev)) {
f->counter--;
changed = 1;
if (f->counter == 0)
f->state = I40E_FILTER_REMOVE;
}
}
if (changed) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
return 0;
}
return -ENOENT;
} }
/** /**
...@@ -1291,20 +1230,17 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, ...@@ -1291,20 +1230,17 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
* @vsi: the VSI to be searched * @vsi: the VSI to be searched
* @macaddr: the MAC address * @macaddr: the MAC address
* @vlan: the vlan * @vlan: the vlan
* @is_vf: make sure its a VF filter, else doesn't matter
* @is_netdev: make sure its a netdev filter, else doesn't matter
* *
* Returns ptr to the filter object or NULL when no memory available. * Returns ptr to the filter object or NULL when no memory available.
* *
* NOTE: This function is expected to be called with mac_filter_list_lock * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held. * being held.
**/ **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan, const u8 *macaddr, s16 vlan)
bool is_vf, bool is_netdev)
{ {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
int changed = false; u64 key;
if (!vsi || !macaddr) if (!vsi || !macaddr)
return NULL; return NULL;
...@@ -1316,11 +1252,17 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ...@@ -1316,11 +1252,17 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (is_broadcast_ether_addr(macaddr)) if (is_broadcast_ether_addr(macaddr))
return NULL; return NULL;
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); f = i40e_find_filter(vsi, macaddr, vlan);
if (!f) { if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC); f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f) if (!f)
goto add_filter_out; return NULL;
/* Update the boolean indicating if we need to function in
* VLAN mode.
*/
if (vlan >= 0)
vsi->has_vlan_filter = true;
ether_addr_copy(f->macaddr, macaddr); ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan; f->vlan = vlan;
...@@ -1332,100 +1274,148 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ...@@ -1332,100 +1274,148 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
f->state = I40E_FILTER_FAILED; f->state = I40E_FILTER_FAILED;
else else
f->state = I40E_FILTER_NEW; f->state = I40E_FILTER_NEW;
changed = true; INIT_HLIST_NODE(&f->hlist);
INIT_LIST_HEAD(&f->list);
list_add_tail(&f->list, &vsi->mac_filter_list);
}
/* increment counter and add a new flag if needed */ key = i40e_addr_to_hkey(macaddr);
if (is_vf) { hash_add(vsi->mac_filter_hash, &f->hlist, key);
if (!f->is_vf) {
f->is_vf = true;
f->counter++;
}
} else if (is_netdev) {
if (!f->is_netdev) {
f->is_netdev = true;
f->counter++;
}
} else {
f->counter++;
}
if (changed) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC; vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
} }
add_filter_out: /* If we're asked to add a filter that has been marked for removal, it
* is safe to simply restore it to active state. __i40e_del_filter
* will have simply deleted any filters which were previously marked
* NEW or FAILED, so if it is currently marked REMOVE it must have
* previously been ACTIVE. Since we haven't yet run the sync filters
* task, just restore this filter to the ACTIVE state so that the
* sync task leaves it in place
*/
if (f->state == I40E_FILTER_REMOVE)
f->state = I40E_FILTER_ACTIVE;
return f; return f;
} }
/** /**
* i40e_del_filter - Remove a mac/vlan filter from the VSI * __i40e_del_filter - Remove a specific filter from the VSI
* @vsi: VSI to remove from
* @f: the filter to remove from the list
*
* This function should be called instead of i40e_del_filter only if you know
* the exact filter you will remove already, such as via i40e_find_filter or
* i40e_find_mac.
*
* NOTE: This function is expected to be called with mac_filter_hash_lock
* being held.
* ANOTHER NOTE: This function MUST be called from within the context of
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry().
**/
static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
if (!f)
return;
if ((f->state == I40E_FILTER_FAILED) ||
(f->state == I40E_FILTER_NEW)) {
/* this one never got added by the FW. Just remove it,
* no need to sync anything.
*/
hash_del(&f->hlist);
kfree(f);
} else {
f->state = I40E_FILTER_REMOVE;
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
}
/**
* i40e_del_filter - Remove a MAC/VLAN filter from the VSI
* @vsi: the VSI to be searched * @vsi: the VSI to be searched
* @macaddr: the MAC address * @macaddr: the MAC address
* @vlan: the vlan * @vlan: the VLAN
* @is_vf: make sure it's a VF filter, else doesn't matter
* @is_netdev: make sure it's a netdev filter, else doesn't matter
* *
* NOTE: This function is expected to be called with mac_filter_list_lock * NOTE: This function is expected to be called with mac_filter_hash_lock
* being held. * being held.
* ANOTHER NOTE: This function MUST be called from within the context of * ANOTHER NOTE: This function MUST be called from within the context of
* the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
* instead of list_for_each_entry(). * instead of list_for_each_entry().
**/ **/
void i40e_del_filter(struct i40e_vsi *vsi, void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
u8 *macaddr, s16 vlan,
bool is_vf, bool is_netdev)
{ {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
if (!vsi || !macaddr) if (!vsi || !macaddr)
return; return;
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); f = i40e_find_filter(vsi, macaddr, vlan);
if (!f || f->counter == 0) __i40e_del_filter(vsi, f);
return; }
if (is_vf) { /**
if (f->is_vf) { * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
f->is_vf = false; * @vsi: the VSI to be searched
f->counter--; * @macaddr: the mac address to be filtered
} *
} else if (is_netdev) { * Goes through all the macvlan filters and adds a macvlan filter for each
if (f->is_netdev) { * unique vlan that already exists. If a PVID has been assigned, instead only
f->is_netdev = false; * add the macaddr to that VLAN.
f->counter--; *
} * Returns last filter added on success, else NULL
} else { **/
/* make sure we don't remove a filter in use by VF or netdev */ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
int min_f = 0; const u8 *macaddr)
{
struct i40e_mac_filter *f, *add = NULL;
struct hlist_node *h;
int bkt;
min_f += (f->is_vf ? 1 : 0); if (vsi->info.pvid)
min_f += (f->is_netdev ? 1 : 0); return i40e_add_filter(vsi, macaddr,
le16_to_cpu(vsi->info.pvid));
if (f->counter > min_f) hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->counter--; if (f->state == I40E_FILTER_REMOVE)
continue;
add = i40e_add_filter(vsi, macaddr, f->vlan);
if (!add)
return NULL;
} }
/* counter == 0 tells sync_filters_subtask to return add;
* remove the filter from the firmware's list }
*/
if (f->counter == 0) { /**
if ((f->state == I40E_FILTER_FAILED) || * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
(f->state == I40E_FILTER_NEW)) { * @vsi: the VSI to be searched
/* this one never got added by the FW. Just remove it, * @macaddr: the mac address to be removed
* no need to sync anything. *
*/ * Removes a given MAC address from a VSI, regardless of VLAN
list_del(&f->list); *
kfree(f); * Returns 0 for success, or error
} else { **/
f->state = I40E_FILTER_REMOVE; int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; {
vsi->back->flags |= I40E_FLAG_FILTER_SYNC; struct i40e_mac_filter *f;
struct hlist_node *h;
bool found = false;
int bkt;
WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
"Missing mac_filter_hash_lock\n");
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (ether_addr_equal(macaddr, f->macaddr)) {
__i40e_del_filter(vsi, f);
found = true;
} }
} }
if (found)
return 0;
else
return -ENOENT;
} }
/** /**
...@@ -1466,10 +1456,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ...@@ -1466,10 +1456,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true); i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true); i40e_put_mac_in_vlan(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data); ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) { if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret; i40e_status ret;
...@@ -1632,6 +1622,52 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, ...@@ -1632,6 +1622,52 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
ctxt->info.valid_sections |= cpu_to_le16(sections); ctxt->info.valid_sections |= cpu_to_le16(sections);
} }
/**
* i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
* @netdev: the netdevice
* @addr: address to add
*
* Called by __dev_(mc|uc)_sync when an address needs to be added. We call
* __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
*/
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_mac_filter *f;
if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, addr);
else
f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
if (f)
return 0;
else
return -ENOMEM;
}
/**
* i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
* @netdev: the netdevice
* @addr: address to add
*
* Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
* __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
*/
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
if (i40e_is_vsi_in_vlan(vsi))
i40e_del_mac_all_vlan(vsi, addr);
else
i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
return 0;
}
/** /**
* i40e_set_rx_mode - NDO callback to set the netdev filters * i40e_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -1643,62 +1679,14 @@ static void i40e_set_rx_mode(struct net_device *netdev) ...@@ -1643,62 +1679,14 @@ static void i40e_set_rx_mode(struct net_device *netdev)
#endif #endif
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_mac_filter *f, *ftmp;
struct i40e_vsi *vsi = np->vsi; struct i40e_vsi *vsi = np->vsi;
struct netdev_hw_addr *uca;
struct netdev_hw_addr *mca;
struct netdev_hw_addr *ha;
spin_lock_bh(&vsi->mac_filter_list_lock);
/* add addr if not already in the filter list */
netdev_for_each_uc_addr(uca, netdev) {
if (!i40e_find_mac(vsi, uca->addr, false, true)) {
if (i40e_is_vsi_in_vlan(vsi))
i40e_put_mac_in_vlan(vsi, uca->addr,
false, true);
else
i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
false, true);
}
}
netdev_for_each_mc_addr(mca, netdev) { spin_lock_bh(&vsi->mac_filter_hash_lock);
if (!i40e_find_mac(vsi, mca->addr, false, true)) {
if (i40e_is_vsi_in_vlan(vsi))
i40e_put_mac_in_vlan(vsi, mca->addr,
false, true);
else
i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
false, true);
}
}
/* remove filter if not in netdev list */ __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
if (!f->is_netdev) spin_unlock_bh(&vsi->mac_filter_hash_lock);
continue;
netdev_for_each_mc_addr(mca, netdev)
if (ether_addr_equal(mca->addr, f->macaddr))
goto bottom_of_search_loop;
netdev_for_each_uc_addr(uca, netdev)
if (ether_addr_equal(uca->addr, f->macaddr))
goto bottom_of_search_loop;
for_each_dev_addr(netdev, ha)
if (ether_addr_equal(ha->addr, f->macaddr))
goto bottom_of_search_loop;
/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
bottom_of_search_loop:
continue;
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
/* check for other flag changes */ /* check for other flag changes */
if (vsi->current_netdev_flags != vsi->netdev->flags) { if (vsi->current_netdev_flags != vsi->netdev->flags) {
...@@ -1713,21 +1701,26 @@ static void i40e_set_rx_mode(struct net_device *netdev) ...@@ -1713,21 +1701,26 @@ static void i40e_set_rx_mode(struct net_device *netdev)
} }
/** /**
* i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
* @vsi: pointer to vsi struct * @vsi: Pointer to VSI struct
* @from: Pointer to list which contains MAC filter entries - changes to * @from: Pointer to list which contains MAC filter entries - changes to
* those entries needs to be undone. * those entries needs to be undone.
* *
* MAC filter entries from list were slated to be removed from device. * MAC filter entries from list were slated to be sent to firmware, either for
* addition or deletion.
**/ **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
struct list_head *from) struct hlist_head *from)
{ {
struct i40e_mac_filter *f, *ftmp; struct i40e_mac_filter *f;
struct hlist_node *h;
hlist_for_each_entry_safe(f, h, from, hlist) {
u64 key = i40e_addr_to_hkey(f->macaddr);
list_for_each_entry_safe(f, ftmp, from, list) {
/* Move the element back into MAC filter list*/ /* Move the element back into MAC filter list*/
list_move_tail(&f->list, &vsi->mac_filter_list); hlist_del(&f->hlist);
hash_add(vsi->mac_filter_hash, &f->hlist, key);
} }
} }
...@@ -1756,7 +1749,9 @@ i40e_update_filter_state(int count, ...@@ -1756,7 +1749,9 @@ i40e_update_filter_state(int count,
/* Everything's good, mark all filters active. */ /* Everything's good, mark all filters active. */
for (i = 0; i < count ; i++) { for (i = 0; i < count ; i++) {
add_head->state = I40E_FILTER_ACTIVE; add_head->state = I40E_FILTER_ACTIVE;
add_head = list_next_entry(add_head, list); add_head = hlist_entry(add_head->hlist.next,
typeof(struct i40e_mac_filter),
hlist);
} }
} else if (aq_err == I40E_AQ_RC_ENOSPC) { } else if (aq_err == I40E_AQ_RC_ENOSPC) {
/* Device ran out of filter space. Check the return value /* Device ran out of filter space. Check the return value
...@@ -1770,19 +1765,97 @@ i40e_update_filter_state(int count, ...@@ -1770,19 +1765,97 @@ i40e_update_filter_state(int count,
add_head->state = I40E_FILTER_ACTIVE; add_head->state = I40E_FILTER_ACTIVE;
retval++; retval++;
} }
add_head = list_next_entry(add_head, list); add_head = hlist_entry(add_head->hlist.next,
typeof(struct i40e_mac_filter),
hlist);
} }
} else { } else {
/* Some other horrible thing happened, fail all filters */ /* Some other horrible thing happened, fail all filters */
retval = 0; retval = 0;
for (i = 0; i < count ; i++) { for (i = 0; i < count ; i++) {
add_head->state = I40E_FILTER_FAILED; add_head->state = I40E_FILTER_FAILED;
add_head = list_next_entry(add_head, list); add_head = hlist_entry(add_head->hlist.next,
typeof(struct i40e_mac_filter),
hlist);
} }
} }
return retval; return retval;
} }
/**
* i40e_aqc_del_filters - Request firmware to delete a set of filters
* @vsi: ptr to the VSI
* @vsi_name: name to display in messages
* @list: the list of filters to send to firmware
* @num_del: the number of filters to delete
* @retval: Set to -EIO on failure to delete
*
* Send a request to firmware via AdminQ to delete a set of filters. Uses
* *retval instead of a return value so that success does not force ret_val to
* be set to 0. This ensures that a sequence of calls to this function
* preserve the previous value of *retval on successful delete.
*/
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_aqc_remove_macvlan_element_data *list,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
i40e_status aq_ret;
int aq_err;
aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
aq_err = hw->aq.asq_last_status;
/* Explicitly ignore and do not report when firmware returns ENOENT */
if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
"ignoring delete macvlan error on %s, err %s, aq_err %s\n",
vsi_name, i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, aq_err));
}
}
/**
* i40e_aqc_add_filters - Request firmware to add a set of filters
* @vsi: ptr to the VSI
* @vsi_name: name to display in messages
* @list: the list of filters to send to firmware
* @add_head: Position in the add hlist
* @num_add: the number of filters to add
* @promisc_change: set to true on exit if promiscuous mode was forced on
*
* Send a request to firmware via AdminQ to add a chunk of filters. Will set
* promisc_changed to true if the firmware has run out of space for more
* filters.
*/
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_aqc_add_macvlan_element_data *list,
struct i40e_mac_filter *add_head,
int num_add, bool *promisc_changed)
{
struct i40e_hw *hw = &vsi->back->hw;
i40e_status aq_ret;
int aq_err, fcnt;
aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
aq_err = hw->aq.asq_last_status;
fcnt = i40e_update_filter_state(num_add, list, add_head, aq_ret);
vsi->active_filters += fcnt;
if (fcnt != num_add) {
*promisc_changed = true;
set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err),
vsi_name);
}
}
/** /**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW * i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI * @vsi: ptr to the VSI
...@@ -1793,22 +1866,25 @@ i40e_update_filter_state(int count, ...@@ -1793,22 +1866,25 @@ i40e_update_filter_state(int count,
**/ **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi) int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{ {
struct i40e_mac_filter *f, *ftmp, *add_head = NULL; struct hlist_head tmp_add_list, tmp_del_list;
struct list_head tmp_add_list, tmp_del_list; struct i40e_mac_filter *f, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw; struct i40e_hw *hw = &vsi->back->hw;
unsigned int vlan_any_filters = 0;
unsigned int non_vlan_filters = 0;
unsigned int vlan_filters = 0;
bool promisc_changed = false; bool promisc_changed = false;
char vsi_name[16] = "PF"; char vsi_name[16] = "PF";
int filter_list_len = 0; int filter_list_len = 0;
u32 changed_flags = 0;
i40e_status aq_ret = 0; i40e_status aq_ret = 0;
int retval = 0; u32 changed_flags = 0;
struct hlist_node *h;
struct i40e_pf *pf; struct i40e_pf *pf;
int num_add = 0; int num_add = 0;
int num_del = 0; int num_del = 0;
int aq_err = 0; int retval = 0;
u16 cmd_flags; u16 cmd_flags;
int list_size; int list_size;
int fcnt; int bkt;
/* empty array typed pointers, kcalloc later */ /* empty array typed pointers, kcalloc later */
struct i40e_aqc_add_macvlan_element_data *add_list; struct i40e_aqc_add_macvlan_element_data *add_list;
...@@ -1823,8 +1899,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -1823,8 +1899,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi->current_netdev_flags = vsi->netdev->flags; vsi->current_netdev_flags = vsi->netdev->flags;
} }
INIT_LIST_HEAD(&tmp_add_list); INIT_HLIST_HEAD(&tmp_add_list);
INIT_LIST_HEAD(&tmp_del_list); INIT_HLIST_HEAD(&tmp_del_list);
if (vsi->type == I40E_VSI_SRIOV) if (vsi->type == I40E_VSI_SRIOV)
snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
...@@ -1834,41 +1910,98 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -1834,41 +1910,98 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
/* Create a list of filters to delete. */ /* Create a list of filters to delete. */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (f->state == I40E_FILTER_REMOVE) { if (f->state == I40E_FILTER_REMOVE) {
WARN_ON(f->counter != 0);
/* Move the element into temporary del_list */ /* Move the element into temporary del_list */
list_move_tail(&f->list, &tmp_del_list); hash_del(&f->hlist);
hlist_add_head(&f->hlist, &tmp_del_list);
vsi->active_filters--; vsi->active_filters--;
/* Avoid counting removed filters */
continue;
} }
if (f->state == I40E_FILTER_NEW) { if (f->state == I40E_FILTER_NEW) {
WARN_ON(f->counter == 0); hash_del(&f->hlist);
/* Move the element into temporary add_list */ hlist_add_head(&f->hlist, &tmp_add_list);
list_move_tail(&f->list, &tmp_add_list);
} }
/* Count the number of each type of filter we have
* remaining, ignoring any filters we're about to
* delete.
*/
if (f->vlan > 0)
vlan_filters++;
else if (!f->vlan)
non_vlan_filters++;
else
vlan_any_filters++;
}
/* We should never have VLAN=-1 filters at the same time as we
* have either VLAN=0 or VLAN>0 filters, so warn about this
* case here to help catch any issues.
*/
WARN_ON(vlan_any_filters && (vlan_filters + non_vlan_filters));
/* If we only have VLAN=0 filters remaining, and don't have
* any other VLAN filters, we need to convert these VLAN=0
* filters into VLAN=-1 (I40E_VLAN_ANY) so that we operate
* correctly in non-VLAN mode and receive all traffic tagged
* or untagged.
*/
if (non_vlan_filters && !vlan_filters) {
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
hlist) {
/* Only replace VLAN=0 filters */
if (f->vlan)
continue;
/* Allocate a replacement element */
add_head = kzalloc(sizeof(*add_head),
GFP_KERNEL);
if (!add_head)
goto err_no_memory_locked;
/* Copy the filter, with new state and VLAN */
*add_head = *f;
add_head->state = I40E_FILTER_NEW;
add_head->vlan = I40E_VLAN_ANY;
/* Move the replacement to the add list */
INIT_HLIST_NODE(&add_head->hlist);
hlist_add_head(&add_head->hlist,
&tmp_add_list);
/* Move the original to the delete list */
f->state = I40E_FILTER_REMOVE;
hash_del(&f->hlist);
hlist_add_head(&f->hlist, &tmp_del_list);
vsi->active_filters--;
}
/* Also update any filters on the tmp_add list */
hlist_for_each_entry(f, &tmp_add_list, hlist) {
if (!f->vlan)
f->vlan = I40E_VLAN_ANY;
}
add_head = NULL;
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
} }
/* Now process 'del_list' outside the lock */ /* Now process 'del_list' outside the lock */
if (!list_empty(&tmp_del_list)) { if (!hlist_empty(&tmp_del_list)) {
filter_list_len = hw->aq.asq_buf_size / filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data); sizeof(struct i40e_aqc_remove_macvlan_element_data);
list_size = filter_list_len * list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data); sizeof(struct i40e_aqc_remove_macvlan_element_data);
del_list = kzalloc(list_size, GFP_ATOMIC); del_list = kzalloc(list_size, GFP_ATOMIC);
if (!del_list) { if (!del_list)
/* Undo VSI's MAC filter entry element updates */ goto err_no_memory;
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_undo_del_filter_entries(vsi, &tmp_del_list);
spin_unlock_bh(&vsi->mac_filter_list_lock);
retval = -ENOMEM;
goto out;
}
list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) { hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
cmd_flags = 0; cmd_flags = 0;
/* add to delete list */ /* add to delete list */
...@@ -1887,68 +2020,47 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -1887,68 +2020,47 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */ /* flush a full buffer */
if (num_del == filter_list_len) { if (num_del == filter_list_len) {
aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, i40e_aqc_del_filters(vsi, vsi_name, del_list,
del_list, num_del, &retval);
num_del, NULL);
aq_err = hw->aq.asq_last_status;
num_del = 0;
memset(del_list, 0, list_size); memset(del_list, 0, list_size);
num_del = 0;
/* Explicitly ignore and do not report when
* firmware returns ENOENT.
*/
if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
retval = -EIO;
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error on %s, err %s, aq_err %s\n",
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, aq_err));
}
} }
/* Release memory for MAC filter entries which were /* Release memory for MAC filter entries which were
* synced up with HW. * synced up with HW.
*/ */
list_del(&f->list); hlist_del(&f->hlist);
kfree(f); kfree(f);
} }
if (num_del) { if (num_del) {
aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, i40e_aqc_del_filters(vsi, vsi_name, del_list,
num_del, NULL); num_del, &retval);
aq_err = hw->aq.asq_last_status;
num_del = 0;
/* Explicitly ignore and do not report when firmware
* returns ENOENT.
*/
if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
retval = -EIO;
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error on %s, err %s aq_err %s\n",
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, aq_err));
}
} }
kfree(del_list); kfree(del_list);
del_list = NULL; del_list = NULL;
} }
if (!list_empty(&tmp_add_list)) { /* After finishing notifying firmware of the deleted filters, update
* the cached value of vsi->has_vlan_filter. Note that we are safe to
* use just !!vlan_filters here because if we only have VLAN=0 (that
* is, non_vlan_filters) these will all be converted to VLAN=-1 in the
* logic above already so this value would still be correct.
*/
vsi->has_vlan_filter = !!vlan_filters;
if (!hlist_empty(&tmp_add_list)) {
/* Do all the adds now. */ /* Do all the adds now. */
filter_list_len = hw->aq.asq_buf_size / filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data); sizeof(struct i40e_aqc_add_macvlan_element_data);
list_size = filter_list_len * list_size = filter_list_len *
sizeof(struct i40e_aqc_add_macvlan_element_data); sizeof(struct i40e_aqc_add_macvlan_element_data);
add_list = kzalloc(list_size, GFP_ATOMIC); add_list = kzalloc(list_size, GFP_ATOMIC);
if (!add_list) { if (!add_list)
retval = -ENOMEM; goto err_no_memory;
goto out;
}
num_add = 0; num_add = 0;
list_for_each_entry(f, &tmp_add_list, list) { hlist_for_each_entry(f, &tmp_add_list, hlist) {
if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state)) { &vsi->state)) {
f->state = I40E_FILTER_FAILED; f->state = I40E_FILTER_FAILED;
...@@ -1973,57 +2085,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -1973,57 +2085,28 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */ /* flush a full buffer */
if (num_add == filter_list_len) { if (num_add == filter_list_len) {
aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, i40e_aqc_add_filters(vsi, vsi_name, add_list,
add_list, num_add, add_head, num_add,
NULL); &promisc_changed);
aq_err = hw->aq.asq_last_status;
fcnt = i40e_update_filter_state(num_add,
add_list,
add_head,
aq_ret);
vsi->active_filters += fcnt;
if (fcnt != num_add) {
promisc_changed = true;
set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state);
vsi->promisc_threshold =
(vsi->active_filters * 3) / 4;
dev_warn(&pf->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err),
vsi_name);
}
memset(add_list, 0, list_size); memset(add_list, 0, list_size);
num_add = 0; num_add = 0;
} }
} }
if (num_add) { if (num_add) {
aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
add_list, num_add, NULL); num_add, &promisc_changed);
aq_err = hw->aq.asq_last_status;
fcnt = i40e_update_filter_state(num_add, add_list,
add_head, aq_ret);
vsi->active_filters += fcnt;
if (fcnt != num_add) {
promisc_changed = true;
set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state);
vsi->promisc_threshold =
(vsi->active_filters * 3) / 4;
dev_warn(&pf->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
i40e_aq_str(hw, aq_err), vsi_name);
}
} }
/* Now move all of the filters from the temp add list back to /* Now move all of the filters from the temp add list back to
* the VSI's list. * the VSI's list.
*/ */
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
list_move_tail(&f->list, &vsi->mac_filter_list); u64 key = i40e_addr_to_hkey(f->macaddr);
hlist_del(&f->hlist);
hash_add(vsi->mac_filter_hash, &f->hlist, key);
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
kfree(add_list); kfree(add_list);
add_list = NULL; add_list = NULL;
} }
...@@ -2035,12 +2118,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2035,12 +2118,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* See if we have any failed filters. We can't drop out of /* See if we have any failed filters. We can't drop out of
* promiscuous until these have all been deleted. * promiscuous until these have all been deleted.
*/ */
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
list_for_each_entry(f, &vsi->mac_filter_list, list) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->state == I40E_FILTER_FAILED) if (f->state == I40E_FILTER_FAILED)
failed_count++; failed_count++;
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (!failed_count) { if (!failed_count) {
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n", "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
...@@ -2168,6 +2251,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2168,6 +2251,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
clear_bit(__I40E_CONFIG_BUSY, &vsi->state); clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
return retval; return retval;
err_no_memory:
/* Restore elements on the temporary add and delete lists */
spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
i40e_undo_filter_entries(vsi, &tmp_del_list);
i40e_undo_filter_entries(vsi, &tmp_add_list);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
return -ENOMEM;
} }
/** /**
...@@ -2322,34 +2417,33 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) ...@@ -2322,34 +2417,33 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
**/ **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{ {
struct i40e_mac_filter *f, *ftmp, *add_f; struct i40e_mac_filter *f, *add_f, *del_f;
bool is_netdev, is_vf; struct hlist_node *h;
int bkt;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
/* Locked once because all functions invoked below iterates list*/ /* Locked once because all functions invoked below iterates list*/
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_netdev) { if (vsi->netdev) {
add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid, add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid);
is_vf, is_netdev);
if (!add_f) { if (!add_f) {
dev_info(&vsi->back->pdev->dev, dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n", "Could not add vlan filter %d for %pM\n",
vid, vsi->netdev->dev_addr); vid, vsi->netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM; return -ENOMEM;
} }
} }
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); if (f->state == I40E_FILTER_REMOVE)
continue;
add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) { if (!add_f) {
dev_info(&vsi->back->pdev->dev, dev_info(&vsi->back->pdev->dev,
"Could not add vlan filter %d for %pM\n", "Could not add vlan filter %d for %pM\n",
vid, f->macaddr); vid, f->macaddr);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM; return -ENOMEM;
} }
} }
...@@ -2359,19 +2453,17 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) ...@@ -2359,19 +2453,17 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
* with 0, so we now accept untagged and specified tagged traffic * with 0, so we now accept untagged and specified tagged traffic
* (and not all tags along with untagged) * (and not all tags along with untagged)
*/ */
if (vid > 0) { if (vid > 0 && vsi->netdev) {
if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, del_f = i40e_find_filter(vsi, vsi->netdev->dev_addr,
I40E_VLAN_ANY, I40E_VLAN_ANY);
is_vf, is_netdev)) { if (del_f) {
i40e_del_filter(vsi, vsi->netdev->dev_addr, __i40e_del_filter(vsi, del_f);
I40E_VLAN_ANY, is_vf, is_netdev); add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0);
add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
is_vf, is_netdev);
if (!add_f) { if (!add_f) {
dev_info(&vsi->back->pdev->dev, dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n", "Could not add filter 0 for %pM\n",
vsi->netdev->dev_addr); vsi->netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM; return -ENOMEM;
} }
} }
...@@ -2379,25 +2471,26 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) ...@@ -2379,25 +2471,26 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) { if (vid > 0 && !vsi->info.pvid) {
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, if (f->state == I40E_FILTER_REMOVE)
is_vf, is_netdev))
continue; continue;
i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, del_f = i40e_find_filter(vsi, f->macaddr,
is_vf, is_netdev); I40E_VLAN_ANY);
add_f = i40e_add_filter(vsi, f->macaddr, if (!del_f)
0, is_vf, is_netdev); continue;
__i40e_del_filter(vsi, del_f);
add_f = i40e_add_filter(vsi, f->macaddr, 0);
if (!add_f) { if (!add_f) {
dev_info(&vsi->back->pdev->dev, dev_info(&vsi->back->pdev->dev,
"Could not add filter 0 for %pM\n", "Could not add filter 0 for %pM\n",
f->macaddr); f->macaddr);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
return -ENOMEM; return -ENOMEM;
} }
} }
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of /* schedule our worker thread which will take care of
* applying the new filter changes * applying the new filter changes
...@@ -2410,79 +2503,31 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) ...@@ -2410,79 +2503,31 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
* i40e_vsi_kill_vlan - Remove vsi membership for given vlan * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
* @vsi: the vsi being configured * @vsi: the vsi being configured
* @vid: vlan id to be removed (0 = untagged only , -1 = any) * @vid: vlan id to be removed (0 = untagged only , -1 = any)
*
* Return: 0 on success or negative otherwise
**/ **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{ {
struct net_device *netdev = vsi->netdev; struct net_device *netdev = vsi->netdev;
struct i40e_mac_filter *f, *ftmp, *add_f; struct i40e_mac_filter *f;
bool is_vf, is_netdev; struct hlist_node *h;
int filter_count = 0; int bkt;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
/* Locked once because all functions invoked below iterates list */ /* Locked once because all functions invoked below iterates list */
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_netdev)
i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
/* go through all the filters for this VSI and if there is only if (vsi->netdev)
* vid == 0 it means there are no other filters, so vid 0 must i40e_del_filter(vsi, netdev->dev_addr, vid);
* be replaced with -1. This signifies that we should from now
* on accept any traffic (with any tag present, or untagged)
*/
list_for_each_entry(f, &vsi->mac_filter_list, list) {
if (is_netdev) {
if (f->vlan &&
ether_addr_equal(netdev->dev_addr, f->macaddr))
filter_count++;
}
if (f->vlan)
filter_count++;
}
if (!filter_count && is_netdev) {
i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
is_vf, is_netdev);
if (!f) {
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
if (!filter_count) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (f->vlan == vid)
i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); __i40e_del_filter(vsi, f);
add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
"Could not add filter %d for %pM\n",
I40E_VLAN_ANY, f->macaddr);
spin_unlock_bh(&vsi->mac_filter_list_lock);
return -ENOMEM;
}
}
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* schedule our worker thread which will take care of /* schedule our worker thread which will take care of
* applying the new filter changes * applying the new filter changes
*/ */
i40e_service_event_schedule(vsi->back); i40e_service_event_schedule(vsi->back);
return 0;
} }
/** /**
...@@ -3969,29 +4014,35 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) ...@@ -3969,29 +4014,35 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
} }
/** /**
* i40e_vsi_control_rings - Start or stop a VSI's rings * i40e_vsi_start_rings - Start a VSI's rings
* @vsi: the VSI being configured * @vsi: the VSI being configured
* @enable: start or stop the rings
**/ **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request) int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{ {
int ret = 0; int ret = 0;
/* do rx first for enable and last for disable */ /* do rx first for enable and last for disable */
if (request) { ret = i40e_vsi_control_rx(vsi, true);
ret = i40e_vsi_control_rx(vsi, request); if (ret)
if (ret) return ret;
return ret; ret = i40e_vsi_control_tx(vsi, true);
ret = i40e_vsi_control_tx(vsi, request);
} else {
/* Ignore return value, we need to shutdown whatever we can */
i40e_vsi_control_tx(vsi, request);
i40e_vsi_control_rx(vsi, request);
}
return ret; return ret;
} }
/**
* i40e_vsi_stop_rings - Stop a VSI's rings
* @vsi: the VSI being configured
**/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
/* do rx first for enable and last for disable
* Ignore return value, we need to shutdown whatever we can
*/
i40e_vsi_control_tx(vsi, false);
i40e_vsi_control_rx(vsi, false);
}
/** /**
* i40e_vsi_free_irq - Free the irq association with the OS * i40e_vsi_free_irq - Free the irq association with the OS
* @vsi: the VSI being configured * @vsi: the VSI being configured
...@@ -5190,7 +5241,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi) ...@@ -5190,7 +5241,7 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
i40e_configure_msi_and_legacy(vsi); i40e_configure_msi_and_legacy(vsi);
/* start rings */ /* start rings */
err = i40e_vsi_control_rings(vsi, true); err = i40e_vsi_start_rings(vsi);
if (err) if (err)
return err; return err;
...@@ -5287,7 +5338,7 @@ void i40e_down(struct i40e_vsi *vsi) ...@@ -5287,7 +5338,7 @@ void i40e_down(struct i40e_vsi *vsi)
netif_tx_disable(vsi->netdev); netif_tx_disable(vsi->netdev);
} }
i40e_vsi_disable_irq(vsi); i40e_vsi_disable_irq(vsi);
i40e_vsi_control_rings(vsi, false); i40e_vsi_stop_rings(vsi);
i40e_napi_disable_all(vsi); i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) { for (i = 0; i < vsi->num_queue_pairs; i++) {
...@@ -6670,7 +6721,6 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi); ...@@ -6670,7 +6721,6 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf) static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{ {
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
int i;
/* quick workaround for an NVM issue that leaves a critical register /* quick workaround for an NVM issue that leaves a critical register
* uninitialized * uninitialized
...@@ -6681,6 +6731,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) ...@@ -6681,6 +6731,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb, 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21, 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
0x95b3a76d}; 0x95b3a76d};
int i;
for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++) for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
...@@ -6690,13 +6741,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) ...@@ -6690,13 +6741,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
return; return;
/* find existing VSI and see if it needs configuring */ /* find existing VSI and see if it needs configuring */
vsi = NULL; vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
break;
}
}
/* create a new VSI if none exists */ /* create a new VSI if none exists */
if (!vsi) { if (!vsi) {
...@@ -6718,15 +6763,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) ...@@ -6718,15 +6763,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
**/ **/
static void i40e_fdir_teardown(struct i40e_pf *pf) static void i40e_fdir_teardown(struct i40e_pf *pf)
{ {
int i; struct i40e_vsi *vsi;
i40e_fdir_filter_exit(pf); i40e_fdir_filter_exit(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) { vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { if (vsi)
i40e_vsi_release(pf->vsi[i]); i40e_vsi_release(vsi);
break;
}
}
} }
/** /**
...@@ -7354,7 +7396,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) ...@@ -7354,7 +7396,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
pf->rss_table_size : 64; pf->rss_table_size : 64;
vsi->netdev_registered = false; vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK; vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list); hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false; vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi); ret = i40e_set_num_rings_in_vsi(vsi);
...@@ -7369,7 +7411,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) ...@@ -7369,7 +7411,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings); i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
/* Initialize VSI lock */ /* Initialize VSI lock */
spin_lock_init(&vsi->mac_filter_list_lock); spin_lock_init(&vsi->mac_filter_hash_lock);
pf->vsi[vsi_idx] = vsi; pf->vsi[vsi_idx] = vsi;
ret = vsi_idx; ret = vsi_idx;
goto unlock_pf; goto unlock_pf;
...@@ -9154,18 +9196,18 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ...@@ -9154,18 +9196,18 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (vsi->type == I40E_VSI_MAIN) { if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev); SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr); ether_addr_copy(mac_addr, hw->mac.perm_addr);
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
} else { } else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */ /* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d", snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
pf->vsi[pf->lan_vsi]->netdev->name); pf->vsi[pf->lan_vsi]->netdev->name);
random_ether_addr(mac_addr); random_ether_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false); i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
} }
ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->dev_addr, mac_addr);
...@@ -9254,7 +9296,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -9254,7 +9296,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt; struct i40e_vsi_context ctxt;
struct i40e_mac_filter *f, *ftmp; struct i40e_mac_filter *f;
struct hlist_node *h;
int bkt;
u8 enabled_tc = 0x1; /* TC0 enabled */ u8 enabled_tc = 0x1; /* TC0 enabled */
int f_count = 0; int f_count = 0;
...@@ -9453,13 +9497,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -9453,13 +9497,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->active_filters = 0; vsi->active_filters = 0;
clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
/* If macvlan filters already exist, force them to get loaded */ /* If macvlan filters already exist, force them to get loaded */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW; f->state = I40E_FILTER_NEW;
f_count++; f_count++;
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (f_count) { if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
...@@ -9489,11 +9533,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -9489,11 +9533,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
**/ **/
int i40e_vsi_release(struct i40e_vsi *vsi) int i40e_vsi_release(struct i40e_vsi *vsi)
{ {
struct i40e_mac_filter *f, *ftmp; struct i40e_mac_filter *f;
struct hlist_node *h;
struct i40e_veb *veb = NULL; struct i40e_veb *veb = NULL;
struct i40e_pf *pf; struct i40e_pf *pf;
u16 uplink_seid; u16 uplink_seid;
int i, n; int i, n, bkt;
pf = vsi->back; pf = vsi->back;
...@@ -9523,11 +9568,19 @@ int i40e_vsi_release(struct i40e_vsi *vsi) ...@@ -9523,11 +9568,19 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
i40e_vsi_disable_irq(vsi); i40e_vsi_disable_irq(vsi);
} }
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, f->vlan, /* clear the sync flag on all filters */
f->is_vf, f->is_netdev); if (vsi->netdev) {
spin_unlock_bh(&vsi->mac_filter_list_lock); __dev_uc_unsync(vsi->netdev, NULL);
__dev_mc_unsync(vsi->netdev, NULL);
}
/* make sure any remaining filters are marked for deletion */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
__i40e_del_filter(vsi, f);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_sync_vsi_filters(vsi); i40e_sync_vsi_filters(vsi);
......
...@@ -722,9 +722,20 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, ...@@ -722,9 +722,20 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode; *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
} }
/* Clear error status on read */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
return 0; return 0;
} }
/* Clear status even it is not read and log */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
i40e_debug(hw, I40E_DEBUG_NVM,
"Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
switch (hw->nvmupd_state) { switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT: case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
...@@ -1074,6 +1085,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) ...@@ -1074,6 +1085,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
} }
hw->nvm_wait_opcode = 0; hw->nvm_wait_opcode = 0;
if (hw->aq.arq_last_status) {
hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
return;
}
switch (hw->nvmupd_state) { switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT: case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
......
...@@ -159,16 +159,15 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) ...@@ -159,16 +159,15 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{ {
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct timespec64 now, then; struct timespec64 now, then;
unsigned long flags;
then = ns_to_timespec64(delta); then = ns_to_timespec64(delta);
spin_lock_irqsave(&pf->tmreg_lock, flags); mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now); i40e_ptp_read(pf, &now);
now = timespec64_add(now, then); now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now); i40e_ptp_write(pf, (const struct timespec64 *)&now);
spin_unlock_irqrestore(&pf->tmreg_lock, flags); mutex_unlock(&pf->tmreg_lock);
return 0; return 0;
} }
...@@ -184,11 +183,10 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) ...@@ -184,11 +183,10 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{ {
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags); mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, ts); i40e_ptp_read(pf, ts);
spin_unlock_irqrestore(&pf->tmreg_lock, flags); mutex_unlock(&pf->tmreg_lock);
return 0; return 0;
} }
...@@ -205,11 +203,10 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp, ...@@ -205,11 +203,10 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts) const struct timespec64 *ts)
{ {
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
unsigned long flags;
spin_lock_irqsave(&pf->tmreg_lock, flags); mutex_lock(&pf->tmreg_lock);
i40e_ptp_write(pf, ts); i40e_ptp_write(pf, ts);
spin_unlock_irqrestore(&pf->tmreg_lock, flags); mutex_unlock(&pf->tmreg_lock);
return 0; return 0;
} }
...@@ -229,6 +226,47 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, ...@@ -229,6 +226,47 @@ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
/**
* i40e_ptp_update_latch_events - Read I40E_PRTTSYN_STAT_1 and latch events
* @pf: the PF data structure
*
* This function reads I40E_PRTTSYN_STAT_1 and updates the corresponding timers
* for noticed latch events. This allows the driver to keep track of the first
* time a latch event was noticed which will be used to help clear out Rx
* timestamps for packets that got dropped or lost.
*
* This function will return the current value of I40E_PRTTSYN_STAT_1 and is
* expected to be called only while under the ptp_rx_lock.
**/
static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 prttsyn_stat, new_latch_events;
int i;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
new_latch_events = prttsyn_stat & ~pf->latch_event_flags;
/* Update the jiffies time for any newly latched timestamp. This
* ensures that we store the time that we first discovered a timestamp
* was latched by the hardware. The service task will later determine
* if we should free the latch and drop that timestamp should too much
* time pass. This flow ensures that we only update jiffies for new
* events latched since the last time we checked, and not all events
* currently latched, so that the service task accounting remains
* accurate.
*/
for (i = 0; i < 4; i++) {
if (new_latch_events & BIT(i))
pf->latch_events[i] = jiffies;
}
/* Finally, we store the current status of the Rx timestamp latches */
pf->latch_event_flags = prttsyn_stat;
return prttsyn_stat;
}
/** /**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung * i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @vsi: The VSI with the rings relevant to 1588 * @vsi: The VSI with the rings relevant to 1588
...@@ -242,10 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) ...@@ -242,10 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
struct i40e_ring *rx_ring; int i;
unsigned long rx_event;
u32 prttsyn_stat;
int n;
/* Since we cannot turn off the Rx timestamp logic if the device is /* Since we cannot turn off the Rx timestamp logic if the device is
* configured for Tx timestamping, we check if Rx timestamping is * configured for Tx timestamping, we check if Rx timestamping is
...@@ -255,42 +290,30 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) ...@@ -255,42 +290,30 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx)
return; return;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); spin_lock_bh(&pf->ptp_rx_lock);
/* Unless all four receive timestamp registers are latched, we are not /* Update current latch times for Rx events */
* concerned about a possible PTP Rx hang, so just update the timeout i40e_ptp_get_rx_events(pf);
* counter and exit.
*/
if (!(prttsyn_stat & ((I40E_PRTTSYN_STAT_1_RXT0_MASK <<
I40E_PRTTSYN_STAT_1_RXT0_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT1_MASK <<
I40E_PRTTSYN_STAT_1_RXT1_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT2_MASK <<
I40E_PRTTSYN_STAT_1_RXT2_SHIFT) |
(I40E_PRTTSYN_STAT_1_RXT3_MASK <<
I40E_PRTTSYN_STAT_1_RXT3_SHIFT)))) {
pf->last_rx_ptp_check = jiffies;
return;
}
/* Determine the most recent watchdog or rx_timestamp event. */ /* Check all the currently latched Rx events and see whether they have
rx_event = pf->last_rx_ptp_check; * been latched for over a second. It is assumed that any timestamp
for (n = 0; n < vsi->num_queue_pairs; n++) { * should have been cleared within this time, or else it was captured
rx_ring = vsi->rx_rings[n]; * for a dropped frame that the driver never received. Thus, we will
if (time_after(rx_ring->last_rx_timestamp, rx_event)) * clear any timestamp that has been latched for over 1 second.
rx_event = rx_ring->last_rx_timestamp; */
for (i = 0; i < 4; i++) {
if ((pf->latch_event_flags & BIT(i)) &&
time_is_before_jiffies(pf->latch_events[i] + HZ)) {
rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
pf->latch_event_flags &= ~BIT(i);
pf->rx_hwtstamp_cleared++;
dev_warn(&pf->pdev->dev,
"Clearing a missed Rx timestamp event for RXTIME[%d]\n",
i);
}
} }
/* Only need to read the high RXSTMP register to clear the lock */ spin_unlock_bh(&pf->ptp_rx_lock);
if (time_is_before_jiffies(rx_event + 5 * HZ)) {
rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->last_rx_ptp_check = jiffies;
pf->rx_hwtstamp_cleared++;
WARN_ONCE(1, "Detected Rx timestamp register hang\n");
}
} }
/** /**
...@@ -353,14 +376,25 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) ...@@ -353,14 +376,25 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
hw = &pf->hw; hw = &pf->hw;
prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1); spin_lock_bh(&pf->ptp_rx_lock);
if (!(prttsyn_stat & BIT(index))) /* Get current Rx events and update latch times */
prttsyn_stat = i40e_ptp_get_rx_events(pf);
/* TODO: Should we warn about missing Rx timestamp event? */
if (!(prttsyn_stat & BIT(index))) {
spin_unlock_bh(&pf->ptp_rx_lock);
return; return;
}
/* Clear the latched event since we're about to read its register */
pf->latch_event_flags &= ~BIT(index);
lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index)); lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index)); hi = rd32(hw, I40E_PRTTSYN_RXTIME_H(index));
spin_unlock_bh(&pf->ptp_rx_lock);
ns = (((u64)hi) << 32) | lo; ns = (((u64)hi) << 32) | lo;
i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns); i40e_ptp_convert_to_hwtstamp(skb_hwtstamps(skb), ns);
...@@ -514,12 +548,15 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, ...@@ -514,12 +548,15 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
} }
/* Clear out all 1588-related registers to clear and unlatch them. */ /* Clear out all 1588-related registers to clear and unlatch them. */
spin_lock_bh(&pf->ptp_rx_lock);
rd32(hw, I40E_PRTTSYN_STAT_0); rd32(hw, I40E_PRTTSYN_STAT_0);
rd32(hw, I40E_PRTTSYN_TXTIME_H); rd32(hw, I40E_PRTTSYN_TXTIME_H);
rd32(hw, I40E_PRTTSYN_RXTIME_H(0)); rd32(hw, I40E_PRTTSYN_RXTIME_H(0));
rd32(hw, I40E_PRTTSYN_RXTIME_H(1)); rd32(hw, I40E_PRTTSYN_RXTIME_H(1));
rd32(hw, I40E_PRTTSYN_RXTIME_H(2)); rd32(hw, I40E_PRTTSYN_RXTIME_H(2));
rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->latch_event_flags = 0;
spin_unlock_bh(&pf->ptp_rx_lock);
/* Enable/disable the Tx timestamp interrupt based on user input. */ /* Enable/disable the Tx timestamp interrupt based on user input. */
regval = rd32(hw, I40E_PRTTSYN_CTL0); regval = rd32(hw, I40E_PRTTSYN_CTL0);
...@@ -658,10 +695,8 @@ void i40e_ptp_init(struct i40e_pf *pf) ...@@ -658,10 +695,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
return; return;
} }
/* we have to initialize the lock first, since we can't control mutex_init(&pf->tmreg_lock);
* when the user will enter the PHC device entry points spin_lock_init(&pf->ptp_rx_lock);
*/
spin_lock_init(&pf->tmreg_lock);
/* ensure we have a clock device */ /* ensure we have a clock device */
err = i40e_ptp_create_clock(pf); err = i40e_ptp_create_clock(pf);
......
...@@ -125,10 +125,7 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, ...@@ -125,10 +125,7 @@ static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
u16 i; u16 i;
/* find existing FDIR VSI */ /* find existing FDIR VSI */
vsi = NULL; vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
if (!vsi) if (!vsi)
return -ENOENT; return -ENOENT;
...@@ -619,7 +616,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) ...@@ -619,7 +616,7 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0; return 0;
} }
#define WB_STRIDE 0x3 #define WB_STRIDE 4
/** /**
* i40e_clean_tx_irq - Reclaim resources after transmit completes * i40e_clean_tx_irq - Reclaim resources after transmit completes
...@@ -735,7 +732,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -735,7 +732,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40e_get_tx_pending(tx_ring, false); unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget && if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) && ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) && !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true; tx_ring->arm_wb = true;
...@@ -1410,13 +1407,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring, ...@@ -1410,13 +1407,12 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
I40E_RXD_QW1_STATUS_SHIFT; I40E_RXD_QW1_STATUS_SHIFT;
u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
if (unlikely(rsyn)) { if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn); i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
rx_ring->last_rx_timestamp = jiffies;
}
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
...@@ -2704,9 +2700,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2704,9 +2700,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0; u32 td_tag = 0;
dma_addr_t dma; dma_addr_t dma;
u16 gso_segs; u16 gso_segs;
u16 desc_count = 0; u16 desc_count = 1;
bool tail_bump = true;
bool do_rs = false;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
...@@ -2789,8 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2789,8 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i]; tx_bi = &tx_ring->tx_bi[i];
} }
/* set next_to_watch value indicating a packet is present */ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
first->next_to_watch = tx_desc;
i++; i++;
if (i == tx_ring->count) if (i == tx_ring->count)
...@@ -2798,66 +2791,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2798,66 +2791,72 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* write last descriptor with EOP bit */
td_cmd |= I40E_TX_DESC_CMD_EOP;
/* We can OR these values together as they both are checked against
* 4 below and at this point desc_count will be used as a boolean value
* after this if/else block.
*/
desc_count |= ++tx_ring->packet_stride;
/* Algorithm to optimize tail and RS bit setting: /* Algorithm to optimize tail and RS bit setting:
* if xmit_more is supported * if queue is stopped
* if xmit_more is true * mark RS bit
* do not update tail and do not mark RS bit. * reset packet counter
* if xmit_more is false and last xmit_more was false * else if xmit_more is supported and is true
* if every packet spanned less than 4 desc * advance packet counter to 4
* then set RS bit on 4th packet and update tail * reset desc_count to 0
* on every packet
* else
* update tail and set RS bit on every packet.
* if xmit_more is false and last_xmit_more was true
* update tail and set RS bit.
* *
* Optimization: wmb to be issued only in case of tail update. * if desc_count >= 4
* Also optimize the Descriptor WB path for RS bit with the same * mark RS bit
* algorithm. * reset packet counter
* if desc_count > 0
* update tail
* *
* Note: If there are less than 4 packets * Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will * pending and interrupts were disabled the service task will
* trigger a force WB. * trigger a force WB.
*/ */
if (skb->xmit_more && if (netif_xmit_stopped(txring_txq(tx_ring))) {
!netif_xmit_stopped(txring_txq(tx_ring))) { goto do_rs;
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; } else if (skb->xmit_more) {
tail_bump = false; /* set stride to arm on next packet and reset desc_count */
} else if (!skb->xmit_more && tx_ring->packet_stride = WB_STRIDE;
!netif_xmit_stopped(txring_txq(tx_ring)) && desc_count = 0;
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && } else if (desc_count >= WB_STRIDE) {
(tx_ring->packet_stride < WB_STRIDE) && do_rs:
(desc_count < WB_STRIDE)) { /* write last descriptor with RS bit set */
tx_ring->packet_stride++; td_cmd |= I40E_TX_DESC_CMD_RS;
} else {
tx_ring->packet_stride = 0; tx_ring->packet_stride = 0;
tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
do_rs = true;
} }
if (do_rs)
tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag) | build_ctob(td_cmd, td_offset, size, td_tag);
cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
I40E_TX_DESC_CMD_EOP) << /* Force memory writes to complete before letting h/w know there
I40E_TXD_QW1_CMD_SHIFT); * are new descriptors to fetch.
*
* We also use this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
/* notify HW of packet */ /* notify HW of packet */
if (!tail_bump) { if (desc_count) {
prefetchw(tx_desc + 1);
} else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(i, tx_ring->tail); writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
} }
return; return;
dma_error: dma_error:
......
...@@ -307,15 +307,12 @@ struct i40e_ring { ...@@ -307,15 +307,12 @@ struct i40e_ring {
u8 atr_sample_rate; u8 atr_sample_rate;
u8 atr_count; u8 atr_count;
unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */ bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */ bool arm_wb; /* do something to arm write back */
u8 packet_stride; u8 packet_stride;
u16 flags; u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
/* stats structs */ /* stats structs */
struct i40e_queue_stats stats; struct i40e_queue_stats stats;
......
...@@ -366,6 +366,7 @@ enum i40e_nvmupd_state { ...@@ -366,6 +366,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING, I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT, I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT, I40E_NVMUPD_STATE_WRITE_WAIT,
I40E_NVMUPD_STATE_ERROR
}; };
/* nvm_access definition and its masks/shifts need to be accessible to /* nvm_access definition and its masks/shifts need to be accessible to
......
...@@ -686,17 +686,17 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) ...@@ -686,17 +686,17 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (vf->port_vlan_id) if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id); i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
if (is_valid_ether_addr(vf->default_lan_addr.addr)) { if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
f = i40e_add_filter(vsi, vf->default_lan_addr.addr, f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1, vf->port_vlan_id ?
true, false); vf->port_vlan_id : -1);
if (!f) if (!f)
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"Could not add MAC filter %pM for VF %d\n", "Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id); vf->default_lan_addr.addr, vf->vf_id);
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
(u32)hena); (u32)hena);
i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
...@@ -811,6 +811,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf) ...@@ -811,6 +811,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0; vf->lan_vsi_idx = 0;
vf->lan_vsi_id = 0; vf->lan_vsi_id = 0;
vf->num_mac = 0;
} }
msix_vf = pf->hw.func_caps.num_msix_vectors_vf; msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
...@@ -990,7 +991,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) ...@@ -990,7 +991,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (vf->lan_vsi_idx == 0) if (vf->lan_vsi_idx == 0)
goto complete_reset; goto complete_reset;
i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false); i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
complete_reset: complete_reset:
/* reallocate VF resources to reset the VSI state */ /* reallocate VF resources to reset the VSI state */
i40e_free_vf_res(vf); i40e_free_vf_res(vf);
...@@ -1031,8 +1032,7 @@ void i40e_free_vfs(struct i40e_pf *pf) ...@@ -1031,8 +1032,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
i40e_notify_client_of_vf_enable(pf, 0); i40e_notify_client_of_vf_enable(pf, 0);
for (i = 0; i < pf->num_alloc_vfs; i++) for (i = 0; i < pf->num_alloc_vfs; i++)
if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], i40e_vsi_stop_rings(pf->vsi[pf->vf[i].lan_vsi_idx]);
false);
/* Disable IOV before freeing resources. This lets any VF drivers /* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank * running in the host get themselves cleaned up before we yank
...@@ -1449,9 +1449,9 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) ...@@ -1449,9 +1449,9 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{ {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
int num_vlans = 0; int num_vlans = 0, bkt;
list_for_each_entry(f, &vsi->mac_filter_list, list) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
num_vlans++; num_vlans++;
} }
...@@ -1481,6 +1481,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1481,6 +1481,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
bool alluni = false; bool alluni = false;
int aq_err = 0; int aq_err = 0;
int bkt;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id); vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
...@@ -1507,7 +1508,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1507,7 +1508,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id, vf->port_vlan_id,
NULL); NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
list_for_each_entry(f, &vsi->mac_filter_list, list) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
continue; continue;
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
...@@ -1557,7 +1558,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, ...@@ -1557,7 +1558,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
vf->port_vlan_id, vf->port_vlan_id,
NULL); NULL);
} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
list_for_each_entry(f, &vsi->mac_filter_list, list) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
aq_ret = 0; aq_ret = 0;
if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
aq_ret = aq_ret =
...@@ -1757,7 +1758,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1757,7 +1758,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param; goto error_param;
} }
if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true)) if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
aq_ret = I40E_ERR_TIMEOUT; aq_ret = I40E_ERR_TIMEOUT;
error_param: error_param:
/* send the response to the VF */ /* send the response to the VF */
...@@ -1796,8 +1797,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1796,8 +1797,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param; goto error_param;
} }
if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false)) i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
aq_ret = I40E_ERR_TIMEOUT;
error_param: error_param:
/* send the response to the VF */ /* send the response to the VF */
...@@ -1927,20 +1927,18 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1927,20 +1927,18 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
/* Lock once, because all function inside for loop accesses VSI's /* Lock once, because all function inside for loop accesses VSI's
* MAC filter list which needs to be protected using same lock. * MAC filter list which needs to be protected using same lock.
*/ */
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
/* add new addresses to the list */ /* add new addresses to the list */
for (i = 0; i < al->num_elements; i++) { for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr, true, false); f = i40e_find_mac(vsi, al->list[i].addr);
if (!f) { if (!f) {
if (i40e_is_vsi_in_vlan(vsi)) if (i40e_is_vsi_in_vlan(vsi))
f = i40e_put_mac_in_vlan(vsi, al->list[i].addr, f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
true, false);
else else
f = i40e_add_filter(vsi, al->list[i].addr, -1, f = i40e_add_filter(vsi, al->list[i].addr, -1);
true, false);
} }
if (!f) { if (!f) {
...@@ -1948,13 +1946,13 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -1948,13 +1946,13 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
"Unable to add MAC filter %pM for VF %d\n", "Unable to add MAC filter %pM for VF %d\n",
al->list[i].addr, vf->vf_id); al->list[i].addr, vf->vf_id);
ret = I40E_ERR_PARAM; ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param; goto error_param;
} else { } else {
vf->num_mac++; vf->num_mac++;
} }
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */ /* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi); ret = i40e_sync_vsi_filters(vsi);
...@@ -2003,18 +2001,18 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2003,18 +2001,18 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
} }
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete addresses from the list */ /* delete addresses from the list */
for (i = 0; i < al->num_elements; i++) for (i = 0; i < al->num_elements; i++)
if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) { if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
ret = I40E_ERR_INVALID_MAC_ADDR; ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param; goto error_param;
} else { } else {
vf->num_mac--; vf->num_mac--;
} }
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* program the updated filter list */ /* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi); ret = i40e_sync_vsi_filters(vsi);
...@@ -2139,9 +2137,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2139,9 +2137,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
} }
for (i = 0; i < vfl->num_elements; i++) { for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
if (!ret) vf->num_vlan--;
vf->num_vlan--;
if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
...@@ -2153,11 +2150,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ...@@ -2153,11 +2150,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
false, false,
vfl->vlan_id[i], vfl->vlan_id[i],
NULL); NULL);
if (ret)
dev_err(&pf->pdev->dev,
"Unable to delete VLAN filter %d for VF %d, error %d\n",
vfl->vlan_id[i], vf->vf_id, ret);
} }
error_param: error_param:
...@@ -2689,6 +2681,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2689,6 +2681,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
struct i40e_vf *vf; struct i40e_vf *vf;
int ret = 0; int ret = 0;
int bkt;
/* validate the request */ /* validate the request */
if (vf_id >= pf->num_alloc_vfs) { if (vf_id >= pf->num_alloc_vfs) {
...@@ -2715,23 +2708,22 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ...@@ -2715,23 +2708,22 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
} }
/* Lock once because below invoked function add/del_filter requires /* Lock once because below invoked function add/del_filter requires
* mac_filter_list_lock to be held * mac_filter_hash_lock to be held
*/ */
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
/* delete the temporary mac address */ /* delete the temporary mac address */
if (!is_zero_ether_addr(vf->default_lan_addr.addr)) if (!is_zero_ether_addr(vf->default_lan_addr.addr))
i40e_del_filter(vsi, vf->default_lan_addr.addr, i40e_del_filter(vsi, vf->default_lan_addr.addr,
vf->port_vlan_id ? vf->port_vlan_id : -1, vf->port_vlan_id ? vf->port_vlan_id : -1);
true, false);
/* Delete all the filters for this VSI - we're going to kill it /* Delete all the filters for this VSI - we're going to kill it
* anyway. * anyway.
*/ */
list_for_each_entry(f, &vsi->mac_filter_list, list) hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
i40e_del_filter(vsi, f->macaddr, f->vlan, true, false); i40e_del_filter(vsi, f->macaddr, f->vlan);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */ /* program mac filter */
...@@ -2803,9 +2795,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, ...@@ -2803,9 +2795,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */ /* duplicate request, so just return success */
goto error_pvid; goto error_pvid;
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_hash_lock);
is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi); is_vsi_in_vlan = i40e_is_vsi_in_vlan(vsi);
spin_unlock_bh(&vsi->mac_filter_list_lock); spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) { if (le16_to_cpu(vsi->info.pvid) == 0 && is_vsi_in_vlan) {
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
...@@ -2835,13 +2827,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, ...@@ -2835,13 +2827,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
if (vsi->info.pvid) { if (vsi->info.pvid) {
/* kill old VLAN */ /* kill old VLAN */
ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) & i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
VLAN_VID_MASK)); VLAN_VID_MASK));
if (ret) {
dev_info(&vsi->back->pdev->dev,
"remove VLAN failed, ret=%d, aq_err=%d\n",
ret, pf->hw.aq.asq_last_status);
}
} }
if (vlan_id || qos) if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio); ret = i40e_vsi_add_pvid(vsi, vlanprio);
...@@ -2940,7 +2927,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, ...@@ -2940,7 +2927,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
} }
if (max_tx_rate > speed) { if (max_tx_rate > speed) {
dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
max_tx_rate, vf->vf_id); max_tx_rate, vf->vf_id);
ret = -EINVAL; ret = -EINVAL;
goto error; goto error;
......
...@@ -912,11 +912,11 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, ...@@ -912,11 +912,11 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc); desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc; desc_idx = ntc;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags); flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) { if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR; ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.arq_last_status =
(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
i40e_debug(hw, i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE, I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n", "AQRX: Event received with error 0x%X.\n",
......
...@@ -150,7 +150,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) ...@@ -150,7 +150,7 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
return 0; return 0;
} }
#define WB_STRIDE 0x3 #define WB_STRIDE 4
/** /**
* i40e_clean_tx_irq - Reclaim resources after transmit completes * i40e_clean_tx_irq - Reclaim resources after transmit completes
...@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, ...@@ -266,7 +266,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
unsigned int j = i40evf_get_tx_pending(tx_ring, false); unsigned int j = i40evf_get_tx_pending(tx_ring, false);
if (budget && if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j > 0) && ((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__I40E_DOWN, &vsi->state) && !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true; tx_ring->arm_wb = true;
...@@ -1950,9 +1950,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -1950,9 +1950,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
u32 td_tag = 0; u32 td_tag = 0;
dma_addr_t dma; dma_addr_t dma;
u16 gso_segs; u16 gso_segs;
u16 desc_count = 0; u16 desc_count = 1;
bool tail_bump = true;
bool do_rs = false;
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) { if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1; td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
...@@ -2035,8 +2033,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2035,8 +2033,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = &tx_ring->tx_bi[i]; tx_bi = &tx_ring->tx_bi[i];
} }
/* set next_to_watch value indicating a packet is present */ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
first->next_to_watch = tx_desc;
i++; i++;
if (i == tx_ring->count) if (i == tx_ring->count)
...@@ -2044,66 +2041,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2044,66 +2041,72 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* write last descriptor with EOP bit */
td_cmd |= I40E_TX_DESC_CMD_EOP;
/* We can OR these values together as they both are checked against
* 4 below and at this point desc_count will be used as a boolean value
* after this if/else block.
*/
desc_count |= ++tx_ring->packet_stride;
/* Algorithm to optimize tail and RS bit setting: /* Algorithm to optimize tail and RS bit setting:
* if xmit_more is supported * if queue is stopped
* if xmit_more is true * mark RS bit
* do not update tail and do not mark RS bit. * reset packet counter
* if xmit_more is false and last xmit_more was false * else if xmit_more is supported and is true
* if every packet spanned less than 4 desc * advance packet counter to 4
* then set RS bit on 4th packet and update tail * reset desc_count to 0
* on every packet
* else
* update tail and set RS bit on every packet.
* if xmit_more is false and last_xmit_more was true
* update tail and set RS bit.
* *
* Optimization: wmb to be issued only in case of tail update. * if desc_count >= 4
* Also optimize the Descriptor WB path for RS bit with the same * mark RS bit
* algorithm. * reset packet counter
* if desc_count > 0
* update tail
* *
* Note: If there are less than 4 packets * Note: If there are less than 4 descriptors
* pending and interrupts were disabled the service task will * pending and interrupts were disabled the service task will
* trigger a force WB. * trigger a force WB.
*/ */
if (skb->xmit_more && if (netif_xmit_stopped(txring_txq(tx_ring))) {
!netif_xmit_stopped(txring_txq(tx_ring))) { goto do_rs;
tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET; } else if (skb->xmit_more) {
tail_bump = false; /* set stride to arm on next packet and reset desc_count */
} else if (!skb->xmit_more && tx_ring->packet_stride = WB_STRIDE;
!netif_xmit_stopped(txring_txq(tx_ring)) && desc_count = 0;
(!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) && } else if (desc_count >= WB_STRIDE) {
(tx_ring->packet_stride < WB_STRIDE) && do_rs:
(desc_count < WB_STRIDE)) { /* write last descriptor with RS bit set */
tx_ring->packet_stride++; td_cmd |= I40E_TX_DESC_CMD_RS;
} else {
tx_ring->packet_stride = 0; tx_ring->packet_stride = 0;
tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
do_rs = true;
} }
if (do_rs)
tx_ring->packet_stride = 0;
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag) | build_ctob(td_cmd, td_offset, size, td_tag);
cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
I40E_TX_DESC_CMD_EOP) << /* Force memory writes to complete before letting h/w know there
I40E_TXD_QW1_CMD_SHIFT); * are new descriptors to fetch.
*
* We also use this memory barrier to make certain all of the
* status bits have been updated before next_to_watch is written.
*/
wmb();
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
/* notify HW of packet */ /* notify HW of packet */
if (!tail_bump) { if (desc_count) {
prefetchw(tx_desc + 1);
} else {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
wmb();
writel(i, tx_ring->tail); writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
} }
return; return;
dma_error: dma_error:
......
...@@ -309,7 +309,6 @@ struct i40e_ring { ...@@ -309,7 +309,6 @@ struct i40e_ring {
bool ring_active; /* is ring online or not */ bool ring_active; /* is ring online or not */
bool arm_wb; /* do something to arm write back */ bool arm_wb; /* do something to arm write back */
u8 packet_stride; u8 packet_stride;
#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)
u16 flags; u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0)
......
...@@ -348,6 +348,7 @@ enum i40e_nvmupd_state { ...@@ -348,6 +348,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING, I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT, I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT, I40E_NVMUPD_STATE_WRITE_WAIT,
I40E_NVMUPD_STATE_ERROR
}; };
/* nvm_access definition and its masks/shifts need to be accessible to /* nvm_access definition and its masks/shifts need to be accessible to
......
...@@ -1746,15 +1746,17 @@ static void i40evf_reset_task(struct work_struct *work) ...@@ -1746,15 +1746,17 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */ /* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
/* sleep first to make sure a minimum wait time is met */
msleep(I40EVF_RESET_WAIT_MS);
reg_val = rd32(hw, I40E_VFGEN_RSTAT) & reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK; I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == I40E_VFR_VFACTIVE) if (reg_val == I40E_VFR_VFACTIVE)
break; break;
msleep(I40EVF_RESET_WAIT_MS);
} }
pci_set_master(adapter->pdev); pci_set_master(adapter->pdev);
/* extra wait to make sure minimum wait is met */
msleep(I40EVF_RESET_WAIT_MS);
if (i == I40EVF_RESET_WAIT_COUNT) { if (i == I40EVF_RESET_WAIT_COUNT) {
struct i40evf_mac_filter *ftmp; struct i40evf_mac_filter *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp; struct i40evf_vlan_filter *fv, *fvtmp;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册