Commit 53643a75 authored by Shahed Shaikh, committed by David S. Miller

qlcnic: fix ping resumption to a VM after a live migration

Delete the MAC address of a VM from the adapter's embedded switch after the VM
has been migrated out of this adapter/server; the resulting learn-and-forget
flow is sketched after the commit metadata below.
Signed-off-by: Shahed Shaikh <shahed.shaikh@qlogic.com>
Signed-off-by: Rajesh Borundia <rajesh.borundia@qlogic.com>
Signed-off-by: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 99e85879
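How the diff below works: the RX paths learn the source MAC of every loopback packet (a frame the embedded switch bounced back, i.e. one sent by a VM behind this adapter, detected via qlcnic_82xx_is_lb_pkt()/qlcnic_83xx_is_lb_pkt()) into a new per-adapter hash, rx_fhash. When the same MAC later arrives in a normal, non-loopback packet, the VM must have moved to another server, so the driver issues qlcnic_sre_macaddr_change() calls (an add followed by a delete for that MAC/VLAN) to clear the stale entry from the embedded switch and drops it from rx_fhash. The following user-space sketch only models that decision flow; rx_mac_learn(), find_mac() and the fixed-size table are invented for illustration and are not driver code:

    /* Toy model of the learn/forget logic this commit adds (not driver code). */
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define MAX_ENTRIES 8

    static uint8_t learned[MAX_ENTRIES][6];   /* MACs seen on loopback (eswitch) RX */
    static int nlearned;

    static int find_mac(const uint8_t *mac)
    {
            for (int i = 0; i < nlearned; i++)
                    if (!memcmp(learned[i], mac, 6))
                            return i;
            return -1;
    }

    /* Called per received frame; loopback_pkt is set when the embedded switch
     * looped the frame back, i.e. the sending VM lives behind this adapter. */
    static void rx_mac_learn(const uint8_t *src_mac, int loopback_pkt)
    {
            int idx = find_mac(src_mac);

            if (loopback_pkt) {
                    /* Remember local VM MACs so we can recognise them later. */
                    if (idx < 0 && nlearned < MAX_ENTRIES)
                            memcpy(learned[nlearned++], src_mac, 6);
                    return;
            }

            /* A MAC we learned as local now arrives from the wire: the VM has
             * migrated out, so forget it (the driver also deletes it from the
             * embedded switch). */
            if (idx >= 0) {
                    printf("VM MAC migrated away - delete from eswitch\n");
                    nlearned--;
                    if (idx != nlearned)
                            memcpy(learned[idx], learned[nlearned], 6);
            }
    }

    int main(void)
    {
            uint8_t vm_mac[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

            rx_mac_learn(vm_mac, 1);        /* learned while the VM is local */
            rx_mac_learn(vm_mac, 0);        /* seen from the wire after migration */
            return 0;
    }

In the driver itself the table is the hashed rx_fhash protected by rx_mac_learn_lock, entries age out in qlcnic_prune_lb_filters(), and the embedded-switch programming is done with qlcnic_sre_macaddr_change(), as shown in the hunks below.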
@@ -1008,9 +1008,12 @@ struct qlcnic_adapter {
        struct delayed_work idc_aen_work;
        struct qlcnic_filter_hash fhash;
        struct qlcnic_filter_hash rx_fhash;

        spinlock_t tx_clean_lock;
        spinlock_t mac_learn_lock;
        /* spinlock for catching rcv filters for eswitch traffic */
        spinlock_t rx_mac_learn_lock;

        u32 file_prd_off;       /*File fw product offset*/
        u32 fw_version;
        const struct firmware *fw;
@@ -1506,6 +1509,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
int qlcnic_reset_npar_config(struct qlcnic_adapter *);
int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int,
                          __le16);
/*
 * QLOGIC Board information
 */
@@ -578,7 +578,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
        struct qlcnic_filter *tmp_fil;
        struct hlist_node *tmp_hnode, *n;
        struct hlist_head *head;
        int i, time;
        int i;
        unsigned long time;
        u8 cmd;

        for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -600,6 +601,21 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
                        }
                }
        }

        for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
                head = &(adapter->rx_fhash.fhead[i]);
                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
                {
                        time = tmp_fil->ftime;
                        if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
                                spin_lock_bh(&adapter->rx_mac_learn_lock);
                                adapter->rx_fhash.fnum--;
                                hlist_del(&tmp_fil->fnode);
                                spin_unlock_bh(&adapter->rx_mac_learn_lock);
                                kfree(tmp_fil);
                        }
                }
        }
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
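An aside on the aging test used above: comparing jiffies directly works until the counter wraps, and the same predicate can be written with the kernel's wraparound-safe helper. This is only an illustrative rewrite, not part of the commit, and qlcnic_filter_expired() is an invented name:

    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Wraparound-safe form of: jiffies > (QLCNIC_FILTER_AGE * HZ + ftime) */
    static inline bool qlcnic_filter_expired(unsigned long ftime)
    {
            return time_after(jiffies, ftime + QLCNIC_FILTER_AGE * HZ);
    }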
@@ -152,6 +152,89 @@ static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
        return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
        return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
                          int loopback_pkt, __le16 vlan_id)
{
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
        struct qlcnic_filter *fil, *tmp_fil;
        struct hlist_node *tmp_hnode, *n;
        struct hlist_head *head;
        unsigned long time;
        u64 src_addr = 0;
        u8 hindex, found = 0, op;
        int ret;

        memcpy(&src_addr, phdr->h_source, ETH_ALEN);

        if (loopback_pkt) {
                if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
                        return;

                hindex = qlcnic_mac_hash(src_addr) &
                         (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);

                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
                        if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
                                time = tmp_fil->ftime;
                                if (jiffies > (QLCNIC_READD_AGE * HZ + time))
                                        tmp_fil->ftime = jiffies;
                                return;
                        }
                }

                fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
                if (!fil)
                        return;

                fil->ftime = jiffies;
                memcpy(fil->faddr, &src_addr, ETH_ALEN);
                fil->vlan_id = vlan_id;

                spin_lock(&adapter->rx_mac_learn_lock);
                hlist_add_head(&(fil->fnode), head);
                adapter->rx_fhash.fnum++;
                spin_unlock(&adapter->rx_mac_learn_lock);
        } else {
                hindex = qlcnic_mac_hash(src_addr) &
                         (adapter->fhash.fbucket_size - 1);
                head = &(adapter->rx_fhash.fhead[hindex]);

                spin_lock(&adapter->rx_mac_learn_lock);
                hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
                        if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
                            tmp_fil->vlan_id == vlan_id) {
                                found = 1;
                                break;
                        }
                }

                if (!found) {
                        spin_unlock(&adapter->rx_mac_learn_lock);
                        return;
                }

                op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
                ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
                                                vlan_id, op);
                if (!ret) {
                        op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
                        ret = qlcnic_sre_macaddr_change(adapter,
                                                        (u8 *)&src_addr,
                                                        vlan_id, op);
                        if (!ret) {
                                hlist_del(&(tmp_fil->fnode));
                                adapter->rx_fhash.fnum--;
                        }
                }

                spin_unlock(&adapter->rx_mac_learn_lock);
        }
}

void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
                               __le16 vlan_id)
{
@@ -207,9 +290,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                return;
        }

        /* Only NPAR capable devices support vlan based learning */
        if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
                vlan_id = first_desc->vlan_TCI;
        memcpy(&src_addr, phdr->h_source, ETH_ALEN);
        hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
        head = &(adapter->fhash.fhead[hindex]);
@@ -920,8 +1000,8 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
        struct qlcnic_rx_buffer *buffer;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        int index, length, cksum, pkt_offset;
        u16 vid = 0xffff;
        int index, length, cksum, pkt_offset, is_lb_pkt;
        u16 vid = 0xffff, t_vid;

        if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;
@@ -941,6 +1021,14 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (length > rds_ring->skb_size)
                skb_put(skb, rds_ring->skb_size);
        else
@@ -985,8 +1073,8 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
        struct ipv6hdr *ipv6h;
        struct tcphdr *th;
        bool push, timestamp;
        int index, l2_hdr_offset, l4_hdr_offset;
        u16 lro_length, length, data_offset, vid = 0xffff;
        int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
        u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
        u32 seq_number;

        if (unlikely(ring > adapter->max_rds_rings))
@@ -1011,6 +1099,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (timestamp)
                data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
        else
@@ -1357,6 +1453,17 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
        }
}

#define QLC_83XX_NORMAL_LB_PKT  (1ULL << 36)
#define QLC_83XX_LRO_LB_PKT     (1ULL << 46)

static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
{
        if (lro_pkt)
                return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
        else
                return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
}

static struct qlcnic_rx_buffer *
qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
                        struct qlcnic_host_sds_ring *sds_ring,
@@ -1367,8 +1474,8 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
        struct qlcnic_rx_buffer *buffer;
        struct sk_buff *skb;
        struct qlcnic_host_rds_ring *rds_ring;
        int index, length, cksum;
        u16 vid = 0xffff;
        int index, length, cksum, is_lb_pkt;
        u16 vid = 0xffff, t_vid;

        if (unlikely(ring >= adapter->max_rds_rings))
                return NULL;
@@ -1386,6 +1493,14 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (length > rds_ring->skb_size)
                skb_put(skb, rds_ring->skb_size);
        else
@@ -1424,9 +1539,9 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
        struct tcphdr *th;
        bool push;
        int l2_hdr_offset, l4_hdr_offset;
        int index;
        int index, is_lb_pkt;
        u16 lro_length, length, data_offset, gso_size;
        u16 vid = 0xffff;
        u16 vid = 0xffff, t_vid;

        if (unlikely(ring > adapter->max_rds_rings))
                return NULL;
@@ -1447,6 +1562,14 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
        skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
        if (!skb)
                return buffer;

        if (adapter->drv_mac_learn &&
            (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
                t_vid = 0;
                is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
                qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
                                     cpu_to_le16(t_vid));
        }

        if (qlcnic_83xx_is_tstamp(sts_data[1]))
                data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
        else
@@ -2208,6 +2208,7 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
        act_pci_func = adapter->ahw->act_pci_func;
        spin_lock_init(&adapter->mac_learn_lock);
        spin_lock_init(&adapter->rx_mac_learn_lock);

        if (qlcnic_82xx_check(adapter)) {
                filter_size = QLCNIC_LB_MAX_FILTERS;
@@ -2231,6 +2232,20 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
        for (i = 0; i < adapter->fhash.fbucket_size; i++)
                INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);

        adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;

        head = kcalloc(adapter->rx_fhash.fbucket_size,
                       sizeof(struct hlist_head), GFP_ATOMIC);
        if (!head)
                return;

        adapter->rx_fhash.fmax = (filter_size / act_pci_func);
        adapter->rx_fhash.fhead = head;

        for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
                INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
}

static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
@@ -2240,6 +2255,12 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
        adapter->fhash.fhead = NULL;
        adapter->fhash.fmax = 0;

        if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
                kfree(adapter->rx_fhash.fhead);

        adapter->rx_fhash.fmax = 0;
        adapter->rx_fhash.fhead = NULL;
}
int qlcnic_check_temp(struct qlcnic_adapter *adapter)