Commit 8c1a91f1 authored by David S. Miller

Merge branch 'mlx4-802.1ad-accel'

Amir Vadai says:

====================
net/mlx4_en: Hardware accelerated 802.1ad

This patchset by Hadar introduces support for hardware accelerated 802.1ad in
ConnectX-3 Pro NICs. In order to support existing deployments, and due to some
hardware limitations, the feature is disabled by default and needs to be
enabled using a private flag in ethtool. Of course, the user can enable the
private flag only if the hardware supports it.
After being enabled, the standard ethtool -k/-K can be used.

Patchset was applied and tested over commit 71790a27 ("hv_netvsc: Add structs
and handlers for VF messages")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
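
On supporting hardware, enabling the offload from user space would look roughly
like the following sketch. The interface name eth0 is illustrative; "phv-bit" is
the private-flag string registered in the ethtool strings below, and the S-VLAN
names are the kernel's standard netdev feature strings:

    ethtool --set-priv-flags eth0 phv-bit on      # enable the PHV private flag
    ethtool -K eth0 tx-vlan-stag-hw-insert on     # then use the standard -K toggle
    ethtool -k eth0 | grep vlan-stag              # verify S-VLAN offload state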
@@ -871,7 +871,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	if (is_eth) {
 		wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
 		if (be32_to_cpu(cqe->vlan_my_qpn) &
-		    MLX4_CQE_VLAN_PRESENT_MASK) {
+		    MLX4_CQE_CVLAN_PRESENT_MASK) {
 			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
 				MLX4_CQE_VID_MASK;
 		} else {
@@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
 	"blueflame",
+	"phv-bit"
 };

 static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
 static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
 	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
 	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+	bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+	bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
 	int i;
+	int ret = 0;

-	if (bf_enabled_new == bf_enabled_old)
-		return 0; /* Nothing to do */
+	if (bf_enabled_new != bf_enabled_old) {
+		if (bf_enabled_new) {
+			bool bf_supported = true;

-	if (bf_enabled_new) {
-		bool bf_supported = true;
+			for (i = 0; i < priv->tx_ring_num; i++)
+				bf_supported &= priv->tx_ring[i]->bf_alloced;

-		for (i = 0; i < priv->tx_ring_num; i++)
-			bf_supported &= priv->tx_ring[i]->bf_alloced;
+			if (!bf_supported) {
+				en_err(priv, "BlueFlame is not supported\n");
+				return -EINVAL;
+			}

-		if (!bf_supported) {
-			en_err(priv, "BlueFlame is not supported\n");
-			return -EINVAL;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+		} else {
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
 		}

-		priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-	} else {
-		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-	}
+		for (i = 0; i < priv->tx_ring_num; i++)
+			priv->tx_ring[i]->bf_enabled = bf_enabled_new;

-	for (i = 0; i < priv->tx_ring_num; i++)
-		priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+		en_info(priv, "BlueFlame %s\n",
+			bf_enabled_new ? "Enabled" : "Disabled");
+	}

-	en_info(priv, "BlueFlame %s\n",
-		bf_enabled_new ? "Enabled" : "Disabled");
+	if (phv_enabled_new != phv_enabled_old) {
+		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+		if (ret)
+			return ret;
+		else if (phv_enabled_new)
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		else
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+		en_info(priv, "PHV bit %s\n",
+			phv_enabled_new ? "Enabled" : "Disabled");
+	}
 	return 0;
 }
@@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	}
 }

+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+					      netdev_features_t features)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+	 * enable/disable, make sure the S-TAG flag is always in the same
+	 * state as the C-TAG.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+		features |= NETIF_F_HW_VLAN_STAG_RX;
+	else
+		features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+	return features;
+}
+
 static int mlx4_en_set_features(struct net_device *netdev,
 		netdev_features_t features)
 {
@@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
 		en_info(priv, "Turn %s TX vlan strip offload\n",
 			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

+	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
 	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
 		en_info(priv, "Turn %s loopback\n",
 			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	dev->hw_features |= NETIF_F_LOOPBACK |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+			NETIF_F_HW_VLAN_STAG_FILTER;
+		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+	}
+
+	if (mlx4_is_slave(mdev->dev)) {
+		int phv;
+
+		err = get_phv_bit(mdev->dev, port, &phv);
+		if (!err && phv) {
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		}
+	} else {
+		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+		    !(mdev->dev->caps.flags2 &
+		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+	}
+
 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
 		dev->hw_features |= NETIF_F_RXFCS;
@@ -726,7 +726,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,

 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

-	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
 	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
 		hdr += sizeof(struct vlan_hdr);
@@ -907,11 +907,17 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 					gro_skb->csum_level = 1;

 				if ((cqe->vlan_my_qpn &
-				    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+				    cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
 				    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 					u16 vid = be16_to_cpu(cqe->sl_vid);

 					__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+				} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+					    MLX4_CQE_SVLAN_PRESENT_MASK) &&
+					   (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+					__vlan_hwaccel_put_tag(gro_skb,
+							       htons(ETH_P_8021AD),
+							       be16_to_cpu(cqe->sl_vid));
 				}

 				if (dev->features & NETIF_F_RXHASH)
@@ -970,9 +976,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 					       PKT_HASH_TYPE_L3);

 		if ((be32_to_cpu(cqe->vlan_my_qpn) &
-		    MLX4_CQE_VLAN_PRESENT_MASK) &&
+		    MLX4_CQE_CVLAN_PRESENT_MASK) &&
 		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
+			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+					       be16_to_cpu(cqe->sl_vid));

 		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
 			timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1070,7 +1081,10 @@ static const int frag_sizes[] = {
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+	/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
+	 * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+	 */
+	int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
 	int buf_size = 0;
 	int i = 0;
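
As a worked example of the line above: with the default 1500-byte MTU,
ETH_HLEN = 14 and VLAN_HLEN = 4, so eff_mtu = 1500 + 14 + (2 * 4) = 1522
bytes, which is exactly a full QinQ frame (outer 802.1ad tag plus inner
802.1Q tag) excluding the FCS.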
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 index, bf_index;
 	__be32 op_own;
 	u16 vlan_tag = 0;
+	u16 vlan_proto = 0;
 	int i_frag;
 	int lso_header_size;
 	void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		goto tx_drop;
 	}

-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		vlan_tag = skb_vlan_tag_get(skb);
+		vlan_proto = be16_to_cpu(skb->vlan_proto);
+	}

 	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -958,8 +960,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		ring->bf.offset ^= ring->bf.buf_size;
 	} else {
 		tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-			!!skb_vlan_tag_present(skb);
+		if (vlan_proto == ETH_P_8021AD)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+		else if (vlan_proto == ETH_P_8021Q)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
 		tx_desc->ctrl.fence_size = real_size;

 		/* Ensure new descriptor hits memory
@@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[26] = "Port ETS Scheduler support",
 		[27] = "Port beacon support",
 		[28] = "RX-ALL support",
+		[29] = "802.1ad offload support",
 	};
 	int i;
@@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80

 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT			0x40

 	if (vhcr->op_modifier == 1) {
 		struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);

+		if (dev->caps.phv_bit[port]) {
+			field = QUERY_FUNC_CAP_PHV_BIT;
+			MLX4_PUT(outbox->buf, field,
+				 QUERY_FUNC_CAP_FLAGS0_OFFSET);
+		}
+
 	} else if (vhcr->op_modifier == 0) {
 		struct mlx4_active_ports actv_ports =
 			mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
 		MLX4_GET(func_cap->phys_port_id, outbox,
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);

+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+	func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
 	/* All other resources are allocated by the master, but we still report
 	 * 'num' and 'reserved' capabilities as follows:
 	 * - num remains the maximum resource index
@@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
@@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
 	if (field & (1 << 2))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+	if (field & 0x80)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+	if (field & 0x40)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
 	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);

+	/* phv_check enable */
+	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+	if (byte_field & 0x2)
+		param->phv_check_en = 1;
 out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
 			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
 			    MLX4_CMD_NATIVE);
 }
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID	0x10
+#define SET_PORT_GEN_PHV_EN	0x80
+
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_general_context *context;
+	u32 in_mod;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+
+	context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+	if (phv_bit)
+		context->phv_en |= SET_PORT_GEN_PHV_EN;
+
+	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+	int err;
+	struct mlx4_func_cap func_cap;
+
+	memset(&func_cap, 0, sizeof(func_cap));
+	err = mlx4_QUERY_FUNC_CAP(dev, 1, &func_cap);
+	if (!err)
+		*phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+	return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+	int ret;
+
+	if (mlx4_is_slave(dev))
+		return -EPERM;
+
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+		if (!ret)
+			dev->caps.phv_bit[port] = new_val;
+		return ret;
+	}
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
 	u16 cqe_size;	 /* For use only when CQE stride feature enabled */
 	u16 eqe_size;	 /* For use only when EQE stride feature enabled */
 	u8 rss_ip_frags;
+	u8 phv_check_en; /* for QUERY_HCA */
 };

 struct mlx4_init_ib_param {
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;

+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+		struct mlx4_init_hca_param hca_param;
+
+		memset(&hca_param, 0, sizeof(hca_param));
+		err = mlx4_QUERY_HCA(dev, &hca_param);
+		/* Turn off PHV_EN flag in case phv_check_en is set.
+		 * phv_check_en is a HW check that parses the packet and
+		 * verifies that the phv bit was reported correctly in the
+		 * wqe. To allow QinQ, the PHV_EN flag should be set and
+		 * phv_check_en must be cleared, otherwise QinQ packets
+		 * will be dropped by the HW.
+		 */
+		if (err || hca_param.phv_check_en)
+			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+	}
+
 	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
 	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -787,6 +787,9 @@ struct mlx4_set_port_general_context {
 	u8 pprx;
 	u8 pfcrx;
 	u16 reserved4;
+	u32 reserved5;
+	u8 phv_en;
+	u8 reserved6[3];
 };

 struct mlx4_set_port_rqp_calc_context {
@@ -95,6 +95,7 @@
  */

 #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV	     2

 #define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
 enum {
 	MLX4_CQE_L2_TUNNEL_IPOK		= 1 << 31,
-	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_CVLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_SVLAN_PRESENT_MASK	= 1 << 30,
 	MLX4_CQE_L2_TUNNEL		= 1 << 27,
 	MLX4_CQE_L2_TUNNEL_CSUM		= 1 << 26,
 	MLX4_CQE_L2_TUNNEL_IPV4		= 1 << 25,
@@ -211,6 +211,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_ETS_CFG		= 1LL << 26,
 	MLX4_DEV_CAP_FLAG2_PORT_BEACON		= 1LL << 27,
 	MLX4_DEV_CAP_FLAG2_IGNORE_FCS		= 1LL << 28,
+	MLX4_DEV_CAP_FLAG2_PHV_EN		= 1LL << 29,
+	MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN	= 1LL << 30,
 };

 enum {
@@ -581,6 +583,7 @@ struct mlx4_caps {
 	u64			phys_port_id[MLX4_MAX_PORTS + 1];
 	int			tunnel_offload_mode;
 	u8			rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+	u8			phv_bit[MLX4_MAX_PORTS + 1];
 	u8			alloc_res_qp_mask;
 	u32			dmfs_high_rate_qpn_base;
 	u32			dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
 			    u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@ -272,7 +272,8 @@ enum {
 	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
 	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
 	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
-	MLX4_WQE_CTRL_INS_VLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_CVLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_SVLAN		= 1 << 7,
 	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
 	MLX4_WQE_CTRL_FORCE_LOOPBACK	= 1 << 0,
 };