Commit 02e6dae6 authored by David S. Miller

Merge tag 'mlx5-updates-2018-10-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2018-10-18

This series provides misc updates to mlx5 core and netdevice driver.

1) From Tariq Toukan: Refactor fragmented buffer struct fields and init flow.

2) From Vlad Buslov: flow counters cache improvements and fixes.
As a follow-up to the previous mlx5 flow counters series,
Vlad provides two fixes:
  2.1) Take fs_counters dellist before addlist
Fixes: 6e5e2283 ("net/mlx5: Add new list to store deleted flow counters")
  2.2) Remove counter from idr after removing it from list
Fixes: 12d6066c ("net/mlx5: Add flow counters idr")

From Shay Agroskin:
3) Add FEC set/get FW commands and FEC ethtool callback support.
4) Add new ethtool statistics to cover RX errors, such as FEC errors.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -393,7 +393,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
- mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
+ mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -728,16 +728,11 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
int nent,
int cqe_size)
{
- struct mlx5_frag_buf_ctrl *c = &buf->fbc;
- struct mlx5_frag_buf *frag_buf = &c->frag_buf;
- u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+ u8 log_wq_sz = ilog2(cqe_size);
int err;
- MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
- MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
- mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);
err = mlx5_frag_buf_alloc_node(dev->mdev,
nent * cqe_size,
frag_buf,
@@ -745,6 +740,8 @@ static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
if (err)
return err;
mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
buf->cqe_size = cqe_size;
buf->nent = nent;
@@ -934,7 +931,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
- cq->buf.fbc.frag_buf.npages;
+ cq->buf.frag_buf.npages;
*cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
@@ -942,11 +939,11 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
}
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
- mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
- cq->buf.fbc.frag_buf.page_shift -
+ cq->buf.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uar->index;
@@ -1365,11 +1362,10 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cqe_size = 64;
err = resize_kernel(dev, cq, entries, cqe_size);
if (!err) {
- struct mlx5_frag_buf_ctrl *c;
+ struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;
- c = &cq->resize_buf->fbc;
- npas = c->frag_buf.npages;
- page_shift = c->frag_buf.page_shift;
+ npas = frag_buf->npages;
+ page_shift = frag_buf->page_shift;
}
}
@@ -1390,8 +1386,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
pas, 0);
else
- mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
- pas);
+ mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.resize_field_select.resize_field_select,
...
@@ -435,6 +435,7 @@ struct mlx5_ib_qp {
struct mlx5_ib_cq_buf {
struct mlx5_frag_buf_ctrl fbc;
struct mlx5_frag_buf frag_buf;
struct ib_umem *umem;
int cqe_size;
int nent;
...
@@ -235,3 +235,211 @@ int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer)
kfree(out);
return err;
}
static u32 fec_supported_speeds[] = {
10000,
40000,
25000,
50000,
56000,
100000
};
#define MLX5E_FEC_SUPPORTED_SPEEDS ARRAY_SIZE(fec_supported_speeds)
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm,
u8 *fec_policy,
bool write,
u32 speed)
{
switch (speed) {
case 10000:
case 40000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
fec_override_admin_10g_40g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_10g_40g, *fec_policy);
break;
case 25000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
fec_override_admin_25g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_25g, *fec_policy);
break;
case 50000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
fec_override_admin_50g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_50g, *fec_policy);
break;
case 56000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
fec_override_admin_56g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_56g, *fec_policy);
break;
case 100000:
if (!write)
*fec_policy = MLX5_GET(pplm_reg, pplm,
fec_override_admin_100g);
else
MLX5_SET(pplm_reg, pplm,
fec_override_admin_100g, *fec_policy);
break;
default:
return -EINVAL;
}
return 0;
}
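/* Note: per the PPLM register layout further below, 10G and 40G share a
 * single override field (fec_override_admin_10g_40g), so one admin policy
 * covers both speeds.
 */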
/* returns FEC capabilities for a given speed */
static int mlx5e_get_fec_cap_field(u32 *pplm,
u8 *fec_cap,
u32 speed)
{
switch (speed) {
case 10000:
case 40000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
fec_override_cap_10g_40g);
break;
case 25000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
fec_override_cap_25g);
break;
case 50000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
fec_override_cap_50g);
break;
case 56000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
fec_override_cap_56g);
break;
case 100000:
*fec_cap = MLX5_GET(pplm_reg, pplm,
fec_override_cap_100g);
break;
default:
return -EINVAL;
}
return 0;
}
int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps)
{
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
u32 current_fec_speed;
int err;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
if (!MLX5_CAP_PCAM_REG(dev, pplm))
return -EOPNOTSUPP;
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
return err;
err = mlx5e_port_linkspeed(dev, &current_fec_speed);
if (err)
return err;
return mlx5e_get_fec_cap_field(out, fec_caps, current_fec_speed);
}
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
u8 *fec_configured_mode)
{
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
u32 link_speed;
int err;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
if (!MLX5_CAP_PCAM_REG(dev, pplm))
return -EOPNOTSUPP;
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
return err;
*fec_mode_active = MLX5_GET(pplm_reg, out, fec_mode_active);
if (!fec_configured_mode)
return 0;
err = mlx5e_port_linkspeed(dev, &link_speed);
if (err)
return err;
return mlx5e_fec_admin_field(out, fec_configured_mode, 0, link_speed);
}
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
{
bool fec_mode_not_supp_in_speed = false;
u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
u32 current_fec_speed;
u8 fec_caps = 0;
int err;
int i;
if (!MLX5_CAP_GEN(dev, pcam_reg))
return -EOPNOTSUPP;
if (!MLX5_CAP_PCAM_REG(dev, pplm))
return -EOPNOTSUPP;
MLX5_SET(pplm_reg, in, local_port, 1);
err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0);
if (err)
return err;
err = mlx5e_port_linkspeed(dev, &current_fec_speed);
if (err)
return err;
memset(in, 0, sz);
MLX5_SET(pplm_reg, in, local_port, 1);
for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
/* policy supported for link speed */
if (!!(fec_caps & fec_policy)) {
mlx5e_fec_admin_field(in, &fec_policy, 1,
fec_supported_speeds[i]);
} else {
if (fec_supported_speeds[i] == current_fec_speed)
return -EOPNOTSUPP;
mlx5e_fec_admin_field(in, &no_fec_policy, 1,
fec_supported_speeds[i]);
fec_mode_not_supp_in_speed = true;
}
}
if (fec_mode_not_supp_in_speed)
mlx5_core_dbg(dev,
"FEC policy 0x%x is not supported for some speeds",
fec_policy);
return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
}
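/* Example: fec_policy = BIT(MLX5E_FEC_RS_528_514) programs RS FEC on every
 * speed whose capability field advertises it, forces no-FEC on the speeds
 * that cannot do RS, and fails with -EOPNOTSUPP only when the current link
 * speed itself does not support the requested mode.
 */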
@@ -45,4 +45,16 @@ int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_get_fec_caps(struct mlx5_core_dev *dev, u8 *fec_caps);
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
u8 *fec_configured_mode);
int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy);
enum {
MLX5E_FEC_NOFEC,
MLX5E_FEC_FIRECODE,
MLX5E_FEC_RS_528_514,
};
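/* Bit positions here are assumed to match the 4-bit fec_override_* encoding
 * of the PPLM register: BIT(0) no FEC, BIT(1) Firecode, BIT(2) RS(528,514).
 */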
#endif
@@ -547,6 +547,70 @@ static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
static const u32 pplm_fec_2_ethtool[] = {
[MLX5E_FEC_NOFEC] = ETHTOOL_FEC_OFF,
[MLX5E_FEC_FIRECODE] = ETHTOOL_FEC_BASER,
[MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS,
};
static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size)
{
int mode = 0;
if (!fec_mode)
return ETHTOOL_FEC_AUTO;
mode = find_first_bit(&fec_mode, size);
if (mode < ARRAY_SIZE(pplm_fec_2_ethtool))
return pplm_fec_2_ethtool[mode];
return 0;
}
/* we use ETHTOOL_FEC_* offset and apply it to ETHTOOL_LINK_MODE_FEC_*_BIT */
static u32 ethtool_fec2ethtool_caps(u_long ethtool_fec_code)
{
u32 offset;
offset = find_first_bit(&ethtool_fec_code, sizeof(u32));
offset -= ETHTOOL_FEC_OFF_BIT;
offset += ETHTOOL_LINK_MODE_FEC_NONE_BIT;
return offset;
}
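/* Worked example (constants from include/uapi/linux/ethtool.h, where
 * ETHTOOL_FEC_OFF_BIT = 2, ETHTOOL_FEC_RS = BIT(3) and
 * ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49):
 *   pplm2ethtool_fec(BIT(MLX5E_FEC_RS_528_514), 8)
 *     -> find_first_bit() = 2 -> pplm_fec_2_ethtool[2] = ETHTOOL_FEC_RS
 *   ethtool_fec2ethtool_caps(ETHTOOL_FEC_RS)
 *     -> find_first_bit() = 3 -> 3 - 2 + 49 = 50 = ETHTOOL_LINK_MODE_FEC_RS_BIT
 */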
static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
struct ethtool_link_ksettings *link_ksettings)
{
u_long fec_caps = 0;
u32 active_fec = 0;
u32 offset;
u32 bitn;
int err;
err = mlx5e_get_fec_caps(dev, (u8 *)&fec_caps);
if (err)
return (err == -EOPNOTSUPP) ? 0 : err;
err = mlx5e_get_fec_mode(dev, &active_fec, NULL);
if (err)
return err;
for_each_set_bit(bitn, &fec_caps, ARRAY_SIZE(pplm_fec_2_ethtool)) {
u_long ethtool_bitmask = pplm_fec_2_ethtool[bitn];
offset = ethtool_fec2ethtool_caps(ethtool_bitmask);
__set_bit(offset, link_ksettings->link_modes.supported);
}
active_fec = pplm2ethtool_fec(active_fec, sizeof(u32) * BITS_PER_BYTE);
offset = ethtool_fec2ethtool_caps(active_fec);
__set_bit(offset, link_ksettings->link_modes.advertising);
return 0;
}
static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
u32 eth_proto_cap,
u8 connector_type)
@@ -742,7 +806,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
if (err) {
netdev_err(netdev, "%s: query port ptys failed: %d\n",
__func__, err);
- goto err_query_ptys;
+ goto err_query_regs;
}
eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
@@ -778,11 +842,17 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
AUTONEG_ENABLE;
ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
Autoneg);
+ err = get_fec_supported_advertised(mdev, link_ksettings);
+ if (err)
+ netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
+ __func__, err);
if (!an_disable_admin)
ethtool_link_ksettings_add_link_mode(link_ksettings,
advertising, Autoneg);
- err_query_ptys:
+ err_query_regs:
return err;
}
@@ -1277,6 +1347,58 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
static int mlx5e_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 fec_configured = 0;
u32 fec_active = 0;
int err;
err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured);
if (err)
return err;
fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active,
sizeof(u32) * BITS_PER_BYTE);
if (!fecparam->active_fec)
return -EOPNOTSUPP;
fecparam->fec = pplm2ethtool_fec((u_long)fec_configured,
sizeof(u8) * BITS_PER_BYTE);
return 0;
}
static int mlx5e_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fecparam)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u8 fec_policy = 0;
int mode;
int err;
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
if (!(pplm_fec_2_ethtool[mode] & fecparam->fec))
continue;
fec_policy |= (1 << mode);
break;
}
err = mlx5e_set_fec_mode(mdev, fec_policy);
if (err)
return err;
mlx5_toggle_port_link(mdev);
return 0;
}
static u32 mlx5e_get_msglevel(struct net_device *dev)
{
return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
@@ -1699,4 +1821,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel,
.get_fecparam = mlx5e_get_fecparam,
.set_fecparam = mlx5e_set_fecparam,
};
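As a usage illustration (not part of the patch), here is a minimal userspace sketch that reaches the new callbacks through the standard SIOCETHTOOL ioctl; the interface name is hypothetical and error handling is elided. Recent ethtool binaries expose the same query as "ethtool --show-fec <dev>".

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_fecparam fp = { .cmd = ETHTOOL_GFECPARAM };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* hypothetical netdev */
	ifr.ifr_data = (void *)&fp;

	/* Dispatched to ethtool_ops->get_fecparam, i.e. mlx5e_get_fecparam() */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("configured FEC 0x%x, active FEC 0x%x\n",
		       fp.fec, fp.active_fec);

	close(fd);
	return 0;
}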
@@ -614,46 +614,82 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = {
{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};
- #define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+ static const struct counter_desc
+ pport_phy_statistical_err_lanes_stats_desc[] = {
+ { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
+ { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
+ { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
+ { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
+ };
+ #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
+ ARRAY_SIZE(pport_phy_statistical_stats_desc)
+ #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
+ ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int num_stats;
/* "1" for link_down_events special counter */
- return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
- NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1;
+ num_stats = 1;
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
+ num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
+ NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
+ return num_stats;
}
static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int i;
strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
return idx;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_phy_statistical_stats_desc[i].format);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_err_lanes_stats_desc[i].format);
return idx;
}
static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int i;
/* link_down_events_phy has special handling since it is not stored in __be64 format */
data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
counter_set.phys_layer_cntrs.link_down_events);
- if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
return idx;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
data[idx++] =
MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
pport_phy_statistical_stats_desc, i);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc,
+ i);
return idx;
}
...
@@ -99,6 +99,18 @@ static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
list_add_tail(&counter->list, next);
}
static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
struct mlx5_fc *counter)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
list_del(&counter->list);
spin_lock(&fc_stats->counters_idr_lock);
WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
spin_unlock(&fc_stats->counters_idr_lock);
}
/* The function returns the last counter that was queried so the caller
* function can continue calling it till all counters are queried.
*/
@@ -179,20 +191,23 @@ static void mlx5_fc_stats_work(struct work_struct *work)
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
priv.fc_stats.work.work);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct llist_node *tmplist = llist_del_all(&fc_stats->addlist);
+ /* Take dellist first to ensure that counters cannot be deleted before
+ * they are inserted.
+ */
+ struct llist_node *dellist = llist_del_all(&fc_stats->dellist);
+ struct llist_node *addlist = llist_del_all(&fc_stats->addlist);
struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
unsigned long now = jiffies;
- if (tmplist || !list_empty(&fc_stats->counters))
+ if (addlist || !list_empty(&fc_stats->counters))
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
- llist_for_each_entry(counter, tmplist, addlist)
+ llist_for_each_entry(counter, addlist, addlist)
mlx5_fc_stats_insert(dev, counter);
- tmplist = llist_del_all(&fc_stats->dellist);
- llist_for_each_entry_safe(counter, tmp, tmplist, dellist) {
- list_del(&counter->list);
+ llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
+ mlx5_fc_stats_remove(dev, counter);
mlx5_free_fc(dev, counter);
}
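/* Why dellist must be snapshotted first: a counter is always pushed to
 * addlist (on create) before dellist (on destroy). If addlist were taken
 * first, a counter created and destroyed between the two snapshots would
 * show up only in dellist, and mlx5_fc_stats_remove() would list_del() an
 * entry that was never inserted. Taking dellist first guarantees that any
 * counter in it has its insertion processed in this or an earlier round.
 */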
@@ -272,10 +287,6 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
return;
if (counter->aging) {
- spin_lock(&fc_stats->counters_idr_lock);
- WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
- spin_unlock(&fc_stats->counters_idr_lock);
llist_add(&counter->dellist, &fc_stats->dellist);
mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
return;
...
@@ -54,54 +54,37 @@ u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
- static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+ static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
- return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride;
- }
- static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
- {
- return mlx5_wq_cyc_get_byte_size(&wq->rq) +
- mlx5_wq_cyc_get_byte_size(&wq->sq);
- }
- static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
- {
- return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
- }
- static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
- {
- return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride;
+ return ((u32)1 << log_sz) << log_stride;
}
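/* Example (assuming 4 KiB pages): a CQ of 1024 entries with 64 B CQEs has
 * log_sz = 10 and log_stride = 6, so wq_get_byte_sz(10, 6) = 64 KiB, which
 * mlx5_frag_buf_alloc_node() satisfies with sixteen page-sized frags.
 */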
int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
int err;
- mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
- MLX5_GET(wq, wqc, log_wq_sz),
- fbc);
- wq->sz = wq->fbc.sz_m1 + 1;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- fbc->frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
+ wq->sz = mlx5_wq_cyc_get_size(wq);
wq_ctrl->mdev = mdev;
@@ -113,46 +96,19 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
- struct mlx5_wq_qp *qp)
- {
- struct mlx5_frag_buf_ctrl *sq_fbc;
- struct mlx5_frag_buf *rqb, *sqb;
- rqb = &qp->rq.fbc.frag_buf;
- *rqb = *buf;
- rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
- sq_fbc = &qp->sq.fbc;
- sqb = &sq_fbc->frag_buf;
- *sqb = *buf;
- sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
- sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
- sqb->frags += rqb->npages; /* first part is for the rq */
- if (sq_fbc->strides_offset)
- sqb->frags--;
- }
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u16 sq_strides_offset;
- u32 rq_pg_remainder;
- int err;
+ u8 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4;
+ u8 log_rq_sz = MLX5_GET(qpc, qpc, log_rq_size);
+ u8 log_sq_stride = ilog2(MLX5_SEND_WQE_BB);
+ u8 log_sq_sz = MLX5_GET(qpc, qpc, log_sq_size);
- mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
- MLX5_GET(qpc, qpc, log_rq_size),
- &wq->rq.fbc);
+ u32 rq_byte_size;
+ int err;
- rq_pg_remainder = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
- sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
- mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
- MLX5_GET(qpc, qpc, log_sq_size),
- sq_strides_offset,
- &wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -160,14 +116,32 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq),
+ err = mlx5_frag_buf_alloc_node(mdev,
+ wq_get_byte_sz(log_rq_sz, log_rq_stride) +
+ wq_get_byte_sz(log_sq_sz, log_sq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc);
+ rq_byte_size = wq_get_byte_sz(log_rq_sz, log_rq_stride);
+ if (rq_byte_size < PAGE_SIZE) {
+ /* SQ starts within the same page of the RQ */
+ u16 sq_strides_offset = rq_byte_size / MLX5_SEND_WQE_BB;
+ mlx5_init_fbc_offset(wq_ctrl->buf.frags,
+ log_sq_stride, log_sq_sz, sq_strides_offset,
+ &wq->sq.fbc);
+ } else {
+ u16 rq_npages = rq_byte_size >> PAGE_SHIFT;
+ mlx5_init_fbc(wq_ctrl->buf.frags + rq_npages,
+ log_sq_stride, log_sq_sz, &wq->sq.fbc);
+ }
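/* Illustrative numbers (4 KiB pages): an RQ of 64 WQEs with 16 B strides
 * (log_rq_sz = 6, log_rq_stride = 4) occupies 1 KiB < PAGE_SIZE, so the SQ
 * shares frag 0 at sq_strides_offset = 1024 / MLX5_SEND_WQE_BB (64 B) = 16
 * basic blocks; a 16 KiB RQ instead pushes the SQ to frag 4.
 */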
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -186,17 +160,19 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+ u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
int err;
- mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf,
param->buf_numa_node);
if (err) {
@@ -205,8 +181,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->fbc.frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc);
wq_ctrl->mdev = mdev;
@@ -222,30 +197,29 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
struct mlx5_wqe_srq_next_seg *next_seg;
int err;
int i;
- mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride),
- MLX5_GET(wq, wqc, log_wq_sz),
- fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ wq->db = wq_ctrl->db.db;
+ err = mlx5_frag_buf_alloc_node(mdev, wq_get_byte_sz(log_wq_sz, log_wq_stride),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
- wq->fbc.frag_buf = wq_ctrl->buf;
- wq->db = wq_ctrl->db.db;
+ mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
for (i = 0; i < fbc->sz_m1; i++) {
next_seg = mlx5_wq_ll_get_wqe(wq, i);
...
@@ -133,6 +133,7 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
MLX5_REG_PPLM = 0x5023,
MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -357,7 +358,7 @@ struct mlx5_frag_buf {
};
struct mlx5_frag_buf_ctrl {
- struct mlx5_frag_buf frag_buf;
+ struct mlx5_buf_list *frags;
u32 sz_m1;
u16 frag_sz_m1;
u16 strides_offset;
@@ -994,10 +995,12 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
- static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+ static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
u16 strides_offset,
struct mlx5_frag_buf_ctrl *fbc)
{
+ fbc->frags = frags;
fbc->log_stride = log_stride;
fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
@@ -1006,18 +1009,11 @@ static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
fbc->strides_offset = strides_offset;
}
- static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+ static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
+ u8 log_stride, u8 log_sz,
struct mlx5_frag_buf_ctrl *fbc)
{
- mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
- }
- static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
- void *cqc)
- {
- mlx5_fill_fbc(6 + MLX5_GET(cqc, cqc, cqe_sz),
- MLX5_GET(cqc, cqc, log_cq_size),
- fbc);
+ mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
@@ -1028,8 +1024,7 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
ix += fbc->strides_offset;
frag = ix >> fbc->log_frag_strides;
- return fbc->frag_buf.frags[frag].buf +
- ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+ return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}
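/* Indexing example (strides_offset = 0): with 64 B strides on 4 KiB frags,
 * log_frag_strides = 12 - 6 = 6 and frag_sz_m1 = 63. For ix = 70:
 * frag = 70 >> 6 = 1 and the byte offset is (70 & 63) << 6 = 384,
 * i.e. WQE 70 is stride 6 of frag 1.
 */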
int mlx5_cmd_init(struct mlx5_core_dev *dev);
...
@@ -7828,20 +7828,34 @@ struct mlx5_ifc_pplr_reg_bits {
struct mlx5_ifc_pplm_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
u8 reserved_at_10[0x10];
u8 reserved_at_20[0x20];
u8 port_profile_mode[0x8];
u8 static_port_profile[0x8];
u8 active_port_profile[0x8];
u8 reserved_at_58[0x8];
u8 retransmission_active[0x8];
u8 fec_mode_active[0x18];
u8 reserved_at_80[0x20];
u8 rs_fec_correction_bypass_cap[0x4];
u8 reserved_at_84[0x8];
u8 fec_override_cap_56g[0x4];
u8 fec_override_cap_100g[0x4];
u8 fec_override_cap_50g[0x4];
u8 fec_override_cap_25g[0x4];
u8 fec_override_cap_10g_40g[0x4];
u8 rs_fec_correction_bypass_admin[0x4];
u8 reserved_at_a4[0x8];
u8 fec_override_admin_56g[0x4];
u8 fec_override_admin_100g[0x4];
u8 fec_override_admin_50g[0x4];
u8 fec_override_admin_25g[0x4];
u8 fec_override_admin_10g_40g[0x4];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8126,7 +8140,8 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x8];
u8 pfcc_mask[0x1];
- u8 reserved_at_77[0x4];
+ u8 reserved_at_77[0x3];
+ u8 per_lane_error_counters[0x1];
u8 rx_buffer_fullness_counters[0x1];
u8 ptys_connector_type[0x1];
u8 reserved_at_7d[0x1];
@@ -8137,7 +8152,10 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_127_to_96[0x20];
u8 port_access_reg_cap_mask_95_to_64[0x20];
- u8 port_access_reg_cap_mask_63_to_32[0x20];
+ u8 port_access_reg_cap_mask_63_to_36[0x1c];
+ u8 pplm[0x1];
+ u8 port_access_reg_cap_mask_34_to_32[0x3];
u8 port_access_reg_cap_mask_31_to_13[0x13];
u8 pbmc[0x1];
...