Commit 1543b765 authored by David S. Miller

Merge branch 'mlx4-fixes'

Or Gerlitz says:

====================
Mellanox 10/40G mlx4 driver fixes for 4.5-rc

Bunch of fixes from the team to the mlx4 Eth and core drivers.

Series generated against net commit aac8d3c2 "qmi_wwan: add "4G LTE usb-modem U901""

Please push patches 1, 2 and 6 to -stable as well

changes from v0:
 - handled another wrongly accounted HW counter in patch #1 (Rick)
 - fixed coding style issues in patch #4 (Sergei)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -1681,9 +1681,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	}
 
 	if (qp->ibqp.uobject)
-		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
+		context->usr_page = cpu_to_be32(
+			mlx4_to_hw_uar_index(dev->dev,
+				to_mucontext(ibqp->uobject->context)->uar.index));
 	else
-		context->usr_page = cpu_to_be32(dev->priv_uar.index);
+		context->usr_page = cpu_to_be32(
+			mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
 
 	if (attr_mask & IB_QP_DEST_QPN)
 		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
......
@@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
 		err = mlx4_reset_slave(dev);
 	else
 		err = mlx4_reset_master(dev);
-	BUG_ON(err != 0);
 
+	if (!err) {
+		mlx4_err(dev, "device was reset successfully\n");
+	} else {
+		/* EEH could have disabled the PCI channel during reset. That's
+		 * recoverable and the PCI error flow will handle it.
+		 */
+		if (!pci_channel_offline(dev->persist->pdev))
+			BUG_ON(1);
+	}
 	dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
-	mlx4_err(dev, "device was reset successfully\n");
 	mutex_unlock(&persist->device_state_mutex);
 
 	/* At that step HW was already reset, now notify clients */
......
@@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	if (timestamp_en)
 		cq_context->flags |= cpu_to_be32(1 << 19);
 
-	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
+	cq_context->logsize_usrpage =
+		cpu_to_be32((ilog2(nent) << 24) |
+			    mlx4_to_hw_uar_index(dev, uar->index));
 	cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
 	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
......
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
 	.enable = mlx4_en_phc_enable,
 };
 
+#define MLX4_EN_WRAP_AROUND_SEC	10ULL
+
+/* This function calculates the max shift that enables the user range
+ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
+ */
+static u32 freq_to_shift(u16 freq)
+{
+	u32 freq_khz = freq * 1000;
+	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
+		max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+	/* calculate max possible multiplier in order to fit in 64bit */
+	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+
+	/* This comes from the reverse of clocksource_khz2mult */
+	return ilog2(div_u64(max_mul * freq_khz, 1000000));
+}
+
 void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 {
 	struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
 	mdev->cycles.read = mlx4_en_read_clock;
 	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
-	/* Using shift to make calculation more accurate. Since current HW
-	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
-	 * register, the biggest shift when calculating using u64, is 14
-	 * (max_cycles * multiplier < 2^64)
-	 */
-	mdev->cycles.shift = 14;
+	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
	mdev->cycles.mult =
 		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
 	mdev->nominal_c_mult = mdev->cycles.mult;
......
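For illustration only, not part of the patch: plugging the 427 MHz core clock cited in the removed comment into freq_to_shift() shows why the hardcoded shift of 14 was conservative. A worked sketch, assuming hca_core_clock is reported in MHz:

/* freq_to_shift(427):
 *   freq_khz               = 427 * 1000                      = 427000
 *   max_val_cycles         = 427000 * 1000 * 10ULL           = 4.27e12
 *   max_val_cycles_rounded = roundup_pow_of_two(4.27e12) - 1 = 2^42 - 1
 *   max_mul                = 2^64 / (2^42 - 1)               = 2^22
 *   shift                  = ilog2((2^22 * 427000) / 1000000)
 *                          = ilog2(1790967)                  = 20
 *
 * clocksource_khz2mult(427000, 20) then yields mult ~= 2455682, and
 * mult * max_val_cycles_rounded ~= 1.08e19 < 2^64, so the 10-second
 * wrap-around range is still covered, with finer resolution than
 * shift = 14 gave.
 */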
@@ -2344,8 +2344,6 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
 	/* set offloads */
 	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
-	priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-	priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
 }
 
 static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2356,8 +2354,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
 	/* unset offloads */
 	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
 				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
-	priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
-	priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
 
 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
 				  VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2980,6 +2976,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
 	}
 
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+		dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+	}
+
 	mdev->pndev[port] = dev;
 	mdev->upper[port] = NULL;
......
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	stats->collisions = 0;
 	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
-	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+	stats->rx_over_errors = 0;
 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
 	stats->rx_frame_errors = 0;
 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
-	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+	stats->rx_missed_errors = 0;
 	stats->tx_aborted_errors = 0;
 	stats->tx_carrier_errors = 0;
 	stats->tx_fifo_errors = 0;
......
@@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	} else {
 		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
 	}
-	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
+	context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
+					mdev->priv_uar.index));
 	context->local_qpn = cpu_to_be32(qpn);
 	context->pri_path.ackto = 1 & 0x07;
 	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
......
@@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
 				ring->cqn, user_prio, &ring->context);
 	if (ring->bf_alloced)
-		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
+		ring->context.usr_page =
+			cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
+							 ring->bf.uar->index));
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
......
@@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	if (!priv->eq_table.uar_map[index]) {
 		priv->eq_table.uar_map[index] =
-			ioremap(pci_resource_start(dev->persist->pdev, 2) +
-				((eq->eqn / 4) << PAGE_SHIFT),
-				PAGE_SIZE);
+			ioremap(
+				pci_resource_start(dev->persist->pdev, 2) +
+				((eq->eqn / 4) << (dev->uar_page_shift)),
+				(1 << (dev->uar_page_shift)));
 		if (!priv->eq_table.uar_map[index]) {
 			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
 				 eq->eqn);
......
@@ -168,6 +168,20 @@ struct mlx4_port_config {
 
 static atomic_t pf_loading = ATOMIC_INIT(0);
 
+static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
+					      struct mlx4_dev_cap *dev_cap)
+{
+	/* The reserved_uars is calculated by system page size unit.
+	 * Therefore, adjustment is added when the uar page size is less
+	 * than the system page size
+	 */
+	dev->caps.reserved_uars =
+		max_t(int,
+		      mlx4_get_num_reserved_uar(dev),
+		      dev_cap->reserved_uars /
+			      (1 << (PAGE_SHIFT - dev->uar_page_shift)));
+}
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
 			   enum mlx4_port_type *port_type)
 {
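To see the adjustment concretely, a hypothetical configuration, not taken from the patch: a 64KB-page host (PAGE_SHIFT = 16) with the UAR page size forced to 4KB (dev->uar_page_shift = 12), and firmware reporting dev_cap->reserved_uars = 128 in 4KB units:

/* mlx4_get_num_reserved_uar(dev)            = 128 >> (16 - 12) = 8
 * dev_cap->reserved_uars / (1 << (16 - 12)) = 128 / 16         = 8
 * dev->caps.reserved_uars = max_t(int, 8, 8) = 8 system pages
 *
 * Each 64KB system page now backs 16 hardware UARs, so the reservation
 * shrinks accordingly in system-page units. On a 4KB-page host both
 * terms stay 128, matching the hardcoded max_t(int, 128, ...) removed
 * in the next hunk.
 */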
@@ -386,8 +400,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
 	dev->caps.reserved_mrws = dev_cap->reserved_mrws;
 
-	/* The first 128 UARs are used for EQ doorbells */
-	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
 	dev->caps.reserved_pds = dev_cap->reserved_pds;
 	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
 					dev_cap->reserved_xrcds : 0;
@@ -405,6 +417,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
 
+	/* Save uar page shift */
+	if (!mlx4_is_slave(dev)) {
+		/* Virtual PCI function needs to determine UAR page size from
+		 * firmware. Only master PCI function can set the uar page size
+		 */
+		dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+		mlx4_set_num_reserved_uars(dev, dev_cap);
+	}
+
 	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
 		struct mlx4_init_hca_param hca_param;
@@ -815,16 +836,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		return -ENODEV;
 	}
 
-	/* slave gets uar page size from QUERY_HCA fw command */
-	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+	/* Set uar_page_shift for VF */
+	dev->uar_page_shift = hca_param.uar_page_sz + 12;
 
-	/* TODO: relax this assumption */
-	if (dev->caps.uar_page_size != PAGE_SIZE) {
-		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
-			 dev->caps.uar_page_size, PAGE_SIZE);
-		return -ENODEV;
+	/* Make sure the master uar page size is valid */
+	if (dev->uar_page_shift > PAGE_SHIFT) {
+		mlx4_err(dev,
+			 "Invalid configuration: uar page size is larger than system page size\n");
+		return -ENODEV;
 	}
+
+	/* Set reserved_uars based on the uar_page_shift */
+	mlx4_set_num_reserved_uars(dev, &dev_cap);
+
+	/* Although uar page size in FW differs from system page size,
+	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
+	 * still works with assumption that uar page size == system page size
+	 */
+	dev->caps.uar_page_size = PAGE_SIZE;
 
 	memset(&func_cap, 0, sizeof(func_cap));
 	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
 	if (err) {
@@ -2179,8 +2209,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 
 		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
 
-		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
-		init_hca.uar_page_sz = PAGE_SHIFT - 12;
+		/* Always set UAR page size 4KB, set log_uar_sz accordingly */
+		init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+				      PAGE_SHIFT -
+				      DEFAULT_UAR_PAGE_SHIFT;
+		init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+
 		init_hca.mw_enabled = 0;
 		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
 		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
......
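The log_uar_sz change can be checked with illustrative numbers (an assumption-laden sketch, not from the patch): num_uars is counted in system pages, while INIT_HCA now always describes 4KB UAR pages to the firmware:

/* PAGE_SHIFT = 16 and dev->caps.num_uars = 1024:
 *   init_hca.log_uar_sz  = ilog2(1024) + 16 - 12 = 14
 *                          (2^14 = 16384 4KB hardware UARs, the same
 *                           BAR space as 1024 64KB system pages)
 *   init_hca.uar_page_sz = 12 - 12 = 0  (the INIT_HCA encoding for 4KB)
 *
 * On a 4KB-page host this reduces to the old ilog2(num_uars) and
 * PAGE_SHIFT - 12 values, so behavior there is unchanged.
 */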
@@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free);
 
 int mlx4_init_uar_table(struct mlx4_dev *dev)
 {
-	if (dev->caps.num_uars <= 128) {
-		mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
-			 dev->caps.num_uars);
+	int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
+
+	mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
+	mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
+
+	if (dev->caps.num_uars <= num_reserved_uar) {
+		mlx4_err(
+			dev, "Only %d UAR pages (need more than %d)\n",
+			dev->caps.num_uars, num_reserved_uar);
 		mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
 		return -ENODEV;
 	}
......
@@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
 
 	spin_lock_irq(mlx4_tlock(dev));
 	r = find_res(dev, counter_index, RES_COUNTER);
-	if (!r || r->owner != slave)
+	if (!r || r->owner != slave) {
 		ret = -EINVAL;
-	counter = container_of(r, struct res_counter, com);
-	if (!counter->port)
-		counter->port = port;
+	} else {
+		counter = container_of(r, struct res_counter, com);
+		if (!counter->port)
+			counter->port = port;
+	}
 
 	spin_unlock_irq(mlx4_tlock(dev));
 	return ret;
......
@@ -44,6 +44,8 @@
 
 #include <linux/timecounter.h>
 
+#define DEFAULT_UAR_PAGE_SHIFT 12
+
 #define MAX_MSIX_P_PORT 17
 #define MAX_MSIX 64
 #define MIN_MSIX_P_PORT 5
@@ -856,6 +858,7 @@ struct mlx4_dev {
 	u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
 	u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
 	struct mlx4_vf_dev *dev_vfs;
+	u8 uar_page_shift;
 };
 
 struct mlx4_clock_params {
@@ -1528,4 +1531,14 @@ int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
 				   struct mlx4_clock_params *params);
 
+static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index)
+{
+	return (index << (PAGE_SHIFT - dev->uar_page_shift));
+}
+
+static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev)
+{
+	/* The first 128 UARs are used for EQ doorbells */
+	return (128 >> (PAGE_SHIFT - dev->uar_page_shift));
+}
+
 #endif /* MLX4_DEVICE_H */
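A minimal sketch of how the two new helpers behave, under the patch's assumption, enforced in mlx4_slave_cap(), that uar_page_shift never exceeds PAGE_SHIFT; the host parameters below are illustrative:

/* PowerPC-style host: PAGE_SHIFT = 16, dev->uar_page_shift = 12.
 * Kernel-side UAR indices count 64KB system pages, hardware indices
 * count 4KB UAR pages:
 *   mlx4_to_hw_uar_index(dev, 0)   = 0 << 4   = 0
 *   mlx4_to_hw_uar_index(dev, 3)   = 3 << 4   = 48
 *   mlx4_get_num_reserved_uar(dev) = 128 >> 4 = 8
 *
 * On x86-64 (PAGE_SHIFT = 12) the translation is the identity and the
 * reserved count stays at the legacy 128, so existing systems see no
 * behavioral change.
 */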