Commit 8b4837c8 authored by David S. Miller

Merge branch 'mlx5-next'

Saeed Mahameed says:

====================
mlx5 driver updates

This series includes some bug fixes and updates for the mlx5 core
and ethernet driver.

From Gal, two fixes that protect the CQ moderation update flows
when moderation is not supported by the device.
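Both the get and set ethtool coalesce handlers now bail out early when
the device does not expose the capability; condensed from the
en_ethtool.c hunks below:

	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
		return -ENOTSUPP;	/* CQ moderation can be neither read nor changed */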

From Moshe, two fixes for the core and ethernet driver covering
non-cached (NC) and write-combining (WC) buffer mappings,
which prevent the driver from mapping the same memory twice.
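With the fix, each UAR page is mapped exactly once: as write combining
when the caller asks for it, with a plain non-cached ioremap() as the
fallback when WC is unavailable; condensed from the uar.c hunk below:

	if (map_wc) {
		uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!uar->bf_map)	/* no WC on this platform; fall back to NC */
			uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	} else {
		uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	}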

From Or, a fix that reduces the firmware command completion timeout.
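The constant only bounds how long a caller sleeps waiting for the
firmware to answer a command; a sketch of the wait site, assuming the
usual wait_for_completion_timeout() pattern in cmd.c (the ent->done
completion name is illustrative, not part of this diff):

	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);

	if (!wait_for_completion_timeout(&ent->done, timeout))
		err = -ETIMEDOUT;	/* firmware did not complete the command in time */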

From Tariq, several small trivial fixes.

Changes from v0:
	- "Fix global UAR mapping" commit messages updated to explain ARCH_HAS_IOREMAP_WC usage.
	- rebased to commit 8d3f2806 'Merge branch ethtool-ksettings'

Changes from v1:
	- Removed the ARCH_HAS_IOREMAP_WC config flag from the "Fix global UAR mapping" commit, as it was not accurate to use it.
	- Squashed "Fix global UAR mapping" and "net/mlx5: Avoid double mapping of io mapped memory"
	- Added more info for "Fix global UAR mapping" in commit message

Changes from v2:
	- None; resubmission per Dave's request due to two parallel submissions to the mlx5 driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -260,26 +260,28 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
 	"tso_bytes",
 	"tso_inner_packets",
 	"tso_inner_bytes",
-	"csum_offload_none",
 	"csum_offload_inner",
+	"nop",
+	"csum_offload_none",
 	"stopped",
 	"wake",
 	"dropped",
-	"nop"
 };

 struct mlx5e_sq_stats {
+	/* commonly accessed in data path */
 	u64 packets;
 	u64 tso_packets;
 	u64 tso_bytes;
 	u64 tso_inner_packets;
 	u64 tso_inner_bytes;
-	u64 csum_offload_none;
 	u64 csum_offload_inner;
+	u64 nop;
+	/* less likely accessed in data path */
+	u64 csum_offload_none;
 	u64 stopped;
 	u64 wake;
 	u64 dropped;
-	u64 nop;
 #define NUM_SQ_STATS 11
 };
@@ -386,6 +388,7 @@ struct mlx5e_sq_dma {
 enum {
 	MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+	MLX5E_SQ_STATE_BF_ENABLE,
 };

 struct mlx5e_sq {
@@ -414,7 +417,6 @@ struct mlx5e_sq {
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
 	void __iomem              *uar_map;
-	void __iomem              *uar_bf_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
 	u16                        bf_buf_size;
@@ -555,7 +557,6 @@ struct mlx5e_priv {
 	struct mlx5e_vxlan_db      vxlan;
 	struct mlx5e_params        params;
-	spinlock_t                 async_events_spinlock; /* sync hw events */
 	struct work_struct         update_carrier_work;
 	struct work_struct         set_rx_mode_work;
 	struct delayed_work        update_stats_work;
@@ -663,16 +664,12 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
 	 * doorbell
 	 */
 	wmb();
-	if (bf_sz) {
-		__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
-		/* flush the write-combining mapped buffer */
-		wmb();
-	} else {
+	if (bf_sz)
+		__iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+	else
 		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
-	}
+	/* flush the write-combining mapped buffer */
+	wmb();
 	sq->bf_offset ^= sq->bf_buf_size;
 }
......
@@ -211,13 +211,14 @@ static void mlx5e_get_strings(struct net_device *dev,
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
 					"rx%d_%s", i, rq_stats_strings[j]);
-		for (i = 0; i < priv->params.num_channels; i++)
-			for (tc = 0; tc < priv->params.num_tc; tc++)
+		for (tc = 0; tc < priv->params.num_tc; tc++)
+			for (i = 0; i < priv->params.num_channels; i++)
 				for (j = 0; j < NUM_SQ_STATS; j++)
 					sprintf(data +
-						(idx++) * ETH_GSTRING_LEN,
-						"tx%d_%d_%s", i, tc,
-						sq_stats_strings[j]);
+						(idx++) * ETH_GSTRING_LEN,
+						"tx%d_%s",
+						priv->channeltc_to_txq_map[i][tc],
+						sq_stats_strings[j]);
 		break;
 	}
 }
@@ -249,8 +250,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 						       &priv->state) ? 0 :
 				((u64 *)&priv->channel[i]->rq.stats)[j];
-	for (i = 0; i < priv->params.num_channels; i++)
-		for (tc = 0; tc < priv->params.num_tc; tc++)
+	for (tc = 0; tc < priv->params.num_tc; tc++)
+		for (i = 0; i < priv->params.num_channels; i++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				data[idx++] = !test_bit(MLX5E_STATE_OPENED,
 							&priv->state) ? 0 :
@@ -399,6 +400,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+		return -ENOTSUPP;
 	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
 	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
 	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
@@ -416,11 +420,18 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 	int tc;
 	int i;
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		return -ENOTSUPP;
 	mutex_lock(&priv->state_lock);
 	priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
 	priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
 	priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
 	priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		goto out;
 	for (i = 0; i < priv->params.num_channels; ++i) {
 		c = priv->channel[i];
@@ -436,6 +447,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
 					       coal->rx_max_coalesced_frames);
 	}
+out:
 	mutex_unlock(&priv->state_lock);
 	return 0;
 }
......
@@ -275,9 +275,14 @@ static void mlx5e_update_stats_work(struct work_struct *work)
 	mutex_unlock(&priv->state_lock);
 }
-static void __mlx5e_async_event(struct mlx5e_priv *priv,
-				enum mlx5_dev_event event)
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+			      enum mlx5_dev_event event, unsigned long param)
 {
+	struct mlx5e_priv *priv = vpriv;
+	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+		return;
 	switch (event) {
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
@@ -289,17 +294,6 @@ static void __mlx5e_async_event(struct mlx5e_priv *priv,
 	}
 }
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
-			      enum mlx5_dev_event event, unsigned long param)
-{
-	struct mlx5e_priv *priv = vpriv;
-	spin_lock(&priv->async_events_spinlock);
-	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
-		__mlx5e_async_event(priv, event);
-	spin_unlock(&priv->async_events_spinlock);
-}
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
 	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
@@ -307,9 +301,8 @@ static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-	spin_lock_irq(&priv->async_events_spinlock);
 	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
-	spin_unlock_irq(&priv->async_events_spinlock);
+	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }

 #define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
@@ -555,7 +548,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	int txq_ix;
 	int err;
-	err = mlx5_alloc_map_uar(mdev, &sq->uar);
+	err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
 	if (err)
 		return err;
@@ -567,8 +560,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 		goto err_unmap_free_uar;
 	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
-	sq->uar_map     = sq->uar.map;
-	sq->uar_bf_map  = sq->uar.bf_map;
+	if (sq->uar.bf_map) {
+		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
+		sq->uar_map = sq->uar.bf_map;
+	} else {
+		sq->uar_map = sq->uar.map;
+	}
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 	sq->max_inline  = param->max_inline;
@@ -877,12 +874,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
 	if (err)
 		goto err_destroy_cq;
-	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
-					     moderation_usecs,
-					     moderation_frames);
-	if (err)
-		goto err_destroy_cq;
+	if (MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+					       moderation_usecs,
+					       moderation_frames);
 	return 0;

 err_destroy_cq:
@@ -1071,6 +1066,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 	param->wq.linear = 1;
 }

+static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+{
+	void *rqc = param->rqc;
+	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+}
+
 static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
 				 struct mlx5e_sq_param *param)
 {
@@ -1458,8 +1462,8 @@ int mlx5e_open_locked(struct net_device *netdev)
 		goto err_close_channels;
 	}
-	mlx5e_update_carrier(priv);
 	mlx5e_redirect_rqts(priv);
+	mlx5e_update_carrier(priv);
 	mlx5e_timestamp_init(priv);

 	schedule_delayed_work(&priv->update_stats_work, 0);
@@ -1498,8 +1502,8 @@ int mlx5e_close_locked(struct net_device *netdev)
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);

 	mlx5e_timestamp_cleanup(priv);
-	mlx5e_redirect_rqts(priv);
 	netif_carrier_off(priv->netdev);
+	mlx5e_redirect_rqts(priv);
 	mlx5e_close_channels(priv);

 	return 0;
@@ -1581,8 +1585,7 @@ static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
 	memset(&cq_param, 0, sizeof(cq_param));
 	memset(&rq_param, 0, sizeof(rq_param));
-	mlx5e_build_rx_cq_param(priv, &cq_param);
-	mlx5e_build_rq_param(priv, &rq_param);
+	mlx5e_build_drop_rq_param(&rq_param);

 	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
 	if (err)
@@ -2217,6 +2220,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	}

 	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
mlx5_core_warn(mdev, "CQ modiration is not supported\n");
 	return 0;
 }
@@ -2290,7 +2295,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 	mlx5e_ets_init(priv);
 #endif

-	spin_lock_init(&priv->async_events_spinlock);
 	mutex_init(&priv->state_lock);

 	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
@@ -2418,7 +2422,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 	priv = netdev_priv(netdev);

-	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
 	if (err) {
 		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
 		goto err_free_netdev;
......
@@ -303,7 +303,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
 		int bf_sz = 0;

-		if (bf && sq->uar_bf_map)
+		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
 			bf_sz = wi->num_wqebbs << 3;

 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
......
@@ -442,6 +442,11 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 }
 EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
+{
+	return dev->priv.msix_arr[vecidx].vector;
+}
+
 int mlx5_eq_init(struct mlx5_core_dev *dev)
 {
 	int err;
......
@@ -767,22 +767,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 	return -ENOTSUPP;
 }

-static int map_bf_area(struct mlx5_core_dev *dev)
-{
-	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
-	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
-
-	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
-
-	return dev->priv.bf_mapping ? 0 : -ENOMEM;
-}
-
-static void unmap_bf_area(struct mlx5_core_dev *dev)
-{
-	if (dev->priv.bf_mapping)
-		io_mapping_free(dev->priv.bf_mapping);
-}
-
 static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 {
 	struct mlx5_device_context *dev_ctx;
@@ -1103,14 +1087,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 		goto err_stop_eqs;
 	}

-	if (map_bf_area(dev))
-		dev_err(&pdev->dev, "Failed to map blue flame area\n");
-
 	err = mlx5_irq_set_affinity_hints(dev);
-	if (err) {
+	if (err)
 		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-		goto err_unmap_bf_area;
-	}

 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -1169,10 +1148,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
 	mlx5_irq_clear_affinity_hints(dev);
-
-err_unmap_bf_area:
-	unmap_bf_area(dev);
-
 	free_comp_eqs(dev);

 err_stop_eqs:
@@ -1242,7 +1217,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
 	mlx5_irq_clear_affinity_hints(dev);
-	unmap_bf_area(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);
......
@@ -99,6 +99,7 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
 cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);

 void mlx5e_init(void);
 void mlx5e_cleanup(void);
......
@@ -226,7 +226,8 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
 	return 0;
 }

-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+		       bool map_wc)
 {
 	phys_addr_t pfn;
 	phys_addr_t uar_bar_start;
@@ -240,20 +241,26 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 	uar_bar_start = pci_resource_start(mdev->pdev, 0);
 	pfn           = (uar_bar_start >> PAGE_SHIFT) + uar->index;
-	uar->map      = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-	if (!uar->map) {
-		mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
-		err = -ENOMEM;
-		goto err_free_uar;
-	}
-	if (mdev->priv.bf_mapping)
-		uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
-						uar->index << PAGE_SHIFT);
+	if (map_wc) {
+		uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+		if (!uar->bf_map) {
+			mlx5_core_warn(mdev, "ioremap_wc() failed\n");
+			uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+			if (!uar->map)
+				goto err_free_uar;
+		}
+	} else {
+		uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+		if (!uar->map)
+			goto err_free_uar;
+	}

 	return 0;

 err_free_uar:
+	mlx5_core_warn(mdev, "ioremap() failed\n");
+	err = -ENOMEM;
 	mlx5_cmd_free_uar(mdev, uar->index);

 	return err;
@@ -262,8 +269,8 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-	io_mapping_unmap(uar->bf_map);
 	iounmap(uar->map);
+	iounmap(uar->bf_map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
@@ -54,7 +54,7 @@ enum {
 	/* one minute for the sake of bringup. Generally, commands must always
 	 * complete and we may need to increase this timeout value
 	 */
-	MLX5_CMD_TIMEOUT_MSEC	= 7200 * 1000,
+	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
 	MLX5_CMD_WQ_MAX_NAME	= 32,
 };
@@ -460,8 +460,6 @@ struct mlx5_priv {
 	struct mlx5_uuar_info	uuari;
 	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);

-	struct io_mapping	*bf_mapping;
-
 	/* pages stuff */
 	struct workqueue_struct	*pg_wq;
 	struct rb_root		page_root;
@@ -719,7 +717,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
+		       bool map_wc);
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
......