提交 edaf3825 编写于 作者: L Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix 64-bit division in mlx5 IPSEC offload support, from Ilan Tayari
   and Arnd Bergmann.

2) Fix race in statistics gathering in bnxt_en driver, from Michael
   Chan.

3) Can't use a mutex in RCU reader protected section on tap driver, from
   Cong Wang.

4) Fix mdb leak in bridging code, from Eduardo Valentin.

5) Fix free of wrong pointer variable in nfp driver, from Dan Carpenter.

6) Buffer overflow in brcmfmac driver, from Arend van Spriel.

7) ioremap_nocache() return value needs to be checked in smsc911x
   driver, from Alexey Khoroshilov.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (34 commits)
  net: stmmac: revert "support future possible different internal phy mode"
  sfc: don't read beyond unicast address list
  datagram: fix kernel-doc comments
  socket: add documentation for missing elements
  smsc911x: Add check for ioremap_nocache() return code
  brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
  net: hns: Bugfix for Tx timeout handling in hns driver
  net: ipmr: ipmr_get_table() returns NULL
  nfp: freeing the wrong variable
  mlxsw: spectrum_switchdev: Check status of memory allocation
  mlxsw: spectrum_switchdev: Remove unused variable
  mlxsw: spectrum_router: Fix use-after-free in route replace
  mlxsw: spectrum_router: Add missing rollback
  samples/bpf: fix a build issue
  bridge: mdb: fix leak on complete_info ptr on fail path
  tap: convert a mutex to a spinlock
  cxgb4: fix BUG() on interrupt deallocating path of ULD
  qed: Fix printk option passed when printing ipv6 addresses
  net: Fix minor code bug in timestamping.txt
  net: stmmac: Make 'alloc_dma_[rt]x_desc_resources()' look even closer
  ...
...@@ -44,8 +44,7 @@ timeval of SO_TIMESTAMP (ms). ...@@ -44,8 +44,7 @@ timeval of SO_TIMESTAMP (ms).
Supports multiple types of timestamp requests. As a result, this Supports multiple types of timestamp requests. As a result, this
socket option takes a bitmap of flags, not a boolean. In socket option takes a bitmap of flags, not a boolean. In
err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
sizeof(val));
val is an integer with any of the following bits set. Setting other val is an integer with any of the following bits set. Setting other
bit returns EINVAL and does not change the current state. bit returns EINVAL and does not change the current state.
...@@ -249,8 +248,7 @@ setsockopt to receive timestamps: ...@@ -249,8 +248,7 @@ setsockopt to receive timestamps:
__u32 val = SOF_TIMESTAMPING_SOFTWARE | __u32 val = SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_OPT_ID /* or any other flag */; SOF_TIMESTAMPING_OPT_ID /* or any other flag */;
err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
sizeof(val));
1.4 Bytestream Timestamps 1.4 Bytestream Timestamps
......
...@@ -3458,13 +3458,18 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) ...@@ -3458,13 +3458,18 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
req.ver_upd = DRV_VER_UPD; req.ver_upd = DRV_VER_UPD;
if (BNXT_PF(bp)) { if (BNXT_PF(bp)) {
DECLARE_BITMAP(vf_req_snif_bmap, 256); u32 data[8];
u32 *data = (u32 *)vf_req_snif_bmap;
int i; int i;
memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); memset(data, 0, sizeof(data));
for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); u16 cmd = bnxt_vf_req_snif[i];
unsigned int bit, idx;
idx = cmd / 32;
bit = cmd % 32;
data[idx] |= 1 << bit;
}
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
req.vf_req_fwd[i] = cpu_to_le32(data[i]); req.vf_req_fwd[i] = cpu_to_le32(data[i]);
...@@ -6279,6 +6284,12 @@ static int bnxt_open(struct net_device *dev) ...@@ -6279,6 +6284,12 @@ static int bnxt_open(struct net_device *dev)
return __bnxt_open_nic(bp, true, true); return __bnxt_open_nic(bp, true, true);
} }
static bool bnxt_drv_busy(struct bnxt *bp)
{
return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
test_bit(BNXT_STATE_READ_STATS, &bp->state));
}
int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{ {
int rc = 0; int rc = 0;
...@@ -6297,7 +6308,7 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) ...@@ -6297,7 +6308,7 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
clear_bit(BNXT_STATE_OPEN, &bp->state); clear_bit(BNXT_STATE_OPEN, &bp->state);
smp_mb__after_atomic(); smp_mb__after_atomic();
while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) while (bnxt_drv_busy(bp))
msleep(20); msleep(20);
/* Flush rings and and disable interrupts */ /* Flush rings and and disable interrupts */
...@@ -6358,8 +6369,15 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) ...@@ -6358,8 +6369,15 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
u32 i; u32 i;
struct bnxt *bp = netdev_priv(dev); struct bnxt *bp = netdev_priv(dev);
if (!bp->bnapi) set_bit(BNXT_STATE_READ_STATS, &bp->state);
/* Make sure bnxt_close_nic() sees that we are reading stats before
* we check the BNXT_STATE_OPEN flag.
*/
smp_mb__after_atomic();
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
return; return;
}
/* TODO check if we need to synchronize with bnxt_close path */ /* TODO check if we need to synchronize with bnxt_close path */
for (i = 0; i < bp->cp_nr_rings; i++) { for (i = 0; i < bp->cp_nr_rings; i++) {
...@@ -6406,6 +6424,7 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) ...@@ -6406,6 +6424,7 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
stats->tx_errors = le64_to_cpu(tx->tx_err); stats->tx_errors = le64_to_cpu(tx->tx_err);
} }
clear_bit(BNXT_STATE_READ_STATS, &bp->state);
} }
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
...@@ -6904,16 +6923,13 @@ static void bnxt_sp_task(struct work_struct *work) ...@@ -6904,16 +6923,13 @@ static void bnxt_sp_task(struct work_struct *work)
} }
/* Under rtnl_lock */ /* Under rtnl_lock */
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp) int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{ {
int max_rx, max_tx, tx_sets = 1; int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed; int tx_rings_needed;
bool sh = true;
int rc; int rc;
if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
sh = false;
if (tcs) if (tcs)
tx_sets = tcs; tx_sets = tcs;
...@@ -7121,7 +7137,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) ...@@ -7121,7 +7137,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
sh = true; sh = true;
rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
tc, bp->tx_nr_rings_xdp); sh, tc, bp->tx_nr_rings_xdp);
if (rc) if (rc)
return rc; return rc;
......
...@@ -1117,6 +1117,7 @@ struct bnxt { ...@@ -1117,6 +1117,7 @@ struct bnxt {
unsigned long state; unsigned long state;
#define BNXT_STATE_OPEN 0 #define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1 #define BNXT_STATE_IN_SP_TASK 1
#define BNXT_STATE_READ_STATS 2
struct bnxt_irq *irq_tbl; struct bnxt_irq *irq_tbl;
int total_irqs; int total_irqs;
...@@ -1300,7 +1301,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool); ...@@ -1300,7 +1301,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp); int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp); int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp); void bnxt_restore_pf_fw_resources(struct bnxt *bp);
......
...@@ -432,7 +432,8 @@ static int bnxt_set_channels(struct net_device *dev, ...@@ -432,7 +432,8 @@ static int bnxt_set_channels(struct net_device *dev,
} }
tx_xdp = req_rx_rings; tx_xdp = req_rx_rings;
} }
rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp); rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs,
tx_xdp);
if (rc) { if (rc) {
netdev_warn(dev, "Unable to allocate the requested rings\n"); netdev_warn(dev, "Unable to allocate the requested rings\n");
return rc; return rc;
......
...@@ -170,7 +170,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) ...@@ -170,7 +170,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (!tc) if (!tc)
tc = 1; tc = 1;
rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
tc, tx_xdp); true, tc, tx_xdp);
if (rc) { if (rc) {
netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n"); netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
return rc; return rc;
......
...@@ -2083,12 +2083,12 @@ static void detach_ulds(struct adapter *adap) ...@@ -2083,12 +2083,12 @@ static void detach_ulds(struct adapter *adap)
mutex_lock(&uld_mutex); mutex_lock(&uld_mutex);
list_del(&adap->list_node); list_del(&adap->list_node);
for (i = 0; i < CXGB4_ULD_MAX; i++) for (i = 0; i < CXGB4_ULD_MAX; i++)
if (adap->uld && adap->uld[i].handle) { if (adap->uld && adap->uld[i].handle)
adap->uld[i].state_change(adap->uld[i].handle, adap->uld[i].state_change(adap->uld[i].handle,
CXGB4_STATE_DETACH); CXGB4_STATE_DETACH);
adap->uld[i].handle = NULL;
}
if (netevent_registered && list_empty(&adapter_list)) { if (netevent_registered && list_empty(&adapter_list)) {
unregister_netevent_notifier(&cxgb4_netevent_nb); unregister_netevent_notifier(&cxgb4_netevent_nb);
netevent_registered = false; netevent_registered = false;
...@@ -5303,8 +5303,10 @@ static void remove_one(struct pci_dev *pdev) ...@@ -5303,8 +5303,10 @@ static void remove_one(struct pci_dev *pdev)
*/ */
destroy_workqueue(adapter->workq); destroy_workqueue(adapter->workq);
if (is_uld(adapter)) if (is_uld(adapter)) {
detach_ulds(adapter); detach_ulds(adapter);
t4_uld_clean_up(adapter);
}
disable_interrupts(adapter); disable_interrupts(adapter);
...@@ -5385,7 +5387,11 @@ static void shutdown_one(struct pci_dev *pdev) ...@@ -5385,7 +5387,11 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->port[i]->reg_state == NETREG_REGISTERED) if (adapter->port[i]->reg_state == NETREG_REGISTERED)
cxgb_close(adapter->port[i]); cxgb_close(adapter->port[i]);
t4_uld_clean_up(adapter); if (is_uld(adapter)) {
detach_ulds(adapter);
t4_uld_clean_up(adapter);
}
disable_interrupts(adapter); disable_interrupts(adapter);
disable_msi(adapter); disable_msi(adapter);
......
...@@ -589,22 +589,37 @@ void t4_uld_mem_free(struct adapter *adap) ...@@ -589,22 +589,37 @@ void t4_uld_mem_free(struct adapter *adap)
kfree(adap->uld); kfree(adap->uld);
} }
/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
if (adap->uld[type].handle) {
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL;
release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_sge_queues_uld(adap, type);
free_queues_uld(adap, type);
}
}
void t4_uld_clean_up(struct adapter *adap) void t4_uld_clean_up(struct adapter *adap)
{ {
unsigned int i; unsigned int i;
if (!adap->uld) mutex_lock(&uld_mutex);
return;
for (i = 0; i < CXGB4_ULD_MAX; i++) { for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle) if (!adap->uld[i].handle)
continue; continue;
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, i); cxgb4_shutdown_uld_adapter(adap, i);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, i);
free_sge_queues_uld(adap, i);
free_queues_uld(adap, i);
} }
mutex_unlock(&uld_mutex);
} }
static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
...@@ -783,15 +798,8 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) ...@@ -783,15 +798,8 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
continue; continue;
if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
continue; continue;
adap->uld[type].handle = NULL;
adap->uld[type].add = NULL; cxgb4_shutdown_uld_adapter(adap, type);
release_sge_txq_uld(adap, type);
if (adap->flags & FULL_INIT_DONE)
quiesce_rx_uld(adap, type);
if (adap->flags & USING_MSIX)
free_msix_queue_irqs_uld(adap, type);
free_sge_queues_uld(adap, type);
free_queues_uld(adap, type);
} }
mutex_unlock(&uld_mutex); mutex_unlock(&uld_mutex);
......
...@@ -402,8 +402,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) ...@@ -402,8 +402,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n"); vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
err = -ENODEV;
return -ENODEV; goto err_free_wq;
} }
enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
...@@ -414,7 +414,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) ...@@ -414,7 +414,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err) if (err)
goto err_free_wq; goto err_disable_wq;
vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
...@@ -433,8 +433,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) ...@@ -433,8 +433,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err_free_desc_ring: err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_free_wq: err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq); vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq); vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2: err_free_devcmd2:
kfree(vdev->devcmd2); kfree(vdev->devcmd2);
......
...@@ -1378,13 +1378,20 @@ void hns_nic_net_reset(struct net_device *ndev) ...@@ -1378,13 +1378,20 @@ void hns_nic_net_reset(struct net_device *ndev)
void hns_nic_net_reinit(struct net_device *netdev) void hns_nic_net_reinit(struct net_device *netdev)
{ {
struct hns_nic_priv *priv = netdev_priv(netdev); struct hns_nic_priv *priv = netdev_priv(netdev);
enum hnae_port_type type = priv->ae_handle->port_type;
netif_trans_update(priv->netdev); netif_trans_update(priv->netdev);
while (test_and_set_bit(NIC_STATE_REINITING, &priv->state)) while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
usleep_range(1000, 2000); usleep_range(1000, 2000);
hns_nic_net_down(netdev); hns_nic_net_down(netdev);
hns_nic_net_reset(netdev);
/* Only do hns_nic_net_reset in debug mode
* because of hardware limitation.
*/
if (type == HNAE_PORT_DEBUG)
hns_nic_net_reset(netdev);
(void)hns_nic_net_up(netdev); (void)hns_nic_net_up(netdev);
clear_bit(NIC_STATE_REINITING, &priv->state); clear_bit(NIC_STATE_REINITING, &priv->state);
} }
...@@ -1997,13 +2004,8 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) ...@@ -1997,13 +2004,8 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
rtnl_lock(); rtnl_lock();
/* put off any impending NetWatchDogTimeout */ /* put off any impending NetWatchDogTimeout */
netif_trans_update(priv->netdev); netif_trans_update(priv->netdev);
hns_nic_net_reinit(priv->netdev);
if (type == HNAE_PORT_DEBUG) {
hns_nic_net_reinit(priv->netdev);
} else {
netif_carrier_off(priv->netdev);
netif_tx_disable(priv->netdev);
}
rtnl_unlock(); rtnl_unlock();
} }
......
...@@ -4,14 +4,14 @@ subdir-ccflags-y += -I$(src) ...@@ -4,14 +4,14 @@ subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
fs_counters.o rl.o lag.o dev.o lib/gid.o fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += eswitch.o eswitch_offloads.o \
en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o
......
...@@ -372,7 +372,7 @@ void mlx5e_ipsec_build_inverse_table(void) ...@@ -372,7 +372,7 @@ void mlx5e_ipsec_build_inverse_table(void)
*/ */
mlx5e_ipsec_inverse_table[1] = htons(0xFFFF); mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
for (mss = 2; mss < MAX_LSO_MSS; mss++) { for (mss = 2; mss < MAX_LSO_MSS; mss++) {
mss_inv = ((1ULL << 32) / mss) >> 16; mss_inv = div_u64(1ULL << 32, mss) >> 16;
mlx5e_ipsec_inverse_table[mss] = htons(mss_inv); mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
} }
} }
...@@ -464,6 +464,8 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev, ...@@ -464,6 +464,8 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
if (!perm_addr) if (!perm_addr)
return; return;
memset(perm_addr, 0xff, MAX_ADDR_LEN);
mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr); mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
} }
......
...@@ -102,7 +102,7 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev) ...@@ -102,7 +102,7 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
return 0; return 0;
} }
int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev) static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
{ {
int err; int err;
struct mlx5_core_dev *mdev = fdev->mdev; struct mlx5_core_dev *mdev = fdev->mdev;
......
...@@ -275,7 +275,7 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, ...@@ -275,7 +275,7 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
{ {
struct mlx5_fpga_device *fdev = mdev->fpga; struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned int i; unsigned int i;
u32 *data; __be32 *data;
u32 count; u32 count;
u64 addr; u64 addr;
int ret; int ret;
...@@ -290,7 +290,7 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, ...@@ -290,7 +290,7 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
count = mlx5_fpga_ipsec_counters_count(mdev); count = mlx5_fpga_ipsec_counters_count(mdev);
data = kzalloc(sizeof(u32) * count * 2, GFP_KERNEL); data = kzalloc(sizeof(*data) * count * 2, GFP_KERNEL);
if (!data) { if (!data) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <linux/idr.h> #include <linux/idr.h>
#include "mlx5_core.h" #include "mlx5_core.h"
#include "lib/mlx5.h"
void mlx5_init_reserved_gids(struct mlx5_core_dev *dev) void mlx5_init_reserved_gids(struct mlx5_core_dev *dev)
{ {
......
...@@ -1790,6 +1790,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, ...@@ -1790,6 +1790,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
return 0; return 0;
err_nexthop_neigh_init: err_nexthop_neigh_init:
mlxsw_sp_nexthop_rif_fini(nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
return err; return err;
} }
...@@ -1866,6 +1867,7 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) ...@@ -1866,6 +1867,7 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
nh_grp->count = fi->fib_nhs; nh_grp->count = fi->fib_nhs;
nh_grp->key.fi = fi; nh_grp->key.fi = fi;
fib_info_hold(fi);
for (i = 0; i < nh_grp->count; i++) { for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i]; nh = &nh_grp->nexthops[i];
fib_nh = &fi->fib_nh[i]; fib_nh = &fi->fib_nh[i];
...@@ -1885,6 +1887,7 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) ...@@ -1885,6 +1887,7 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
nh = &nh_grp->nexthops[i]; nh = &nh_grp->nexthops[i];
mlxsw_sp_nexthop_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
} }
fib_info_put(nh_grp->key.fi);
kfree(nh_grp); kfree(nh_grp);
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -1903,6 +1906,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, ...@@ -1903,6 +1906,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
} }
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
WARN_ON_ONCE(nh_grp->adj_index_valid); WARN_ON_ONCE(nh_grp->adj_index_valid);
fib_info_put(nh_grp->key.fi);
kfree(nh_grp); kfree(nh_grp);
} }
......
...@@ -979,7 +979,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -979,7 +979,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
{ {
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
u16 old_pvid = mlxsw_sp_port->pvid; u16 old_pvid = mlxsw_sp_port->pvid;
int err; int err;
...@@ -1000,8 +999,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, ...@@ -1000,8 +999,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (err) if (err)
goto err_port_vlan_bridge_join; goto err_port_vlan_bridge_join;
bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
return 0; return 0;
err_port_vlan_bridge_join: err_port_vlan_bridge_join:
...@@ -1919,6 +1916,8 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, ...@@ -1919,6 +1916,8 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
memcpy(&switchdev_work->fdb_info, ptr, memcpy(&switchdev_work->fdb_info, ptr,
sizeof(switchdev_work->fdb_info)); sizeof(switchdev_work->fdb_info));
switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
if (!switchdev_work->fdb_info.addr)
goto err_addr_alloc;
ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
fdb_info->addr); fdb_info->addr);
/* Take a reference on the device. This can be either /* Take a reference on the device. This can be either
...@@ -1935,6 +1934,10 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, ...@@ -1935,6 +1934,10 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
mlxsw_core_schedule_work(&switchdev_work->work); mlxsw_core_schedule_work(&switchdev_work->work);
return NOTIFY_DONE; return NOTIFY_DONE;
err_addr_alloc:
kfree(switchdev_work);
return NOTIFY_BAD;
} }
static struct notifier_block mlxsw_sp_switchdev_notifier = { static struct notifier_block mlxsw_sp_switchdev_notifier = {
......
...@@ -419,7 +419,7 @@ int nfp_flower_metadata_init(struct nfp_app *app) ...@@ -419,7 +419,7 @@ int nfp_flower_metadata_init(struct nfp_app *app)
return 0; return 0;
err_free_last_used: err_free_last_used:
kfree(priv->stats_ids.free_list.buf); kfree(priv->mask_ids.last_used);
err_free_mask_id: err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf); kfree(priv->mask_ids.mask_id_free_list.buf);
return -ENOMEM; return -ENOMEM;
......
...@@ -575,7 +575,7 @@ qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, ...@@ -575,7 +575,7 @@ qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"local_ip=%pI4h:%x, remote_ip=%pI4h%x, vlan=%x\n", "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
p_tcp_ramrod->tcp.local_ip, p_tcp_ramrod->tcp.local_ip,
p_tcp_ramrod->tcp.local_port, p_tcp_ramrod->tcp.local_port,
p_tcp_ramrod->tcp.remote_ip, p_tcp_ramrod->tcp.remote_ip,
...@@ -583,7 +583,7 @@ qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, ...@@ -583,7 +583,7 @@ qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
p_tcp_ramrod->tcp.vlan_id); p_tcp_ramrod->tcp.vlan_id);
} else { } else {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n", "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
p_tcp_ramrod->tcp.local_ip, p_tcp_ramrod->tcp.local_ip,
p_tcp_ramrod->tcp.local_port, p_tcp_ramrod->tcp.local_port,
p_tcp_ramrod->tcp.remote_ip, p_tcp_ramrod->tcp.remote_ip,
...@@ -1519,7 +1519,7 @@ qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, ...@@ -1519,7 +1519,7 @@ qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
cm_info->vlan); cm_info->vlan);
else else
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
"remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n", "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
cm_info->remote_ip, cm_info->remote_port, cm_info->remote_ip, cm_info->remote_port,
cm_info->local_ip, cm_info->local_port, cm_info->local_ip, cm_info->local_port,
cm_info->vlan); cm_info->vlan);
......
...@@ -5034,12 +5034,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) ...@@ -5034,12 +5034,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev; struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *uc; struct netdev_hw_addr *uc;
int addr_count;
unsigned int i; unsigned int i;
addr_count = netdev_uc_count(net_dev);
table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
table->dev_uc_count = 1 + addr_count;
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1; i = 1;
netdev_for_each_uc_addr(uc, net_dev) { netdev_for_each_uc_addr(uc, net_dev) {
...@@ -5050,6 +5047,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) ...@@ -5050,6 +5047,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
i++; i++;
} }
table->dev_uc_count = i;
} }
static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
...@@ -5057,12 +5056,11 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) ...@@ -5057,12 +5056,11 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev; struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *mc; struct netdev_hw_addr *mc;
unsigned int i, addr_count; unsigned int i;
table->mc_overflow = false; table->mc_overflow = false;
table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
addr_count = netdev_mc_count(net_dev);
i = 0; i = 0;
netdev_for_each_mc_addr(mc, net_dev) { netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
......
...@@ -2467,6 +2467,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) ...@@ -2467,6 +2467,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev); pdata = netdev_priv(dev);
dev->irq = irq; dev->irq = irq;
pdata->ioaddr = ioremap_nocache(res->start, res_size); pdata->ioaddr = ioremap_nocache(res->start, res_size);
if (!pdata->ioaddr) {
retval = -ENOMEM;
goto out_ioremap_fail;
}
pdata->dev = dev; pdata->dev = dev;
pdata->msg_enable = ((1 << debug) - 1); pdata->msg_enable = ((1 << debug) - 1);
...@@ -2572,6 +2576,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) ...@@ -2572,6 +2576,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
smsc911x_free_resources(pdev); smsc911x_free_resources(pdev);
out_request_resources_fail: out_request_resources_fail:
iounmap(pdata->ioaddr); iounmap(pdata->ioaddr);
out_ioremap_fail:
free_netdev(dev); free_netdev(dev);
out_release_io_1: out_release_io_1:
release_mem_region(res->start, resource_size(res)); release_mem_region(res->start, resource_size(res));
......
...@@ -638,7 +638,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) ...@@ -638,7 +638,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
{ {
struct sunxi_priv_data *gmac = priv->plat->bsp_priv; struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
struct device_node *node = priv->device->of_node; struct device_node *node = priv->device->of_node;
int ret, phy_interface; int ret;
u32 reg, val; u32 reg, val;
regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val); regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val);
...@@ -718,11 +718,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) ...@@ -718,11 +718,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii) if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN; reg &= ~SYSCON_RMII_EN;
phy_interface = priv->plat->interface; switch (priv->plat->interface) {
/* if PHY is internal, select the mode (xMII) used by the SoC */
if (gmac->use_internal_phy)
phy_interface = gmac->variant->internal_phy;
switch (phy_interface) {
case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_MII:
/* default */ /* default */
break; break;
...@@ -936,7 +932,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) ...@@ -936,7 +932,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} }
plat_dat->interface = of_get_phy_mode(dev->of_node); plat_dat->interface = of_get_phy_mode(dev->of_node);
if (plat_dat->interface == PHY_INTERFACE_MODE_INTERNAL) { if (plat_dat->interface == gmac->variant->internal_phy) {
dev_info(&pdev->dev, "Will use internal PHY\n"); dev_info(&pdev->dev, "Will use internal PHY\n");
gmac->use_internal_phy = true; gmac->use_internal_phy = true;
gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0); gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0);
......
...@@ -1449,7 +1449,7 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv) ...@@ -1449,7 +1449,7 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
static void free_dma_tx_desc_resources(struct stmmac_priv *priv) static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{ {
u32 tx_count = priv->plat->tx_queues_to_use; u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue = 0; u32 queue;
/* Free TX queue resources */ /* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++) { for (queue = 0; queue < tx_count; queue++) {
...@@ -1498,7 +1498,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) ...@@ -1498,7 +1498,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
sizeof(dma_addr_t), sizeof(dma_addr_t),
GFP_KERNEL); GFP_KERNEL);
if (!rx_q->rx_skbuff_dma) if (!rx_q->rx_skbuff_dma)
return -ENOMEM; goto err_dma;
rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
sizeof(struct sk_buff *), sizeof(struct sk_buff *),
...@@ -1561,13 +1561,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -1561,13 +1561,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
sizeof(*tx_q->tx_skbuff_dma), sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL); GFP_KERNEL);
if (!tx_q->tx_skbuff_dma) if (!tx_q->tx_skbuff_dma)
return -ENOMEM; goto err_dma;
tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
sizeof(struct sk_buff *), sizeof(struct sk_buff *),
GFP_KERNEL); GFP_KERNEL);
if (!tx_q->tx_skbuff) if (!tx_q->tx_skbuff)
goto err_dma_buffers; goto err_dma;
if (priv->extend_desc) { if (priv->extend_desc) {
tx_q->dma_etx = dma_zalloc_coherent(priv->device, tx_q->dma_etx = dma_zalloc_coherent(priv->device,
...@@ -1577,7 +1577,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -1577,7 +1577,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
&tx_q->dma_tx_phy, &tx_q->dma_tx_phy,
GFP_KERNEL); GFP_KERNEL);
if (!tx_q->dma_etx) if (!tx_q->dma_etx)
goto err_dma_buffers; goto err_dma;
} else { } else {
tx_q->dma_tx = dma_zalloc_coherent(priv->device, tx_q->dma_tx = dma_zalloc_coherent(priv->device,
DMA_TX_SIZE * DMA_TX_SIZE *
...@@ -1586,13 +1586,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) ...@@ -1586,13 +1586,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
&tx_q->dma_tx_phy, &tx_q->dma_tx_phy,
GFP_KERNEL); GFP_KERNEL);
if (!tx_q->dma_tx) if (!tx_q->dma_tx)
goto err_dma_buffers; goto err_dma;
} }
} }
return 0; return 0;
err_dma_buffers: err_dma:
free_dma_tx_desc_resources(priv); free_dma_tx_desc_resources(priv);
return ret; return ret;
......
...@@ -106,7 +106,7 @@ struct major_info { ...@@ -106,7 +106,7 @@ struct major_info {
struct rcu_head rcu; struct rcu_head rcu;
dev_t major; dev_t major;
struct idr minor_idr; struct idr minor_idr;
struct mutex minor_lock; spinlock_t minor_lock;
const char *device_name; const char *device_name;
struct list_head next; struct list_head next;
}; };
...@@ -416,15 +416,15 @@ int tap_get_minor(dev_t major, struct tap_dev *tap) ...@@ -416,15 +416,15 @@ int tap_get_minor(dev_t major, struct tap_dev *tap)
goto unlock; goto unlock;
} }
mutex_lock(&tap_major->minor_lock); spin_lock(&tap_major->minor_lock);
retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL); retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
if (retval >= 0) { if (retval >= 0) {
tap->minor = retval; tap->minor = retval;
} else if (retval == -ENOSPC) { } else if (retval == -ENOSPC) {
netdev_err(tap->dev, "Too many tap devices\n"); netdev_err(tap->dev, "Too many tap devices\n");
retval = -EINVAL; retval = -EINVAL;
} }
mutex_unlock(&tap_major->minor_lock); spin_unlock(&tap_major->minor_lock);
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
...@@ -442,12 +442,12 @@ void tap_free_minor(dev_t major, struct tap_dev *tap) ...@@ -442,12 +442,12 @@ void tap_free_minor(dev_t major, struct tap_dev *tap)
goto unlock; goto unlock;
} }
mutex_lock(&tap_major->minor_lock); spin_lock(&tap_major->minor_lock);
if (tap->minor) { if (tap->minor) {
idr_remove(&tap_major->minor_idr, tap->minor); idr_remove(&tap_major->minor_idr, tap->minor);
tap->minor = 0; tap->minor = 0;
} }
mutex_unlock(&tap_major->minor_lock); spin_unlock(&tap_major->minor_lock);
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
...@@ -467,13 +467,13 @@ static struct tap_dev *dev_get_by_tap_file(int major, int minor) ...@@ -467,13 +467,13 @@ static struct tap_dev *dev_get_by_tap_file(int major, int minor)
goto unlock; goto unlock;
} }
mutex_lock(&tap_major->minor_lock); spin_lock(&tap_major->minor_lock);
tap = idr_find(&tap_major->minor_idr, minor); tap = idr_find(&tap_major->minor_idr, minor);
if (tap) { if (tap) {
dev = tap->dev; dev = tap->dev;
dev_hold(dev); dev_hold(dev);
} }
mutex_unlock(&tap_major->minor_lock); spin_unlock(&tap_major->minor_lock);
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
...@@ -1244,7 +1244,7 @@ static int tap_list_add(dev_t major, const char *device_name) ...@@ -1244,7 +1244,7 @@ static int tap_list_add(dev_t major, const char *device_name)
tap_major->major = MAJOR(major); tap_major->major = MAJOR(major);
idr_init(&tap_major->minor_idr); idr_init(&tap_major->minor_idr);
mutex_init(&tap_major->minor_lock); spin_lock_init(&tap_major->minor_lock);
tap_major->device_name = device_name; tap_major->device_name = device_name;
......
...@@ -4934,6 +4934,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, ...@@ -4934,6 +4934,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
GFP_KERNEL); GFP_KERNEL);
} else if (ieee80211_is_action(mgmt->frame_control)) { } else if (ieee80211_is_action(mgmt->frame_control)) {
if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
brcmf_err("invalid action frame length\n");
err = -EINVAL;
goto exit;
}
af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
if (af_params == NULL) { if (af_params == NULL) {
brcmf_err("unable to allocate frame\n"); brcmf_err("unable to allocate frame\n");
......
...@@ -246,6 +246,7 @@ struct sock_common { ...@@ -246,6 +246,7 @@ struct sock_common {
* @sk_policy: flow policy * @sk_policy: flow policy
* @sk_receive_queue: incoming packets * @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed * @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
* @sk_write_queue: Packet sending queue * @sk_write_queue: Packet sending queue
* @sk_omem_alloc: "o" is "option" or "other" * @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size * @sk_wmem_queued: persistent queue size
...@@ -257,6 +258,7 @@ struct sock_common { ...@@ -257,6 +258,7 @@ struct sock_common {
* @sk_pacing_status: Pacing status (requested, handled by sch_fq) * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes * @sk_sndbuf: size of send buffer in bytes
* @__sk_flags_offset: empty field used to determine location of bitfield
* @sk_padding: unused element for alignment * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets * @sk_no_check_rx: allow zero checksum in RX packets
...@@ -277,6 +279,7 @@ struct sock_common { ...@@ -277,6 +279,7 @@ struct sock_common {
* @sk_drops: raw/udp drops counter * @sk_drops: raw/udp drops counter
* @sk_ack_backlog: current listen backlog * @sk_ack_backlog: current listen backlog
* @sk_max_ack_backlog: listen backlog set in listen() * @sk_max_ack_backlog: listen backlog set in listen()
* @sk_uid: user id of owner
* @sk_priority: %SO_PRIORITY setting * @sk_priority: %SO_PRIORITY setting
* @sk_type: socket type (%SOCK_STREAM, etc) * @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family * @sk_protocol: which protocol this socket belongs in this network family
......
...@@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p, ...@@ -323,7 +323,8 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
__mdb_entry_to_br_ip(entry, &complete_info->ip); __mdb_entry_to_br_ip(entry, &complete_info->ip);
mdb.obj.complete_priv = complete_info; mdb.obj.complete_priv = complete_info;
mdb.obj.complete = br_mdb_complete; mdb.obj.complete = br_mdb_complete;
switchdev_port_obj_add(port_dev, &mdb.obj); if (switchdev_port_obj_add(port_dev, &mdb.obj))
kfree(complete_info);
} }
} else if (port_dev && type == RTM_DELMDB) { } else if (port_dev && type == RTM_DELMDB) {
switchdev_port_obj_del(port_dev, &mdb.obj); switchdev_port_obj_del(port_dev, &mdb.obj);
......
...@@ -203,7 +203,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk, ...@@ -203,7 +203,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
/** /**
* __skb_try_recv_datagram - Receive a datagram skbuff * __skb_try_recv_datagram - Receive a datagram skbuff
* @sk: socket * @sk: socket
* @flags: MSG_ flags * @flags: MSG\_ flags
* @destructor: invoked under the receive lock on successful dequeue * @destructor: invoked under the receive lock on successful dequeue
* @peeked: returns non-zero if this packet has been seen before * @peeked: returns non-zero if this packet has been seen before
* @off: an offset in bytes to peek skb from. Returns an offset * @off: an offset in bytes to peek skb from. Returns an offset
...@@ -375,7 +375,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb); ...@@ -375,7 +375,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
* skb_kill_datagram - Free a datagram skbuff forcibly * skb_kill_datagram - Free a datagram skbuff forcibly
* @sk: socket * @sk: socket
* @skb: datagram skbuff * @skb: datagram skbuff
* @flags: MSG_ flags * @flags: MSG\_ flags
* *
* This function frees a datagram skbuff that was received by * This function frees a datagram skbuff that was received by
* skb_recv_datagram. The flags argument must match the one * skb_recv_datagram. The flags argument must match the one
...@@ -809,7 +809,7 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg); ...@@ -809,7 +809,7 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
* sequenced packet sockets providing the socket receive queue * sequenced packet sockets providing the socket receive queue
* is only ever holding data ready to receive. * is only ever holding data ready to receive.
* *
* Note: when you _don't_ use this routine for this protocol, * Note: when you *don't* use this routine for this protocol,
* and you use a different write policy from sock_writeable() * and you use a different write policy from sock_writeable()
* then please supply your own write_space callback. * then please supply your own write_space callback.
*/ */
......
...@@ -2431,8 +2431,8 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, ...@@ -2431,8 +2431,8 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0; tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT); mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
if (IS_ERR(mrt)) { if (!mrt) {
err = PTR_ERR(mrt); err = -ENOENT;
goto errout_free; goto errout_free;
} }
......
...@@ -207,6 +207,7 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h ...@@ -207,6 +207,7 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
# useless for BPF samples. # useless for BPF samples.
$(obj)/%.o: $(src)/%.c $(obj)/%.o: $(src)/%.c
$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
-I$(srctree)/tools/testing/selftests/bpf/ \
-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
-Wno-compare-distinct-pointer-types \ -Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \ -Wno-gnu-variable-sized-type-not-at-end \
......
...@@ -37,6 +37,5 @@ CLANG ?= clang ...@@ -37,6 +37,5 @@ CLANG ?= clang
%.o: %.c %.o: %.c
$(CLANG) -I. -I./include/uapi -I../../../include/uapi \ $(CLANG) -I. -I./include/uapi -I../../../include/uapi \
-I../../../../samples/bpf/ \
-Wno-compare-distinct-pointer-types \ -Wno-compare-distinct-pointer-types \
-O2 -target bpf -c $< -o $@ -O2 -target bpf -c $< -o $@
...@@ -23,11 +23,19 @@ ...@@ -23,11 +23,19 @@
# define __bpf_htons(x) __builtin_bswap16(x) # define __bpf_htons(x) __builtin_bswap16(x)
# define __bpf_constant_ntohs(x) ___constant_swab16(x) # define __bpf_constant_ntohs(x) ___constant_swab16(x)
# define __bpf_constant_htons(x) ___constant_swab16(x) # define __bpf_constant_htons(x) ___constant_swab16(x)
# define __bpf_ntohl(x) __builtin_bswap32(x)
# define __bpf_htonl(x) __builtin_bswap32(x)
# define __bpf_constant_ntohl(x) ___constant_swab32(x)
# define __bpf_constant_htonl(x) ___constant_swab32(x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define __bpf_ntohs(x) (x) # define __bpf_ntohs(x) (x)
# define __bpf_htons(x) (x) # define __bpf_htons(x) (x)
# define __bpf_constant_ntohs(x) (x) # define __bpf_constant_ntohs(x) (x)
# define __bpf_constant_htons(x) (x) # define __bpf_constant_htons(x) (x)
# define __bpf_ntohl(x) (x)
# define __bpf_htonl(x) (x)
# define __bpf_constant_ntohl(x) (x)
# define __bpf_constant_htonl(x) (x)
#else #else
# error "Fix your compiler's __BYTE_ORDER__?!" # error "Fix your compiler's __BYTE_ORDER__?!"
#endif #endif
...@@ -38,5 +46,11 @@ ...@@ -38,5 +46,11 @@
#define bpf_ntohs(x) \ #define bpf_ntohs(x) \
(__builtin_constant_p(x) ? \ (__builtin_constant_p(x) ? \
__bpf_constant_ntohs(x) : __bpf_ntohs(x)) __bpf_constant_ntohs(x) : __bpf_ntohs(x))
#define bpf_htonl(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_htonl(x) : __bpf_htonl(x))
#define bpf_ntohl(x) \
(__builtin_constant_p(x) ? \
__bpf_constant_ntohl(x) : __bpf_ntohl(x))
#endif /* __BPF_ENDIAN__ */ #endif /* __BPF_ENDIAN__ */
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册