Commit 33b40134 authored by: Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Use after free in rxrpc_put_local(), from David Howells.

 2) Fix 64-bit division error in mlxsw, from Nathan Chancellor.

 3) Make sure we clear various bits of TCP state in response to
    tcp_disconnect(). From Eric Dumazet.

 4) Fix netlink attribute policy in cls_rsvp, from Eric Dumazet.

 5) txtimer must be deleted in stmmac suspend(), from Nicolin Chen.

 6) Fix TC queue mapping in bnxt_en driver, from Michael Chan.

 7) Various netdevsim fixes from Taehee Yoo (use of uninitialized data,
    snapshot panics, stack out of bounds, etc.)

 8) cls_tcindex changes hash table size after allocating the table, fix
    from Cong Wang.

 9) Fix regression in the enforcement of session ID uniqueness in l2tp.
    We only have to enforce uniqueness for IP-based tunnels, not UDP
    ones. From Ridge Kennedy.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (46 commits)
  gtp: use __GFP_NOWARN to avoid memalloc warning
  l2tp: Allow duplicate session creation with UDP
  r8152: Add MAC passthrough support to new device
  net_sched: fix an OOB access in cls_tcindex
  qed: Remove set but not used variable 'p_link'
  tc-testing: add missing 'nsPlugin' to basic.json
  tc-testing: fix eBPF tests failure on linux fresh clones
  net: hsr: fix possible NULL deref in hsr_handle_frame()
  netdevsim: remove unused sdev code
  netdevsim: use __GFP_NOWARN to avoid memalloc warning
  netdevsim: use IS_ERR instead of IS_ERR_OR_NULL for debugfs
  netdevsim: fix stack-out-of-bounds in nsim_dev_debugfs_init()
  netdevsim: fix panic in nsim_dev_take_snapshot_write()
  netdevsim: disable devlink reload when resources are being used
  netdevsim: fix using uninitialized resources
  bnxt_en: Fix TC queue mapping.
  bnxt_en: Fix logic that disables Bus Master during firmware reset.
  bnxt_en: Fix RDMA driver failure with SRIOV after firmware reset.
  bnxt_en: Refactor logic to re-enable SRIOV after firmware reset detected.
  net: stmmac: Delete txtimer in suspend()
  ...
......@@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
table inet x {
flowtable f {
hook ingress priority 0 devices = { eth0, eth1 };
hook ingress priority 0; devices = { eth0, eth1 };
}
chain y {
type filter hook forward priority 0; policy accept;
......
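For context, the one-character fix above adds the semicolon that terminates the hook specification. With the nesting that the rendered diff loses, the corrected flowtable stanza reads as follows (the forward-chain rule body is elided here, as it is in the hunk):

table inet x {
	flowtable f {
		hook ingress priority 0; devices = { eth0, eth1 };
	}
	chain y {
		type filter hook forward priority 0; policy accept;
		...
	}
}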
......@@ -7658,9 +7658,8 @@ S: Orphan
F: drivers/net/usb/hso.c
HSR NETWORK PROTOCOL
M: Arvid Brodin <arvid.brodin@alten.se>
L: netdev@vger.kernel.org
S: Maintained
S: Orphan
F: net/hsr/
HT16K33 LED CONTROLLER DRIVER
......@@ -8909,8 +8908,10 @@ L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
W: http://www.isdn4linux.de
S: Maintained
F: drivers/isdn/mISDN
F: drivers/isdn/hardware
F: drivers/isdn/mISDN/
F: drivers/isdn/hardware/
F: drivers/isdn/Kconfig
F: drivers/isdn/Makefile
ISDN/CMTP OVER BLUETOOTH
M: Karsten Keil <isdn@linux-pingi.de>
......
......@@ -7893,7 +7893,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
int tcs, i;
tcs = netdev_get_num_tc(dev);
if (tcs > 1) {
if (tcs) {
int i, off, count;
for (i = 0; i < tcs; i++) {
......@@ -9241,6 +9241,17 @@ void bnxt_half_close_nic(struct bnxt *bp)
bnxt_free_mem(bp, false);
}
static void bnxt_reenable_sriov(struct bnxt *bp)
{
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
int n = pf->active_vfs;
if (n)
bnxt_cfg_hw_sriov(bp, &n, true);
}
}
static int bnxt_open(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
......@@ -9259,15 +9270,10 @@ static int bnxt_open(struct net_device *dev)
bnxt_hwrm_if_change(bp, false);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
int n = pf->active_vfs;
if (n)
bnxt_cfg_hw_sriov(bp, &n, true);
}
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
bnxt_ulp_start(bp, 0);
bnxt_reenable_sriov(bp);
}
}
bnxt_hwmon_open(bp);
}
......@@ -9307,10 +9313,6 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
del_timer_sync(&bp->timer);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
......@@ -10096,9 +10098,16 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
/* When firmware is fatal state, disable PCI device to prevent
* any potential bad DMAs before freeing kernel memory.
*/
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
pci_disable_device(bp->pdev);
__bnxt_close_nic(bp, true, false);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
if (pci_is_enabled(bp->pdev))
pci_disable_device(bp->pdev);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
......@@ -10831,6 +10840,8 @@ static void bnxt_fw_reset_task(struct work_struct *work)
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
bnxt_dl_health_recovery_done(bp);
bnxt_dl_health_status_update(bp, true);
rtnl_unlock();
......
......@@ -171,9 +171,9 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
}
msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (!msghdr) {
if (IS_ERR(msghdr)) {
otx2_mbox_unlock(&pfvf->mbox);
return -ENOMEM;
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
......
......@@ -614,7 +614,7 @@ mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
* Kbits/s.
*/
return p->rate.rate_bytes_ps / 1000 * 8;
return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
}
static int
......
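An open-coded `/ 1000` on a u64 makes the compiler emit a call to a libgcc 64-bit division helper on 32-bit builds, which the kernel does not link; div_u64() performs the divide portably and the multiply by 8 is ordinary arithmetic. A minimal standalone sketch of the pattern (hypothetical helper name, not the driver function):

#include <linux/math64.h>

/* Convert bytes/s to Kbits/s without an open-coded 64-bit division. */
static u64 rate_bytes_ps_to_kbits_ps(u64 rate_bytes_ps)
{
	return div_u64(rate_bytes_ps, 1000) * 8;
}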
......@@ -866,7 +866,7 @@ struct ionic_rxq_comp {
#define IONIC_RXQ_COMP_CSUM_F_VLAN 0x40
#define IONIC_RXQ_COMP_CSUM_F_CALC 0x80
u8 pkt_type_color;
#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x0f
#define IONIC_RXQ_COMP_PKT_TYPE_MASK 0x7f
};
enum ionic_pkt_type {
......
......@@ -1398,14 +1398,11 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_pf_rt_init_params params;
struct qed_mcp_link_state *p_link;
struct qed_qm_iids iids;
memset(&iids, 0, sizeof(iids));
qed_cxt_qm_iids(p_hwfn, &iids);
p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
memset(&params, 0, sizeof(params));
params.port_id = p_hwfn->port_id;
params.pf_id = p_hwfn->rel_pf_id;
......
......@@ -3114,6 +3114,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (!p_hwfn->fw_overlay_mem) {
DP_NOTICE(p_hwfn,
"Failed to allocate fw overlay memory\n");
rc = -ENOMEM;
goto load_err;
}
......
......@@ -4974,6 +4974,7 @@ int stmmac_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
u32 chan;
if (!ndev || !netif_running(ndev))
return 0;
......@@ -4987,6 +4988,9 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
del_timer_sync(&priv->tx_queue[chan].txtimer);
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
......
......@@ -767,12 +767,12 @@ static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
int i;
gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
GFP_KERNEL);
GFP_KERNEL | __GFP_NOWARN);
if (gtp->addr_hash == NULL)
return -ENOMEM;
gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
GFP_KERNEL);
GFP_KERNEL | __GFP_NOWARN);
if (gtp->tid_hash == NULL)
goto err1;
......
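The hash sizes here come from userspace, so an over-large request is expected to be able to fail without splatting a page-allocator warning; __GFP_NOWARN suppresses the warning while the existing -ENOMEM path still handles the failure. A stripped-down sketch of the pattern (hypothetical function, not the gtp code):

#include <linux/slab.h>

/* Allocation size is user-controlled: fail quietly instead of warning. */
static struct hlist_head *alloc_user_sized_table(int hsize)
{
	return kmalloc_array(hsize, sizeof(struct hlist_head),
			     GFP_KERNEL | __GFP_NOWARN);
}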
......@@ -218,6 +218,7 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
{
struct nsim_bpf_bound_prog *state;
char name[16];
int ret;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
......@@ -230,9 +231,10 @@ static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
/* Program id is not populated yet when we create the state. */
sprintf(name, "%u", nsim_dev->prog_id_gen++);
state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
if (IS_ERR_OR_NULL(state->ddir)) {
if (IS_ERR(state->ddir)) {
ret = PTR_ERR(state->ddir);
kfree(state);
return -ENOMEM;
return ret;
}
debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
......@@ -587,8 +589,8 @@ int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
nsim_dev->ddir);
if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
return -ENOMEM;
if (IS_ERR(nsim_dev->ddir_bpf_bound_progs))
return PTR_ERR(nsim_dev->ddir_bpf_bound_progs);
nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
......
......@@ -17,6 +17,7 @@
static DEFINE_IDA(nsim_bus_dev_ids);
static LIST_HEAD(nsim_bus_dev_list);
static DEFINE_MUTEX(nsim_bus_dev_list_lock);
static bool nsim_bus_enable;
static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
{
......@@ -28,7 +29,7 @@ static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
{
nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
sizeof(struct nsim_vf_config),
GFP_KERNEL);
GFP_KERNEL | __GFP_NOWARN);
if (!nsim_bus_dev->vfconfigs)
return -ENOMEM;
nsim_bus_dev->num_vfs = num_vfs;
......@@ -96,13 +97,25 @@ new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
struct devlink *devlink;
unsigned int port_index;
int ret;
/* Prevent to use nsim_bus_dev before initialization. */
if (!smp_load_acquire(&nsim_bus_dev->init))
return -EBUSY;
ret = kstrtouint(buf, 0, &port_index);
if (ret)
return ret;
devlink = priv_to_devlink(nsim_dev);
mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
devlink_reload_disable(devlink);
ret = nsim_dev_port_add(nsim_bus_dev, port_index);
devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
......@@ -113,13 +126,25 @@ del_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
struct devlink *devlink;
unsigned int port_index;
int ret;
/* Prevent to use nsim_bus_dev before initialization. */
if (!smp_load_acquire(&nsim_bus_dev->init))
return -EBUSY;
ret = kstrtouint(buf, 0, &port_index);
if (ret)
return ret;
devlink = priv_to_devlink(nsim_dev);
mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
devlink_reload_disable(devlink);
ret = nsim_dev_port_del(nsim_bus_dev, port_index);
devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
......@@ -179,15 +204,30 @@ new_device_store(struct bus_type *bus, const char *buf, size_t count)
pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
return -EINVAL;
}
nsim_bus_dev = nsim_bus_dev_new(id, port_count);
if (IS_ERR(nsim_bus_dev))
return PTR_ERR(nsim_bus_dev);
mutex_lock(&nsim_bus_dev_list_lock);
/* Prevent to use resource before initialization. */
if (!smp_load_acquire(&nsim_bus_enable)) {
err = -EBUSY;
goto err;
}
nsim_bus_dev = nsim_bus_dev_new(id, port_count);
if (IS_ERR(nsim_bus_dev)) {
err = PTR_ERR(nsim_bus_dev);
goto err;
}
/* Allow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, true);
list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
mutex_unlock(&nsim_bus_dev_list_lock);
return count;
err:
mutex_unlock(&nsim_bus_dev_list_lock);
return err;
}
static BUS_ATTR_WO(new_device);
......@@ -215,6 +255,11 @@ del_device_store(struct bus_type *bus, const char *buf, size_t count)
err = -ENOENT;
mutex_lock(&nsim_bus_dev_list_lock);
/* Prevent to use resource before initialization. */
if (!smp_load_acquire(&nsim_bus_enable)) {
mutex_unlock(&nsim_bus_dev_list_lock);
return -EBUSY;
}
list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
if (nsim_bus_dev->dev.id != id)
continue;
......@@ -284,6 +329,9 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
err = device_register(&nsim_bus_dev->dev);
if (err)
......@@ -299,6 +347,8 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
{
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
device_unregister(&nsim_bus_dev->dev);
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
kfree(nsim_bus_dev);
......@@ -320,6 +370,8 @@ int nsim_bus_init(void)
err = driver_register(&nsim_driver);
if (err)
goto err_bus_unregister;
/* Allow using resources */
smp_store_release(&nsim_bus_enable, true);
return 0;
err_bus_unregister:
......@@ -331,12 +383,16 @@ void nsim_bus_exit(void)
{
struct nsim_bus_dev *nsim_bus_dev, *tmp;
/* Disallow using resources */
smp_store_release(&nsim_bus_enable, false);
mutex_lock(&nsim_bus_dev_list_lock);
list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
list_del(&nsim_bus_dev->list);
nsim_bus_dev_del(nsim_bus_dev);
}
mutex_unlock(&nsim_bus_dev_list_lock);
driver_unregister(&nsim_driver);
bus_unregister(&nsim_bus);
}
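The netdevsim fixes above gate every sysfs entry point on an acquire/release "enabled" flag so stores cannot race with module init/exit or with a half-constructed bus device. The pattern, condensed to a sketch with hypothetical names:

#include <asm/barrier.h>
#include <linux/errno.h>

static bool ready;	/* paired smp_store_release()/smp_load_acquire() flag */

static int guarded_entry_point(void)
{
	/* Reject callers until initialization has fully completed. */
	if (!smp_load_acquire(&ready))
		return -EBUSY;
	/* ...safe to use the resources published before the release... */
	return 0;
}

static void publish_resources(void)
{
	/* Everything set up above is visible before callers are let in. */
	smp_store_release(&ready, true);
}

static void retract_resources(void)
{
	/* Stop new callers before tearing anything down. */
	smp_store_release(&ready, false);
}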
......@@ -73,23 +73,26 @@ static const struct file_operations nsim_dev_take_snapshot_fops = {
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[16];
char dev_ddir_name[sizeof(DRV_NAME) + 10];
sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
if (IS_ERR_OR_NULL(nsim_dev->ddir))
return PTR_ERR_OR_ZERO(nsim_dev->ddir) ?: -EINVAL;
if (IS_ERR(nsim_dev->ddir))
return PTR_ERR(nsim_dev->ddir);
nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
if (IS_ERR(nsim_dev->ports_ddir))
return PTR_ERR(nsim_dev->ports_ddir);
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
&nsim_dev->test1);
debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
&nsim_dev_take_snapshot_fops);
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
nsim_dev,
&nsim_dev_take_snapshot_fops);
debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
&nsim_dev->dont_allow_reload);
debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
......@@ -112,8 +115,8 @@ static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
nsim_dev->ports_ddir);
if (IS_ERR_OR_NULL(nsim_dev_port->ddir))
return -ENOMEM;
if (IS_ERR(nsim_dev_port->ddir))
return PTR_ERR(nsim_dev_port->ddir);
sprintf(dev_link_name, "../../../" DRV_NAME "%u",
nsim_dev->nsim_bus_dev->dev.id);
......@@ -740,6 +743,11 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
if (err)
goto err_health_exit;
nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
0200,
nsim_dev->ddir,
nsim_dev,
&nsim_dev_take_snapshot_fops);
return 0;
err_health_exit:
......@@ -853,6 +861,7 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
if (devlink_is_reload_failed(devlink))
return;
debugfs_remove(nsim_dev->take_snapshot);
nsim_dev_port_del_all(nsim_dev);
nsim_dev_health_exit(nsim_dev);
nsim_dev_traps_exit(devlink);
......@@ -925,8 +934,8 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
int nsim_dev_init(void)
{
nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
if (IS_ERR_OR_NULL(nsim_dev_ddir))
return -ENOMEM;
if (IS_ERR(nsim_dev_ddir))
return PTR_ERR(nsim_dev_ddir);
return 0;
}
......
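debugfs_create_dir() in current kernels reports failure with an ERR_PTR (for example -ENODEV when debugfs is disabled) and does not return NULL, which is why the checks above switch from IS_ERR_OR_NULL()/-ENOMEM to IS_ERR()/PTR_ERR(). Sketch of the idiom (hypothetical caller):

#include <linux/debugfs.h>
#include <linux/err.h>

static int create_debug_dir(struct dentry **out)
{
	struct dentry *dir;

	dir = debugfs_create_dir("example", NULL);
	if (IS_ERR(dir))	/* no NULL case, so IS_ERR_OR_NULL() is misleading */
		return PTR_ERR(dir);

	*out = dir;
	return 0;
}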
......@@ -82,7 +82,7 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
if (err)
return err;
binary = kmalloc(binary_len, GFP_KERNEL);
binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
if (!binary)
return -ENOMEM;
get_random_bytes(binary, binary_len);
......@@ -285,8 +285,8 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
}
health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
if (IS_ERR_OR_NULL(health->ddir)) {
err = PTR_ERR_OR_ZERO(health->ddir) ?: -EINVAL;
if (IS_ERR(health->ddir)) {
err = PTR_ERR(health->ddir);
goto err_dummy_reporter_destroy;
}
......
......@@ -160,6 +160,7 @@ struct nsim_dev {
struct nsim_trap_data *trap_data;
struct dentry *ddir;
struct dentry *ports_ddir;
struct dentry *take_snapshot;
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
u32 bpf_bind_verifier_delay;
......@@ -240,6 +241,9 @@ struct nsim_bus_dev {
*/
unsigned int num_vfs;
struct nsim_vf_config *vfconfigs;
/* Lock for devlink->reload_enabled in netdevsim module */
struct mutex nsim_bus_reload_lock;
bool init;
};
int nsim_bus_init(void);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "netdevsim.h"
static struct dentry *nsim_sdev_ddir;
static u32 nsim_sdev_id;
struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns)
{
struct netdevsim_shared_dev *sdev;
char sdev_ddir_name[10];
int err;
if (joinns) {
if (WARN_ON(!joinns->sdev))
return ERR_PTR(-EINVAL);
sdev = joinns->sdev;
sdev->refcnt++;
return sdev;
}
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev)
return ERR_PTR(-ENOMEM);
sdev->refcnt = 1;
sdev->switch_id = nsim_sdev_id++;
sprintf(sdev_ddir_name, "%u", sdev->switch_id);
sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir);
if (IS_ERR_OR_NULL(sdev->ddir)) {
err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL;
goto err_sdev_free;
}
return sdev;
err_sdev_free:
nsim_sdev_id--;
kfree(sdev);
return ERR_PTR(err);
}
void nsim_sdev_put(struct netdevsim_shared_dev *sdev)
{
if (--sdev->refcnt)
return;
debugfs_remove_recursive(sdev->ddir);
kfree(sdev);
}
int nsim_sdev_init(void)
{
nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
if (IS_ERR_OR_NULL(nsim_sdev_ddir))
return -ENOMEM;
return 0;
}
void nsim_sdev_exit(void)
{
debugfs_remove_recursive(nsim_sdev_ddir);
}
......@@ -489,6 +489,14 @@ static int at803x_probe(struct phy_device *phydev)
return at803x_parse_dt(phydev);
}
static void at803x_remove(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
if (priv->vddio)
regulator_disable(priv->vddio);
}
static int at803x_clk_out_config(struct phy_device *phydev)
{
struct at803x_priv *priv = phydev->priv;
......@@ -711,6 +719,7 @@ static struct phy_driver at803x_driver[] = {
.name = "Qualcomm Atheros AR8035",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
......@@ -726,6 +735,7 @@ static struct phy_driver at803x_driver[] = {
.name = "Qualcomm Atheros AR8030",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
.set_wol = at803x_set_wol,
......@@ -741,6 +751,7 @@ static struct phy_driver at803x_driver[] = {
.name = "Qualcomm Atheros AR8031/AR8033",
.phy_id_mask = AT803X_PHY_ID_MASK,
.probe = at803x_probe,
.remove = at803x_remove,
.config_init = at803x_config_init,
.set_wol = at803x_set_wol,
.get_wol = at803x_get_wol,
......
......@@ -111,6 +111,13 @@ void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
struct mii_timestamping_desc *desc;
struct list_head *this;
/* mii_timestamper statically registered by the PHY driver won't use the
* register_mii_timestamper() and thus don't have ->device set. Don't
* try to unregister these.
*/
if (!mii_ts->device)
return;
mutex_lock(&tstamping_devices_lock);
list_for_each(this, &mii_timestamping_devices) {
desc = list_entry(this, struct mii_timestamping_desc, list);
......
......@@ -698,6 +698,9 @@ enum rtl8152_flags {
#define VENDOR_ID_NVIDIA 0x0955
#define VENDOR_ID_TPLINK 0x2357
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
......@@ -6759,9 +6762,13 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO &&
le16_to_cpu(udev->descriptor.idProduct) == 0x3082)
set_bit(LENOVO_MACPASSTHRU, &tp->flags);
if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
switch (le16_to_cpu(udev->descriptor.idProduct)) {
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
set_bit(LENOVO_MACPASSTHRU, &tp->flags);
}
}
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
(!strcmp(udev->serial, "000001000000") ||
......
......@@ -81,13 +81,15 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
else
phy = get_phy_device(mdio, addr, is_c45);
if (IS_ERR(phy)) {
unregister_mii_timestamper(mii_ts);
if (mii_ts)
unregister_mii_timestamper(mii_ts);
return PTR_ERR(phy);
}
rc = of_irq_get(child, 0);
if (rc == -EPROBE_DEFER) {
unregister_mii_timestamper(mii_ts);
if (mii_ts)
unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
return rc;
}
......@@ -116,12 +118,19 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
* register it */
rc = phy_device_register(phy);
if (rc) {
unregister_mii_timestamper(mii_ts);
if (mii_ts)
unregister_mii_timestamper(mii_ts);
phy_device_free(phy);
of_node_put(child);
return rc;
}
phy->mii_ts = mii_ts;
/* phy->mii_ts may already be defined by the PHY driver. A
* mii_timestamper probed via the device tree will still have
* precedence.
*/
if (mii_ts)
phy->mii_ts = mii_ts;
dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
child, addr);
......
......@@ -27,6 +27,8 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
rcu_read_lock(); /* hsr->node_db, hsr->ports */
port = hsr_port_get_rcu(skb->dev);
if (!port)
goto finish_pass;
if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
/* Directly kill frames sent by ourselves */
......
......@@ -2622,10 +2622,12 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_cwnd = TCP_INIT_CWND;
tp->snd_cwnd_cnt = 0;
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
tp->total_retrans = 0;
inet_csk_delack_init(sk);
/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
* issue in __tcp_select_window()
......@@ -2637,10 +2639,14 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_rx_dst = NULL;
tcp_saved_syn_free(tp);
tp->compressed_ack = 0;
tp->segs_in = 0;
tp->segs_out = 0;
tp->bytes_sent = 0;
tp->bytes_acked = 0;
tp->bytes_received = 0;
tp->bytes_retrans = 0;
tp->data_segs_in = 0;
tp->data_segs_out = 0;
tp->duplicate_sack[0].start_seq = 0;
tp->duplicate_sack[0].end_seq = 0;
tp->dsack_dups = 0;
......
......@@ -5908,8 +5908,14 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* the segment and return)"
*/
if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
/* Previous FIN/ACK or RST/ACK might be ignored. */
if (icsk->icsk_retransmits == 0)
inet_csk_reset_xmit_timer(sk,
ICSK_TIME_RETRANS,
TCP_TIMEOUT_MIN, TCP_RTO_MAX);
goto reset_and_undo;
}
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
......
......@@ -320,8 +320,13 @@ int l2tp_session_register(struct l2tp_session *session,
spin_lock_bh(&pn->l2tp_session_hlist_lock);
/* IP encap expects session IDs to be globally unique, while
* UDP encap doesn't.
*/
hlist_for_each_entry(session_walk, g_head, global_hlist)
if (session_walk->session_id == session->session_id) {
if (session_walk->session_id == session->session_id &&
(session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
tunnel->encap == L2TP_ENCAPTYPE_IP)) {
err = -EEXIST;
goto err_tlock_pnlock;
}
......
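The relaxed check only treats a duplicate session ID as a conflict when at least one of the two tunnels uses IP encapsulation; UDP-encapsulated tunnels are demultiplexed per tunnel socket and may legitimately reuse IDs. The condition, expressed as a small predicate (hypothetical helper mirroring the code above):

/* Duplicate IDs only matter when IP encapsulation is involved. */
static bool l2tp_session_id_conflict(const struct l2tp_session *existing,
				     const struct l2tp_tunnel *new_tunnel,
				     u32 new_session_id)
{
	return existing->session_id == new_session_id &&
	       (existing->tunnel->encap == L2TP_ENCAPTYPE_IP ||
		new_tunnel->encap == L2TP_ENCAPTYPE_IP);
}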
......@@ -1483,31 +1483,34 @@ ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
};
static int
dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
ip_set_dump_start(struct netlink_callback *cb)
{
struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
struct nlattr *attr = (void *)nlh + min_len;
struct sk_buff *skb = cb->skb;
struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
u32 dump_type;
ip_set_id_t index;
int ret;
ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
nlh->nlmsg_len - min_len,
ip_set_dump_policy, NULL);
if (ret)
return ret;
goto error;
cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
if (cda[IPSET_ATTR_SETNAME]) {
ip_set_id_t index;
struct ip_set *set;
set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
&index);
if (!set)
return -ENOENT;
if (!set) {
ret = -ENOENT;
goto error;
}
dump_type = DUMP_ONE;
cb->args[IPSET_CB_INDEX] = index;
} else {
......@@ -1523,10 +1526,17 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
cb->args[IPSET_CB_DUMP] = dump_type;
return 0;
error:
/* We have to create and send the error message manually :-( */
if (nlh->nlmsg_flags & NLM_F_ACK) {
netlink_ack(cb->skb, nlh, ret, NULL);
}
return ret;
}
static int
ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
{
ip_set_id_t index = IPSET_INVALID_ID, max;
struct ip_set *set = NULL;
......@@ -1537,18 +1547,8 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
bool is_destroyed;
int ret = 0;
if (!cb->args[IPSET_CB_DUMP]) {
ret = dump_init(cb, inst);
if (ret < 0) {
nlh = nlmsg_hdr(cb->skb);
/* We have to create and send the error message
* manually :-(
*/
if (nlh->nlmsg_flags & NLM_F_ACK)
netlink_ack(cb->skb, nlh, ret, NULL);
return ret;
}
}
if (!cb->args[IPSET_CB_DUMP])
return -EINVAL;
if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
goto out;
......@@ -1684,7 +1684,8 @@ static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
{
struct netlink_dump_control c = {
.dump = ip_set_dump_start,
.start = ip_set_dump_start,
.dump = ip_set_dump_do,
.done = ip_set_dump_done,
};
return netlink_dump_start(ctnl, skb, nlh, &c);
......
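Moving the attribute parsing into the .start callback of netlink_dump_control means setup errors are raised once, before any per-dump state exists, instead of being detected on the first .dump pass. The control-structure wiring, reduced to its shape (hypothetical handler names):

#include <linux/netlink.h>

static int my_dump_start(struct netlink_callback *cb)
{
	/* Parse and validate the request once; may fail cleanly. */
	return 0;
}

static int my_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* Called repeatedly to fill skbs; return 0 when the dump is done. */
	return 0;
}

static int my_dump_done(struct netlink_callback *cb)
{
	return 0;
}

static int my_start_dump(struct sock *nl, struct sk_buff *skb,
			 const struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.start = my_dump_start,
		.dump  = my_dump_do,
		.done  = my_dump_done,
	};

	return netlink_dump_start(nl, skb, nlh, &c);
}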
......@@ -2248,8 +2248,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
GFP_KERNEL | __GFP_ZERO);
hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL);
if (hash && nulls)
for (i = 0; i < nr_slots; i++)
......
......@@ -529,9 +529,9 @@ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
struct net_device *dev)
{
nf_flow_table_offload_flush(flowtable);
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
flush_delayed_work(&flowtable->gc_work);
nf_flow_table_offload_flush(flowtable);
}
void nf_flow_table_cleanup(struct net_device *dev)
......@@ -553,6 +553,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
cancel_delayed_work_sync(&flow_table->gc_work);
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
nf_flow_table_offload_flush(flow_table);
rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
......
......@@ -675,6 +675,7 @@ static void flow_offload_work_del(struct flow_offload_work *offload)
{
flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
}
static void flow_offload_tuple_stats(struct flow_offload_work *offload,
......
......@@ -939,14 +939,14 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
*
* @size: number of entries
*
* Return: NULL or kmalloc'd or vmalloc'd array
* Return: NULL or zeroed kmalloc'd or vmalloc'd array
*/
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
return NULL;
return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
return kvcalloc(size, sizeof(unsigned int), GFP_KERNEL);
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);
......
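kvcalloc() is the zeroing variant of kvmalloc_array(): same multiplication-overflow check and the same kmalloc-with-vmalloc-fallback behaviour, so the explicit __GFP_ZERO can be dropped. Minimal sketch (hypothetical caller):

#include <linux/mm.h>

static unsigned int *alloc_zeroed_offsets(unsigned int n)
{
	/* Equivalent to kvmalloc_array(n, sizeof(unsigned int),
	 * GFP_KERNEL | __GFP_ZERO), just spelled directly.
	 */
	return kvcalloc(n, sizeof(unsigned int), GFP_KERNEL);
}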
......@@ -194,6 +194,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
service_in_use:
write_unlock(&local->services_lock);
rxrpc_unuse_local(local);
rxrpc_put_local(local);
ret = -EADDRINUSE;
error_unlock:
release_sock(&rx->sk);
......@@ -899,6 +900,7 @@ static int rxrpc_release_sock(struct sock *sk)
rxrpc_purge_queue(&sk->sk_receive_queue);
rxrpc_unuse_local(rx->local);
rxrpc_put_local(rx->local);
rx->local = NULL;
key_put(rx->key);
rx->key = NULL;
......
......@@ -490,6 +490,7 @@ enum rxrpc_call_flag {
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
RXRPC_CALL_IS_INTR, /* The call is interruptible */
RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
};
/*
......@@ -1021,6 +1022,16 @@ void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);
static inline bool __rxrpc_unuse_local(struct rxrpc_local *local)
{
return atomic_dec_return(&local->active_users) == 0;
}
static inline bool __rxrpc_use_local(struct rxrpc_local *local)
{
return atomic_fetch_add_unless(&local->active_users, 1, 0) != 0;
}
/*
* misc.c
*/
......
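The two new helpers are the usual conditional use-count idiom: take an active-user reference only if the count has not already dropped to zero, and report when a release drops the last user so the caller can schedule teardown. A standalone sketch with a hypothetical object (same atomics as above):

#include <linux/atomic.h>

struct obj {
	atomic_t active_users;
};

/* Take a use count, but only while someone else still holds one. */
static bool obj_use(struct obj *o)
{
	return atomic_fetch_add_unless(&o->active_users, 1, 0) != 0;
}

/* Drop a use count; true means the caller released the last user. */
static bool obj_unuse(struct obj *o)
{
	return atomic_dec_return(&o->active_users) == 0;
}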
......@@ -493,7 +493,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
if (conn)
if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
rxrpc_disconnect_call(call);
if (call->security)
call->security->free_call_crypto(call);
......@@ -569,6 +569,7 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
struct rxrpc_net *rxnet = call->rxnet;
rxrpc_put_connection(call->conn);
rxrpc_put_peer(call->peer);
kfree(call->rxtx_buffer);
kfree(call->rxtx_annotations);
......@@ -590,7 +591,6 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
ASSERTCMP(call->conn, ==, NULL);
rxrpc_cleanup_ring(call);
rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
......
......@@ -785,6 +785,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
u32 cid;
spin_lock(&conn->channel_lock);
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
cid = call->cid;
if (cid) {
......@@ -792,7 +793,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
chan = &conn->channels[channel];
}
trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
call->conn = NULL;
/* Calls that have never actually been assigned a channel can simply be
* discarded. If the conn didn't get used either, it will follow
......@@ -908,7 +908,6 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
spin_unlock(&conn->channel_lock);
rxrpc_put_connection(conn);
_leave("");
return;
......
......@@ -438,16 +438,12 @@ static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
/*
* connection-level event processor
*/
void rxrpc_process_connection(struct work_struct *work)
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
struct rxrpc_connection *conn =
container_of(work, struct rxrpc_connection, processor);
struct sk_buff *skb;
u32 abort_code = RX_PROTOCOL_ERROR;
int ret;
rxrpc_see_connection(conn);
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
......@@ -475,18 +471,32 @@ void rxrpc_process_connection(struct work_struct *work)
}
}
out:
rxrpc_put_connection(conn);
_leave("");
return;
requeue_and_leave:
skb_queue_head(&conn->rx_queue, skb);
goto out;
return;
protocol_error:
if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
goto requeue_and_leave;
rxrpc_free_skb(skb, rxrpc_skb_freed);
goto out;
return;
}
void rxrpc_process_connection(struct work_struct *work)
{
struct rxrpc_connection *conn =
container_of(work, struct rxrpc_connection, processor);
rxrpc_see_connection(conn);
if (__rxrpc_use_local(conn->params.local)) {
rxrpc_do_process_connection(conn);
rxrpc_unuse_local(conn->params.local);
}
rxrpc_put_connection(conn);
_leave("");
return;
}
......@@ -171,6 +171,8 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
_enter("%d,%x", conn->debug_id, call->cid);
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
if (rcu_access_pointer(chan->call) == call) {
/* Save the result of the call so that we can repeat it if necessary
* through the channel, whilst disposing of the actual call record.
......@@ -223,9 +225,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
__rxrpc_disconnect_call(conn, call);
spin_unlock(&conn->channel_lock);
call->conn = NULL;
conn->idle_timestamp = jiffies;
rxrpc_put_connection(conn);
}
/*
......
......@@ -599,10 +599,8 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
false, true,
rxrpc_propose_ack_input_data);
if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
}
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
unlock:
spin_unlock(&call->input_lock);
......
......@@ -364,11 +364,14 @@ void rxrpc_queue_local(struct rxrpc_local *local)
void rxrpc_put_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
unsigned int debug_id;
int n;
if (local) {
debug_id = local->debug_id;
n = atomic_dec_return(&local->usage);
trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);
if (n == 0)
call_rcu(&local->rcu, rxrpc_local_rcu);
......@@ -380,14 +383,11 @@ void rxrpc_put_local(struct rxrpc_local *local)
*/
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
{
unsigned int au;
local = rxrpc_get_local_maybe(local);
if (!local)
return NULL;
au = atomic_fetch_add_unless(&local->active_users, 1, 0);
if (au == 0) {
if (!__rxrpc_use_local(local)) {
rxrpc_put_local(local);
return NULL;
}
......@@ -401,14 +401,11 @@ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
*/
void rxrpc_unuse_local(struct rxrpc_local *local)
{
unsigned int au;
if (local) {
au = atomic_dec_return(&local->active_users);
if (au == 0)
if (__rxrpc_unuse_local(local)) {
rxrpc_get_local(local);
rxrpc_queue_local(local);
else
rxrpc_put_local(local);
}
}
}
......@@ -465,7 +462,7 @@ static void rxrpc_local_processor(struct work_struct *work)
do {
again = false;
if (atomic_read(&local->active_users) == 0) {
if (!__rxrpc_use_local(local)) {
rxrpc_local_destroyer(local);
break;
}
......@@ -479,6 +476,8 @@ static void rxrpc_local_processor(struct work_struct *work)
rxrpc_process_local_events(local);
again = true;
}
__rxrpc_unuse_local(local);
} while (again);
rxrpc_put_local(local);
......
......@@ -129,7 +129,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_serial_t *_serial)
{
struct rxrpc_connection *conn = NULL;
struct rxrpc_connection *conn;
struct rxrpc_ack_buffer *pkt;
struct msghdr msg;
struct kvec iov[2];
......@@ -139,18 +139,14 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
int ret;
u8 reason;
spin_lock_bh(&call->lock);
if (call->conn)
conn = rxrpc_get_connection_maybe(call->conn);
spin_unlock_bh(&call->lock);
if (!conn)
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
return -ECONNRESET;
pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
if (!pkt) {
rxrpc_put_connection(conn);
if (!pkt)
return -ENOMEM;
}
conn = call->conn;
msg.msg_name = &call->peer->srx.transport;
msg.msg_namelen = call->peer->srx.transport_len;
......@@ -244,7 +240,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
}
out:
rxrpc_put_connection(conn);
kfree(pkt);
return ret;
}
......@@ -254,7 +249,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
*/
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = NULL;
struct rxrpc_connection *conn;
struct rxrpc_abort_buffer pkt;
struct msghdr msg;
struct kvec iov[1];
......@@ -271,13 +266,11 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
test_bit(RXRPC_CALL_TX_LAST, &call->flags))
return 0;
spin_lock_bh(&call->lock);
if (call->conn)
conn = rxrpc_get_connection_maybe(call->conn);
spin_unlock_bh(&call->lock);
if (!conn)
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
return -ECONNRESET;
conn = call->conn;
msg.msg_name = &call->peer->srx.transport;
msg.msg_namelen = call->peer->srx.transport_len;
msg.msg_control = NULL;
......@@ -312,8 +305,6 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
rxrpc_tx_point_call_abort);
rxrpc_tx_backoff(call, ret);
rxrpc_put_connection(conn);
return ret;
}
......
......@@ -364,27 +364,31 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
if (!rxrpc_get_peer_maybe(peer))
continue;
spin_unlock_bh(&rxnet->peer_hash_lock);
keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
slot = keepalive_at - base;
_debug("%02x peer %u t=%d {%pISp}",
cursor, peer->debug_id, slot, &peer->srx.transport);
if (__rxrpc_use_local(peer->local)) {
spin_unlock_bh(&rxnet->peer_hash_lock);
keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
slot = keepalive_at - base;
_debug("%02x peer %u t=%d {%pISp}",
cursor, peer->debug_id, slot, &peer->srx.transport);
if (keepalive_at <= base ||
keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
rxrpc_send_keepalive(peer);
slot = RXRPC_KEEPALIVE_TIME;
}
if (keepalive_at <= base ||
keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
rxrpc_send_keepalive(peer);
slot = RXRPC_KEEPALIVE_TIME;
/* A transmission to this peer occurred since last we
* examined it so put it into the appropriate future
* bucket.
*/
slot += cursor;
slot &= mask;
spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
rxrpc_unuse_local(peer->local);
}
/* A transmission to this peer occurred since last we examined
* it so put it into the appropriate future bucket.
*/
slot += cursor;
slot &= mask;
spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
rxrpc_put_peer_locked(peer);
}
......
......@@ -463,10 +463,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
[TCA_RSVP_CLASSID] = { .type = NLA_U32 },
[TCA_RSVP_DST] = { .type = NLA_BINARY,
.len = RSVP_DST_LEN * sizeof(u32) },
[TCA_RSVP_SRC] = { .type = NLA_BINARY,
.len = RSVP_DST_LEN * sizeof(u32) },
[TCA_RSVP_DST] = { .len = RSVP_DST_LEN * sizeof(u32) },
[TCA_RSVP_SRC] = { .len = RSVP_DST_LEN * sizeof(u32) },
[TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
};
......
......@@ -333,12 +333,31 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
cp->fall_through = p->fall_through;
cp->tp = tp;
if (tb[TCA_TCINDEX_HASH])
cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
if (tb[TCA_TCINDEX_MASK])
cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
if (tb[TCA_TCINDEX_SHIFT])
cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
if (!cp->hash) {
/* Hash not specified, use perfect hash if the upper limit
* of the hashing index is below the threshold.
*/
if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
cp->hash = (cp->mask >> cp->shift) + 1;
else
cp->hash = DEFAULT_HASH_SIZE;
}
if (p->perfect) {
int i;
if (tcindex_alloc_perfect_hash(net, cp) < 0)
goto errout;
for (i = 0; i < cp->hash; i++)
for (i = 0; i < min(cp->hash, p->hash); i++)
cp->perfect[i].res = p->perfect[i].res;
balloc = 1;
}
......@@ -350,15 +369,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (old_r)
cr = r->res;
if (tb[TCA_TCINDEX_HASH])
cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
if (tb[TCA_TCINDEX_MASK])
cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
if (tb[TCA_TCINDEX_SHIFT])
cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
err = -EBUSY;
/* Hash already allocated, make sure that we still meet the
......@@ -376,16 +386,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (tb[TCA_TCINDEX_FALL_THROUGH])
cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
if (!cp->hash) {
/* Hash not specified, use perfect hash if the upper limit
* of the hashing index is below the threshold.
*/
if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
cp->hash = (cp->mask >> cp->shift) + 1;
else
cp->hash = DEFAULT_HASH_SIZE;
}
if (!cp->perfect && !cp->h)
cp->alloc_hash = cp->hash;
......
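The out-of-bounds fix has two visible parts: the new hash size is now derived from the TCA_TCINDEX_* attributes before the perfect-hash table is allocated, and the copy from the old table is bounded by the smaller of the two sizes. The bounded copy in isolation (hypothetical names, not the classifier code):

#include <linux/kernel.h>	/* min() */
#include <net/sch_generic.h>	/* struct tcf_result */

/* Never copy more entries than the source table actually holds. */
static void copy_perfect_results(struct tcf_result *dst, u32 dst_hash,
				 const struct tcf_result *src, u32 src_hash)
{
	u32 i, n = min(dst_hash, src_hash);

	for (i = 0; i < n; i++)
		dst[i] = src[i];
}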
......@@ -22,3 +22,4 @@ ipv6_flowlabel_mgr
so_txtime
tcp_fastopen_backup_key
nettest
fin_ack_lat
......@@ -11,6 +11,7 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh
TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh
TEST_PROGS += fin_ack_lat.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
......@@ -18,6 +19,7 @@ TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
TEST_GEN_FILES += so_txtime ipv6_flowlabel ipv6_flowlabel_mgr
TEST_GEN_FILES += tcp_fastopen_backup_key
TEST_GEN_FILES += fin_ack_lat
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
......
// SPDX-License-Identifier: GPL-2.0
#include <arpa/inet.h>
#include <errno.h>
#include <error.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>
static int child_pid;
static unsigned long timediff(struct timeval s, struct timeval e)
{
unsigned long s_us, e_us;
s_us = s.tv_sec * 1000000 + s.tv_usec;
e_us = e.tv_sec * 1000000 + e.tv_usec;
if (s_us > e_us)
return 0;
return e_us - s_us;
}
static void client(int port)
{
int sock = 0;
struct sockaddr_in addr, laddr;
socklen_t len = sizeof(laddr);
struct linger sl;
int flag = 1;
int buffer;
struct timeval start, end;
unsigned long lat, sum_lat = 0, nr_lat = 0;
while (1) {
gettimeofday(&start, NULL);
sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock < 0)
error(-1, errno, "socket creation");
sl.l_onoff = 1;
sl.l_linger = 0;
if (setsockopt(sock, SOL_SOCKET, SO_LINGER, &sl, sizeof(sl)))
error(-1, errno, "setsockopt(linger)");
if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
&flag, sizeof(flag)))
error(-1, errno, "setsockopt(nodelay)");
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
if (inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr) <= 0)
error(-1, errno, "inet_pton");
if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0)
error(-1, errno, "connect");
send(sock, &buffer, sizeof(buffer), 0);
if (read(sock, &buffer, sizeof(buffer)) == -1)
error(-1, errno, "waiting read");
gettimeofday(&end, NULL);
lat = timediff(start, end);
sum_lat += lat;
nr_lat++;
if (lat < 100000)
goto close;
if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
error(-1, errno, "getsockname");
printf("port: %d, lat: %lu, avg: %lu, nr: %lu\n",
ntohs(laddr.sin_port), lat,
sum_lat / nr_lat, nr_lat);
close:
fflush(stdout);
close(sock);
}
}
static void server(int sock, struct sockaddr_in address)
{
int accepted;
int addrlen = sizeof(address);
int buffer;
while (1) {
accepted = accept(sock, (struct sockaddr *)&address,
(socklen_t *)&addrlen);
if (accepted < 0)
error(-1, errno, "accept");
if (read(accepted, &buffer, sizeof(buffer)) == -1)
error(-1, errno, "read");
close(accepted);
}
}
static void sig_handler(int signum)
{
kill(SIGTERM, child_pid);
exit(0);
}
int main(int argc, char const *argv[])
{
int sock;
int opt = 1;
struct sockaddr_in address;
struct sockaddr_in laddr;
socklen_t len = sizeof(laddr);
if (signal(SIGTERM, sig_handler) == SIG_ERR)
error(-1, errno, "signal");
sock = socket(AF_INET, SOCK_STREAM, 0);
if (sock < 0)
error(-1, errno, "socket");
if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT,
&opt, sizeof(opt)) == -1)
error(-1, errno, "setsockopt");
address.sin_family = AF_INET;
address.sin_addr.s_addr = INADDR_ANY;
/* dynamically allocate unused port */
address.sin_port = 0;
if (bind(sock, (struct sockaddr *)&address, sizeof(address)) < 0)
error(-1, errno, "bind");
if (listen(sock, 3) < 0)
error(-1, errno, "listen");
if (getsockname(sock, (struct sockaddr *)&laddr, &len) == -1)
error(-1, errno, "getsockname");
fprintf(stderr, "server port: %d\n", ntohs(laddr.sin_port));
child_pid = fork();
if (!child_pid)
client(ntohs(laddr.sin_port));
else
server(sock, laddr);
return 0;
}
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Test latency spikes caused by FIN/ACK handling race.
set +x
set -e
tmpfile=$(mktemp /tmp/fin_ack_latency.XXXX.log)
cleanup() {
kill $(pidof fin_ack_lat)
rm -f $tmpfile
}
trap cleanup EXIT
do_test() {
RUNTIME=$1
./fin_ack_lat | tee $tmpfile &
PID=$!
sleep $RUNTIME
NR_SPIKES=$(wc -l $tmpfile | awk '{print $1}')
if [ $NR_SPIKES -gt 0 ]
then
echo "FAIL: $NR_SPIKES spikes detected"
return 1
fi
return 0
}
do_test "30"
echo "test done"
......@@ -54,7 +54,7 @@ class SubPlugin(TdcPlugin):
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=ENVIR)
env=os.environ.copy())
(rawout, serr) = proc.communicate()
if proc.returncode != 0 and len(serr) > 0:
......
......@@ -6,6 +6,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -25,6 +28,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -44,6 +50,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -63,6 +72,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -82,6 +94,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -101,6 +116,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -120,6 +138,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -139,6 +160,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -158,6 +182,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -177,6 +204,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -196,6 +226,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -215,6 +248,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -234,6 +270,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -253,6 +292,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -272,6 +314,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -291,6 +336,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......@@ -310,6 +358,9 @@
"filter",
"basic"
],
"plugins": {
"requires": "nsPlugin"
},
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
......