Commit ed29668d authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Smooth Cong Wang's bug fix into 'net-next'.  Basically put
the bulk of the tcf_block_put() logic from 'net' into
tcf_block_put_ext(), but after the offload unbind.
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -211,9 +211,7 @@ Description:
 		device, after it has been suspended at run time, from a resume
 		request to the moment the device will be ready to process I/O,
 		in microseconds. If it is equal to 0, however, this means that
-		the PM QoS resume latency may be arbitrary and the special value
-		"n/a" means that user space cannot accept any resume latency at
-		all for the given device.
+		the PM QoS resume latency may be arbitrary.
 		Not all drivers support this attribute. If it isn't supported,
 		it is not present.
...
@@ -6678,7 +6678,7 @@ F:  include/net/ieee802154_netdev.h
 F:  Documentation/networking/ieee802154.txt
 
 IFE PROTOCOL
-M:  Yotam Gigi <yotamg@mellanox.com>
+M:  Yotam Gigi <yotam.gi@gmail.com>
 M:  Jamal Hadi Salim <jhs@mojatatu.com>
 F:  net/ife
 F:  include/net/ife.h
@@ -8751,7 +8751,7 @@ Q:  http://patchwork.ozlabs.org/project/netdev/list/
 F:  drivers/net/ethernet/mellanox/mlxsw/
 
 MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
-M:  Yotam Gigi <yotamg@mellanox.com>
+M:  mlxsw@mellanox.com
 L:  netdev@vger.kernel.org
 S:  Supported
 W:  http://www.mellanox.com
@@ -10899,7 +10899,7 @@ S:  Maintained
 F:  drivers/block/ps3vram.c
 
 PSAMPLE PACKET SAMPLING SUPPORT:
-M:  Yotam Gigi <yotamg@mellanox.com>
+M:  Yotam Gigi <yotam.gi@gmail.com>
 S:  Maintained
 F:  net/psample
 F:  include/net/psample.h
...
@@ -45,7 +45,7 @@ ENTRY(chacha20_8block_xor_avx2)
 	vzeroupper
 	# 4 * 32 byte stack, 32-byte aligned
-	mov		%rsp, %r8
+	lea		8(%rsp),%r10
 	and		$~31, %rsp
 	sub		$0x80, %rsp
@@ -443,6 +443,6 @@ ENTRY(chacha20_8block_xor_avx2)
 	vmovdqu		%ymm15,0x01e0(%rsi)
 	vzeroupper
-	mov		%r8,%rsp
+	lea		-8(%r10),%rsp
 	ret
 ENDPROC(chacha20_8block_xor_avx2)
@@ -160,7 +160,7 @@ ENTRY(chacha20_4block_xor_ssse3)
 	# done with the slightly better performing SSSE3 byte shuffling,
 	# 7/12-bit word rotation uses traditional shift+OR.
-	mov		%rsp,%r11
+	lea		8(%rsp),%r10
 	sub		$0x80,%rsp
 	and		$~63,%rsp
@@ -625,6 +625,6 @@ ENTRY(chacha20_4block_xor_ssse3)
 	pxor		%xmm1,%xmm15
 	movdqu		%xmm15,0xf0(%rsi)
-	mov		%r11,%rsp
+	lea		-8(%r10),%rsp
 	ret
 ENDPROC(chacha20_4block_xor_ssse3)
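Editor's note: both chacha20 hunks replace a plain register save of %rsp with the lea form compilers emit for over-aligned stack frames. At function entry %rsp points at the return address, so "lea 8(%rsp),%r10" records the caller's stack pointer, and "lea -8(%r10),%rsp" restores it; the point, as far as I can tell, is that the kernel's objtool stack validation can follow this convention, unlike an arbitrary mov into %r8 or %r11. A small userspace sketch of the round-down mask arithmetic behind "and $~31, %rsp" (buffer name and sizes are mine, purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char raw[128 + 31];    /* deliberately over-sized buffer */
        /* Same mask trick as "and $~31, %rsp": clear the low 5 bits.
         * We round the start address up so the result stays inside raw[].
         */
        uintptr_t aligned = ((uintptr_t)raw + 31) & ~(uintptr_t)31;

        printf("raw=%p aligned=%p\n", (void *)raw, (void *)aligned);
        return (aligned % 32) ? 1 : 0;
    }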
@@ -1440,7 +1440,17 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
+	 *
+	 * Note that handle_userfault() may also release and reacquire mmap_sem
+	 * (and not return with VM_FAULT_RETRY), when returning to userland to
+	 * repeat the page fault later with a VM_FAULT_NOPAGE retval
+	 * (potentially after handling any pending signal during the return to
+	 * userland). The return to userland is identified whenever
+	 * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
+	 * Thus we have to be careful about not touching vma after handling the
+	 * fault, so we read the pkey beforehand.
 	 */
+	pkey = vma_pkey(vma);
 	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
@@ -1467,7 +1477,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 		return;
 	}
-	pkey = vma_pkey(vma);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, &pkey, fault);
...
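Editor's note: the added comment boils down to a lifetime rule. Once handle_mm_fault() (or handle_userfault() underneath it) runs, the vma may no longer be safe to dereference, so anything still needed from it is copied out first. A toy userspace illustration of that snapshot-before-invalidate pattern (the mock names are mine, not kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    struct vma_mock { int pkey; };

    /* Stand-in for handle_mm_fault(): may invalidate the vma. */
    static int fault_mock(struct vma_mock **vma)
    {
        free(*vma);
        *vma = NULL;
        return 0;
    }

    int main(void)
    {
        struct vma_mock *vma = malloc(sizeof(*vma));

        vma->pkey = 3;
        int pkey = vma->pkey;   /* snapshot first, as the patch does */
        fault_mock(&vma);       /* vma must not be touched after this */
        printf("pkey=%d\n", pkey);
        return 0;
    }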
@@ -377,8 +377,7 @@ int register_cpu(struct cpu *cpu, int num)
 	per_cpu(cpu_sys_devices, num) = &cpu->dev;
 	register_cpu_under_node(num, cpu_to_node(num));
-	dev_pm_qos_expose_latency_limit(&cpu->dev,
-					PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+	dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
 
 	return 0;
 }
...
@@ -14,20 +14,23 @@
 static int dev_update_qos_constraint(struct device *dev, void *data)
 {
 	s64 *constraint_ns_p = data;
-	s64 constraint_ns = -1;
+	s32 constraint_ns = -1;
 
 	if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
 		constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
 
-	if (constraint_ns < 0)
+	if (constraint_ns < 0) {
 		constraint_ns = dev_pm_qos_read_value(dev);
-
-	if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+		constraint_ns *= NSEC_PER_USEC;
+	}
+	if (constraint_ns == 0)
 		return 0;
 
-	constraint_ns *= NSEC_PER_USEC;
-
-	if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0)
+	/*
+	 * constraint_ns cannot be negative here, because the device has been
+	 * suspended.
+	 */
+	if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
 		*constraint_ns_p = constraint_ns;
 
 	return 0;
@@ -60,14 +63,10 @@ static bool default_suspend_ok(struct device *dev)
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 
-	if (constraint_ns == 0)
+	if (constraint_ns < 0)
 		return false;
 
-	if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
-		constraint_ns = -1;
-	else
-		constraint_ns *= NSEC_PER_USEC;
+	constraint_ns *= NSEC_PER_USEC;
 
 	/*
 	 * We can walk the children without any additional locking, because
 	 * they all have been suspended at this point and their
@@ -77,19 +76,14 @@ static bool default_suspend_ok(struct device *dev)
 	device_for_each_child(dev, &constraint_ns,
 			      dev_update_qos_constraint);
 
-	if (constraint_ns < 0) {
-		/* The children have no constraints. */
-		td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-		td->cached_suspend_ok = true;
-	} else {
-		constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns;
-		if (constraint_ns > 0) {
-			td->effective_constraint_ns = constraint_ns;
-			td->cached_suspend_ok = true;
-		} else {
-			td->effective_constraint_ns = 0;
-		}
+	if (constraint_ns > 0) {
+		constraint_ns -= td->suspend_latency_ns +
+				td->resume_latency_ns;
+		if (constraint_ns == 0)
+			return false;
 	}
+	td->effective_constraint_ns = constraint_ns;
+	td->cached_suspend_ok = constraint_ns >= 0;
 
 	/*
 	 * The children have been suspended already, so we don't need to take
@@ -151,14 +145,13 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
 		td = &to_gpd_data(pdd)->td;
 		constraint_ns = td->effective_constraint_ns;
 		/* default_suspend_ok() need not be called before us. */
-		if (constraint_ns < 0)
+		if (constraint_ns < 0) {
 			constraint_ns = dev_pm_qos_read_value(pdd->dev);
-
-		if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+			constraint_ns *= NSEC_PER_USEC;
+		}
+		if (constraint_ns == 0)
 			continue;
 
-		constraint_ns *= NSEC_PER_USEC;
-
 		/*
 		 * constraint_ns cannot be negative here, because the device has
 		 * been suspended.
...
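Editor's note: across these hunks the governor goes back to treating the QoS value 0 as "no constraint" and to converting microseconds to nanoseconds at the point the value is read. A standalone sketch of that bookkeeping, with made-up latencies (the numbers and variable names are illustrative, not from the patch):

    #include <stdio.h>

    #define NSEC_PER_USEC 1000LL

    int main(void)
    {
        long long constraint_us = 200;          /* assumed QoS value, in us */
        long long suspend_latency_ns = 50000;   /* assumed device timing data */
        long long resume_latency_ns = 60000;

        long long constraint_ns = constraint_us * NSEC_PER_USEC;

        /* Subtract the device's own suspend+resume cost, as
         * default_suspend_ok() does; whatever is left is the budget
         * the children may still consume.
         */
        constraint_ns -= suspend_latency_ns + resume_latency_ns;
        printf("effective constraint: %lld ns, suspend ok: %s\n",
               constraint_ns, constraint_ns >= 0 ? "yes" : "no");
        return 0;
    }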
@@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	plist_head_init(&c->list);
 	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
 	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
-	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+	c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
 	c->type = PM_QOS_MIN;
 	c->notifiers = n;
...
@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
 	    || (dev->power.request_pending
 	    && dev->power.request == RPM_REQ_RESUME))
 		retval = -EAGAIN;
-	else if (__dev_pm_qos_read_value(dev) == 0)
+	else if (__dev_pm_qos_read_value(dev) < 0)
 		retval = -EPERM;
 	else if (dev->power.runtime_status == RPM_SUSPENDED)
 		retval = 1;
...
@@ -218,14 +218,7 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev,
 					  struct device_attribute *attr,
 					  char *buf)
 {
-	s32 value = dev_pm_qos_requested_resume_latency(dev);
-
-	if (value == 0)
-		return sprintf(buf, "n/a\n");
-	else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
-		value = 0;
-
-	return sprintf(buf, "%d\n", value);
+	return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
 }
 
 static ssize_t pm_qos_resume_latency_store(struct device *dev,
@@ -235,21 +228,11 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
 	s32 value;
 	int ret;
 
-	if (!kstrtos32(buf, 0, &value)) {
-		/*
-		 * Prevent users from writing negative or "no constraint" values
-		 * directly.
-		 */
-		if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
-			return -EINVAL;
+	if (kstrtos32(buf, 0, &value))
+		return -EINVAL;
 
-		if (value == 0)
-			value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-	} else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
-		value = 0;
-	} else {
+	if (value < 0)
 		return -EINVAL;
-	}
 
 	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
 					value);
...
@@ -593,10 +593,22 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
 }
 
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+static void virtblk_initialize_rq(struct request *req)
+{
+	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+	scsi_req_init(&vbr->sreq);
+}
+#endif
+
 static const struct blk_mq_ops virtio_mq_ops = {
 	.queue_rq	= virtio_queue_rq,
 	.complete	= virtblk_request_done,
 	.init_request	= virtblk_init_request,
+#ifdef CONFIG_VIRTIO_BLK_SCSI
+	.initialize_rq_fn = virtblk_initialize_rq,
+#endif
 	.map_queues	= virtblk_map_queues,
 };
...
@@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 		data->needs_update = 0;
 	}
 
-	if (resume_latency < latency_req &&
-	    resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+	/* resume_latency is 0 means no restriction */
+	if (resume_latency && resume_latency < latency_req)
 		latency_req = resume_latency;
 
 	/* Special case when user has set very strict latency requirement */
...
@@ -1328,6 +1328,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
 	struct scsi_request *req = scsi_req(rq);
 
+	scsi_req_init(req);
 	memset(req->cmd, 0, BLK_MAX_CDB);
 
 	if (rq_data_dir(rq) == READ)
...
@@ -214,7 +214,9 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 			  nldev_policy, extack);
-	if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+	if (err ||
+	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
 		return -EINVAL;
 
 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
...
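Editor's note: the point of the extra check is that nla_get_u32() dereferences the attribute pointer unconditionally, so reading tb[RDMA_NLDEV_ATTR_DEV_INDEX] while that attribute is absent from the message is a NULL dereference. A toy userspace model of the pattern (the mock types and names are mine):

    #include <stdio.h>
    #include <stdint.h>

    struct nlattr { uint32_t payload; };

    /* Stand-in for nla_get_u32(): crashes if the attribute is missing (NULL). */
    static uint32_t nla_get_u32_mock(const struct nlattr *a)
    {
        return a->payload;
    }

    int main(void)
    {
        /* Parsed attribute table; index 0 plays DEV_INDEX and is missing. */
        struct nlattr port = { 1 };
        struct nlattr *tb[2] = { NULL, &port };

        if (!tb[0] || !tb[1]) {   /* the added validation */
            puts("-EINVAL: required attribute missing");
            return 1;
        }
        printf("dev index %u\n", nla_get_u32_mock(tb[0]));
        return 0;
    }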
@@ -146,11 +146,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
 	WARN_ON(host->sg_len > 1);
 
 	/* This DMAC cannot handle if buffer is not 8-bytes alignment */
-	if (!IS_ALIGNED(sg->offset, 8)) {
-		host->force_pio = true;
-		renesas_sdhi_internal_dmac_enable_dma(host, false);
-		return;
-	}
+	if (!IS_ALIGNED(sg->offset, 8))
+		goto force_pio;
 
 	if (data->flags & MMC_DATA_READ) {
 		dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
@@ -163,8 +160,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
 	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir);
-	if (ret < 0)
-		return;
+	if (ret == 0)
+		goto force_pio;
 
 	renesas_sdhi_internal_dmac_enable_dma(host, true);
@@ -176,6 +173,12 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
 					    dtran_mode);
 	renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
 					    sg->dma_address);
+
+	return;
+
+force_pio:
+	host->force_pio = true;
+	renesas_sdhi_internal_dmac_enable_dma(host, false);
 }
 
 static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
...
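Editor's note: two fixes collapse into the shared force_pio label here. Note in particular the dma_map_sg() check: it returns the number of entries actually mapped, 0 on failure, never a negative errno, so the old "ret < 0" test could not fire. A compact model of the corrected contract (mocked, not the driver code):

    #include <stdio.h>

    /* Stand-in for dma_map_sg(): returns 0..nents; 0 signals failure. */
    static int dma_map_sg_mock(int nents, int should_fail)
    {
        return should_fail ? 0 : nents;
    }

    int main(void)
    {
        int ret = dma_map_sg_mock(1, 1);

        if (ret < 0)    /* old test: unreachable for this API */
            puts("never printed");
        if (ret == 0)   /* fixed test */
            puts("mapping failed: falling back to PIO");
        return 0;
    }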
@@ -47,6 +47,7 @@
 #include <linux/mmc/sdio.h>
 #include <linux/scatterlist.h>
 #include <linux/spinlock.h>
+#include <linux/swiotlb.h>
 #include <linux/workqueue.h>
 
 #include "tmio_mmc.h"
@@ -1215,6 +1216,18 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
 	mmc->max_blk_count = pdata->max_blk_count ? :
 		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
 	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+
+	/*
+	 * Since swiotlb has memory size limitation, this will calculate
+	 * the maximum size locally (because we don't have any APIs for it now)
+	 * and check the current max_req_size. And then, this will update
+	 * the max_req_size if needed as a workaround.
+	 */
+	if (swiotlb_max_segment()) {
+		unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+		if (mmc->max_req_size > max_size)
+			mmc->max_req_size = max_size;
+	}
 	mmc->max_seg_size = mmc->max_req_size;
 
 	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
...
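Editor's note: the cap is easy to check by hand. With the swiotlb constants of that era (assuming IO_TLB_SHIFT of 11 and IO_TLB_SEGSIZE of 128 from <linux/swiotlb.h>), one bounce-buffer slab is 2 KiB and a mapping may span at most 128 contiguous slabs, so max_req_size is clamped to 256 KiB:

    #include <stdio.h>

    /* Assumed values from <linux/swiotlb.h> at the time of this change. */
    #define IO_TLB_SHIFT   11   /* one bounce-buffer slab = 2048 bytes */
    #define IO_TLB_SEGSIZE 128  /* max contiguous slabs in one mapping */

    int main(void)
    {
        unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;

        /* 2048 * 128 = 262144 bytes = 256 KiB */
        printf("swiotlb request cap: %u bytes (%u KiB)\n",
               max_size, max_size >> 10);
        return 0;
    }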
@@ -2369,8 +2369,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 		priv->enet_ver = AE_VERSION_2;
 
 	ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
-	if (IS_ERR_OR_NULL(ae_node)) {
-		ret = PTR_ERR(ae_node);
+	if (!ae_node) {
+		ret = -ENODEV;
 		dev_err(dev, "not find ae-handle\n");
 		goto out_read_prop_fail;
 	}
...
@@ -294,7 +294,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
 		write_tran.len = MLXSW_I2C_ADDR_WIDTH + chunk_size;
 		mlxsw_i2c_set_slave_addr(tran_buf, off);
 		memcpy(&tran_buf[MLXSW_I2C_ADDR_BUF_SIZE], in_mbox +
-		       chunk_size * i, chunk_size);
+		       MLXSW_I2C_BLK_MAX * i, chunk_size);
 
 		j = 0;
 		end = jiffies + timeout;
...
@@ -6216,6 +6216,29 @@ MLXSW_ITEM32(reg, mtmp, mtr, 0x08, 30, 1);
  */
 MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
 
+/* reg_mtmp_tee
+ * Temperature Event Enable.
+ * 0 - Do not generate event
+ * 1 - Generate event
+ * 2 - Generate single event
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
+
+#define MLXSW_REG_MTMP_THRESH_HI 0x348	/* 105 Celsius */
+
+/* reg_mtmp_temperature_threshold_hi
+ * High threshold for Temperature Warning Event. In 0.125 Celsius.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, temperature_threshold_hi, 0x0C, 0, 16);
+
+/* reg_mtmp_temperature_threshold_lo
+ * Low threshold for Temperature Warning Event. In 0.125 Celsius.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mtmp, temperature_threshold_lo, 0x10, 0, 16);
+
 #define MLXSW_REG_MTMP_SENSOR_NAME_SIZE 8
 
 /* reg_mtmp_sensor_name
@@ -6232,6 +6255,8 @@ static inline void mlxsw_reg_mtmp_pack(char *payload, u8 sensor_index,
 	mlxsw_reg_mtmp_sensor_index_set(payload, sensor_index);
 	mlxsw_reg_mtmp_mte_set(payload, max_temp_enable);
 	mlxsw_reg_mtmp_mtr_set(payload, max_temp_reset);
+	mlxsw_reg_mtmp_temperature_threshold_hi_set(payload,
+						    MLXSW_REG_MTMP_THRESH_HI);
 }
 
 static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
...
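Editor's note: the 0x348 threshold is stated in the register's 0.125-degree units, so the "105 Celsius" comment checks out: 0x348 = 840, and 840 x 0.125 = 105. In code:

    #include <stdio.h>

    #define MLXSW_REG_MTMP_THRESH_HI 0x348  /* in 0.125 Celsius units */

    int main(void)
    {
        /* 0x348 = 840 units; 840 * 0.125 C = 105 C */
        printf("high temperature threshold: %.3f C\n",
               MLXSW_REG_MTMP_THRESH_HI * 0.125);
        return 0;
    }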
@@ -681,9 +681,11 @@ static int m88e1116r_config_init(struct phy_device *phydev)
 	if (err < 0)
 		return err;
 
-	err = m88e1121_config_aneg_rgmii_delays(phydev);
-	if (err < 0)
-		return err;
+	if (phy_interface_is_rgmii(phydev)) {
+		err = m88e1121_config_aneg_rgmii_delays(phydev);
+		if (err < 0)
+			return err;
+	}
 
 	err = genphy_soft_reset(phydev);
 	if (err < 0)
...
@@ -1032,6 +1032,8 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
 	case TUNSETSNDBUF:
 		if (get_user(s, sp))
 			return -EFAULT;
+		if (s <= 0)
+			return -EINVAL;
 
 		q->sk.sk_sndbuf = s;
 		return 0;
...
@@ -2654,6 +2654,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 			ret = -EFAULT;
 			break;
 		}
+		if (sndbuf <= 0) {
+			ret = -EINVAL;
+			break;
+		}
 
 		tun->sndbuf = sndbuf;
 		tun_set_sndbuf(tun);
...
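Editor's note: both the tap and tun ioctl paths now refuse non-positive TUNSETSNDBUF values. The stored size feeds later signed arithmetic on the socket's send budget, where a zero or negative value defeats flow control, so rejecting it at the boundary is the cheap fix. A hedged sketch of the added validation (mock helper, illustrative values):

    #include <stdio.h>

    /* Sketch of the TUNSETSNDBUF validation added to tap and tun. */
    static int set_sndbuf(int *dst, int requested)
    {
        if (requested <= 0)
            return -1;      /* -EINVAL in the kernel */
        *dst = requested;
        return 0;
    }

    int main(void)
    {
        int sndbuf = 0;

        printf("set -1: %d\n", set_sndbuf(&sndbuf, -1));      /* rejected */
        printf("set 65536: %d (sndbuf=%d)\n",
               set_sndbuf(&sndbuf, 65536), sndbuf);           /* accepted */
        return 0;
    }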
@@ -346,7 +346,6 @@ static int lapbeth_new_device(struct net_device *dev)
 fail:
 	dev_put(dev);
 	free_netdev(ndev);
-	kfree(lapbeth);
 	goto out;
 }
...
@@ -550,6 +550,11 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
 		return IEEE80211_TKIP_IV_LEN;
 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
 		return IEEE80211_CCMP_HDR_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+		return IEEE80211_CCMP_256_HDR_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+		return IEEE80211_GCMP_HDR_LEN;
 	case HTT_RX_MPDU_ENCRYPT_WEP128:
 	case HTT_RX_MPDU_ENCRYPT_WAPI:
 		break;
@@ -575,6 +580,11 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
 		return IEEE80211_TKIP_ICV_LEN;
 	case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
 		return IEEE80211_CCMP_MIC_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+		return IEEE80211_CCMP_256_MIC_LEN;
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+	case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+		return IEEE80211_GCMP_MIC_LEN;
 	case HTT_RX_MPDU_ENCRYPT_WEP128:
 	case HTT_RX_MPDU_ENCRYPT_WAPI:
 		break;
@@ -1051,9 +1061,21 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
 	hdr = (void *)msdu->data;
 
 	/* Tail */
-	if (status->flag & RX_FLAG_IV_STRIPPED)
+	if (status->flag & RX_FLAG_IV_STRIPPED) {
 		skb_trim(msdu, msdu->len -
 			 ath10k_htt_rx_crypto_tail_len(ar, enctype));
+	} else {
+		/* MIC */
+		if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
+		    enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+			skb_trim(msdu, msdu->len - 8);
+
+		/* ICV */
+		if (status->flag & RX_FLAG_ICV_STRIPPED &&
+		    enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+			skb_trim(msdu, msdu->len -
+				 ath10k_htt_rx_crypto_tail_len(ar, enctype));
+	}
 
 	/* MMIC */
 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
@@ -1075,7 +1097,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
 static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
 					  struct sk_buff *msdu,
 					  struct ieee80211_rx_status *status,
-					  const u8 first_hdr[64])
+					  const u8 first_hdr[64],
+					  enum htt_rx_mpdu_encrypt_type enctype)
 {
 	struct ieee80211_hdr *hdr;
 	struct htt_rx_desc *rxd;
@@ -1083,6 +1106,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
 	u8 da[ETH_ALEN];
 	u8 sa[ETH_ALEN];
 	int l3_pad_bytes;
+	int bytes_aligned = ar->hw_params.decap_align_bytes;
 
 	/* Delivered decapped frame:
 	 * [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1111,6 +1135,14 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
 	/* push original 802.11 header */
 	hdr = (struct ieee80211_hdr *)first_hdr;
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		memcpy(skb_push(msdu,
+				ath10k_htt_rx_crypto_param_len(ar, enctype)),
+		       (void *)hdr + round_up(hdr_len, bytes_aligned),
+			ath10k_htt_rx_crypto_param_len(ar, enctype));
+	}
+
 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
 
 	/* original 802.11 header has a different DA and in
@@ -1171,6 +1203,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
 	u8 sa[ETH_ALEN];
 	int l3_pad_bytes;
 	struct htt_rx_desc *rxd;
+	int bytes_aligned = ar->hw_params.decap_align_bytes;
 
 	/* Delivered decapped frame:
 	 * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1199,6 +1232,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
 	/* push original 802.11 header */
 	hdr = (struct ieee80211_hdr *)first_hdr;
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		memcpy(skb_push(msdu,
+				ath10k_htt_rx_crypto_param_len(ar, enctype)),
+		       (void *)hdr + round_up(hdr_len, bytes_aligned),
+			ath10k_htt_rx_crypto_param_len(ar, enctype));
+	}
+
 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
 
 	/* original 802.11 header has a different DA and in
@@ -1212,12 +1253,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
 static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
 					 struct sk_buff *msdu,
 					 struct ieee80211_rx_status *status,
-					 const u8 first_hdr[64])
+					 const u8 first_hdr[64],
+					 enum htt_rx_mpdu_encrypt_type enctype)
 {
 	struct ieee80211_hdr *hdr;
 	size_t hdr_len;
 	int l3_pad_bytes;
 	struct htt_rx_desc *rxd;
+	int bytes_aligned = ar->hw_params.decap_align_bytes;
 
 	/* Delivered decapped frame:
 	 * [amsdu header] <-- replaced with 802.11 hdr
@@ -1233,6 +1276,14 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
 	hdr = (struct ieee80211_hdr *)first_hdr;
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+		memcpy(skb_push(msdu,
+				ath10k_htt_rx_crypto_param_len(ar, enctype)),
+		       (void *)hdr + round_up(hdr_len, bytes_aligned),
+			ath10k_htt_rx_crypto_param_len(ar, enctype));
+	}
+
 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
 }
@@ -1267,13 +1318,15 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
 					    is_decrypted);
 		break;
 	case RX_MSDU_DECAP_NATIVE_WIFI:
-		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+		ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
+					      enctype);
 		break;
 	case RX_MSDU_DECAP_ETHERNET2_DIX:
 		ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
 		break;
 	case RX_MSDU_DECAP_8023_SNAP_LLC:
-		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+		ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
+					     enctype);
 		break;
 	}
 }
@@ -1316,7 +1369,8 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
 
 static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 				 struct sk_buff_head *amsdu,
-				 struct ieee80211_rx_status *status)
+				 struct ieee80211_rx_status *status,
+				 bool fill_crypt_header)
 {
 	struct sk_buff *first;
 	struct sk_buff *last;
@@ -1326,7 +1380,6 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 	enum htt_rx_mpdu_encrypt_type enctype;
 	u8 first_hdr[64];
 	u8 *qos;
-	size_t hdr_len;
 	bool has_fcs_err;
 	bool has_crypto_err;
 	bool has_tkip_err;
@@ -1351,15 +1404,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 	 * decapped header. It'll be used for undecapping of each MSDU.
 	 */
 	hdr = (void *)rxd->rx_hdr_status;
-	hdr_len = ieee80211_hdrlen(hdr->frame_control);
-	memcpy(first_hdr, hdr, hdr_len);
+	memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
 
 	/* Each A-MSDU subframe will use the original header as the base and be
 	 * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
 	 */
 	hdr = (void *)first_hdr;
-	qos = ieee80211_get_qos_ctl(hdr);
-	qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		qos = ieee80211_get_qos_ctl(hdr);
+		qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+	}
 
 	/* Some attention flags are valid only in the last MSDU. */
 	last = skb_peek_tail(amsdu);
@@ -1406,9 +1461,14 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 		status->flag |= RX_FLAG_DECRYPTED;
 
 		if (likely(!is_mgmt))
-			status->flag |= RX_FLAG_IV_STRIPPED |
-					RX_FLAG_MMIC_STRIPPED;
-	}
+			status->flag |= RX_FLAG_MMIC_STRIPPED;
+
+		if (fill_crypt_header)
+			status->flag |= RX_FLAG_MIC_STRIPPED |
+					RX_FLAG_ICV_STRIPPED;
+		else
+			status->flag |= RX_FLAG_IV_STRIPPED;
+	}
 
 	skb_queue_walk(amsdu, msdu) {
 		ath10k_htt_rx_h_csum_offload(msdu);
@@ -1424,6 +1484,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 		if (is_mgmt)
 			continue;
 
+		if (fill_crypt_header)
+			continue;
+
 		hdr = (void *)msdu->data;
 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 	}
@@ -1434,6 +1497,9 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
 				    struct ieee80211_rx_status *status)
 {
 	struct sk_buff *msdu;
+	struct sk_buff *first_subframe;
+
+	first_subframe = skb_peek(amsdu);
 
 	while ((msdu = __skb_dequeue(amsdu))) {
 		/* Setup per-MSDU flags */
@@ -1442,6 +1508,13 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
 		else
 			status->flag |= RX_FLAG_AMSDU_MORE;
 
+		if (msdu == first_subframe) {
+			first_subframe = NULL;
+			status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+		} else {
+			status->flag |= RX_FLAG_ALLOW_SAME_PN;
+		}
+
 		ath10k_process_rx(ar, status, msdu);
 	}
 }
@@ -1584,7 +1657,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 	ath10k_htt_rx_h_unchain(ar, &amsdu);
 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
-	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
 	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
 
 	return num_msdus;
@@ -1745,8 +1818,7 @@ static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
 }
 
 static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
-				       struct sk_buff_head *amsdu,
-				       int budget_left)
+				       struct sk_buff_head *amsdu)
 {
 	struct sk_buff *msdu;
 	struct htt_rx_desc *rxd;
@@ -1757,9 +1829,8 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
 	if (WARN_ON(!skb_queue_empty(amsdu)))
 		return -EINVAL;
 
-	while ((msdu = __skb_dequeue(list)) && budget_left) {
+	while ((msdu = __skb_dequeue(list))) {
 		__skb_queue_tail(amsdu, msdu);
-		budget_left--;
 
 		rxd = (void *)msdu->data - sizeof(*rxd);
 		if (rxd->msdu_end.common.info0 &
@@ -1850,8 +1921,7 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
 	return num_msdu;
 }
 
-static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
-				    int budget_left)
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ath10k_htt *htt = &ar->htt;
 	struct htt_resp *resp = (void *)skb->data;
@@ -1908,9 +1978,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
 	if (offload)
 		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
 
-	while (!skb_queue_empty(&list) && budget_left) {
+	while (!skb_queue_empty(&list)) {
 		__skb_queue_head_init(&amsdu);
-		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu, budget_left);
+		ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
 		switch (ret) {
 		case 0:
 			/* Note: The in-order indication may report interleaved
@@ -1920,10 +1990,9 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb,
 			 * should still give an idea about rx rate to the user.
 			 */
 			num_msdus += skb_queue_len(&amsdu);
-			budget_left -= skb_queue_len(&amsdu);
 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
-			ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
 			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
 			break;
 		case -EAGAIN:
@@ -2563,8 +2632,7 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 		}
 
 		spin_lock_bh(&htt->rx_ring.lock);
-		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb,
-							(budget - quota));
+		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
 		spin_unlock_bh(&htt->rx_ring.lock);
 		if (num_rx_msdus < 0) {
 			resched_napi = true;
...
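Editor's note: worth calling out from the deliver-path hunk above: all subframes of one A-MSDU are protected by a single packet number, so once the crypto header is kept in place, mac80211 must be told to tolerate a repeated PN on every subframe after the first. A toy model of the per-subframe flag handling (the flag's bit value here is illustrative, not the real mac80211 constant):

    #include <stdio.h>

    #define RX_FLAG_ALLOW_SAME_PN 0x1   /* illustrative bit value */

    int main(void)
    {
        unsigned int flag = 0;
        int subframes = 3;

        for (int i = 0; i < subframes; i++) {
            if (i == 0)
                flag &= ~RX_FLAG_ALLOW_SAME_PN;  /* first: strict PN check */
            else
                flag |= RX_FLAG_ALLOW_SAME_PN;   /* rest: same PN allowed */
            printf("subframe %d: allow_same_pn=%u\n", i,
                   flag & RX_FLAG_ALLOW_SAME_PN);
        }
        return 0;
    }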
@@ -239,6 +239,9 @@ enum htt_rx_mpdu_encrypt_type {
 	HTT_RX_MPDU_ENCRYPT_WAPI = 5,
 	HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
 	HTT_RX_MPDU_ENCRYPT_NONE = 7,
+	HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
+	HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
+	HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
 };
 
 #define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
...
@@ -812,7 +812,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
 		if (!sta) {
 			wcn36xx_err("sta %pM is not found\n",
 				    bss_conf->bssid);
-			rcu_read_unlock();
 			goto out;
 		}
 		sta_priv = wcn36xx_sta_to_priv(sta);
...
@@ -1249,6 +1249,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 		goto out;
 	}
 
+	__nvme_revalidate_disk(disk, id);
 	nvme_report_ns_ids(ctrl, ns->ns_id, id, eui64, nguid, &uuid);
 	if (!uuid_equal(&ns->uuid, &uuid) ||
 	    memcmp(&ns->nguid, &nguid, sizeof(ns->nguid)) ||
...
@@ -1614,12 +1614,15 @@ nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 		/*
 		 * reconnecting state means transport disruption, which
 		 * can take a long time and even might fail permanently,
-		 * so we can't let incoming I/O be requeued forever.
-		 * fail it fast to allow upper layers a chance to
-		 * failover.
+		 * fail fast to give upper layers a chance to failover.
+		 * deleting state means that the ctrl will never accept
+		 * commands again, fail it permanently.
 		 */
-		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+		if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
+		    queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
+			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 			return BLK_STS_IOERR;
+		}
 		return BLK_STS_RESOURCE; /* try again later */
 	}
 }
...
@@ -204,7 +204,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
 	int i;
 
-	if (unlikely(direntry->d_name.len >
+	if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
+		     direntry->d_name.len >
 		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
 		return -ENAMETOOLONG;
 
@@ -520,7 +521,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
 
 	rc = check_name(direntry, tcon);
 	if (rc)
-		goto out_free_xid;
+		goto out;
 
 	server = tcon->ses->server;
...
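Editor's note: the first hunk guards against servers that do not report MaxPathNameComponentLength at all. The field is then zero, and the unguarded comparison "len > 0" rejects every name with -ENAMETOOLONG; checking the field before using it restores lookups against such servers. A minimal model of the two checks:

    #include <stdio.h>

    static int check_name_mock(unsigned int name_len, unsigned int max_component)
    {
        /* Fixed check: only enforce a limit the server actually reported. */
        if (max_component && name_len > max_component)
            return -1;      /* -ENAMETOOLONG */
        return 0;
    }

    int main(void)
    {
        /* Server reported no limit (0): names must still be accepted. */
        printf("no limit: %d\n", check_name_mock(5, 0));
        /* Reported limit of 3: a 5-char component is rejected. */
        printf("limit 3:  %d\n", check_name_mock(5, 3));
        return 0;
    }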
@@ -27,17 +27,16 @@ enum pm_qos_flags_status {
 	PM_QOS_FLAGS_ALL,
 };
 
-#define PM_QOS_DEFAULT_VALUE	(-1)
-#define PM_QOS_LATENCY_ANY	S32_MAX
+#define PM_QOS_DEFAULT_VALUE -1
 
 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
 #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE	0
 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	0
-#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT	PM_QOS_LATENCY_ANY
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
+#define PM_QOS_LATENCY_ANY			((s32)(~(__u32)0 >> 1))
 
 #define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)
...
@@ -1738,12 +1738,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
 	tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
 }
 
-/* Called when old skb is about to be deleted (to be combined with new skb) */
-static inline void tcp_highest_sack_combine(struct sock *sk,
+/* Called when old skb is about to be deleted and replaced by new skb */
+static inline void tcp_highest_sack_replace(struct sock *sk,
 					    struct sk_buff *old,
 					    struct sk_buff *new)
 {
-	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
+	if (old == tcp_highest_sack(sk))
 		tcp_sk(sk)->highest_sack = new;
 }
...
@@ -889,7 +889,6 @@ struct xdp_md {
 enum sk_action {
 	SK_DROP = 0,
 	SK_PASS,
-	SK_REDIRECT,
 };
 
 #define BPF_TAG_SIZE 8
...
@@ -104,13 +104,19 @@ static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
 	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
 }
 
+enum __sk_action {
+	__SK_DROP = 0,
+	__SK_PASS,
+	__SK_REDIRECT,
+};
+
 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
 {
 	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
 	int rc;
 
 	if (unlikely(!prog))
-		return SK_DROP;
+		return __SK_DROP;
 
 	skb_orphan(skb);
 	/* We need to ensure that BPF metadata for maps is also cleared
@@ -125,8 +131,10 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
 	preempt_enable();
 	skb->sk = NULL;
 
+	/* Moving return codes from UAPI namespace into internal namespace */
 	return rc == SK_PASS ?
-		(TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
+		(TCP_SKB_CB(skb)->bpf.map ? __SK_REDIRECT : __SK_PASS) :
+		__SK_DROP;
 }
 
 static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
@@ -136,7 +144,7 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
 
 	rc = smap_verdict_func(psock, skb);
 	switch (rc) {
-	case SK_REDIRECT:
+	case __SK_REDIRECT:
 		sk = do_sk_redirect_map(skb);
 		if (likely(sk)) {
 			struct smap_psock *peer = smap_psock_sk(sk);
@@ -152,7 +160,7 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
 		}
 	}
 	/* Fall through and free skb otherwise */
-	case SK_DROP:
+	case __SK_DROP:
 	default:
 		kfree_skb(skb);
 	}
...
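Editor's note: this rework keeps the user-visible enum sk_action to the two values BPF programs may legally return and moves the kernel-only redirect code into a private enum, translating at the boundary. A self-contained model of that split:

    #include <stdio.h>

    /* UAPI enum: what BPF programs return; must stay stable. */
    enum sk_action { SK_DROP = 0, SK_PASS };

    /* Internal enum: free to carry extra states that never reach UAPI. */
    enum __sk_action { __SK_DROP = 0, __SK_PASS, __SK_REDIRECT };

    /* Boundary translation, mirroring smap_verdict_func(): a SK_PASS verdict
     * with a redirect map attached becomes the internal __SK_REDIRECT.
     */
    static enum __sk_action translate(enum sk_action rc, int has_redirect_map)
    {
        return rc == SK_PASS ?
               (has_redirect_map ? __SK_REDIRECT : __SK_PASS) : __SK_DROP;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               translate(SK_PASS, 1),   /* 2: __SK_REDIRECT */
               translate(SK_PASS, 0),   /* 1: __SK_PASS */
               translate(SK_DROP, 1));  /* 0: __SK_DROP */
        return 0;
    }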
@@ -2698,7 +2698,7 @@ enum siginfo_layout siginfo_layout(int sig, int si_code)
 		[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
 		[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
 		[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
-#if defined(SIGMET) && defined(NSIGEMT)
+#if defined(SIGEMT) && defined(NSIGEMT)
 		[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
 #endif
 		[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
...
@@ -161,6 +161,7 @@ int ioremap_page_range(unsigned long addr,
 	unsigned long next;
 	int err;
 
+	might_sleep();
 	BUG_ON(addr >= end);
 
 	start = addr;
...
@@ -137,6 +137,6 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
 EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
 
 MODULE_AUTHOR("Jamal Hadi Salim <jhs@mojatatu.com>");
-MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
 MODULE_DESCRIPTION("Inter-FE LFB action");
 MODULE_LICENSE("GPL");
@@ -2132,6 +2132,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	nskb->ip_summed = skb->ip_summed;
 
 	tcp_insert_write_queue_before(nskb, skb, sk);
+	tcp_highest_sack_replace(sk, skb, nskb);
 
 	len = 0;
 	tcp_for_write_queue_from_safe(skb, next, sk) {
@@ -2735,7 +2736,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 		else if (!skb_shift(skb, next_skb, next_skb_size))
 			return false;
 	}
-	tcp_highest_sack_combine(sk, next_skb, skb);
+	tcp_highest_sack_replace(sk, next_skb, skb);
 
 	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
 		skb->ip_summed = CHECKSUM_PARTIAL;
...
@@ -3349,6 +3349,7 @@ static void addrconf_permanent_addr(struct net_device *dev)
 		if ((ifp->flags & IFA_F_PERMANENT) &&
 		    fixup_permanent_addr(idev, ifp) < 0) {
 			write_unlock_bh(&idev->lock);
+			in6_ifa_hold(ifp);
 			ipv6_del_addr(ifp);
 			write_lock_bh(&idev->lock);
...
@@ -641,6 +641,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	u32 tunnel_id, peer_tunnel_id;
 	u32 session_id, peer_session_id;
 	bool drop_refcnt = false;
+	bool drop_tunnel = false;
 	int ver = 2;
 	int fd;
@@ -709,7 +710,9 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	if (tunnel_id == 0)
 		goto end;
 
-	tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);
+	tunnel = l2tp_tunnel_get(sock_net(sk), tunnel_id);
+	if (tunnel)
+		drop_tunnel = true;
 
 	/* Special case: create tunnel context if session_id and
 	 * peer_session_id is 0. Otherwise look up tunnel using supplied
@@ -837,6 +840,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 end:
 	if (drop_refcnt)
 		l2tp_session_dec_refcount(session);
+	if (drop_tunnel)
+		l2tp_tunnel_dec_refcount(tunnel);
 	release_sock(sk);
 
 	return error;
...
@@ -296,6 +296,6 @@ static void __exit psample_module_exit(void)
 module_init(psample_module_init);
 module_exit(psample_module_exit);
 
-MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
 MODULE_DESCRIPTION("netlink channel for packet sampling");
 MODULE_LICENSE("GPL v2");
@@ -271,6 +271,6 @@ static void __exit sample_cleanup_module(void)
 module_init(sample_init_module);
 module_exit(sample_cleanup_module);
 
-MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
 MODULE_DESCRIPTION("Packet sampling action");
 MODULE_LICENSE("GPL v2");
@@ -322,8 +322,8 @@ static void tcf_block_put_final(struct work_struct *work)
 	struct tcf_block *block = container_of(work, struct tcf_block, work);
 	struct tcf_chain *chain, *tmp;
 
-	/* At this point, all the chains should have refcnt == 1. */
 	rtnl_lock();
+	/* Only chain 0 should be still here. */
 	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
 		tcf_chain_put(chain);
 	rtnl_unlock();
@@ -331,44 +331,24 @@ static void tcf_block_put_final(struct work_struct *work)
 }
 
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are destroyed
- * in RCU callbacks, we have to hold the chains first, otherwise we would
- * always race with RCU callbacks on this list without proper locking.
+ * actions should be all removed after flushing. However, filters are now
+ * destroyed in tc filter workqueue with RTNL lock, they can not race here.
 */
-static void tcf_block_put_deferred(struct work_struct *work)
-{
-	struct tcf_block *block = container_of(work, struct tcf_block, work);
-	struct tcf_chain *chain;
-
-	rtnl_lock();
-	/* Hold a refcnt for all chains, except 0, in case they are gone. */
-	list_for_each_entry(chain, &block->chain_list, list)
-		if (chain->index)
-			tcf_chain_hold(chain);
-
-	/* No race on the list, because no chain could be destroyed. */
-	list_for_each_entry(chain, &block->chain_list, list)
-		tcf_chain_flush(chain);
-
-	INIT_WORK(&block->work, tcf_block_put_final);
-	/* Wait for RCU callbacks to release the reference count and make
-	 * sure their works have been queued before this.
-	 */
-	rcu_barrier();
-	tcf_queue_work(&block->work);
-	rtnl_unlock();
-}
-
 void tcf_block_put_ext(struct tcf_block *block,
 		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei)
 {
+	struct tcf_chain *chain, *tmp;
+
 	if (!block)
 		return;
 
 	tcf_block_offload_unbind(block, q, ei);
 
-	INIT_WORK(&block->work, tcf_block_put_deferred);
+	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+		tcf_chain_flush(chain);
+
+	INIT_WORK(&block->work, tcf_block_put_final);
 	/* Wait for existing RCU callbacks to cool down, make sure their works
 	 * have been queued before this. We can not flush pending works here
 	 * because we are holding the RTNL lock.
...
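Editor's note: this is the hunk the merge message at the top describes. Cong Wang's fix from 'net' drops the deferred work and rcu_barrier() dance, and the merge keeps that flush-and-queue teardown in tcf_block_put_ext() while running it after tcf_block_offload_unbind(). The plain tcf_block_put() of that era is, as far as I recall, just a thin wrapper over the _ext variant; a runnable mock of that delegation shape (all names suffixed _mock are stand-ins, not the kernel API):

    #include <stdio.h>

    struct tcf_block_ext_info { int binder_type; };
    struct tcf_block { void *q; };

    /* All real teardown logic lives in the _ext variant. */
    static void tcf_block_put_ext_mock(struct tcf_block *block, void *chain,
                                       void *q, struct tcf_block_ext_info *ei)
    {
        (void)chain; (void)q; (void)ei;
        printf("ext teardown for block %p\n", (void *)block);
    }

    static void tcf_block_put_mock(struct tcf_block *block)
    {
        struct tcf_block_ext_info ei = {0, };

        if (!block)
            return;
        tcf_block_put_ext_mock(block, NULL, block->q, &ei);
    }

    int main(void)
    {
        struct tcf_block b = { .q = NULL };

        tcf_block_put_mock(&b);
        return 0;
    }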
@@ -105,6 +105,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 		if (xfrm_offload(skb)) {
 			x->type_offload->encap(x, skb);
 		} else {
+			/* Inner headers are invalid now. */
+			skb->encapsulation = 0;
+
 			err = x->type->output(x, skb);
 			if (err == -EINPROGRESS)
 				goto out;
@@ -208,7 +211,6 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
 	int err;
 
 	secpath_reset(skb);
-	skb->encapsulation = 0;
 
 	if (xfrm_dev_offload_ok(skb, x)) {
 		struct sec_path *sp;
...
@@ -2075,7 +2075,6 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
 	xdst->num_xfrms = num_xfrms;
 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
 
-	dst_hold(&xdst->u.dst);
 	return xdst;
 
 inc_error:
...
@@ -2069,6 +2069,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
 		if (err >= 0) {
 			xfrm_sk_policy_insert(sk, err, pol);
 			xfrm_pol_put(pol);
+			__sk_dst_reset(sk);
 			err = 0;
 		}
...
@@ -645,7 +645,7 @@ union bpf_attr {
  *     @map: pointer to sockmap
  *     @key: key to lookup sock in map
  *     @flags: reserved for future use
- *     Return: SK_REDIRECT
+ *     Return: SK_PASS
  *
  * int bpf_sock_map_update(skops, map, key, flags)
  *     @skops: pointer to bpf_sock_ops
@@ -889,7 +889,6 @@ struct xdp_md {
 enum sk_action {
 	SK_DROP = 0,
 	SK_PASS,
-	SK_REDIRECT,
 };
 
 #define BPF_TAG_SIZE 8
...
@@ -152,11 +152,11 @@ def ns_create():
         exec_cmd(cmd, False)
         cmd = 'ip link set $DEV0 up'
         exec_cmd(cmd, False)
-        cmd = 'ip -s $NS link set $DEV1 up'
+        cmd = 'ip -n $NS link set $DEV1 up'
         exec_cmd(cmd, False)
         cmd = 'ip link set $DEV2 netns $NS'
         exec_cmd(cmd, False)
-        cmd = 'ip -s $NS link set $DEV2 up'
+        cmd = 'ip -n $NS link set $DEV2 up'
         exec_cmd(cmd, False)
...