Unverified commit 6ff43716, authored by openeuler-ci-bot, committed by Gitee

!369 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2022-47929
CVE-2023-0179
CVE-2023-23454
CVE-2023-23455
CVE-2023-23559

mm bugfixes from Cai Xinchen and Ma Wupeng
fdt and cmdline bugfixes from Zhang Zekun
xfs bugfix from Guo Xuenan
scsi bugfix from Li Nan 
 
Link: https://gitee.com/openeuler/kernel/pulls/369

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -694,8 +694,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
         struct rndis_query *get;
         struct rndis_query_c *get_c;
     } u;
-    int ret, buflen;
-    int resplen, respoffs, copylen;
+    int ret;
+    size_t buflen, resplen, respoffs, copylen;
 
     buflen = *len + sizeof(*u.get);
     if (buflen < CONTROL_BUFFER_SIZE)
@@ -730,22 +730,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
 
         if (respoffs > buflen) {
             /* Device returned data offset outside buffer, error. */
-            netdev_dbg(dev->net, "%s(%s): received invalid "
-                   "data offset: %d > %d\n", __func__,
-                   oid_to_string(oid), respoffs, buflen);
+            netdev_dbg(dev->net,
+                   "%s(%s): received invalid data offset: %zu > %zu\n",
+                   __func__, oid_to_string(oid), respoffs, buflen);
 
             ret = -EINVAL;
             goto exit_unlock;
         }
 
-        if ((resplen + respoffs) > buflen) {
-            /* Device would have returned more data if buffer would
-             * have been big enough. Copy just the bits that we got.
-             */
-            copylen = buflen - respoffs;
-        } else {
-            copylen = resplen;
-        }
+        copylen = min(resplen, buflen - respoffs);
 
         if (copylen > *len)
             copylen = *len;
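
Note on the hunk above: resplen and respoffs come from device-controlled 32-bit fields, so the old int arithmetic could go negative and slip past the bounds check (CVE-2023-23559). A user-space sketch of that arithmetic, with hypothetical values; only the computation mirrors the driver:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    int buflen = 1024;
    int resplen = (int)0xfffffff0u;    /* device-supplied length: -16 as int */
    int respoffs = 16;
    int copylen;

    if ((resplen + respoffs) > buflen)    /* -16 + 16 = 0: not caught */
        copylen = buflen - respoffs;
    else
        copylen = resplen;                /* negative */
    printf("old copylen = %d (%zu when handed to memcpy)\n",
           copylen, (size_t)copylen);

    /* Patched logic: size_t everywhere plus min(), evaluated only after
     * respoffs > buflen has already been rejected, cannot wrap. */
    size_t ubuflen = 1024, uresplen = 0xfffffff0u, urespoffs = 16;
    size_t ucopylen = uresplen < ubuflen - urespoffs ?
                      uresplen : ubuflen - urespoffs;
    printf("new copylen = %zu\n", ucopylen);
    return 0;
}
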
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -885,6 +885,8 @@ static void __init early_init_dt_check_for_initrd(unsigned long node)
     if (!prop)
         return;
     end = of_read_number(prop, len/4);
+    if (start > end)
+        return;
 
     __early_init_dt_declare_initrd(start, end);
     phys_initrd_start = start;
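
The new start > end guard matters because both values come straight from the devicetree blob; a bogus blob with end below start would otherwise feed a wrapped size to the initrd setup. A minimal, hypothetical sketch of the underflow:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t start = 0x48000000, end = 0x47000000;    /* bogus DT values */
    uint64_t size = end - start;                      /* wraps to ~16 EiB */

    printf("initrd size = %#llx\n", (unsigned long long)size);
    return 0;
}
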
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -556,11 +556,11 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
         struct enclosure_component *ecomp;
 
         if (desc_ptr) {
-            if (desc_ptr >= buf + page7_len) {
+            len = (desc_ptr[2] << 8) + desc_ptr[3];
+            desc_ptr += 4;
+            if (desc_ptr + len > buf + page7_len) {
                 desc_ptr = NULL;
             } else {
-                len = (desc_ptr[2] << 8) + desc_ptr[3];
-                desc_ptr += 4;
                 /* Add trailing zero - pushes into
                  * reserved space */
                 desc_ptr[len] = '\0';
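
The fix reads the descriptor's length field first and then validates the whole record, header plus len bytes of body, against the end of page 7; the old code only checked where the header started. The same pattern as a stand-alone sketch with a hypothetical buffer layout:

#include <stddef.h>
#include <stdint.h>

static const uint8_t *checked_desc(const uint8_t *desc, const uint8_t *buf,
                                   size_t page_len)
{
    size_t len;

    if (desc + 4 > buf + page_len)        /* header itself out of bounds */
        return NULL;
    len = ((size_t)desc[2] << 8) + desc[3];
    desc += 4;
    if (desc + len > buf + page_len)      /* body would overrun the page */
        return NULL;
    return desc;                          /* safe to read len bytes */
}
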
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1879,12 +1879,20 @@ xfs_inodegc_worker(
             work);
     struct llist_node *node = llist_del_all(&gc->list);
     struct xfs_inode *ip, *n;
+    unsigned int nofs_flag;
 
     WRITE_ONCE(gc->items, 0);
 
     if (!node)
         return;
 
+    /*
+     * We can allocate memory here while doing writeback on behalf of
+     * memory reclaim. To avoid memory allocation deadlocks set the
+     * task-wide nofs context for the following operations.
+     */
+    nofs_flag = memalloc_nofs_save();
+
     ip = llist_entry(node, struct xfs_inode, i_gclist);
     trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
 
@@ -1893,6 +1901,8 @@ xfs_inodegc_worker(
         xfs_iflags_set(ip, XFS_INACTIVATING);
         xfs_inodegc_inactivate(ip);
     }
+
+    memalloc_nofs_restore(nofs_flag);
 }
 
 /*
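
memalloc_nofs_save()/memalloc_nofs_restore() is the kernel's scoped-reclaim API: every allocation inside the window implicitly loses __GFP_FS, so direct reclaim cannot re-enter XFS from its own inode-gc worker and deadlock. A kernel-style sketch of the pattern (the worker body here is hypothetical):

#include <linux/sched/mm.h>

static void example_reclaim_safe_worker(void)
{
    unsigned int nofs_flag;

    nofs_flag = memalloc_nofs_save();
    /* allocations here behave as if they carried GFP_NOFS */
    memalloc_nofs_restore(nofs_flag);
}
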
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1911,7 +1911,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
     if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
         return true;
     do {
-        if (time_before(jiffies, memcg->socket_pressure))
+        if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
             return true;
     } while ((memcg = parent_mem_cgroup(memcg)));
     return false;
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -235,7 +235,7 @@ char *next_arg(char *args, char **param, char **val)
             args[i-1] = '\0';
         }
     }
-    if (quoted && args[i-1] == '"')
+    if (quoted && i > 0 && args[i-1] == '"')
         args[i-1] = '\0';
 
     if (args[i]) {
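
Without the i > 0 guard, an argument consisting of a lone double-quote leaves i at 0 after the scan loop, so args[i-1] touches the byte before the buffer. A user-space sketch of the triggering input; parse_tail() is a hypothetical reduction of next_arg(), not the kernel function:

#include <stdio.h>
#include <stdbool.h>

static void parse_tail(char *args)
{
    unsigned int i = 0;
    bool quoted = false;

    if (*args == '"') {        /* consume the opening quote */
        args++;
        quoted = true;
    }
    while (args[i])            /* empty after the quote: i stays 0 */
        i++;

    if (quoted && i > 0 && args[i - 1] == '"')    /* patched guard */
        args[i - 1] = '\0';    /* without "i > 0": write to args[-1] */
}

int main(void)
{
    char cmdline[] = "\"";
    parse_tail(cmdline);
    printf("ok\n");
    return 0;
}
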
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3396,7 +3396,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
              */
             get_page(vmf->page);
             pte_unmap_unlock(vmf->pte, vmf->ptl);
-            vmf->page->pgmap->ops->migrate_to_ram(vmf);
+            ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
             put_page(vmf->page);
         } else if (is_hwpoison_entry(entry)) {
             ret = VM_FAULT_HWPOISON;
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -304,7 +304,7 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
                  * asserted for a second in which subsequent
                  * pressure events can occur.
                  */
-                memcg->socket_pressure = jiffies + HZ;
+                WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
             }
         }
     }
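
This hunk is the writer side of the include/linux/memcontrol.h change above: socket_pressure is a jiffies deadline shared without a lock, so both ends are annotated to prevent load/store tearing and compiler re-reads. A kernel-style sketch of the pairing, using a stand-in struct rather than the real mem_cgroup:

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>

struct pressure_state {
    unsigned long socket_pressure;    /* jiffies deadline */
};

static void signal_pressure(struct pressure_state *s)
{
    WRITE_ONCE(s->socket_pressure, jiffies + HZ);    /* writer, no lock */
}

static bool under_pressure(const struct pressure_state *s)
{
    return time_before(jiffies, READ_ONCE(s->socket_pressure));    /* reader */
}
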
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -62,7 +62,7 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
             return false;
 
         if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
-            ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen;
+            ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
 
         memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
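
The one-character change flips the sign of vlan_hlen in the trim computation (CVE-2023-0179); with the old '+', ethlen could underflow its u8 type and the memcpy would run far past the small on-stack VLAN header copy. A worked instance with hypothetical field values (offset 20, len 8, vlan_hlen 4, VLAN_ETH_HLEN 18):

#include <stdio.h>

int main(void)
{
    const unsigned int VLAN_ETH_HLEN = 18;
    unsigned char offset = 20, len = 8, vlan_hlen = 4;
    unsigned char ethlen = len;

    if (offset + len > VLAN_ETH_HLEN + vlan_hlen) {
        unsigned char old_len = ethlen - (offset + len - VLAN_ETH_HLEN + vlan_hlen);
        unsigned char new_len = ethlen - (offset + len - VLAN_ETH_HLEN - vlan_hlen);

        printf("old ethlen = %u (u8 underflow, out-of-bounds copy)\n", old_len);
        printf("new ethlen = %u (the two in-range bytes)\n", new_len);
    }
    return 0;
}
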
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1113,6 +1113,11 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
             return -ENOENT;
         }
 
+        if (new && new->ops == &noqueue_qdisc_ops) {
+            NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
+            return -EINVAL;
+        }
+
         err = cops->graft(parent, cl, new, &old, extack);
         if (err)
             return err;
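
Background for the guard (CVE-2022-47929): noqueue is implemented as a qdisc whose enqueue hook is NULL. The root transmit path checks for that, but classful parents call the child's enqueue unconditionally, so grafting noqueue under a class oopsed. A sketch of the mechanism with hypothetical types, not the real Qdisc structures:

#include <stdio.h>
#include <stddef.h>

struct sketch_qdisc {
    int (*enqueue)(struct sketch_qdisc *q);    /* NULL for noqueue */
};

static int root_xmit(struct sketch_qdisc *q)
{
    if (!q->enqueue)        /* fast path knows "no queue" means send directly */
        return 0;
    return q->enqueue(q);
}

static int classful_child_xmit(struct sketch_qdisc *child)
{
    return child->enqueue(child);    /* no check: NULL dereference at runtime */
}

int main(void)
{
    struct sketch_qdisc noqueue = { .enqueue = NULL };

    printf("root path: %d\n", root_xmit(&noqueue));
    /* classful_child_xmit(&noqueue) would crash, which is why the patch
     * refuses to graft noqueue under a class in the first place. */
    return 0;
}
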
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -396,10 +396,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                 result = tcf_classify(skb, fl, &res, true);
                 if (result < 0)
                     continue;
+                if (result == TC_ACT_SHOT)
+                    goto done;
+
                 flow = (struct atm_flow_data *)res.class;
                 if (!flow)
                     flow = lookup_flow(sch, res.classid);
-                goto done;
+                goto drop;
             }
         }
         flow = NULL;
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -231,6 +231,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
         result = tcf_classify(skb, fl, &res, true);
         if (!fl || result < 0)
             goto fallback;
+        if (result == TC_ACT_SHOT)
+            return NULL;
 
         cl = (void *)res.class;
         if (!cl) {
@@ -251,8 +253,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
         case TC_ACT_TRAP:
             *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
             fallthrough;
-        case TC_ACT_SHOT:
-            return NULL;
         case TC_ACT_RECLASSIFY:
             return cbq_reclassify(skb, cl);
         }
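
Both this hunk and the sch_atm one above fix the same misuse pattern (CVE-2023-23454 and CVE-2023-23455): when a classifier action returns TC_ACT_SHOT, tcf_classify() has not filled in res.class, so the verdict must be inspected before res.class is cast to a class pointer. A simplified, hypothetical rendering of the pattern:

#include <stddef.h>

#define TC_ACT_SHOT 2    /* value from <linux/pkt_cls.h> */

struct tcf_result {
    unsigned long class;    /* only valid on a real match */
    unsigned int classid;
};

static void *classify_result_to_class(int result, struct tcf_result *res)
{
    if (result < 0)
        return NULL;        /* no match at all */
    if (result == TC_ACT_SHOT)
        return NULL;        /* drop verdict: res->class is garbage */
    return (void *)res->class;    /* now safe to interpret */
}
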