Unverified commit b982dab6, authored by openeuler-ci-bot, committed by Gitee

!386 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2022-3707
CVE-2023-0394

net bugfixes from Zhengchao Shao
fs bugfixes from Baokun Li and Li Nan
mm bugfixes from Liu Shixin
 
 
Link: https://gitee.com/openeuler/kernel/pulls/386

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -1192,10 +1192,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
 		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
 				start_gfn + sub_index, PAGE_SIZE, &dma_addr);
-		if (ret) {
-			ppgtt_invalidate_spt(spt);
-			return ret;
-		}
+		if (ret)
+			goto err;
 		sub_se.val64 = se->val64;
 
 		/* Copy the PAT field from PDE. */
@@ -1214,6 +1212,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 	ops->set_pfn(se, sub_spt->shadow_page.mfn);
 	ppgtt_set_shadow_entry(spt, se, index);
 	return 0;
+err:
+	/* Cancel the existing address mappings of DMA addr. */
+	for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+		gvt_vdbg_mm("invalidate 4K entry\n");
+		ppgtt_invalidate_pte(sub_spt, &sub_se);
+	}
+	/* Release the newly allocated spt. */
+	trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+			 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+	ppgtt_free_spt(sub_spt);
+	return ret;
+}
 
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
......
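The hunk above fixes CVE-2022-3707: on a DMA mapping failure, the old error path called ppgtt_invalidate_spt() on the caller-owned spt and returned; the caller's own error path then freed the same table again, a double free. The rewritten path unwinds only what this function created, the already-mapped 4K sub-entries and the newly allocated sub_spt, and leaves the parent spt to the caller. A standalone sketch of that unwind-on-partial-failure shape, with illustrative names rather than the i915/gvt API:

/* Unwind-on-partial-failure: on error, undo only the mappings this
 * function created, free only the object it allocated, and never touch
 * caller-owned state, so the caller's error path frees it exactly once.
 * Illustrative userspace code, not the driver API.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_SUB_ENTRIES 8

struct sub_table {
	int mapped[NR_SUB_ENTRIES];
};

static int map_one(int i)
{
	return i == 5 ? -1 : 0;		/* simulate a failure mid-loop */
}

static void unmap_one(int i)
{
	printf("unmapping sub entry %d\n", i);
}

/* Returns 0 and hands *out to the caller; on failure returns an error
 * and leaves no trace: no half-mapped entries, no leaked table.
 */
static int split_entry(struct sub_table **out)
{
	struct sub_table *sub = calloc(1, sizeof(*sub));
	int i, ret = 0;

	if (!sub)
		return -1;

	for (i = 0; i < NR_SUB_ENTRIES; i++) {
		ret = map_one(i);
		if (ret)
			goto err;	/* do NOT free caller-owned state here */
		sub->mapped[i] = 1;
	}
	*out = sub;
	return 0;

err:
	for (i = 0; i < NR_SUB_ENTRIES; i++)	/* undo successful mappings only */
		if (sub->mapped[i])
			unmap_one(i);
	free(sub);				/* free only our own allocation */
	return ret;
}

int main(void)
{
	struct sub_table *t = NULL;

	return split_entry(&t) ? 1 : 0;
}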
@@ -1176,7 +1176,7 @@ void macvlan_common_setup(struct net_device *dev)
 {
 	ether_setup(dev);
 
-	dev->min_mtu		= 0;
+	/* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
 	dev->max_mtu		= ETH_MAX_MTU;
 	dev->priv_flags	       &= ~IFF_TX_SKB_SHARING;
 	netif_keep_dst(dev);
......
@@ -4217,7 +4217,8 @@ int ext4_truncate(struct inode *inode)
 	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
 	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
-		if (ext4_inode_attach_jinode(inode) < 0)
+		err = ext4_inode_attach_jinode(inode);
+		if (err)
 			goto out_trace;
 	}
......
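The ext4_truncate() hunk keeps the error code from ext4_inode_attach_jinode() instead of collapsing it to a comparison, so the caller learns why the attach failed. The same shape as a standalone sketch (illustrative names, assuming a helper that fails with a specific errno):

/* Propagate the real error code rather than flattening it to a boolean. */
#include <errno.h>
#include <stdio.h>

static int attach_jinode(void)
{
	return -ENOMEM;		/* simulate a failure with a specific cause */
}

static int do_truncate(void)
{
	int err = attach_jinode();	/* capture the code... */

	if (err)
		return err;	/* ...so callers see -ENOMEM, not just "failed" */
	return 0;
}

int main(void)
{
	printf("do_truncate() = %d\n", do_truncate());
	return 0;
}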
@@ -6370,7 +6370,7 @@ static int ext4_write_info(struct super_block *sb, int type)
 	handle_t *handle;
 
 	/* Data block + inode block */
-	handle = ext4_journal_start(d_inode(sb->s_root), EXT4_HT_QUOTA, 2);
+	handle = ext4_journal_start_sb(sb, EXT4_HT_QUOTA, 2);
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
 	ret = dquot_commit_info(sb, type);
......
@@ -40,6 +40,7 @@ STATIC void
 xfs_bui_item_free(
 	struct xfs_bui_log_item	*buip)
 {
+	kmem_free(buip->bui_item.li_lv_shadow);
 	kmem_cache_free(xfs_bui_zone, buip);
 }
@@ -199,6 +200,7 @@ xfs_bud_item_release(
 	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
 
 	xfs_bui_release(budp->bud_buip);
+	kmem_free(budp->bud_item.li_lv_shadow);
 	kmem_cache_free(xfs_bud_zone, budp);
 }
......
@@ -63,6 +63,7 @@ STATIC void
 xfs_icreate_item_release(
 	struct xfs_log_item	*lip)
 {
+	kmem_free(ICR_ITEM(lip)->ic_item.li_lv_shadow);
 	kmem_cache_free(xfs_icreate_zone, ICR_ITEM(lip));
 }
......
@@ -35,6 +35,7 @@ STATIC void
 xfs_cui_item_free(
 	struct xfs_cui_log_item	*cuip)
 {
+	kmem_free(cuip->cui_item.li_lv_shadow);
 	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
 		kmem_free(cuip);
 	else
@@ -204,6 +205,7 @@ xfs_cud_item_release(
 	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
 
 	xfs_cui_release(cudp->cud_cuip);
+	kmem_free(cudp->cud_item.li_lv_shadow);
 	kmem_cache_free(xfs_cud_zone, cudp);
 }
......
@@ -35,6 +35,7 @@ STATIC void
 xfs_rui_item_free(
 	struct xfs_rui_log_item	*ruip)
 {
+	kmem_free(ruip->rui_item.li_lv_shadow);
 	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
 		kmem_free(ruip);
 	else
@@ -227,6 +228,7 @@ xfs_rud_item_release(
 	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
 
 	xfs_rui_release(rudp->rud_ruip);
+	kmem_free(rudp->rud_item.li_lv_shadow);
 	kmem_cache_free(xfs_rud_zone, rudp);
 }
......
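The four xfs hunks share one pattern: each log item carries an optionally-allocated li_lv_shadow buffer, and the free/release paths returned the item to its slab cache without freeing that buffer, leaking it on every release. The pattern in miniature, with illustrative userspace names rather than the xfs API (free(NULL) is a no-op, matching kmem_free() on a never-allocated shadow):

#include <stdlib.h>

struct log_item {
	void *lv_shadow;	/* lazily allocated; may still be NULL */
};

struct intent_item {
	struct log_item item;
	/* ... payload ... */
};

static void intent_release(struct intent_item *ip)
{
	free(ip->item.lv_shadow);	/* the added line: without it, a leak */
	free(ip);			/* stands in for kmem_cache_free() */
}

int main(void)
{
	struct intent_item *ip = calloc(1, sizeof(*ip));

	if (!ip)
		return 1;
	ip->item.lv_shadow = malloc(64);	/* simulate a formatted shadow */
	intent_release(ip);
	return 0;
}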
@@ -6,11 +6,13 @@
 #include <linux/seq_file.h>
 
 #ifdef CONFIG_MEMCG_MEMFS_INFO
-void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m);
+void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, char *pathbuf,
+				 struct seq_file *m);
 int mem_cgroup_memfs_files_show(struct seq_file *m, void *v);
 void mem_cgroup_memfs_info_init(void);
 #else
 static inline void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg,
+					       char *pathbuf,
 					       struct seq_file *m)
 {
 }
......
@@ -706,6 +706,7 @@ typedef unsigned char *sk_buff_data_t;
  *	@transport_header: Transport layer header
  *	@network_header: Network layer header
  *	@mac_header: Link layer header
+ *	@kcov_handle: KCOV remote handle for remote coverage collection
  *	@scm_io_uring: SKB holds io_uring registered files
  *	@tail: Tail pointer
  *	@end: End pointer
@@ -913,6 +914,10 @@ struct sk_buff {
 	__u16			network_header;
 	__u16			mac_header;
 
+#ifdef CONFIG_KCOV
+	u64			kcov_handle;
+#endif
+
 	/* private: */
 	__u32			headers_end[0];
 	/* public: */
@@ -4212,9 +4217,6 @@ enum skb_ext_id {
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
 	SKB_EXT_MPTCP,
-#endif
-#if IS_ENABLED(CONFIG_KCOV)
-	SKB_EXT_KCOV_HANDLE,
 #endif
 	SKB_EXT_NUM, /* must be last */
 };
@@ -4670,35 +4672,22 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
 #endif
 }
 
-#if IS_ENABLED(CONFIG_KCOV) && IS_ENABLED(CONFIG_SKB_EXTENSIONS)
 static inline void skb_set_kcov_handle(struct sk_buff *skb,
 				       const u64 kcov_handle)
 {
-	/* Do not allocate skb extensions only to set kcov_handle to zero
-	 * (as it is zero by default). However, if the extensions are
-	 * already allocated, update kcov_handle anyway since
-	 * skb_set_kcov_handle can be called to zero a previously set
-	 * value.
-	 */
-	if (skb_has_extensions(skb) || kcov_handle) {
-		u64 *kcov_handle_ptr = skb_ext_add(skb, SKB_EXT_KCOV_HANDLE);
-
-		if (kcov_handle_ptr)
-			*kcov_handle_ptr = kcov_handle;
-	}
+#ifdef CONFIG_KCOV
+	skb->kcov_handle = kcov_handle;
+#endif
 }
 
 static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
 {
-	u64 *kcov_handle = skb_ext_find(skb, SKB_EXT_KCOV_HANDLE);
-
-	return kcov_handle ? *kcov_handle : 0;
-}
-
-#else
-static inline void skb_set_kcov_handle(struct sk_buff *skb,
-				       const u64 kcov_handle) { }
-static inline u64 skb_get_kcov_handle(struct sk_buff *skb) { return 0; }
-#endif /* CONFIG_KCOV && CONFIG_SKB_EXTENSIONS */
+#ifdef CONFIG_KCOV
+	return skb->kcov_handle;
+#else
+	return 0;
+#endif
+}
 
 static inline bool skb_csum_is_sctp(struct sk_buff *skb)
 {
......
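These skbuff.h hunks back out the skb-extension storage for the KCOV handle in favor of a field stored directly in struct sk_buff, so setting the handle can no longer fail or allocate. The ifdef-guarded accessor pattern they use, reduced to a standalone sketch where HAVE_TRACE stands in for CONFIG_KCOV:

#include <stdint.h>
#include <stdio.h>

#define HAVE_TRACE 1

struct pkt {
#if HAVE_TRACE
	uint64_t trace_handle;	/* exists only when the feature is built in */
#endif
	int len;
};

static inline void pkt_set_trace_handle(struct pkt *p, uint64_t h)
{
#if HAVE_TRACE
	p->trace_handle = h;	/* plain store: cannot fail, cannot allocate */
#else
	(void)p; (void)h;	/* compiles away entirely */
#endif
}

static inline uint64_t pkt_get_trace_handle(const struct pkt *p)
{
#if HAVE_TRACE
	return p->trace_handle;
#else
	(void)p;
	return 0;
#endif
}

int main(void)
{
	struct pkt p = { .len = 0 };

	pkt_set_trace_handle(&p, 0x42);
	printf("%llu\n", (unsigned long long)pkt_get_trace_handle(&p));
	return 0;
}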
@@ -1945,7 +1945,6 @@ config KCOV
 	depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
 	select DEBUG_FS
 	select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
-	select SKB_EXTENSIONS if NET
 	help
 	  KCOV exposes kernel code coverage information in a form suitable
 	  for coverage-guided fuzzing (randomized testing).
......
@@ -157,7 +157,8 @@ static void memfs_show_files_in_mem_cgroup(struct super_block *sb, void *data)
 	mntput(pfc->vfsmnt);
 }
 
-void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m)
+void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, char *pathbuf,
+				 struct seq_file *m)
 {
 	struct print_files_control pfc = {
 		.memcg = memcg,
@@ -165,17 +166,11 @@ void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m)
 		.max_print_files = memfs_max_print_files,
 		.size_threshold = memfs_size_threshold,
 	};
-	char *pathbuf;
 	int i;
 
 	if (!memfs_enable || !memcg)
 		return;
 
-	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
-	if (!pathbuf) {
-		SEQ_printf(m, "Show memfs failed due to OOM\n");
-		return;
-	}
 	pfc.pathbuf = pathbuf;
 	pfc.pathbuf_size = PATH_MAX;
@@ -192,15 +187,20 @@ void mem_cgroup_print_memfs_info(struct mem_cgroup *memcg, struct seq_file *m)
 		SEQ_printf(m, "total files: %lu, total memory-size: %lukB\n",
 			   pfc.total_print_files, pfc.total_files_size >> 10);
 	}
-
-	kfree(pfc.pathbuf);
 }
 
 int mem_cgroup_memfs_files_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+	char *pathbuf;
 
-	mem_cgroup_print_memfs_info(memcg, m);
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!pathbuf) {
+		SEQ_printf(m, "Show memfs abort: failed to allocate memory\n");
+		return 0;
+	}
+	mem_cgroup_print_memfs_info(memcg, pathbuf, m);
+	kfree(pathbuf);
 	return 0;
 }
......
@@ -1496,14 +1496,12 @@ static int __init memory_stats_init(void)
 }
 pure_initcall(memory_stats_init);
 
-static char *memory_stat_format(struct mem_cgroup *memcg)
+static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
 {
 	struct seq_buf s;
 	int i;
 
-	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
-	if (!s.buffer)
-		return NULL;
+	seq_buf_init(&s, buf, bufsize);
 
 	/*
 	 * Provide statistics on the state of the memory subsystem as
@@ -1563,8 +1561,6 @@ static char *memory_stat_format(struct mem_cgroup *memcg)
 
 	/* The above should easily fit into one page */
 	WARN_ON_ONCE(seq_buf_has_overflowed(&s));
-
-	return s.buffer;
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -1600,7 +1596,11 @@ void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *
  */
 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
-	char *buf;
+	/* Use static buffer, for the caller is holding oom_lock. */
+	static char buf[PAGE_SIZE];
+	static char pathbuf[PATH_MAX];
+
+	lockdep_assert_held(&oom_lock);
 
 	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
 		K((u64)page_counter_read(&memcg->memory)),
@@ -1621,13 +1621,10 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 	pr_info("Memory cgroup stats for ");
 	pr_cont_cgroup_path(memcg->css.cgroup);
 	pr_cont(":");
-	buf = memory_stat_format(memcg);
-	if (!buf)
-		return;
+	memory_stat_format(memcg, buf, sizeof(buf));
 	pr_info("%s", buf);
-	kfree(buf);
-	mem_cgroup_print_memfs_info(memcg, NULL);
+	mem_cgroup_print_memfs_info(memcg, pathbuf, NULL);
 }
 
 /*
@@ -6639,11 +6636,11 @@ static int memory_events_local_show(struct seq_file *m, void *v)
 static int memory_stat_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
-	char *buf;
+	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 
-	buf = memory_stat_format(memcg);
 	if (!buf)
 		return -ENOMEM;
+	memory_stat_format(memcg, buf, PAGE_SIZE);
 	seq_puts(m, buf);
 	kfree(buf);
 	return 0;
......
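The memcontrol.c hunks change memory_stat_format() to write into a caller-supplied buffer, which lets the OOM report path use static storage instead of calling kmalloc() while the system is already out of memory; the static buffers are safe because every caller holds oom_lock, which the added lockdep_assert_held() enforces. The shape of that change as a standalone sketch, not the kernel API:

#include <stdio.h>

static void stat_format(char *buf, size_t bufsize)
{
	/* snprintf-style formatting never overruns the caller's buffer */
	snprintf(buf, bufsize, "usage=%d limit=%d\n", 42, 100);
}

static void print_oom_meminfo(void)
{
	/* static: safe only because callers serialize on a global lock
	 * (oom_lock in the kernel), so no allocation on the OOM path
	 */
	static char buf[4096];

	stat_format(buf, sizeof(buf));
	fputs(buf, stderr);
}

int main(void)
{
	print_oom_meminfo();
	return 0;
}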
@@ -4262,9 +4262,6 @@ static const u8 skb_ext_type_len[] = {
 #if IS_ENABLED(CONFIG_MPTCP)
 	[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
 #endif
-#if IS_ENABLED(CONFIG_KCOV)
-	[SKB_EXT_KCOV_HANDLE] = SKB_EXT_CHUNKSIZEOF(u64),
-#endif
 };
 
 static __always_inline unsigned int skb_ext_total_length(void)
@@ -4281,9 +4278,6 @@ static __always_inline unsigned int skb_ext_total_length(void)
 #endif
 #if IS_ENABLED(CONFIG_MPTCP)
 	skb_ext_type_len[SKB_EXT_MPTCP] +
-#endif
-#if IS_ENABLED(CONFIG_KCOV)
-	skb_ext_type_len[SKB_EXT_KCOV_HANDLE] +
 #endif
 	0;
 }
......
@@ -539,6 +539,7 @@ static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 			struct raw6_sock *rp)
 {
+	struct ipv6_txoptions *opt;
 	struct sk_buff *skb;
 	int err = 0;
 	int offset;
@@ -556,6 +557,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 	offset = rp->offset;
 	total_len = inet_sk(sk)->cork.base.length;
+	opt = inet6_sk(sk)->cork.opt;
+	total_len -= opt ? opt->opt_flen : 0;
+
 	if (offset >= total_len - 1) {
 		err = -EINVAL;
 		ip6_flush_pending_frames(sk);
......
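The raw.c hunk is the CVE-2023-0394 fix: the user-controlled checksum offset was validated against a cork length that still included extension-header bytes, so an offset beyond the real payload could pass the check and drive the later checksum write outside the packet. A standalone sketch of the corrected bounds check, with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

static bool offset_ok(size_t offset, size_t cork_len, size_t ext_hdr_len)
{
	if (cork_len < ext_hdr_len + 2)
		return false;	/* no room for a 2-byte checksum at all */

	/* the fix: deduct the extension-header bytes before comparing */
	return offset < cork_len - ext_hdr_len - 1;
}

int main(void)
{
	/* 8 payload bytes behind a 16-byte extension header: offset 10
	 * passed the old check (10 < 24 - 1) but lies past the payload.
	 */
	printf("old-style check accepts offset 10: %s\n",
	       10 < 24 - 1 ? "yes (bug)" : "no");
	printf("fixed check accepts offset 10: %s\n",
	       offset_ok(10, 24, 16) ? "yes" : "no");
	return 0;
}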
@@ -1081,12 +1081,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 skip:
 		if (!ingress) {
-			notify_and_destroy(net, skb, n, classid,
-					   rtnl_dereference(dev->qdisc), new);
+			old = rtnl_dereference(dev->qdisc);
 			if (new && !new->ops->attach)
 				qdisc_refcount_inc(new);
 			rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
 
+			notify_and_destroy(net, skb, n, classid, old, new);
+
 			if (new && new->ops->attach)
 				new->ops->attach(new);
 		} else {
......
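The sch_api.c hunk closes a race in qdisc_graft(): the old code notified and destroyed the outgoing root qdisc while dev->qdisc still pointed at it, so concurrent readers could walk a qdisc already being torn down. The fix publishes the replacement first and destroys the old one afterwards. The same ordering in a standalone sketch, with C11 atomics standing in for RCU and the grace-period wait elided:

#include <stdatomic.h>
#include <stdlib.h>

struct qdisc {
	int id;
};

static _Atomic(struct qdisc *) active_qdisc;

static void graft(struct qdisc *new)
{
	struct qdisc *old = atomic_load(&active_qdisc);

	atomic_store(&active_qdisc, new);	/* 1. publish the new object */
	free(old);				/* 2. only then destroy the old */
}

int main(void)
{
	struct qdisc *q = malloc(sizeof(*q));

	if (!q)
		return 1;
	q->id = 1;
	graft(q);	/* replaces the initial NULL */
	graft(NULL);	/* unpublishes q, then tears it down */
	return 0;
}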