Unverified commit d6879a8e authored by openeuler-ci-bot, committed by Gitee

!872 [sync] PR-863: Backport CVEs and bugfixes

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/863 
 
PR sync from:  Jialin Zhang <zhangjialin11@huawei.com>
 https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/thread/UAMVHA4ICOFJJXDMX2CXEV6TEZSY7Y7U/ 
Pull new CVEs:
CVE-2023-22998

cgroup bugfix from Gaosheng Cui
sched bugfix from Xia Fukun
block bugfixes from Zhong Jinghua and Yu Kuai
iomap and ext4 bugfixes from Baokun Li
md and eulerfs bugfixes from Yu Kuai

-- 
2.25.1
 
 
Link: https://gitee.com/openeuler/kernel/pulls/872 

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -186,6 +186,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);
int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
/*
* Plug flush limits
@@ -736,17 +736,45 @@ static void register_disk(struct device *parent, struct gendisk *disk,
}
}
static void disk_scan_partitions(struct gendisk *disk)
int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
{
struct block_device *bdev;
struct block_device *claim;
int ret = 0;
if (!get_capacity(disk) || !disk_part_scan_enabled(disk))
return;
if (!disk_part_scan_enabled(disk))
return -EINVAL;
/*
* If the device is opened exclusively by current thread already, it's
* safe to scan partitions, otherwise, use bd_prepare_to_claim() to
* synchronize with other exclusive openers and other partition
* scanners.
*/
if (!(mode & FMODE_EXCL)) {
claim = bdget_part(&disk->part0);
if (!claim)
return -ENOMEM;
ret = bd_prepare_to_claim(claim, claim, disk_scan_partitions);
if (ret) {
bdput(claim);
return ret;
}
}
set_bit(GD_NEED_PART_SCAN, &disk->state);
bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
if (!IS_ERR(bdev))
blkdev_put(bdev, FMODE_READ);
bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
if (IS_ERR(bdev))
ret = PTR_ERR(bdev);
else
blkdev_put(bdev, mode & ~FMODE_EXCL);
if (!(mode & FMODE_EXCL)) {
bd_abort_claiming(claim, claim, disk_scan_partitions);
bdput(claim);
}
return ret;
}
static void disk_init_partition(struct gendisk *disk)
@@ -755,7 +783,8 @@ static void disk_init_partition(struct gendisk *disk)
struct disk_part_iter piter;
struct hd_struct *part;
disk_scan_partitions(disk);
if (get_capacity(disk))
disk_scan_partitions(disk, FMODE_READ);
/* announce disk after possible partitions are created */
dev_set_uevent_suppress(ddev, 0);
@@ -847,6 +876,10 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
disk_add_events(disk);
blk_integrity_add(disk);
/* Make sure the first partition scan will proceed */
if (get_capacity(disk) && disk_part_scan_enabled(disk))
set_bit(GD_NEED_PART_SCAN, &disk->state);
/*
* Set the flag last, so that the block device can't be opened
* before its registration is done.
@@ -32,9 +32,16 @@ static int blkpg_do_ioctl(struct block_device *bdev,
if (op == BLKPG_DEL_PARTITION)
return bdev_del_partition(bdev, p.pno);
if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
return -EINVAL;
start = p.start >> SECTOR_SHIFT;
length = p.length >> SECTOR_SHIFT;
/* length may be equal to 0 after right shift */
if (!length || start + length > get_capacity(bdev->bd_disk))
return -EINVAL;
/* check for fit in a hd_struct */
if (sizeof(sector_t) < sizeof(long long)) {
long pstart = start, plength = length;
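As an aside, the byte-based start/length fields validated above come straight from the BLKPG ioctl's user ABI. Below is a minimal userspace sketch (not part of the patch) that exercises BLKPG_ADD_PARTITION; /dev/loop0, partition number 1 and the 1 MiB/16 MiB byte range are placeholders, so run it only against a disposable test device.

/*
 * Minimal userspace sketch (not part of the patch): exercise the
 * BLKPG_ADD_PARTITION ioctl whose inputs the hunk above validates.
 * /dev/loop0, partition number 1 and the byte range are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkpg.h>

int main(void)
{
        struct blkpg_partition part = {
                .start  = 1 * 1024 * 1024,      /* byte offset, shifted to sectors in-kernel */
                .length = 16 * 1024 * 1024,     /* byte length, must stay non-zero after the shift */
                .pno    = 1,
        };
        struct blkpg_ioctl_arg arg = {
                .op      = BLKPG_ADD_PARTITION,
                .datalen = sizeof(part),
                .data    = &part,
        };
        int fd = open("/dev/loop0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, BLKPG, &arg) < 0)
                perror("BLKPG_ADD_PARTITION");  /* negative or overflowing ranges now fail with EINVAL */
        close(fd);
        return 0;
}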
@@ -90,31 +97,6 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
}
#endif
static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
{
struct block_device *tmp;
if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (bdev->bd_part_count)
return -EBUSY;
/*
* Reopen the device to revalidate the driver state and force a
* partition rescan.
*/
mode &= ~FMODE_EXCL;
set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
blkdev_put(tmp, mode);
return 0;
}
static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
unsigned long arg, unsigned long flags)
{
@@ -562,7 +544,13 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKRRPART:
return blkdev_reread_part(bdev, mode);
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (bdev_is_partition(bdev))
return -EINVAL;
if (bdev->bd_part_count)
return -EBUSY;
return disk_scan_partitions(bdev->bd_disk, mode);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
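With the hunk above, BLKRRPART no longer goes through blkdev_reread_part() but through disk_scan_partitions(disk, mode), so the permission and busy checks stay in the ioctl handler while the rescan itself is shared with device registration. A minimal userspace sketch (not part of the patch) that drives this path; the device path is a placeholder and the caller needs CAP_SYS_ADMIN.

/*
 * Minimal userspace sketch (not part of the patch): trigger a partition
 * rescan through BLKRRPART, the path rewired above to
 * disk_scan_partitions(). /dev/sdX is a placeholder.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>                   /* BLKRRPART */

int main(int argc, char **argv)
{
        const char *dev = argc > 1 ? argv[1] : "/dev/sdX";
        int fd = open(dev, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, BLKRRPART) < 0)
                perror("BLKRRPART");    /* EACCES without CAP_SYS_ADMIN, EBUSY if partitions are in use */
        close(fd);
        return 0;
}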
@@ -157,9 +157,11 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
* since virtio_gpu doesn't support dma-buf import from other devices.
*/
shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
if (!shmem->pages) {
if (IS_ERR(shmem->pages)) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;
ret = PTR_ERR(shmem->pages);
shmem->pages = NULL;
return ret;
}
if (use_dma_api) {
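The virtio-gpu fix above works because drm_gem_shmem_get_sg_table() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the old NULL check let error pointers through (CVE-2023-22998). Below is a simplified userspace rendering of that convention; the macros are illustrative stand-ins for the real ones in include/linux/err.h.

/*
 * Simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR macros
 * (see include/linux/err.h): errors are encoded in the topmost bytes of
 * the address space, so a failed call returns a non-NULL pointer that a
 * plain NULL check -- the bug fixed above -- does not catch.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static char fake_table[] = "sg-table";

static void *fake_get_sg_table(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)fake_table;
}

int main(void)
{
        void *p = fake_get_sg_table(1);

        if (IS_ERR(p))                  /* p != NULL here, so a NULL check would pass it through */
                printf("error %ld\n", PTR_ERR(p));
        return 0;
}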
@@ -690,12 +690,14 @@ void mddev_init(struct mddev *mddev)
mutex_init(&mddev->open_mutex);
mutex_init(&mddev->reconfig_mutex);
mutex_init(&mddev->bitmap_info.mutex);
mutex_init(&mddev->sync_mutex);
INIT_LIST_HEAD(&mddev->disks);
INIT_LIST_HEAD(&mddev->all_mddevs);
timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
atomic_set(&mddev->sync_seq, 0);
spin_lock_init(&mddev->lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
@@ -4855,6 +4857,68 @@ action_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", type);
}
static void stop_sync_thread(struct mddev *mddev)
{
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return;
if (mddev_lock(mddev))
return;
/*
* Check again in case MD_RECOVERY_RUNNING is cleared before lock is
* held.
*/
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
mddev_unlock(mddev);
return;
}
if (work_pending(&mddev->del_work))
flush_workqueue(md_misc_wq);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
/*
* Thread might be blocked waiting for metadata update which will now
* never happen.
*/
if (mddev->sync_thread)
wake_up_process(mddev->sync_thread->tsk);
mddev_unlock(mddev);
}
static void idle_sync_thread(struct mddev *mddev)
{
int sync_seq = atomic_read(&mddev->sync_seq);
if (mutex_lock_interruptible(&mddev->sync_mutex))
return;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
stop_sync_thread(mddev);
wait_event_interruptible(resync_wait,
sync_seq != atomic_read(&mddev->sync_seq) ||
!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
mutex_unlock(&mddev->sync_mutex);
}
static void frozen_sync_thread(struct mddev *mddev)
{
if (mutex_lock_interruptible(&mddev->sync_mutex))
return;
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
stop_sync_thread(mddev);
wait_event_interruptible(resync_wait, mddev->sync_thread == NULL &&
!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
mutex_unlock(&mddev->sync_mutex);
}
static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
@@ -4862,22 +4926,11 @@ action_store(struct mddev *mddev, const char *page, size_t len)
return -EINVAL;
if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
if (cmd_match(page, "frozen"))
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
else
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
mddev_lock(mddev) == 0) {
if (work_pending(&mddev->del_work))
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
mddev_unlock(mddev);
}
} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
if (cmd_match(page, "idle"))
idle_sync_thread(mddev);
else if (cmd_match(page, "frozen"))
frozen_sync_thread(mddev);
else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -9437,6 +9490,8 @@ void md_reap_sync_thread(struct mddev *mddev)
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
atomic_inc(&mddev->sync_seq);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
mddev->degraded != mddev->raid_disks) {
@@ -9481,7 +9536,6 @@ void md_reap_sync_thread(struct mddev *mddev)
if (mddev_is_clustered(mddev) && is_reshaped
&& !test_bit(MD_CLOSING, &mddev->flags))
md_cluster_ops->update_size(mddev, old_dev_sectors);
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -9489,6 +9543,7 @@ void md_reap_sync_thread(struct mddev *mddev)
md_new_event(mddev);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
wake_up(&resync_wait);
}
EXPORT_SYMBOL(md_reap_sync_thread);
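The new idle_sync_thread() and frozen_sync_thread() helpers back the "idle" and "frozen" keywords accepted by the md sync_action sysfs attribute. A minimal userspace sketch (not part of the patch) that drives that interface; the array name md0 is a placeholder and the program needs write access to sysfs.

/*
 * Minimal userspace sketch (not part of the patch): write the keywords
 * handled by action_store()/idle_sync_thread()/frozen_sync_thread() above
 * to the md sysfs attribute. "md0" is a placeholder array name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_action(const char *action)
{
        int fd = open("/sys/block/md0/md/sync_action", O_WRONLY);

        if (fd < 0) {
                perror("open sync_action");
                return;
        }
        if (write(fd, action, strlen(action)) < 0)
                perror("write sync_action");
        close(fd);
}

int main(void)
{
        write_action("frozen");         /* stop any running sync thread and keep it stopped */
        write_action("idle");           /* interrupt a resync and wait until it has stopped */
        return 0;
}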
@@ -528,6 +528,11 @@ struct mddev {
bool has_superblocks:1;
bool fail_last_dev:1;
bool serialize_policy:1;
/* Used to synchronize idle and frozen for action_store() */
KABI_EXTEND(struct mutex sync_mutex)
/* The sequence number for sync thread */
KABI_EXTEND(atomic_t sync_seq)
};
enum recovery_flags {
@@ -77,11 +77,11 @@ void destroy_dep_node_cache(void)
void *eufs_zalloc_page(void)
{
return kmem_cache_zalloc(eufs_page_cachep, GFP_NOFS);
return kmem_cache_zalloc(eufs_page_cachep, GFP_NOFS | __GFP_NOFAIL);
}
void *eufs_alloc_page(void)
{
return kmem_cache_alloc(eufs_page_cachep, GFP_NOFS);
return kmem_cache_alloc(eufs_page_cachep, GFP_NOFS | __GFP_NOFAIL);
}
void eufs_free_page(void *page)
{
@@ -525,7 +525,7 @@ void nv_fini(struct super_block *sb)
kfree(sbi->gpool);
}
void nv_init(struct super_block *sb, bool init)
int nv_init(struct super_block *sb, bool init)
{
struct eufs_sb_info *sbi = EUFS_SB(sb);
struct mem_pool *ppool;
@@ -533,6 +533,9 @@ void nv_init(struct super_block *sb, bool init)
/* allocate pools */
sbi->gpool = kmalloc(sizeof(struct mem_pool), GFP_KERNEL);
if (!sbi->gpool)
return -ENOMEM;
INIT_LIST_HEAD(&sbi->gpool->large_list);
INIT_LIST_HEAD(&sbi->gpool->page_list);
INIT_LIST_HEAD(&sbi->gpool->line4_list);
@@ -543,6 +546,9 @@ void nv_init(struct super_block *sb, bool init)
sbi->gpool->nlines = 0;
sbi->rest_pool = kmalloc(sizeof(struct mem_pool), GFP_KERNEL);
if (!sbi->rest_pool)
goto err_rest_pool;
INIT_LIST_HEAD(&sbi->rest_pool->large_list);
INIT_LIST_HEAD(&sbi->rest_pool->page_list);
INIT_LIST_HEAD(&sbi->rest_pool->line4_list);
@@ -554,6 +560,9 @@ void nv_init(struct super_block *sb, bool init)
sbi->rest_pool->nlines = 0;
sbi->ppool = alloc_percpu(struct mem_pool);
if (!sbi->ppool)
goto err_ppool;
for_each_online_cpu(cpu) {
ppool = per_cpu_ptr(sbi->ppool, cpu);
INIT_LIST_HEAD(&ppool->large_list);
@@ -568,6 +577,15 @@ void nv_init(struct super_block *sb, bool init)
}
partition(sb, init);
return 0;
err_ppool:
kfree(sbi->rest_pool);
sbi->rest_pool = NULL;
err_rest_pool:
kfree(sbi->gpool);
sbi->gpool = NULL;
return -ENOMEM;
}
static int cut_from_list_remaining(struct list_head *head, int remaining,
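The nv_init() changes above adopt the usual allocate-then-unwind error handling, so a failed allocation releases only what was already set up. A generic userspace sketch of that idiom follows, with illustrative names rather than eulerfs types.

/*
 * Generic sketch of the allocate-then-unwind idiom used by the nv_init()
 * hunk above: each failure path frees exactly what earlier steps allocated.
 * The struct and field names are illustrative, not eulerfs types.
 */
#include <stdlib.h>

struct pools {
        void *gpool;
        void *rest_pool;
        void *ppool;
};

static int pools_init(struct pools *p, size_t sz)
{
        p->gpool = malloc(sz);
        if (!p->gpool)
                return -1;

        p->rest_pool = malloc(sz);
        if (!p->rest_pool)
                goto err_rest_pool;

        p->ppool = malloc(sz);
        if (!p->ppool)
                goto err_ppool;

        return 0;

err_ppool:
        free(p->rest_pool);
        p->rest_pool = NULL;
err_rest_pool:
        free(p->gpool);
        p->gpool = NULL;
        return -1;
}

int main(void)
{
        struct pools p;

        if (pools_init(&p, 4096))
                return 1;
        free(p.ppool);
        free(p.rest_pool);
        free(p.gpool);
        return 0;
}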
@@ -134,7 +134,7 @@ int nvmalloc_pre(struct super_block *sb, struct alloc_batch *ab, size_t count,
size_t size);
void *nvmalloc(struct super_block *sb, size_t size, u8 tag, bool nonblocking);
void nvfree(struct super_block *sb, void *ptr, bool rest);
void nv_init(struct super_block *sb, bool init);
int nv_init(struct super_block *sb, bool init);
void nv_fini(struct super_block *sb);
void eufs_get_layout(struct super_block *sb, bool init);
@@ -332,7 +332,9 @@ static struct eufs_inode *eufs_init(struct super_block *sb, unsigned long size)
sbi->s_crash_ver = 1;
super->s_crash_ver = cpu_to_le64(1);
nv_init(sb, true);
if (nv_init(sb, true))
return ERR_PTR(-ENOMEM);
super->s_page_map = cpu_to_le64(p2o(sb, sbi->page_map));
super->s_mtime = 0;
@@ -478,7 +480,9 @@ static int eufs_fill_super(struct super_block *sb, void *data, int silent)
eufs_pbarrier();
}
nv_init(sb, false);
err = nv_init(sb, false);
if (err)
goto out;
root_pi = (struct eufs_inode *)s2p(sb, super->s_root_pi);
@@ -2879,11 +2879,9 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
crc = crc16(crc, (__u8 *)gdp, offset);
offset += sizeof(gdp->bg_checksum); /* skip checksum */
/* for checksum of struct ext4_group_desc do the rest...*/
if (ext4_has_feature_64bit(sb) &&
offset < le16_to_cpu(sbi->s_es->s_desc_size))
if (ext4_has_feature_64bit(sb) && offset < sbi->s_desc_size)
crc = crc16(crc, (__u8 *)gdp + offset,
le16_to_cpu(sbi->s_es->s_desc_size) -
offset);
sbi->s_desc_size - offset);
out:
return cpu_to_le16(crc);
@@ -217,6 +217,7 @@ iomap_read_inline_data(struct inode *inode, struct page *page,
if (PageUptodate(page))
return;
BUG_ON(page_has_private(page));
BUG_ON(page->index);
BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));
@@ -241,7 +242,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
{
struct iomap_readpage_ctx *ctx = data;
struct page *page = ctx->cur_page;
struct iomap_page *iop = iomap_page_create(inode, page);
struct iomap_page *iop;
bool same_page = false, is_contig = false;
loff_t orig_pos = pos;
unsigned poff, plen;
@@ -254,6 +255,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
/* zero post-eof blocks as the page may be mapped */
iop = iomap_page_create(inode, page);
iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
@@ -995,7 +997,6 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
block_commit_write(page, 0, length);
} else {
WARN_ON_ONCE(!PageUptodate(page));
iomap_page_create(inode, page);
set_page_dirty(page);
}
@@ -1377,14 +1378,13 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct inode *inode,
struct page *page, u64 end_offset)
{
struct iomap_page *iop = to_iomap_page(page);
struct iomap_page *iop = iomap_page_create(inode, page);
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
u64 file_offset; /* file offset of page */
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
/*
@@ -1433,7 +1433,6 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
if (wpc->ops->discard_page)
wpc->ops->discard_page(page, file_offset);
if (!count) {
ClearPageUptodate(page);
unlock_page(page);
goto done;
}
@@ -525,7 +525,7 @@ xfs_discard_page(
int error;
if (XFS_FORCED_SHUTDOWN(mp))
goto out_invalidate;
return;
xfs_alert_ratelimited(mp,
"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
@@ -535,8 +535,6 @@ xfs_discard_page(
i_blocks_per_page(inode, page) - pageoff_fsb);
if (error && !XFS_FORCED_SHUTDOWN(mp))
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
}
static const struct iomap_writeback_ops xfs_writeback_ops = {
@@ -47,6 +47,7 @@ struct kernel_clone_args;
/* internal flags */
#define CSS_TASK_ITER_SKIPPED (1U << 16)
#define CSS_TASK_ITER_STOPPED (1U << 17)
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
@@ -215,6 +215,8 @@ static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_skip(struct css_task_iter *it,
struct task_struct *task);
static void css_task_iter_stop(struct css_task_iter *it,
struct cgroup_subsys *ss);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
struct cgroup_subsys *ss);
@@ -855,6 +857,19 @@ static void css_set_skip_task_iters(struct css_set *cset,
css_task_iter_skip(it, task);
}
/*
* @cset is moving to another list; it is not safe to continue the
* iteration, because the cset_head of a css_task_iter taken from the old
* list can no longer be used as the stop condition of the iteration.
*/
static void css_set_stop_iters(struct css_set *cset, struct cgroup_subsys *ss)
{
struct css_task_iter *it, *pos;
list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
css_task_iter_stop(it, ss);
}
/**
* css_set_move_task - move a task from one css_set to another
* @task: task being moved
@@ -1775,9 +1790,11 @@ int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
css->cgroup = dcgrp;
spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist)
hash_for_each(css_set_table, i, cset, hlist) {
css_set_stop_iters(cset, ss);
list_move_tail(&cset->e_cset_node[ss->id],
&dcgrp->e_csets[ss->id]);
}
spin_unlock_irq(&css_set_lock);
if (ss->css_rstat_flush) {
@@ -4706,6 +4723,16 @@ static void css_task_iter_skip(struct css_task_iter *it,
}
}
static void css_task_iter_stop(struct css_task_iter *it,
struct cgroup_subsys *ss)
{
lockdep_assert_held(&css_set_lock);
if (it->ss == ss) {
it->flags |= CSS_TASK_ITER_STOPPED;
}
}
static void css_task_iter_advance(struct css_task_iter *it)
{
struct task_struct *task;
@@ -4809,6 +4836,11 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
spin_lock_irq(&css_set_lock);
if (it->flags & CSS_TASK_ITER_STOPPED) {
spin_unlock_irq(&css_set_lock);
return NULL;
}
/* @it may be half-advanced by skips, finish advancing */
if (it->flags & CSS_TASK_ITER_SKIPPED)
css_task_iter_advance(it);
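css_task_iter serves, among other things, userspace reads of cgroup.procs, and the fix above makes such an iterator stop cleanly when its css_set is moved by rebind_subsystems(). A minimal reader is sketched below for reference; it assumes cgroup v2 mounted at /sys/fs/cgroup, and both the mount point and the default group path are assumptions.

/*
 * Minimal userspace sketch (not part of the patch): read cgroup.procs,
 * one of the interfaces backed by css_task_iter. Assumes cgroup v2 is
 * mounted at /sys/fs/cgroup; pass another cgroup.procs path as argv[1].
 */
#include <stdio.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/sys/fs/cgroup/cgroup.procs";
        FILE *f = fopen(path, "r");
        char line[64];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))    /* one PID per line */
                fputs(line, stdout);
        fclose(f);
        return 0;
}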
@@ -2140,7 +2140,7 @@ static void sd_llc_free_all(const struct cpumask *cpu_map)
for_each_sd_topology(tl) {
sdd = &tl->data;
if (!sdd)
if (!sdd || !sdd->sd)
continue;
for_each_cpu(j, cpu_map) {
sd = *per_cpu_ptr(sdd->sd, j);