Unverified commit 1e50e74f, authored by openeuler-ci-bot and committed by Gitee

!358 Backport CVE fixes, bugfixes and other changes

Merge Pull Request from: @zhangjialin11 
 
Pull in fixes for new CVEs:
CVE-2023-20928
CVE-2022-4696
CVE-2023-0210
CVE-2022-4842

fs bugfixes from Li Lingfeng, yangerkun and Zhihao Cheng
mm bugfixes from Ye Weihua
tracing bugfixes from Zheng Yejian
other fixes from Guo Mengqi
 
Link: https://gitee.com/openeuler/kernel/pulls/358

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
......@@ -3405,7 +3405,6 @@ CONFIG_TCG_TIS_ST33ZP24_I2C=m
CONFIG_TCG_TIS_ST33ZP24_SPI=m
# CONFIG_XILLYBUS is not set
CONFIG_PIN_MEMORY_DEV=m
- CONFIG_HISI_SVM=m
# CONFIG_RANDOM_TRUST_CPU is not set
# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
# end of Character devices
......
......@@ -6081,6 +6081,7 @@ const struct file_operations binder_fops = {
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
+ .may_pollfree = true,
};
static int __init init_binder_device(const char *name)
......
......@@ -212,7 +212,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
mm = alloc->vma_vm_mm;
if (mm) {
- mmap_read_lock(mm);
+ mmap_write_lock(mm);
vma = alloc->vma;
}
......@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_alloc_page_end(alloc, index);
}
if (mm) {
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
mmput(mm);
}
return 0;
......@@ -303,7 +303,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
err_no_vma:
if (mm) {
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
mmput(mm);
}
return vma ? -ENOMEM : -ESRCH;
......
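These binder_alloc hunks (the CVE-2023-20928 class of bug) switch binder_update_page_range() from the shared to the exclusive mmap lock, so page-range updates are serialized against munmap() tearing down alloc->vma. As a rough userspace analogue of the locking discipline — the check of the mapping and the mutation through it must sit in one exclusive critical section — here is a minimal sketch using a POSIX rwlock as a stand-in for mmap_read_lock()/mmap_write_lock(); all names are illustrative, not kernel API:

```c
/*
 * Userspace analogue only, not the kernel code. Build: cc race.c -pthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_mm {
	pthread_rwlock_t lock;	/* stands in for mm->mmap_lock */
	char *vma;		/* stands in for alloc->vma */
};

static void *unmap_thread(void *arg)	/* models munmap() */
{
	struct fake_mm *mm = arg;
	pthread_rwlock_wrlock(&mm->lock);
	free(mm->vma);
	mm->vma = NULL;
	pthread_rwlock_unlock(&mm->lock);
	return NULL;
}

static void *populate_thread(void *arg)	/* models binder_update_page_range() */
{
	struct fake_mm *mm = arg;
	pthread_rwlock_wrlock(&mm->lock);	/* exclusive, as in the fix */
	if (mm->vma)
		memset(mm->vma, 0xaa, 4096);	/* safe: teardown is locked out */
	pthread_rwlock_unlock(&mm->lock);
	return NULL;
}

int main(void)
{
	struct fake_mm mm = { .vma = malloc(4096) };
	pthread_rwlock_init(&mm.lock, NULL);

	pthread_t a, b;
	pthread_create(&a, NULL, unmap_thread, &mm);
	pthread_create(&b, NULL, populate_thread, &mm);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	free(mm.vma);		/* NULL after teardown; free(NULL) is a no-op */
	pthread_rwlock_destroy(&mm.lock);
	return 0;
}
```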
......@@ -478,16 +478,6 @@ config PIN_MEMORY_DEV
help
pin memory driver
- config HISI_SVM
- tristate "Hisilicon svm driver"
- depends on ARM64 && ARM_SMMU_V3 && MMU_NOTIFIER
- default m
- help
- This driver provides character-level access to Hisilicon
- SVM chipset. Typically, you can bind a task to the
- svm and share the virtual memory with hisilicon svm device.
- When in doubt, say "N".
config RANDOM_TRUST_CPU
bool "Initialize RNG using CPU RNG instructions"
default y
......
......@@ -48,4 +48,3 @@ obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_POWERNV_OP_PANEL) += powernv-op-panel.o
obj-$(CONFIG_ADI) += adi.o
obj-$(CONFIG_PIN_MEMORY_DEV) += pin_memory.o
- obj-$(CONFIG_HISI_SVM) += svm.o
This diff is collapsed.
......@@ -935,7 +935,7 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
- .work_flags = IO_WQ_WORK_BLKCG,
+ .work_flags = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FILES,
},
[IORING_OP_PROVIDE_BUFFERS] = {},
[IORING_OP_REMOVE_BUFFERS] = {},
......@@ -5233,6 +5233,11 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
struct io_ring_ctx *ctx = req->ctx;
bool cancel = false;
+ if (req->file->f_op->may_pollfree) {
+ spin_lock_irq(&ctx->completion_lock);
+ return -EOPNOTSUPP;
+ }
INIT_HLIST_NODE(&req->hash_node);
io_init_poll_iocb(poll, mask, wake_func);
poll->file = req->file;
......
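may_pollfree lets io_uring refuse to arm its poll handler on files (binder, signalfd) whose wait queues can be freed while a poll request is still armed — the use-after-free class this series closes. Note that returning with ctx->completion_lock held matches __io_arm_poll_handler()'s contract here: callers drop the lock. A small userspace probe, assuming liburing is installed; on a kernel carrying this fix the poll CQE is expected to carry -EOPNOTSUPP:

```c
/* Illustrative probe, not part of the patch. Build: cc probe.c -luring */
#include <liburing.h>
#include <sys/signalfd.h>
#include <signal.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	sigset_t mask;
	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);
	sigprocmask(SIG_BLOCK, &mask, NULL);	/* signalfd requires blocked signals */

	int sfd = signalfd(-1, &mask, 0);
	if (sfd < 0) {
		perror("signalfd");
		return 1;
	}

	struct io_uring ring;
	int ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, sfd, POLLIN);
	io_uring_submit(&ring);

	raise(SIGUSR1);				/* make the signalfd readable */

	struct io_uring_cqe *cqe;
	io_uring_wait_cqe(&ring, &cqe);
	if (cqe->res < 0)			/* EOPNOTSUPP on patched kernels */
		printf("poll refused: %s\n", strerror(-cqe->res));
	else
		printf("poll armed and completed, mask=0x%x\n", cqe->res);

	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
```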
......@@ -1455,13 +1455,6 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
PF_MEMALLOC))
goto redirty;
- /*
- * Given that we do not allow direct reclaim to call us, we should
- * never be called in a recursive filesystem reclaim context.
- */
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
- goto redirty;
/*
* Is this page beyond the end of the file?
*
......
......@@ -321,7 +321,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
dn_len = le16_to_cpu(authblob->DomainName.Length);
- if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len)
+ if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len ||
+ nt_len < CIFS_ENCPWD_SIZE)
return -EINVAL;
/* TODO : use domain name that imported from configuration file */
......
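The added nt_len < CIFS_ENCPWD_SIZE check (the ksmbd fix in this series, CVE-2023-0210) rejects NT responses shorter than the fixed-size encrypted password blob before nt_len - CIFS_ENCPWD_SIZE can underflow later in authentication. A standalone sketch of the validation pattern — widen to u64 so offset + length cannot wrap, then enforce the minimum length. The constant's value is assumed from the kernel's cifs/ksmbd headers:

```c
/* Illustrative model of the bounds check, not the kernel function. */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define CIFS_ENCPWD_SIZE 16	/* assumed to match the kernel definition */

static bool blob_region_valid(uint32_t blob_len, uint32_t off, uint16_t len)
{
	/* (uint64_t)off + len cannot overflow: 2^32 + 2^16 fits in 64 bits */
	return (uint64_t)off + len <= blob_len;
}

static bool ntlmssp_auth_ok(uint32_t blob_len,
			    uint32_t dn_off, uint16_t dn_len,
			    uint32_t nt_off, uint16_t nt_len)
{
	if (!blob_region_valid(blob_len, dn_off, dn_len) ||
	    !blob_region_valid(blob_len, nt_off, nt_len) ||
	    nt_len < CIFS_ENCPWD_SIZE)	/* the check this patch adds */
		return false;
	return true;
}

int main(void)
{
	/* a short NT response used to underflow nt_len - CIFS_ENCPWD_SIZE
	 * later in the handshake; it is now rejected up front */
	assert(!ntlmssp_auth_ok(64, 0, 8, 8, 4));
	assert(ntlmssp_auth_ok(64, 0, 8, 8, 24));
	return 0;
}
```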
......@@ -1949,7 +1949,7 @@ int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
return -ENOENT;
if (!attr_b->non_res) {
- u32 data_size = le32_to_cpu(attr->res.data_size);
+ u32 data_size = le32_to_cpu(attr_b->res.data_size);
u32 from, to;
if (vbo > data_size)
......
......@@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
.poll = signalfd_poll,
.read = signalfd_read,
.llseek = noop_llseek,
+ .may_pollfree = true,
};
static int do_signalfd4(int ufd, sigset_t *mask, int flags)
......
......@@ -2811,7 +2811,7 @@ xfs_btree_split_worker(
struct xfs_btree_split_args *args = container_of(work,
struct xfs_btree_split_args, work);
unsigned long pflags;
- unsigned long new_pflags = PF_MEMALLOC_NOFS;
+ unsigned long new_pflags = 0;
/*
* we are in a transaction context here, but may also be doing work
......@@ -2823,12 +2823,20 @@ xfs_btree_split_worker(
new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
current_set_flags_nested(&pflags, new_pflags);
+ xfs_trans_set_context(args->cur->bc_tp);
args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
args->key, args->curp, args->stat);
- complete(args->done);
+ xfs_trans_clear_context(args->cur->bc_tp);
current_restore_flags_nested(&pflags, new_pflags);
+ /*
+ * Do not access args after complete() has run here. We don't own args
+ * and the owner may run and free args before we return here.
+ */
+ complete(args->done);
}
/*
......
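The moved complete() encodes a lifetime rule: completion is the ownership hand-off, so it must be the last access to *args — the waiter may free the structure the moment it wakes. A runnable userspace analogue with POSIX semaphores (illustrative names, not the kernel completion API):

```c
/* Sketch of the "complete() is the last touch" rule. Build: cc done.c -pthread */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>

struct split_args {
	int result;
	sem_t done;		/* stands in for args->done */
};

static void *worker(void *p)
{
	struct split_args *args = p;
	args->result = 42;	/* all writes to *args happen before the post */
	sem_post(&args->done);
	/* from here on, *args may already be freed by the waiter:
	 * any further dereference of args would be a use-after-free */
	return NULL;
}

int main(void)
{
	struct split_args *args = malloc(sizeof(*args));
	sem_init(&args->done, 0, 0);

	pthread_t t;
	pthread_create(&t, NULL, worker, args);

	sem_wait(&args->done);		/* ownership returns to us here */
	printf("result = %d\n", args->result);
	sem_destroy(&args->done);
	free(args);			/* legal immediately after the wait */

	pthread_join(t, NULL);
	return 0;
}
```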
......@@ -98,7 +98,7 @@ xfs_setfilesize_ioend(
* thus we need to mark ourselves as being in a transaction manually.
* Similarly for freeze protection.
*/
- current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
+ xfs_trans_set_context(tp);
__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
/* we abort the update if there was an IO error */
......@@ -538,6 +538,12 @@ xfs_vm_writepage(
{
struct xfs_writepage_ctx wpc = { };
+ if (WARN_ON_ONCE(current->journal_info)) {
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ return 0;
+ }
return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}
......@@ -548,6 +554,13 @@ xfs_vm_writepages(
{
struct xfs_writepage_ctx wpc = { };
+ /*
+ * Writing back data in a transaction context can result in recursive
+ * transactions. This is bad, so issue a warning and get out of here.
+ */
+ if (WARN_ON_ONCE(current->journal_info))
+ return 0;
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
......
......@@ -374,46 +374,36 @@ xfs_reserve_blocks(
* If the request is larger than the current reservation, reserve the
* blocks before we update the reserve counters. Sample m_fdblocks and
* perform a partial reservation if the request exceeds free space.
+ *
+ * The code below estimates how many blocks it can request from
+ * fdblocks to stash in the reserve pool. This is a classic TOCTOU
+ * race since fdblocks updates are not always coordinated via
+ * m_sb_lock. Set the reserve size even if there's not enough free
+ * space to fill it because mod_fdblocks will refill an undersized
+ * reserve when it can.
*/
- error = -ENOSPC;
- do {
- free = percpu_counter_sum(&mp->m_fdblocks) -
- mp->m_alloc_set_aside;
- if (free <= 0)
- break;
- delta = request - mp->m_resblks;
- lcounter = free - delta;
- if (lcounter < 0)
- /* We can't satisfy the request, just get what we can */
- fdblks_delta = free;
- else
- fdblks_delta = delta;
+ free = percpu_counter_sum(&mp->m_fdblocks) -
+ xfs_fdblocks_unavailable(mp);
+ delta = request - mp->m_resblks;
+ mp->m_resblks = request;
+ if (delta > 0 && free > 0) {
/*
* We'll either succeed in getting space from the free block
- * count or we'll get an ENOSPC. If we get a ENOSPC, it means
- * things changed while we were calculating fdblks_delta and so
- * we should try again to see if there is anything left to
- * reserve.
+ * count or we'll get an ENOSPC. Don't set the reserved flag
+ * here - we don't want to reserve the extra reserve blocks
+ * from the reserve.
*
- * Don't set the reserved flag here - we don't want to reserve
- * the extra reserve blocks from the reserve.....
+ * The desired reserve size can change after we drop the lock.
+ * Use mod_fdblocks to put the space into the reserve or into
+ * fdblocks as appropriate.
*/
+ fdblks_delta = min(free, delta);
+ spin_unlock(&mp->m_sb_lock);
error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+ if (!error)
+ xfs_mod_fdblocks(mp, fdblks_delta, 0);
+ spin_lock(&mp->m_sb_lock);
+ }
- } while (error == -ENOSPC);
- /*
- * Update the reserve counters if blocks have been successfully
- * allocated.
- */
- if (!error && fdblks_delta) {
- mp->m_resblks += fdblks_delta;
- mp->m_resblks_avail += fdblks_delta;
- }
out:
if (outval) {
outval->resblks = mp->m_resblks;
......
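The rewrite replaces a retry loop that could spin forever at ENOSPC with a single clamped transfer: record the requested reserve size unconditionally, then move at most min(free, delta) blocks, letting xfs_mod_fdblocks() top up an undersized reserve later. A scalar model of that logic, purely illustrative (real fdblocks accounting is per-cpu and juggles m_sb_lock):

```c
/* Toy model of the new reserve logic above, not the kernel code. */
#include <stdio.h>
#include <stdint.h>

static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }

struct model { int64_t fdblocks, resblks; };

static void reserve_blocks(struct model *m, int64_t request)
{
	int64_t delta = request - m->resblks;

	m->resblks = request;		/* target recorded unconditionally */
	if (delta > 0 && m->fdblocks > 0) {
		int64_t take = min64(m->fdblocks, delta);	/* partial fill ok */
		m->fdblocks -= take;
		/* in the kernel, re-adding via xfs_mod_fdblocks() routes these
		 * blocks into the undersized reserve rather than back to free */
	}
}

int main(void)
{
	struct model m = { .fdblocks = 100, .resblks = 0 };

	reserve_blocks(&m, 150);	/* request exceeds free space */
	printf("free=%lld reserve target=%lld\n",
	       (long long)m.fdblocks, (long long)m.resblks);
	/* free=0, target=150: no loop; mod_fdblocks tops the reserve up later */
	return 0;
}
```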
......@@ -467,6 +467,14 @@ extern void xfs_unmountfs(xfs_mount_t *);
*/
#define XFS_FDBLOCKS_BATCH 1024
+ /* Accessor added for 5.10.y backport */
+ static inline uint64_t
+ xfs_fdblocks_unavailable(
+ struct xfs_mount *mp)
+ {
+ return mp->m_alloc_set_aside;
+ }
extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
bool reserved);
extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
......
......@@ -72,6 +72,7 @@ xfs_trans_free(
xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
trace_xfs_trans_free(tp, _RET_IP_);
+ xfs_trans_clear_context(tp);
if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
......@@ -123,7 +124,8 @@ xfs_trans_dup(
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
- ntp->t_pflags = tp->t_pflags;
+ xfs_trans_switch_context(tp, ntp);
/* move deferred ops over to the new tp */
xfs_defer_move(ntp, tp);
......@@ -157,9 +159,6 @@ xfs_trans_reserve(
int error = 0;
bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
- /* Mark this thread as being in a transaction */
- current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
/*
* Attempt to reserve the needed disk blocks by decrementing
* the number needed from the number available. This will
......@@ -167,10 +166,8 @@ xfs_trans_reserve(
*/
if (blocks > 0) {
error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
- if (error != 0) {
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
+ if (error != 0)
return -ENOSPC;
- }
tp->t_blk_res += blocks;
}
......@@ -244,9 +241,6 @@ xfs_trans_reserve(
xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
tp->t_blk_res = 0;
}
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
return error;
}
......@@ -272,6 +266,7 @@ xfs_trans_alloc(
tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
+ xfs_trans_set_context(tp);
/*
* Zero-reservation ("empty") transactions can't modify anything, so
......@@ -893,7 +888,6 @@ __xfs_trans_commit(
xlog_cil_commit(mp->m_log, tp, &commit_seq, regrant);
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
xfs_trans_free(tp);
/*
......@@ -925,7 +919,6 @@ __xfs_trans_commit(
xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
tp->t_ticket = NULL;
}
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
xfs_trans_free_items(tp, !!error);
xfs_trans_free(tp);
......@@ -985,9 +978,6 @@ xfs_trans_cancel(
tp->t_ticket = NULL;
}
- /* mark this thread as no longer being in a transaction */
- current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
xfs_trans_free_items(tp, dirty);
xfs_trans_free(tp);
}
......
......@@ -266,4 +266,34 @@ int xfs_trans_alloc_ichange(struct xfs_inode *ip, struct xfs_dquot *udqp,
struct xfs_dquot *gdqp, struct xfs_dquot *pdqp, bool force,
struct xfs_trans **tpp);
+ static inline void
+ xfs_trans_set_context(
+ struct xfs_trans *tp)
+ {
+ ASSERT(current->journal_info == NULL);
+ tp->t_pflags = memalloc_nofs_save();
+ current->journal_info = tp;
+ }
+ static inline void
+ xfs_trans_clear_context(
+ struct xfs_trans *tp)
+ {
+ if (current->journal_info == tp) {
+ memalloc_nofs_restore(tp->t_pflags);
+ current->journal_info = NULL;
+ }
+ }
+ static inline void
+ xfs_trans_switch_context(
+ struct xfs_trans *old_tp,
+ struct xfs_trans *new_tp)
+ {
+ ASSERT(current->journal_info == old_tp);
+ new_tp->t_pflags = old_tp->t_pflags;
+ old_tp->t_pflags = 0;
+ current->journal_info = new_tp;
+ }
#endif /* __XFS_TRANS_H__ */
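These three helpers replace the scattered PF_MEMALLOC_NOFS flag twiddling removed throughout xfs_trans.c: current->journal_info both marks the task as "in a transaction" and carries the saved allocation-scope state, which is what the new WARN_ON_ONCE(current->journal_info) guards in the writeback paths key off. A userspace model of the idea, assuming _Thread_local as a stand-in for per-task state (the memalloc stubs below are placeholders, not the kernel functions):

```c
/* Illustrative model of the journal_info technique, not XFS code. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct trans { unsigned saved_flags; };

static _Thread_local struct trans *journal_info;	/* current->journal_info */

/* placeholder stubs for memalloc_nofs_save()/restore() */
static unsigned memalloc_nofs_save(void) { return 0; }
static void memalloc_nofs_restore(unsigned f) { (void)f; }

static void trans_set_context(struct trans *tp)
{
	assert(journal_info == NULL);		/* no nested transactions */
	tp->saved_flags = memalloc_nofs_save();
	journal_info = tp;
}

static void trans_clear_context(struct trans *tp)
{
	if (journal_info == tp) {
		memalloc_nofs_restore(tp->saved_flags);
		journal_info = NULL;
	}
}

static void writepage(void)
{
	if (journal_info) {			/* the new recursion guard */
		fprintf(stderr, "writeback inside a transaction: redirty\n");
		return;
	}
	puts("safe to write back");
}

int main(void)
{
	struct trans tp;

	writepage();			/* ok */
	trans_set_context(&tp);
	writepage();			/* detected and skipped */
	trans_clear_context(&tp);
	return 0;
}
```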
......@@ -20,7 +20,6 @@ struct fault_attr {
atomic_t space;
unsigned long verbose;
bool task_filter;
- bool no_warn;
unsigned long stacktrace_depth;
unsigned long require_start;
unsigned long require_end;
......@@ -32,6 +31,10 @@ struct fault_attr {
struct dentry *dname;
};
+ enum fault_flags {
+ FAULT_NOWARN = 1 << 0,
+ };
#define FAULT_ATTR_INITIALIZER { \
.interval = 1, \
.times = ATOMIC_INIT(1), \
......@@ -40,11 +43,11 @@ struct fault_attr {
.ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
.verbose = 2, \
.dname = NULL, \
- .no_warn = false, \
}
#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
int setup_fault_attr(struct fault_attr *attr, char *str);
+ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags);
bool should_fail(struct fault_attr *attr, ssize_t size);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
......
......@@ -1899,7 +1899,7 @@ struct file_operations {
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
- KABI_RESERVE(1)
+ KABI_USE(1, bool may_pollfree)
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
......
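KABI_USE(1, bool may_pollfree) burns a previously reserved padding slot for the new member, keeping the size and member offsets of struct file_operations stable for out-of-tree modules. A simplified model of the reserve/use idea — the macro bodies below are assumptions for illustration, not the kernel's kabi.h definitions:

```c
/* Toy model of KABI_RESERVE/KABI_USE; compile with a C11 compiler. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define KABI_RESERVE(n)	unsigned long kabi_reserved##n;
#define KABI_USE(n, new_member) \
	union { new_member; unsigned long kabi_reserved##n; };

struct ops_v1 {			/* original ABI */
	void (*open)(void);
	KABI_RESERVE(1)
	KABI_RESERVE(2)
};

struct ops_v2 {			/* after the backport */
	void (*open)(void);
	KABI_USE(1, bool may_pollfree)
	KABI_RESERVE(2)
};

int main(void)
{
	/* layout is preserved, so modules built against v1 keep working */
	static_assert(sizeof(struct ops_v1) == sizeof(struct ops_v2),
		      "KABI layout must not change");
	assert(offsetof(struct ops_v1, kabi_reserved2) ==
	       offsetof(struct ops_v2, kabi_reserved2));
	return 0;
}
```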
......@@ -2103,6 +2103,13 @@ static int osnoise_hook_events(void)
return -EINVAL;
}
+ static void osnoise_unhook_events(void)
+ {
+ unhook_thread_events();
+ unhook_softirq_events();
+ unhook_irq_events();
+ }
/*
* osnoise_workload_start - start the workload and hook to events
*/
......@@ -2135,7 +2142,14 @@ static int osnoise_workload_start(void)
retval = start_per_cpu_kthreads();
if (retval) {
- unhook_irq_events();
+ trace_osnoise_callback_enabled = false;
+ /*
+ * Make sure that ftrace_nmi_enter/exit() see
+ * trace_osnoise_callback_enabled as false before continuing.
+ */
+ barrier();
+ osnoise_unhook_events();
return retval;
}
......@@ -2157,6 +2171,17 @@ static void osnoise_workload_stop(void)
if (osnoise_has_registered_instances())
return;
+ /*
+ * If callbacks were already disabled in a previous stop
+ * call, there is no need to disable them again.
+ *
+ * For instance, this happens when tracing is stopped via:
+ * echo 0 > tracing_on
+ * echo nop > current_tracer.
+ */
+ if (!trace_osnoise_callback_enabled)
+ return;
trace_osnoise_callback_enabled = false;
/*
* Make sure that ftrace_nmi_enter/exit() see
......@@ -2166,9 +2191,7 @@ static void osnoise_workload_stop(void)
stop_per_cpu_kthreads();
- unhook_irq_events();
- unhook_softirq_events();
- unhook_thread_events();
+ osnoise_unhook_events();
}
static void osnoise_tracer_start(struct trace_array *tr)
......
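Two things happen in these osnoise hunks: the stop path becomes idempotent (a second stop after `echo 0 > tracing_on` followed by `echo nop > current_tracer` is now a no-op), and a compiler barrier orders the flag clear before the unhook calls as observed by ftrace_nmi_enter/exit(). A toy sketch of the guard-flag-plus-barrier idiom — illustrative only; barrier() here is the usual GCC compiler-only memory clobber:

```c
/* Sketch of the idempotent-stop pattern above, not the tracer code. */
#include <stdbool.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static volatile bool callbacks_enabled;

static void unhook_events(void)
{
	puts("events unhooked");
}

static void workload_stop(void)
{
	if (!callbacks_enabled)		/* a prior stop already ran: bail */
		return;

	callbacks_enabled = false;
	barrier();	/* flag store emitted before teardown begins */
	unhook_events();
}

int main(void)
{
	callbacks_enabled = true;
	workload_stop();	/* unhooks once */
	workload_stop();	/* no-op on the second call */
	return 0;
}
```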
......@@ -41,9 +41,6 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
- if (attr->no_warn)
- return;
if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, "
......@@ -103,7 +100,7 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
* http://www.nongnu.org/failmalloc/
*/
- bool should_fail(struct fault_attr *attr, ssize_t size)
+ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
{
if (in_task()) {
unsigned int fail_nth = READ_ONCE(current->fail_nth);
......@@ -146,13 +143,19 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return false;
fail:
- fail_dump(attr);
+ if (!(flags & FAULT_NOWARN))
+ fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
+ bool should_fail(struct fault_attr *attr, ssize_t size)
+ {
+ return should_fail_ex(attr, size, 0);
+ }
EXPORT_SYMBOL_GPL(should_fail);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
......
......@@ -16,6 +16,8 @@ static struct {
bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
+ int flags = 0;
/* No fault-injection for bootstrap cache */
if (unlikely(s == kmem_cache))
return false;
......@@ -30,10 +32,16 @@ bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
return false;
+ /*
+ * In some cases, it expects to specify __GFP_NOWARN
+ * to avoid printing any information (not just a warning),
+ * thus avoiding deadlocks. See commit 6b9dbedbe349 for
+ * details.
+ */
if (gfpflags & __GFP_NOWARN)
- failslab.attr.no_warn = true;
+ flags |= FAULT_NOWARN;
- return should_fail(&failslab.attr, s->object_size);
+ return should_fail_ex(&failslab.attr, s->object_size, flags);
}
static int __init setup_failslab(char *str)
......
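The fault-injection rework keeps all policy in should_fail_ex(), which takes a per-call flags word, and turns should_fail() into a thin wrapper passing 0 — so a __GFP_NOWARN allocation can suppress the FAULT_INJECTION message without the old global no_warn state racing between callers. A userspace-sized sketch of that interface shape (stand-in logic, not the kernel implementation):

```c
/* Toy model of the should_fail()/should_fail_ex() split above. */
#include <stdbool.h>
#include <stdio.h>

enum fault_flags { FAULT_NOWARN = 1 << 0 };

struct fault_attr { unsigned probability; unsigned calls; };

static bool should_fail_ex(struct fault_attr *attr, int flags)
{
	if (++attr->calls % 100 >= attr->probability)
		return false;
	if (!(flags & FAULT_NOWARN))	/* warning is now opt-out per call */
		fprintf(stderr, "FAULT_INJECTION: forcing a failure.\n");
	return true;
}

static bool should_fail(struct fault_attr *attr)
{
	return should_fail_ex(attr, 0);	/* legacy behaviour: always warn */
}

int main(void)
{
	struct fault_attr attr = { .probability = 100 };

	should_fail(&attr);			/* fails and warns */
	should_fail_ex(&attr, FAULT_NOWARN);	/* fails silently, as a
						 * __GFP_NOWARN allocation would */
	return 0;
}
```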
......@@ -1479,9 +1479,11 @@ unsigned long __do_mmap_mm(struct mm_struct *mm, struct file *file,
pkey = 0;
}
+ #ifdef CONFIG_ASCEND_FEATURES
/* Physical address is within 4G */
if (flags & MAP_PA32BIT)
vm_flags |= VM_PA32BIT;
+ #endif
/* Do simple checking here so the lower-level routines won't have
* to. we assume access permissions have been handled by the open
......
......@@ -3545,6 +3545,8 @@ __setup("fail_page_alloc=", setup_fail_page_alloc);
static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
+ int flags = 0;
if (order < fail_page_alloc.min_order)
return false;
if (gfp_mask & __GFP_NOFAIL)
......@@ -3555,10 +3557,11 @@ static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
(gfp_mask & __GFP_DIRECT_RECLAIM))
return false;
+ /* See comment in __should_failslab() */
if (gfp_mask & __GFP_NOWARN)
- fail_page_alloc.attr.no_warn = true;
+ flags |= FAULT_NOWARN;
- return should_fail(&fail_page_alloc.attr, 1 << order);
+ return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags);
}
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
......