Unverified · Commit 1b5e968d authored by openeuler-ci-bot, committed by Gitee

!775 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2023-32269
CVE-2023-2002
CVE-2023-26544
CVE-2023-0459

mm bugfixes from Yu Kuai
fs bugfix from yangerkun
fs performance improvements from Zhihao Cheng
 
Link: https://gitee.com/openeuler/kernel/pulls/775

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -101,6 +101,10 @@ int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
 	asize = le32_to_cpu(attr->size);
 	run_off = le16_to_cpu(attr->nres.run_off);
+
+	if (run_off > asize)
+		return -EINVAL;
+
 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
 			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
 			    asize - run_off);
@@ -1157,6 +1161,10 @@ int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
 	}
 
 	ro = le16_to_cpu(attr->nres.run_off);
+
+	if (ro > le32_to_cpu(attr->size))
+		return -EINVAL;
+
 	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
 			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
 	if (err < 0)
@@ -1832,6 +1840,11 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
 			u16 le_sz;
 			u16 roff = le16_to_cpu(attr->nres.run_off);
 
+			if (roff > le32_to_cpu(attr->size)) {
+				err = -EINVAL;
+				goto out;
+			}
+
 			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
 				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
 				      le32_to_cpu(attr->size) - roff);
...
@@ -68,6 +68,11 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
 
 		run_init(&ni->attr_list.run);
 
+		if (run_off > le32_to_cpu(attr->size)) {
+			err = -EINVAL;
+			goto out;
+		}
+
 		err = run_unpack_ex(&ni->attr_list.run, ni->mi.sbi, ni->mi.rno,
 				    0, le64_to_cpu(attr->nres.evcn), 0,
 				    Add2Ptr(attr, run_off),
...
@@ -567,6 +567,12 @@ static int ni_repack(struct ntfs_inode *ni)
 		}
 
 		roff = le16_to_cpu(attr->nres.run_off);
+
+		if (roff > le32_to_cpu(attr->size)) {
+			err = -EINVAL;
+			break;
+		}
+
 		err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn,
 				 Add2Ptr(attr, roff),
 				 le32_to_cpu(attr->size) - roff);
@@ -1541,6 +1547,9 @@ int ni_delete_all(struct ntfs_inode *ni)
 		asize = le32_to_cpu(attr->size);
 		roff = le16_to_cpu(attr->nres.run_off);
 
+		if (roff > asize)
+			return -EINVAL;
+
 		/* run==1 means unpack and deallocate. */
 		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
 			      Add2Ptr(attr, roff), asize - roff);
@@ -2238,6 +2247,11 @@ int ni_decompress_file(struct ntfs_inode *ni)
 		asize = le32_to_cpu(attr->size);
 		roff = le16_to_cpu(attr->nres.run_off);
 
+		if (roff > asize) {
+			err = -EINVAL;
+			goto out;
+		}
+
 		/*run==1 Means unpack and deallocate. */
 		run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn,
 			      Add2Ptr(attr, roff), asize - roff);
...
@@ -2727,6 +2727,9 @@ static inline bool check_attr(const struct MFT_REC *rec,
 			return false;
 		}
 
+		if (run_off > asize)
+			return false;
+
 		if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
 			       Add2Ptr(attr, run_off), asize - run_off) < 0) {
 			return false;
@@ -4767,6 +4770,12 @@ int log_replay(struct ntfs_inode *ni, bool *initialized)
 			u16 roff = le16_to_cpu(attr->nres.run_off);
 			CLST svcn = le64_to_cpu(attr->nres.svcn);
 
+			if (roff > t32) {
+				kfree(oa->attr);
+				oa->attr = NULL;
+				goto fake_attr;
+			}
+
 			err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
 					 le64_to_cpu(attr->nres.evcn), svcn,
 					 Add2Ptr(attr, roff), t32 - roff);
...
@@ -373,7 +373,13 @@ static struct inode *ntfs_read_mft(struct inode *inode,
 
 attr_unpack_run:
 	roff = le16_to_cpu(attr->nres.run_off);
+
+	if (roff > asize) {
+		err = -EINVAL;
+		goto out;
+	}
+
 	t64 = le64_to_cpu(attr->nres.svcn);
 	err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn),
 			    t64, Add2Ptr(attr, roff), asize - roff);
 	if (err < 0)
...
@@ -1303,6 +1303,6 @@ const struct inode_operations ovl_dir_inode_operations = {
 	.permission	= ovl_permission,
 	.getattr	= ovl_getattr,
 	.listxattr	= ovl_listxattr,
-	.get_acl	= ovl_get_acl,
+	.get_acl2	= ovl_get_acl,
 	.update_time	= ovl_update_time,
 };
@@ -11,6 +11,7 @@
 #include <linux/posix_acl.h>
 #include <linux/ratelimit.h>
 #include <linux/fiemap.h>
+#include <linux/namei.h>
 
 #include "overlayfs.h"
@@ -441,15 +442,26 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 	return res;
 }
 
-struct posix_acl *ovl_get_acl(struct inode *inode, int type)
+struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu)
 {
 	struct inode *realinode = ovl_inode_real(inode);
 	const struct cred *old_cred;
 	struct posix_acl *acl;
 
-	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
+	if (!IS_ENABLED(CONFIG_FS_POSIX_ACL))
 		return NULL;
 
+	if (!realinode) {
+		WARN_ON(!rcu);
+		return ERR_PTR(-ECHILD);
+	}
+
+	if (!IS_POSIXACL(realinode))
+		return NULL;
+
+	if (rcu)
+		return get_cached_acl_rcu(realinode, type);
+
 	old_cred = ovl_override_creds(inode->i_sb);
 	acl = get_acl(realinode, type);
 	revert_creds(old_cred);
@@ -496,7 +508,7 @@ static const struct inode_operations ovl_file_inode_operations = {
 	.permission	= ovl_permission,
 	.getattr	= ovl_getattr,
 	.listxattr	= ovl_listxattr,
-	.get_acl	= ovl_get_acl,
+	.get_acl2	= ovl_get_acl,
 	.update_time	= ovl_update_time,
 	.fiemap		= ovl_fiemap,
 };
@@ -514,7 +526,7 @@ static const struct inode_operations ovl_special_inode_operations = {
 	.permission	= ovl_permission,
 	.getattr	= ovl_getattr,
 	.listxattr	= ovl_listxattr,
-	.get_acl	= ovl_get_acl,
+	.get_acl2	= ovl_get_acl,
 	.update_time	= ovl_update_time,
 };
...
@@ -466,7 +466,7 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
 int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
 		  void *value, size_t size);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
-struct posix_acl *ovl_get_acl(struct inode *inode, int type);
+struct posix_acl *ovl_get_acl(struct inode *inode, int type, bool rcu);
 int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags);
 bool ovl_is_private_xattr(struct super_block *sb, const char *name);
...
@@ -22,6 +22,7 @@
 #include <linux/xattr.h>
 #include <linux/export.h>
 #include <linux/user_namespace.h>
+#include <linux/namei.h>
 
 static struct posix_acl **acl_by_type(struct inode *inode, int type)
 {
@@ -56,7 +57,17 @@ EXPORT_SYMBOL(get_cached_acl);
 
 struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type)
 {
-	return rcu_dereference(*acl_by_type(inode, type));
+	struct posix_acl *acl = rcu_dereference(*acl_by_type(inode, type));
+
+	if (acl == ACL_DONT_CACHE && inode->i_op->get_acl2) {
+		struct posix_acl *ret;
+
+		ret = inode->i_op->get_acl2(inode, type, LOOKUP_RCU);
+		if (!IS_ERR(ret))
+			acl = ret;
+	}
+
+	return acl;
 }
 EXPORT_SYMBOL(get_cached_acl_rcu);
@@ -134,11 +145,14 @@ struct posix_acl *get_acl(struct inode *inode, int type)
 	 * If the filesystem doesn't have a get_acl() function at all, we'll
 	 * just create the negative cache entry.
 	 */
-	if (!inode->i_op->get_acl) {
+	if (!inode->i_op->get_acl && !inode->i_op->get_acl2) {
 		set_cached_acl(inode, type, NULL);
 		return NULL;
 	}
-	acl = inode->i_op->get_acl(inode, type);
+	if (inode->i_op->get_acl)
+		acl = inode->i_op->get_acl(inode, type);
+	else
+		acl = inode->i_op->get_acl2(inode, type, false);
 	if (IS_ERR(acl)) {
 		/*
...
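For context on how a stacked filesystem plugs into the new callback: it leaves ACL_DONT_CACHE in its own inode's ACL cache and implements ->get_acl2(), exactly as the overlayfs hunks above do. A minimal sketch, assuming a hypothetical filesystem and an example_real_inode() helper (neither is part of this patch):

static struct posix_acl *examplefs_get_acl2(struct inode *inode,
					    int type, bool rcu)
{
	/* example_real_inode() is an assumed helper that returns the
	 * underlying (lower) inode; it may be NULL during RCU walk.
	 */
	struct inode *real = example_real_inode(inode);

	if (rcu) {
		/* RCU mode: must not block. Answer from the underlying
		 * inode's ACL cache, or ask the caller to retry in
		 * ref-walk mode.
		 */
		if (!real)
			return ERR_PTR(-ECHILD);
		return get_cached_acl_rcu(real, type);
	}

	/* Ref-walk mode: blocking is allowed. */
	return get_acl(real, type);
}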
@@ -370,10 +370,18 @@ xfs_buf_item_format(
  * This is called to pin the buffer associated with the buf log item in memory
  * so it cannot be written out.
  *
- * We also always take a reference to the buffer log item here so that the bli
- * is held while the item is pinned in memory. This means that we can
- * unconditionally drop the reference count a transaction holds when the
- * transaction is completed.
+ * We take a reference to the buffer log item here so that the BLI life cycle
+ * extends at least until the buffer is unpinned via xfs_buf_item_unpin() and
+ * inserted into the AIL.
+ *
+ * We also need to take a reference to the buffer itself as the BLI unpin
+ * processing requires accessing the buffer after the BLI has dropped the final
+ * BLI reference. See xfs_buf_item_unpin() for an explanation.
+ * If unpins race to drop the final BLI reference and only the
+ * BLI owns a reference to the buffer, then the loser of the race can have the
+ * buffer freed from under it (e.g. on shutdown). Taking a buffer reference per
+ * pin count ensures the life cycle of the buffer extends for as
+ * long as we hold the buffer pin reference in xfs_buf_item_unpin().
  */
 STATIC void
 xfs_buf_item_pin(
@@ -388,13 +396,30 @@ xfs_buf_item_pin(
 
 	trace_xfs_buf_item_pin(bip);
 
+	xfs_buf_hold(bip->bli_buf);
 	atomic_inc(&bip->bli_refcount);
 	atomic_inc(&bip->bli_buf->b_pin_count);
 }
 
 /*
- * This is called to unpin the buffer associated with the buf log item which
- * was previously pinned with a call to xfs_buf_item_pin().
+ * This is called to unpin the buffer associated with the buf log item which was
+ * previously pinned with a call to xfs_buf_item_pin(). We enter this function
+ * with a buffer pin count, a buffer reference and a BLI reference.
+ *
+ * We must drop the BLI reference before we unpin the buffer because the AIL
+ * doesn't acquire a BLI reference whenever it accesses it. Therefore if the
+ * refcount drops to zero, the bli could still be AIL resident and the buffer
+ * submitted for I/O at any point before we return. This can result in IO
+ * completion freeing the buffer while we are still trying to access it here.
+ * This race condition can also occur in shutdown situations where we abort and
+ * unpin buffers from contexts other than journal IO completion.
+ *
+ * Hence we have to hold a buffer reference per pin count to ensure that the
+ * buffer cannot be freed until we have finished processing the unpin operation.
+ * The reference is taken in xfs_buf_item_pin(), and we must hold it until we
+ * are done processing the buffer state. In the case of an abort (remove =
+ * true) then we re-use the current pin reference as the IO reference we hand
+ * off to IO failure handling.
  */
 STATIC void
 xfs_buf_item_unpin(
@@ -411,24 +436,18 @@ xfs_buf_item_unpin(
 
 	trace_xfs_buf_item_unpin(bip);
 
-	/*
-	 * Drop the bli ref associated with the pin and grab the hold required
-	 * for the I/O simulation failure in the abort case. We have to do this
-	 * before the pin count drops because the AIL doesn't acquire a bli
-	 * reference. Therefore if the refcount drops to zero, the bli could
-	 * still be AIL resident and the buffer submitted for I/O (and freed on
-	 * completion) at any point before we return. This can be removed once
-	 * the AIL properly holds a reference on the bli.
-	 */
 	freed = atomic_dec_and_test(&bip->bli_refcount);
-	if (freed && !stale && remove)
-		xfs_buf_hold(bp);
 	if (atomic_dec_and_test(&bp->b_pin_count))
 		wake_up_all(&bp->b_waiters);
 
-	/* nothing to do but drop the pin count if the bli is active */
-	if (!freed)
+	/*
+	 * Nothing to do but drop the buffer pin reference if the BLI is
+	 * still active.
+	 */
+	if (!freed) {
+		xfs_buf_rele(bp);
 		return;
+	}
 
 	if (stale) {
 		ASSERT(bip->bli_flags & XFS_BLI_STALE);
@@ -440,6 +459,15 @@ xfs_buf_item_unpin(
 
 		trace_xfs_buf_item_unpin_stale(bip);
 
+		/*
+		 * The buffer has been locked and referenced since it was marked
+		 * stale so we own both lock and reference exclusively here. We
+		 * do not need the pin reference any more, so drop it now so
+		 * that we only have one reference to drop once item completion
+		 * processing is complete.
+		 */
+		xfs_buf_rele(bp);
+
 		/*
 		 * If we get called here because of an IO error, we may or may
 		 * not have the item on the AIL. xfs_trans_ail_delete() will
@@ -456,16 +484,30 @@ xfs_buf_item_unpin(
 			ASSERT(bp->b_log_item == NULL);
 		}
 
 		xfs_buf_relse(bp);
-	} else if (remove) {
+		return;
+	}
+
+	if (remove) {
 		/*
-		 * The buffer must be locked and held by the caller to simulate
-		 * an async I/O failure. We acquired the hold for this case
-		 * before the buffer was unpinned.
+		 * We need to simulate an async IO failure here to ensure that
+		 * the correct error completion is run on this buffer. This
+		 * requires a reference to the buffer and for the buffer to be
+		 * locked. We can safely pass ownership of the pin reference to
+		 * the IO to ensure that nothing can free the buffer while we
+		 * wait for the lock and then run the IO failure completion.
		 */
 		xfs_buf_lock(bp);
 		bp->b_flags |= XBF_ASYNC;
 		xfs_buf_ioend_fail(bp);
+		return;
 	}
+
+	/*
+	 * BLI has no more active references - it will be moved to the AIL to
+	 * manage the remaining BLI/buffer life cycle. There is nothing left for
+	 * us to do here so drop the pin reference to the buffer.
+	 */
+	xfs_buf_rele(bp);
 }
 
 STATIC uint
...
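The rule the rewritten comments describe — one buffer reference per pin, so the unpin path can still touch the buffer after the last owner reference is gone — is a general reference-counting pattern. A standalone sketch with simplified stand-in types (illustrative only, not XFS code):

#include <stdatomic.h>
#include <stdlib.h>

struct pinned_obj {
	atomic_int refcount;	/* controls object lifetime */
	atomic_int pin_count;	/* "do not write back" count */
};

static void obj_hold(struct pinned_obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

static void obj_rele(struct pinned_obj *o)
{
	/* fetch_sub returns the old value; 1 means this was the last ref */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

static void obj_pin(struct pinned_obj *o)
{
	obj_hold(o);			/* one reference per pin */
	atomic_fetch_add(&o->pin_count, 1);
}

static void obj_unpin(struct pinned_obj *o)
{
	atomic_fetch_sub(&o->pin_count, 1);
	/* o is still safe to touch here even if other unpins raced:
	 * this pin's own reference is not dropped until we are done.
	 */
	obj_rele(o);
}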
@@ -600,6 +600,11 @@ static inline void mapping_allow_writable(struct address_space *mapping)
 
 struct posix_acl;
 #define ACL_NOT_CACHED ((void *)(-1))
+/*
+ * ACL_DONT_CACHE is for stacked filesystems that rely on the underlying fs to
+ * cache the ACL. This also means that ->get_acl2() can be called in RCU mode
+ * with the LOOKUP_RCU flag.
+ */
 #define ACL_DONT_CACHE ((void *)(-3))
 
 static inline struct posix_acl *
@@ -1934,7 +1939,7 @@ struct inode_operations {
 	int (*tmpfile) (struct inode *, struct dentry *, umode_t);
 	int (*set_acl)(struct inode *, struct posix_acl *, int);
 
-	KABI_RESERVE(1)
+	KABI_USE(1, struct posix_acl * (*get_acl2)(struct inode *, int, bool))
 	KABI_RESERVE(2)
 	KABI_RESERVE(3)
 	KABI_RESERVE(4)
...
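The KABI_USE(1, ...) change above repurposes a previously reserved padding slot so that adding ->get_acl2 does not alter the layout of struct inode_operations, preserving the kernel ABI. A sketch of how such reserve/use macros can be built (an illustration of the idea, not the exact openEuler definitions):

/* A reserved slot keeps the struct size and member offsets stable;
 * "using" it overlays the new member on the old slot via an
 * anonymous union, so existing binaries see an unchanged layout.
 */
#define KABI_RESERVE(n)		unsigned long kabi_reserved##n;
#define KABI_USE(n, _new)	union { _new; unsigned long kabi_reserved##n; };

struct example_ops {
	int (*op_a)(void);
	KABI_USE(1, int (*op_b)(int))	/* was: KABI_RESERVE(1) */
	KABI_RESERVE(2)
};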
@@ -11,6 +11,10 @@
 
 struct task_struct;
 
+#ifndef barrier_nospec
+# define barrier_nospec() do { } while (0)
+#endif
+
 /**
  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
  * @index:	array element index
...
@@ -71,6 +71,8 @@ extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
 extern struct posix_acl *get_posix_acl(struct inode *, int);
 extern int set_posix_acl(struct inode *, int, struct posix_acl *);
 
+struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
+
 #ifdef CONFIG_FS_POSIX_ACL
 extern int posix_acl_chmod(struct inode *, umode_t);
 extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
@@ -81,7 +83,6 @@ extern int simple_set_acl(struct inode *, struct posix_acl *, int);
 extern int simple_acl_create(struct inode *, struct inode *);
 
 struct posix_acl *get_cached_acl(struct inode *inode, int type);
-struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
 void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
 void forget_cached_acl(struct inode *inode, int type);
 void forget_all_cached_acls(struct inode *inode);
...
@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+#include <linux/nospec.h>
 
 #include <asm/barrier.h>
 #include <asm/unaligned.h>
@@ -1642,9 +1643,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 		 * reuse preexisting logic from Spectre v1 mitigation that
 		 * happens to produce the required code on x86 for v4 as well.
 		 */
-#ifdef CONFIG_X86
 		barrier_nospec();
-#endif
 		CONT;
 #define LDST(SIZEOP, SIZE)	\
 	STX_MEM_##SIZEOP:	\
...
@@ -3,6 +3,7 @@
 #include <linux/fault-inject-usercopy.h>
 #include <linux/instrumented.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 /* out-of-line parts */
 
@@ -12,6 +13,12 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
 	unsigned long res = n;
 	might_fault();
 	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
+		/*
+		 * Ensure that bad access_ok() speculation will not
+		 * lead to nasty side effects *after* the copy is
+		 * finished:
+		 */
+		barrier_nospec();
 		instrument_copy_from_user(to, from, n);
 		res = raw_copy_from_user(to, from, n);
 	}
...
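Both barrier_nospec() call sites in this series follow the same Spectre-v1 discipline: validate an untrusted value first, fence, and only then use it, so the CPU cannot act on the value while the validating branch is still speculatively unresolved. A condensed kernel-style sketch of the idiom (illustrative only; the helpers come from <linux/nospec.h>):

static int read_entry(const int *table, size_t nr, size_t idx)
{
	if (idx >= nr)
		return -EINVAL;

	/* Either clamp the index so speculation cannot reach out of
	 * bounds...
	 */
	idx = array_index_nospec(idx, nr);

	/* ...or, as _copy_from_user() above does, emit a full
	 * speculation barrier after the check: barrier_nospec();
	 */
	return table[idx];
}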
@@ -2534,7 +2534,7 @@ ssize_t generic_file_buffered_read(struct kiocb *iocb,
 			 * When a read accesses a page several times, only
 			 * mark it as accessed the first time.
 			 */
-			if (pos_same_page(iocb->ki_pos, ra->prev_pos -1, pages[0]))
+			if (!pos_same_page(iocb->ki_pos, ra->prev_pos -1, pages[0]))
 				mark_page_accessed(pages[0]);
 			for (i = 1; i < pg_nr; i++)
 				mark_page_accessed(pages[i]);
...
@@ -1000,7 +1000,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 		if (hci_sock_gen_cookie(sk)) {
 			struct sk_buff *skb;
 
-			if (capable(CAP_NET_ADMIN))
+			/* Perform careful checks before setting the HCI_SOCK_TRUSTED
+			 * flag. Make sure that not only the current task but also
+			 * the socket opener has the required capability, since
+			 * privileged programs can be tricked into making ioctl calls
+			 * on HCI sockets, and the socket should not be marked as
+			 * trusted simply because the ioctl caller is privileged.
+			 */
+			if (sk_capable(sk, CAP_NET_ADMIN))
 				hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
 
 			/* Send event to monitor */
...
@@ -400,6 +400,11 @@ static int nr_listen(struct socket *sock, int backlog)
 	struct sock *sk = sock->sk;
 
 	lock_sock(sk);
+	if (sock->state != SS_UNCONNECTED) {
+		release_sock(sk);
+		return -EINVAL;
+	}
+
 	if (sk->sk_state != TCP_LISTEN) {
 		memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
 		sk->sk_max_ack_backlog = backlog;
...