Unverified commit 4dad1506, authored by openeuler-ci-bot, committed by Gitee

!511 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2023-28328
CVE-2023-1382
CVE-2023-1079

livepatch bugfix from Zheng Yejian
seccomp bugfix from GONG, Ruiqi
mm bugfixes from ZhangPeng
ftrace bugfix from Chen Zhongjin
ext4 bugfix from Baokun Li
jbd2 bugfix from Zhihao Cheng 
 
Link: https://gitee.com/openeuler/kernel/pulls/511

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
......@@ -95,6 +95,7 @@ struct asus_kbd_leds {
struct hid_device *hdev;
struct work_struct work;
unsigned int brightness;
spinlock_t lock;
bool removed;
};
......@@ -397,24 +398,42 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
return ret;
}
static void asus_schedule_work(struct asus_kbd_leds *led)
{
unsigned long flags;
spin_lock_irqsave(&led->lock, flags);
if (!led->removed)
schedule_work(&led->work);
spin_unlock_irqrestore(&led->lock, flags);
}
static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
if (led->brightness == brightness)
return;
unsigned long flags;
spin_lock_irqsave(&led->lock, flags);
led->brightness = brightness;
schedule_work(&led->work);
spin_unlock_irqrestore(&led->lock, flags);
asus_schedule_work(led);
}
static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev)
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
enum led_brightness brightness;
unsigned long flags;
spin_lock_irqsave(&led->lock, flags);
brightness = led->brightness;
spin_unlock_irqrestore(&led->lock, flags);
return led->brightness;
return brightness;
}
static void asus_kbd_backlight_work(struct work_struct *work)
......@@ -422,11 +441,11 @@ static void asus_kbd_backlight_work(struct work_struct *work)
struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 };
int ret;
unsigned long flags;
if (led->removed)
return;
spin_lock_irqsave(&led->lock, flags);
buf[4] = led->brightness;
spin_unlock_irqrestore(&led->lock, flags);
ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf));
if (ret < 0)
......@@ -488,6 +507,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set;
drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get;
INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
spin_lock_init(&drvdata->kbd_backlight->lock);
ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev);
if (ret < 0) {
......@@ -1016,9 +1036,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
static void asus_remove(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
unsigned long flags;
if (drvdata->kbd_backlight) {
spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags);
drvdata->kbd_backlight->removed = true;
spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags);
cancel_work_sync(&drvdata->kbd_backlight->work);
}
......
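The asus_kbd_* hunks above close a race between the backlight worker and device removal: brightness and the removed flag are now only touched under led->lock, and work is queued through asus_schedule_work(), which re-checks removed under that same lock. Below is a minimal userspace sketch of the pattern, using a pthread mutex in place of the spinlock and a printout in place of schedule_work(); all names in it are illustrative and not taken from the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative analogue of struct asus_kbd_leds: state plus its lock. */
struct led_state {
    pthread_mutex_t lock;
    unsigned int brightness;
    bool removed;
};

/* Analogue of asus_schedule_work(): only "schedule" while not removed. */
static void schedule_update(struct led_state *led)
{
    pthread_mutex_lock(&led->lock);
    if (!led->removed)
        printf("work scheduled, brightness=%u\n", led->brightness);
    pthread_mutex_unlock(&led->lock);
}

/* Analogue of asus_kbd_backlight_set(): write under the lock, then schedule. */
static void set_brightness(struct led_state *led, unsigned int brightness)
{
    pthread_mutex_lock(&led->lock);
    led->brightness = brightness;
    pthread_mutex_unlock(&led->lock);
    schedule_update(led);
}

/* Analogue of asus_remove(): publish removal under the lock. */
static void remove_device(struct led_state *led)
{
    pthread_mutex_lock(&led->lock);
    led->removed = true;
    pthread_mutex_unlock(&led->lock);
}

int main(void)
{
    struct led_state led = { .lock = PTHREAD_MUTEX_INITIALIZER };

    set_brightness(&led, 2);   /* schedules */
    remove_device(&led);
    set_brightness(&led, 3);   /* silently dropped: device is gone */
    return 0;
}

Once removal has been published under the lock, no new work can be queued, which is what lets the driver's cancel_work_sync() call finish without racing against a re-arm.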
......@@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
if (msg[i].addr == 0x99) {
req = 0xBE;
index = 0;
if (msg[i].len < 1) {
i = -EOPNOTSUPP;
break;
}
value = msg[i].buf[0] & 0x00ff;
length = 1;
az6027_usb_out_op(d, req, value, index, data, length);
......
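The az6027_i2c_xfer hunk (CVE-2023-28328 in the pull list) rejects i2c messages shorter than one byte before reading buf[0], so a userspace-supplied message without a payload can no longer cause a bad dereference. A hedged userspace sketch of the same validate-before-dereference pattern follows; the struct and function names are illustrative stand-ins, not the kernel types.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for struct i2c_msg. */
struct msg {
    uint16_t addr;
    uint16_t len;
    uint8_t *buf;
};

/* Reject messages that are too short before touching buf[0]. */
static int handle_msg(const struct msg *m)
{
    if (m->len < 1)
        return -EOPNOTSUPP;
    return m->buf[0] & 0x00ff;    /* safe: length was validated above */
}

int main(void)
{
    uint8_t payload = 0x42;
    struct msg good = { .addr = 0x99, .len = 1, .buf = &payload };
    struct msg bad  = { .addr = 0x99, .len = 0, .buf = NULL };

    printf("good: %d\n", handle_msg(&good));   /* 0x42 */
    printf("bad:  %d\n", handle_msg(&bad));    /* -EOPNOTSUPP, no crash */
    return 0;
}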
......@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
keys[0].fmr_physical = bofs;
if (keys[1].fmr_physical >= eofs)
keys[1].fmr_physical = eofs - 1;
if (keys[1].fmr_physical < keys[0].fmr_physical)
return 0;
start_fsb = keys[0].fmr_physical;
end_fsb = keys[1].fmr_physical;
......
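The ext4_getfsmap_datadev hunk clamps the query keys to the data device and then returns early when clamping has inverted the range (keys[1] below keys[0]) instead of walking an empty or underflowed interval. A small sketch of that clamp-then-validate pattern, with illustrative variable names:

#include <stdint.h>
#include <stdio.h>

/* Clamp [lo, hi] to the device range [bofs, eofs) and walk it,
 * bailing out when clamping leaves nothing to report. */
static int walk_range(uint64_t lo, uint64_t hi, uint64_t bofs, uint64_t eofs)
{
    if (lo < bofs)
        lo = bofs;
    if (hi >= eofs)
        hi = eofs - 1;
    if (hi < lo)
        return 0;    /* range is empty after clamping: nothing to do */

    printf("walking blocks %llu..%llu\n",
           (unsigned long long)lo, (unsigned long long)hi);
    return 0;
}

int main(void)
{
    walk_range(10, 100, 0, 50);   /* clamped to 10..49 */
    walk_range(80, 90, 0, 50);    /* inverted after clamping: skipped */
    return 0;
}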
......@@ -984,36 +984,28 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
* ie. locked but not dirty) or tune2fs (which may actually have
* the buffer dirtied, ugh.) */
if (buffer_dirty(bh)) {
/*
* First question: is this buffer already part of the current
* transaction or the existing committing transaction?
*/
if (jh->b_transaction) {
J_ASSERT_JH(jh,
jh->b_transaction == transaction ||
jh->b_transaction ==
journal->j_committing_transaction);
if (jh->b_next_transaction)
J_ASSERT_JH(jh, jh->b_next_transaction ==
transaction);
if (buffer_dirty(bh) && jh->b_transaction) {
warn_dirty_buffer(bh);
}
/*
* In any case we need to clean the dirty flag and we must
* do it under the buffer lock to be sure we don't race
* with running write-out.
* We need to clean the dirty flag and we must do it under the
* buffer lock to be sure we don't race with running write-out.
*/
JBUFFER_TRACE(jh, "Journalling dirty buffer");
clear_buffer_dirty(bh);
/*
* The buffer is going to be added to BJ_Reserved list now and
* nothing guarantees jbd2_journal_dirty_metadata() will be
* ever called for it. So we need to set jbddirty bit here to
* make sure the buffer is dirtied and written out when the
* journaling machinery is done with it.
*/
set_buffer_jbddirty(bh);
}
unlock_buffer(bh);
error = -EROFS;
if (is_handle_aborted(handle)) {
spin_unlock(&jh->b_state_lock);
unlock_buffer(bh);
goto out;
}
error = 0;
......@@ -1023,8 +1015,10 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
* b_next_transaction points to it
*/
if (jh->b_transaction == transaction ||
jh->b_next_transaction == transaction)
jh->b_next_transaction == transaction) {
unlock_buffer(bh);
goto done;
}
/*
* this is the first time this transaction is touching this buffer,
......@@ -1048,10 +1042,24 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
*/
smp_wmb();
spin_lock(&journal->j_list_lock);
if (test_clear_buffer_dirty(bh)) {
/*
* Execute buffer dirty clearing and jh->b_transaction
* assignment under journal->j_list_lock locked to
* prevent bh being removed from checkpoint list if
* the buffer is in an intermediate state (not dirty
* and jh->b_transaction is NULL).
*/
JBUFFER_TRACE(jh, "Journalling dirty buffer");
set_buffer_jbddirty(bh);
}
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
spin_unlock(&journal->j_list_lock);
unlock_buffer(bh);
goto done;
}
unlock_buffer(bh);
/*
* If there is already a copy-out version of this buffer, then we don't
* need to make another one
......
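The do_get_write_access() change moves two related transitions, clearing the buffer's dirty bit and filing the journal head on the BJ_Reserved list, into the same journal->j_list_lock critical section, so no other context can observe the intermediate state the diff's comment describes (buffer not dirty and jh->b_transaction still NULL) and drop the buffer from the checkpoint list. The sketch below is a userspace analogue of that publish-both-updates-under-one-lock idea, with an observer asserting the invariant; all names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <assert.h>
#include <stdio.h>

/* Illustrative journal-head state: "dirty" and "owned by a transaction"
 * must never both be false while the buffer is still tracked. */
struct jh_state {
    pthread_mutex_t list_lock;    /* analogue of journal->j_list_lock */
    bool dirty;
    bool on_transaction;
};

/* Do both transitions inside one critical section, as the fix does,
 * so an observer holding the same lock never sees the in-between state. */
static void file_buffer(struct jh_state *jh)
{
    pthread_mutex_lock(&jh->list_lock);
    if (jh->dirty)
        jh->dirty = false;        /* clear_buffer_dirty() analogue */
    jh->on_transaction = true;    /* __jbd2_journal_file_buffer() analogue */
    pthread_mutex_unlock(&jh->list_lock);
}

/* Checkpoint-side observer: under the lock the invariant always holds. */
static void observe(struct jh_state *jh)
{
    pthread_mutex_lock(&jh->list_lock);
    assert(jh->dirty || jh->on_transaction);
    pthread_mutex_unlock(&jh->list_lock);
}

int main(void)
{
    struct jh_state jh = {
        .list_lock = PTHREAD_MUTEX_INITIALIZER,
        .dirty = true,
    };

    observe(&jh);
    file_buffer(&jh);
    observe(&jh);
    printf("invariant held\n");
    return 0;
}

Had the two assignments been made under separate lock sections, the observer could run in the gap and see both flags false, which is exactly the window the kernel fix removes.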
......@@ -445,6 +445,9 @@ void put_task_stack(struct task_struct *tsk)
void free_task(struct task_struct *tsk)
{
#ifdef CONFIG_SECCOMP
WARN_ON_ONCE(tsk->seccomp.filter);
#endif
scs_release(tsk);
#ifndef CONFIG_THREAD_INFO_IN_TASK
......@@ -2332,12 +2335,6 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock(&current->sighand->siglock);
/*
* Copy seccomp details explicitly here, in case they were changed
* before holding sighand lock.
*/
copy_seccomp(p);
rseq_fork(p, clone_flags);
/* Don't start children in a dying pid namespace */
......@@ -2352,6 +2349,14 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cancel_cgroup;
}
/* No more failure paths after this point. */
/*
* Copy seccomp details explicitly here, in case they were changed
* before holding sighand lock.
*/
copy_seccomp(p);
init_task_pid_links(p);
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
......
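The copy_process() hunks move copy_seccomp() past the last failure path under sighand->siglock (right after the diff's own "No more failure paths after this point" marker) and add a WARN_ON_ONCE in free_task() that fires if a task is freed with a seccomp filter still attached. The general pattern is to attach externally visible state only once no remaining step can fail; the sketch below illustrates it with invented names, not the kernel structures.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct filter { int refs; };

struct task {
    struct filter *filter;    /* attached only when the task will survive */
};

static bool fallible_step(bool ok) { return ok; }

/* Attach the shared filter only after every fallible step has succeeded,
 * mirroring the move of copy_seccomp() past the last bad_fork_* path. */
static struct task *create_task(struct filter *shared, bool step_ok)
{
    struct task *t = calloc(1, sizeof(*t));

    if (!t)
        return NULL;
    if (!fallible_step(step_ok)) {
        free(t);        /* nothing was attached, so nothing leaks */
        return NULL;
    }

    /* No more failure paths after this point. */
    shared->refs++;
    t->filter = shared;
    return t;
}

int main(void)
{
    struct filter f = { .refs = 0 };

    struct task *ok = create_task(&f, true);
    struct task *bad = create_task(&f, false);

    /* refs is 1: the failed creation never took a reference. */
    printf("refs=%d ok=%p bad=%p\n", f.refs, (void *)ok, (void *)bad);
    return 0;    /* process exit reclaims memory in this sketch */
}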
......@@ -1669,6 +1669,27 @@ static void klp_breakpoint_post_process(struct klp_patch *patch, bool restore)
module_put(patch->mod);
}
static int klp_stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
int ret;
/*
* CPU hotplug locking is a "percpu" rw semaphore; however, its write
* lock and read lock are globally mutually exclusive, that is,
* cpus_write_lock() on one cpu can block all cpus_read_lock()
* on other cpus, and vice versa.
*
* Since cpu hotplug takes cpus_write_lock() before text_mutex,
* take cpus_read_lock() before text_mutex here to avoid deadlock.
*/
cpus_read_lock();
arch_klp_code_modify_prepare();
ret = stop_machine_cpuslocked(fn, data, cpus);
arch_klp_code_modify_post_process();
cpus_read_unlock();
return ret;
}
static int __klp_disable_patch(struct klp_patch *patch)
{
int ret;
......@@ -1689,9 +1710,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
}
#endif
arch_klp_code_modify_prepare();
ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
arch_klp_code_modify_post_process();
ret = klp_stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
if (ret)
return ret;
......@@ -1960,10 +1979,8 @@ static int klp_breakpoint_optimize(struct klp_patch *patch)
cnt++;
arch_klp_code_modify_prepare();
ret = stop_machine(klp_try_enable_patch, &patch_data,
ret = klp_stop_machine(klp_try_enable_patch, &patch_data,
cpu_online_mask);
arch_klp_code_modify_post_process();
if (!ret || ret != -EAGAIN)
break;
......@@ -2010,10 +2027,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
if (ret)
return ret;
arch_klp_code_modify_prepare();
ret = stop_machine(klp_try_enable_patch, &patch_data,
cpu_online_mask);
arch_klp_code_modify_post_process();
ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
if (!ret)
goto move_patch_to_tail;
if (ret != -EAGAIN)
......
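The new klp_stop_machine() helper takes cpus_read_lock() before the code-modification preparation (which takes text_mutex) and then calls stop_machine_cpuslocked(), so live patching acquires the two locks in the same order as CPU hotplug and cannot form an ABBA deadlock with it. The underlying rule is simply a fixed global lock-acquisition order; below is a userspace sketch of that rule with two pthread mutexes standing in for the hotplug lock and text_mutex (names are illustrative).

#include <pthread.h>
#include <stdio.h>

/* Analogues of the cpu-hotplug lock and text_mutex: every path must
 * take them in the same order (hotplug lock first, then text lock). */
static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER;

/* Path 1: "cpu hotplug" style caller. */
static void *hotplug_path(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&hotplug_lock);
    pthread_mutex_lock(&text_lock);
    puts("hotplug path ran");
    pthread_mutex_unlock(&text_lock);
    pthread_mutex_unlock(&hotplug_lock);
    return NULL;
}

/* Path 2: "klp_stop_machine" style caller, using the same order.
 * Taking text_lock first here would create an ABBA deadlock. */
static void *patch_path(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&hotplug_lock);    /* cpus_read_lock() analogue */
    pthread_mutex_lock(&text_lock);       /* code-modify prepare analogue */
    puts("patch path ran");
    pthread_mutex_unlock(&text_lock);
    pthread_mutex_unlock(&hotplug_lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, hotplug_path, NULL);
    pthread_create(&b, NULL, patch_path, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}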
......@@ -1538,7 +1538,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
key.flags = end; /* overload flags, as it is unsigned long */
for (pg = ftrace_pages_start; pg; pg = pg->next) {
if (end < pg->records[0].ip ||
if (pg->index == 0 ||
end < pg->records[0].ip ||
start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
continue;
rec = bsearch(&key, pg->records, pg->index,
......
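The one-line lookup_rec() change skips pages that hold no records (pg->index == 0) before the range test, because pg->records[pg->index - 1] would otherwise be indexed with an underflowed value. A self-contained sketch of the guarded range check around bsearch() follows; the structures are simplified stand-ins for struct ftrace_page and its records, and the lookup here matches an exact address rather than an instruction range.

#include <stdio.h>
#include <stdlib.h>

struct rec { unsigned long ip; };

struct page {
    struct rec *records;
    int index;        /* number of valid records; may be 0 */
};

static int cmp_rec(const void *a, const void *b)
{
    unsigned long ka = ((const struct rec *)a)->ip;
    unsigned long kb = ((const struct rec *)b)->ip;
    return (ka > kb) - (ka < kb);
}

/* Look up an ip across pages, skipping empty pages first so
 * records[index - 1] is never evaluated with index == 0. */
static struct rec *lookup(struct page *pages, int npages, unsigned long ip)
{
    struct rec key = { .ip = ip };

    for (int i = 0; i < npages; i++) {
        struct page *pg = &pages[i];

        if (pg->index == 0 ||
            ip < pg->records[0].ip ||
            ip > pg->records[pg->index - 1].ip)
            continue;
        return bsearch(&key, pg->records, pg->index,
                       sizeof(struct rec), cmp_rec);
    }
    return NULL;
}

int main(void)
{
    struct rec recs[] = { { 100 }, { 200 }, { 300 } };
    struct page pages[] = {
        { .records = NULL, .index = 0 },   /* empty page: must be skipped */
        { .records = recs, .index = 3 },
    };

    struct rec *r = lookup(pages, 2, 200);
    printf("found ip=%lu\n", r ? r->ip : 0UL);
    return 0;
}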
......@@ -3197,19 +3197,35 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
if (PageAnon(vmf->page)) {
struct page *page = vmf->page;
/* PageKsm() doesn't necessarily raise the page refcount */
if (PageKsm(page) || page_count(page) != 1)
/*
* We have to verify under page lock: these early checks are
* just an optimization to avoid locking the page and freeing
* the swapcache if there is little hope that we can reuse.
*
* PageKsm() doesn't necessarily raise the page refcount.
*/
if (PageKsm(page) || page_count(page) > 3)
goto copy;
if (!PageLRU(page))
/*
* Note: We cannot easily detect+handle references from
* remote LRU pagevecs or references to PageLRU() pages.
*/
lru_add_drain();
if (page_count(page) > 1 + PageSwapCache(page))
goto copy;
if (!trylock_page(page))
goto copy;
if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
if (PageSwapCache(page))
try_to_free_swap(page);
if (PageKsm(page) || page_count(page) != 1) {
unlock_page(page);
goto copy;
}
/*
* Ok, we've got the only map reference, and the only
* page count reference, and the page is locked,
* it's dark out, and we're wearing sunglasses. Hit it.
* Ok, we've got the only page reference from our mapping
* and the page is locked, it's dark out, and we're wearing
* sunglasses. Hit it.
*/
unlock_page(page);
wp_page_reuse(vmf);
......
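The do_wp_page() hunk treats the early unlocked checks as an optimization only: the real decision between reusing the anonymous page and taking the copy path is made under the page lock, after draining LRU pagevecs and dropping a stale swapcache reference, and only when page_count() shows the faulting mapping holds the sole reference. The sketch below is a userspace analogue of that refcount-gated reuse-or-copy choice; none of the names are kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
    int refcount;
    char data[32];
};

/* Write to the buffer: reuse it in place only when we are the sole
 * owner; otherwise make a private copy, as copy-on-write does. */
static struct buf *write_buf(struct buf *b, const char *text)
{
    if (b->refcount == 1) {
        /* exclusive owner: safe to modify in place */
        strncpy(b->data, text, sizeof(b->data) - 1);
        return b;
    }

    /* shared: copy, then drop our reference to the original */
    struct buf *copy = calloc(1, sizeof(*copy));
    copy->refcount = 1;
    strncpy(copy->data, text, sizeof(copy->data) - 1);
    b->refcount--;
    return copy;
}

int main(void)
{
    struct buf *shared = calloc(1, sizeof(*shared));
    shared->refcount = 2;        /* two "mappings" share the buffer */

    struct buf *mine = write_buf(shared, "private");
    printf("copied: %s, original refs now %d\n", mine->data, shared->refcount);

    /* our copy dropped one reference, so the original is exclusively owned */
    struct buf *same = write_buf(shared, "in place");
    printf("reused: %s (same object: %d)\n", same->data, same == shared);

    free(mine);
    free(shared);
    return 0;
}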
......@@ -2641,14 +2641,17 @@ static void __vunmap(const void *addr, int deallocate_pages)
vm_remove_mappings(area, deallocate_pages);
if (deallocate_pages) {
unsigned int page_order = vm_area_page_order(area);
int i;
for (i = 0; i < area->nr_pages; i += 1U << page_order) {
for (i = 0; i < area->nr_pages; i++) {
struct page *page = area->pages[i];
BUG_ON(!page);
__free_pages(page, page_order);
/*
* High-order allocs for huge vmallocs are split, so
* can be freed as an array of order-0 allocations
*/
__free_pages(page, 0);
}
atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
......@@ -2930,8 +2933,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
struct page *page;
int p;
/* Compound pages required for remap_vmalloc_page */
page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
page = alloc_pages_node(node, gfp_mask, page_order);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vfree() */
area->nr_pages = i;
......@@ -2943,6 +2945,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
goto fail;
}
/*
* Higher order allocations must be able to be treated as
* independent small pages by callers (as they can with
* small-page vmallocs). Some drivers do their own refcounting
* on vmalloc_to_page() pages, some use page->mapping,
* page->lru, etc.
*/
if (page_order)
split_page(page, page_order);
for (p = 0; p < (1U << page_order); p++)
area->pages[i + p] = page + p;
......
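The vmalloc hunks stop passing __GFP_COMP, split each high-order backing allocation with split_page(), and free the memory as an array of order-0 pages, so callers that take references to individual vmalloc pages (via vmalloc_to_page(), page->mapping, page->lru, and so on) really do see independent small pages. The property this restores is that a large contiguous allocation can be released piecewise; the userspace sketch below shows the same property with mmap()/munmap() on a page-granular mapping and is purely illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page_size = sysconf(_SC_PAGESIZE);
    unsigned int order = 2;              /* 4 pages in one chunk */
    size_t nr_pages = 1UL << order;

    /* One contiguous "high-order" allocation. */
    char *chunk = mmap(NULL, nr_pages * page_size,
                       PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (chunk == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* Record every page individually, like area->pages[i + p] = page + p. */
    char **pages = calloc(nr_pages, sizeof(*pages));
    for (size_t p = 0; p < nr_pages; p++)
        pages[p] = chunk + p * page_size;

    /* Because the pages are independent, each one can be released on its
     * own, which is what split_page() makes possible for the kernel case. */
    for (size_t p = 0; p < nr_pages; p++) {
        if (munmap(pages[p], page_size) != 0) {
            perror("munmap");
            return 1;
        }
    }

    free(pages);
    puts("freed 4 pages individually");
    return 0;
}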
......@@ -176,7 +176,7 @@ static void tipc_conn_close(struct tipc_conn *con)
conn_put(con);
}
static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
{
struct tipc_conn *con;
int ret;
......@@ -202,10 +202,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
}
con->conid = ret;
s->idr_in_use++;
spin_unlock_bh(&s->idr_lock);
set_bit(CF_CONNECTED, &con->flags);
con->server = s;
con->sock = sock;
conn_get(con);
spin_unlock_bh(&s->idr_lock);
return con;
}
......@@ -460,7 +462,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
if (ret < 0)
return;
con = tipc_conn_alloc(srv);
con = tipc_conn_alloc(srv, newsock);
if (IS_ERR(con)) {
ret = PTR_ERR(con);
sock_release(newsock);
......@@ -472,11 +474,11 @@ static void tipc_topsrv_accept(struct work_struct *work)
newsk->sk_data_ready = tipc_conn_data_ready;
newsk->sk_write_space = tipc_conn_write_space;
newsk->sk_user_data = con;
con->sock = newsock;
write_unlock_bh(&newsk->sk_callback_lock);
/* Wake up receive process in case of 'SYN+' message */
newsk->sk_data_ready(newsk);
conn_put(con);
}
}
......@@ -570,17 +572,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
sub.filter = filter;
*(u32 *)&sub.usr_handle = port;
con = tipc_conn_alloc(tipc_topsrv(net));
con = tipc_conn_alloc(tipc_topsrv(net), NULL);
if (IS_ERR(con))
return false;
*conid = con->conid;
con->sock = NULL;
rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
if (rc >= 0)
return true;
if (rc)
conn_put(con);
return false;
conn_put(con);
return !rc;
}
void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
......
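The tipc_conn_alloc() change initializes the connection fully, including con->sock, and takes an extra reference while s->idr_lock is still held, so the connection only becomes findable by conid once it is complete and pinned; each caller then drops its reference with conn_put() on every exit path. Below is a hedged userspace sketch of that "publish fully initialized, referenced objects under the table lock" pattern; the table, lock, and names are all invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CONNS 16

struct conn {
    int refs;
    int conid;
    void *sock;    /* must be valid before the conn is findable */
};

static struct conn *table[MAX_CONNS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fully initialize and reference the object, then publish it, all inside
 * the same critical section that makes it visible to lookups. */
static struct conn *conn_alloc(void *sock)
{
    struct conn *c = calloc(1, sizeof(*c));

    pthread_mutex_lock(&table_lock);
    for (int id = 0; id < MAX_CONNS; id++) {
        if (!table[id]) {
            c->conid = id;
            c->sock = sock;
            c->refs = 2;    /* one for the table, one for the caller */
            table[id] = c;
            pthread_mutex_unlock(&table_lock);
            return c;
        }
    }
    pthread_mutex_unlock(&table_lock);
    free(c);
    return NULL;
}

static void conn_put(struct conn *c)
{
    pthread_mutex_lock(&table_lock);
    if (--c->refs == 0) {
        table[c->conid] = NULL;
        pthread_mutex_unlock(&table_lock);
        free(c);
        return;
    }
    pthread_mutex_unlock(&table_lock);
}

int main(void)
{
    int dummy_sock = 42;
    struct conn *c = conn_alloc(&dummy_sock);

    printf("conid=%d refs=%d\n", c->conid, c->refs);
    conn_put(c);    /* caller's reference: the table still holds one */
    conn_put(c);    /* the close path would drop the table's reference */
    return 0;
}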