Unverified commit be7b6c55, authored by openeuler-ci-bot, committed by Gitee

!512 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2023-28328
CVE-2023-1382
CVE-2023-1079

livepatch bugfixes from Zheng Yejian
seccomp bugfix from GONG, Ruiqi
mm bugfixes from ZhangPeng
ftrace bugfix from Chen Zhongjin
ext4 bugfix from Baokun Li
jbd2 bugfix from Zhihao Cheng 
 
Link: https://gitee.com/openeuler/kernel/pulls/512

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -386,8 +386,15 @@ void arch_klp_code_modify_post_process(void)
 long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func)
 {
-	return copy_from_kernel_nofault(arch_data->old_code,
-			old_func, JMP_E9_INSN_SIZE);
+	long ret;
+
+	/* Prevent text modification */
+	mutex_lock(&text_mutex);
+	ret = copy_from_kernel_nofault(arch_data->old_code,
+			old_func, JMP_E9_INSN_SIZE);
+	mutex_unlock(&text_mutex);
+
+	return ret;
 }
 
 int arch_klp_patch_func(struct klp_func *func)
......
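The hunk above serializes the snapshot of the patched function's first bytes against concurrent text modification. A minimal user-space sketch of the same idea, with a pthread mutex standing in for text_mutex and hypothetical names (live_code, snapshot_code):

#include <pthread.h>
#include <string.h>

/* Hypothetical stand-ins: live_code is rewritten by a patching thread,
 * code_mutex plays the role of the kernel's text_mutex. */
static unsigned char live_code[5];
static pthread_mutex_t code_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Take a consistent snapshot of the bytes (caller passes len <= sizeof(live_code));
 * without the lock a concurrent writer could change them halfway through the copy. */
static void snapshot_code(unsigned char *old_code, size_t len)
{
        pthread_mutex_lock(&code_mutex);
        memcpy(old_code, live_code, len);
        pthread_mutex_unlock(&code_mutex);
}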
@@ -95,6 +95,7 @@ struct asus_kbd_leds {
 	struct hid_device *hdev;
 	struct work_struct work;
 	unsigned int brightness;
+	spinlock_t lock;
 	bool removed;
 };
@@ -397,24 +398,42 @@ static int asus_kbd_get_functions(struct hid_device *hdev,
 	return ret;
 }
 
+static void asus_schedule_work(struct asus_kbd_leds *led)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&led->lock, flags);
+	if (!led->removed)
+		schedule_work(&led->work);
+	spin_unlock_irqrestore(&led->lock, flags);
+}
+
 static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
 				   enum led_brightness brightness)
 {
 	struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
 						 cdev);
-	if (led->brightness == brightness)
-		return;
+	unsigned long flags;
 
+	spin_lock_irqsave(&led->lock, flags);
 	led->brightness = brightness;
-	schedule_work(&led->work);
+	spin_unlock_irqrestore(&led->lock, flags);
+
+	asus_schedule_work(led);
 }
 
 static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev)
 {
 	struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
 						 cdev);
+	enum led_brightness brightness;
+	unsigned long flags;
+
+	spin_lock_irqsave(&led->lock, flags);
+	brightness = led->brightness;
+	spin_unlock_irqrestore(&led->lock, flags);
 
-	return led->brightness;
+	return brightness;
 }
 
 static void asus_kbd_backlight_work(struct work_struct *work)
@@ -422,11 +441,11 @@ static void asus_kbd_backlight_work(struct work_struct *work)
 	struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
 	u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 };
 	int ret;
+	unsigned long flags;
 
-	if (led->removed)
-		return;
-
+	spin_lock_irqsave(&led->lock, flags);
 	buf[4] = led->brightness;
+	spin_unlock_irqrestore(&led->lock, flags);
 
 	ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf));
 	if (ret < 0)
@@ -488,6 +507,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
 	drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set;
 	drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get;
 	INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
+	spin_lock_init(&drvdata->kbd_backlight->lock);
 
 	ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev);
 	if (ret < 0) {
@@ -1016,9 +1036,13 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
 static void asus_remove(struct hid_device *hdev)
 {
 	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+	unsigned long flags;
 
 	if (drvdata->kbd_backlight) {
+		spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags);
 		drvdata->kbd_backlight->removed = true;
+		spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags);
+
 		cancel_work_sync(&drvdata->kbd_backlight->work);
 	}
......
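The HID hunks above (the CVE-2023-1079 fix) make one lock guard both the cached brightness and the removed flag, and only queue work while the device is still present. A condensed user-space sketch of that scheme; kbd_led, led_set and led_remove are stand-ins, not the driver's API:

#include <pthread.h>
#include <stdbool.h>

struct kbd_led {
        pthread_mutex_t lock;       /* plays the role of spinlock_t lock */
        unsigned int brightness;
        bool removed;
};

/* Report whether queued work may run; in the driver this is where
 * schedule_work(&led->work) is called. */
static bool led_may_schedule(struct kbd_led *led)
{
        bool ok;

        pthread_mutex_lock(&led->lock);
        ok = !led->removed;
        pthread_mutex_unlock(&led->lock);
        return ok;
}

static void led_set(struct kbd_led *led, unsigned int brightness)
{
        pthread_mutex_lock(&led->lock);
        led->brightness = brightness;        /* cached value, read by the work */
        pthread_mutex_unlock(&led->lock);
        (void)led_may_schedule(led);         /* only queues while present */
}

/* Removal: flip the flag under the lock, then wait for any running work
 * (cancel_work_sync() in the driver). */
static void led_remove(struct kbd_led *led)
{
        pthread_mutex_lock(&led->lock);
        led->removed = true;
        pthread_mutex_unlock(&led->lock);
}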
@@ -975,6 +975,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
 		if (msg[i].addr == 0x99) {
 			req = 0xBE;
 			index = 0;
+			if (msg[i].len < 1) {
+				i = -EOPNOTSUPP;
+				break;
+			}
 			value = msg[i].buf[0] & 0x00ff;
 			length = 1;
 			az6027_usb_out_op(d, req, value, index, data, length);
......
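The az6027 hunk (the CVE-2023-28328 fix) rejects an I2C message that is too short to carry the byte it is about to read. A self-contained sketch of the same length check, using a simplified stand-in for struct i2c_msg:

#include <errno.h>
#include <stdint.h>

struct fake_i2c_msg {                  /* simplified stand-in for struct i2c_msg */
        uint16_t len;
        uint8_t *buf;
};

/* Refuse messages too short to carry the register value we are about
 * to read, instead of dereferencing buf[0] unconditionally. */
static int read_first_byte(const struct fake_i2c_msg *msg, uint16_t *value)
{
        if (msg->len < 1)
                return -EOPNOTSUPP;
        *value = msg->buf[0] & 0x00ff;
        return 0;
}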
@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
 		keys[0].fmr_physical = bofs;
 	if (keys[1].fmr_physical >= eofs)
 		keys[1].fmr_physical = eofs - 1;
+	if (keys[1].fmr_physical < keys[0].fmr_physical)
+		return 0;
 	start_fsb = keys[0].fmr_physical;
 	end_fsb = keys[1].fmr_physical;
......
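The ext4 getfsmap change returns early once clamping the keys to the device bounds leaves an inverted (empty) range. A small sketch of that clamp-then-check step, with hypothetical names:

#include <stdint.h>

/* Clamp a [*lo, *hi] physical-block query to [bofs, eofs) and report
 * whether anything is left to scan; 0 means the range is empty. */
static int clamp_fsmap_range(uint64_t *lo, uint64_t *hi,
                             uint64_t bofs, uint64_t eofs)
{
        if (*lo < bofs)
                *lo = bofs;
        if (*hi >= eofs)
                *hi = eofs - 1;
        return *hi >= *lo;
}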
@@ -984,36 +984,28 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	 * ie. locked but not dirty) or tune2fs (which may actually have
 	 * the buffer dirtied, ugh.)  */
 
-	if (buffer_dirty(bh)) {
+	if (buffer_dirty(bh) && jh->b_transaction) {
+		warn_dirty_buffer(bh);
 		/*
-		 * First question: is this buffer already part of the current
-		 * transaction or the existing committing transaction?
-		 */
-		if (jh->b_transaction) {
-			J_ASSERT_JH(jh,
-				jh->b_transaction == transaction ||
-				jh->b_transaction ==
-					journal->j_committing_transaction);
-			if (jh->b_next_transaction)
-				J_ASSERT_JH(jh, jh->b_next_transaction ==
-							transaction);
-			warn_dirty_buffer(bh);
-		}
-		/*
-		 * In any case we need to clean the dirty flag and we must
-		 * do it under the buffer lock to be sure we don't race
-		 * with running write-out.
+		 * We need to clean the dirty flag and we must do it under the
+		 * buffer lock to be sure we don't race with running write-out.
 		 */
 		JBUFFER_TRACE(jh, "Journalling dirty buffer");
 		clear_buffer_dirty(bh);
+		/*
+		 * The buffer is going to be added to BJ_Reserved list now and
+		 * nothing guarantees jbd2_journal_dirty_metadata() will be
+		 * ever called for it. So we need to set jbddirty bit here to
+		 * make sure the buffer is dirtied and written out when the
+		 * journaling machinery is done with it.
+		 */
 		set_buffer_jbddirty(bh);
 	}
 
-	unlock_buffer(bh);
-
 	error = -EROFS;
 	if (is_handle_aborted(handle)) {
 		spin_unlock(&jh->b_state_lock);
+		unlock_buffer(bh);
 		goto out;
 	}
 	error = 0;
@@ -1023,8 +1015,10 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	 * b_next_transaction points to it
 	 */
 	if (jh->b_transaction == transaction ||
-	    jh->b_next_transaction == transaction)
+	    jh->b_next_transaction == transaction) {
+		unlock_buffer(bh);
 		goto done;
+	}
 
 	/*
 	 * this is the first time this transaction is touching this buffer,
@@ -1048,10 +1042,24 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 		 */
 		smp_wmb();
 		spin_lock(&journal->j_list_lock);
+		if (test_clear_buffer_dirty(bh)) {
+			/*
+			 * Execute buffer dirty clearing and jh->b_transaction
+			 * assignment under journal->j_list_lock locked to
+			 * prevent bh being removed from checkpoint list if
+			 * the buffer is in an intermediate state (not dirty
+			 * and jh->b_transaction is NULL).
+			 */
+			JBUFFER_TRACE(jh, "Journalling dirty buffer");
+			set_buffer_jbddirty(bh);
+		}
 		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
 		spin_unlock(&journal->j_list_lock);
+		unlock_buffer(bh);
 		goto done;
 	}
+	unlock_buffer(bh);
 
 	/*
 	 * If there is already a copy-out version of this buffer, then we don't
 	 * need to make another one
......
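The jbd2 hunks move the dirty-bit handling so that clearing the bit and filing the buffer on the transaction list happen under the same lock, closing the window in which another thread could see a clean buffer that is not yet attached anywhere. A simplified user-space sketch of that "one critical section, no intermediate state" idea, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

struct jbuf {
        pthread_mutex_t list_lock;   /* stands in for journal->j_list_lock */
        bool dirty;                  /* stands in for the bh dirty bit     */
        bool jbddirty;               /* stands in for the jbddirty bit     */
        bool on_reserved_list;
};

/* Clear the dirty bit and file the buffer on the transaction list inside
 * one critical section, so no other thread can observe the intermediate
 * "clean but not filed anywhere" state the old code exposed. */
static void journal_this_buffer(struct jbuf *b)
{
        pthread_mutex_lock(&b->list_lock);
        if (b->dirty) {
                b->dirty = false;
                b->jbddirty = true;    /* will be written back later */
        }
        b->on_reserved_list = true;
        pthread_mutex_unlock(&b->list_lock);
}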
@@ -445,6 +445,9 @@ void put_task_stack(struct task_struct *tsk)
 
 void free_task(struct task_struct *tsk)
 {
+#ifdef CONFIG_SECCOMP
+	WARN_ON_ONCE(tsk->seccomp.filter);
+#endif
 	scs_release(tsk);
 
 #ifndef CONFIG_THREAD_INFO_IN_TASK
@@ -2301,12 +2304,6 @@ static __latent_entropy struct task_struct *copy_process(
 
 	spin_lock(&current->sighand->siglock);
 
-	/*
-	 * Copy seccomp details explicitly here, in case they were changed
-	 * before holding sighand lock.
-	 */
-	copy_seccomp(p);
-
 	rseq_fork(p, clone_flags);
 
 	/* Don't start children in a dying pid namespace */
@@ -2321,6 +2318,14 @@ static __latent_entropy struct task_struct *copy_process(
 		goto bad_fork_cancel_cgroup;
 	}
 
+	/* No more failure paths after this point. */
+
+	/*
+	 * Copy seccomp details explicitly here, in case they were changed
+	 * before holding sighand lock.
+	 */
+	copy_seccomp(p);
+
 	init_task_pid_links(p);
 	if (likely(p->pid)) {
 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
......
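The fork/seccomp hunks delay copy_seccomp() until after the last failure check (and add a WARN_ON_ONCE in free_task() to catch violations), so error paths never free a task that already owns a filter reference. A generic sketch of the ordering rule, with hypothetical names:

#include <errno.h>
#include <stdbool.h>

static int filter_refcount;            /* stands in for the shared seccomp filter */

struct child {
        bool has_filter;
};

/* Run every check that can still fail first; only then take the filter
 * reference, so no error path has to drop it (this mirrors where
 * copy_seccomp(p) now sits in copy_process()). */
static int create_child(struct child *c, bool pid_ok, bool cgroup_ok)
{
        c->has_filter = false;

        if (!pid_ok)
                return -EAGAIN;
        if (!cgroup_ok)
                return -EAGAIN;

        /* No more failure paths after this point. */
        filter_refcount++;
        c->has_filter = true;
        return 0;
}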
@@ -1408,6 +1408,18 @@ static void func_node_free(struct klp_func *func)
 	}
 }
 
+static void klp_mem_recycle(struct klp_patch *patch)
+{
+	struct klp_object *obj;
+	struct klp_func *func;
+
+	klp_for_each_object(patch, obj) {
+		klp_for_each_func(obj, func) {
+			func_node_free(func);
+		}
+	}
+}
+
 static int klp_mem_prepare(struct klp_patch *patch)
 {
 	struct klp_object *obj;
@@ -1417,6 +1429,7 @@ static int klp_mem_prepare(struct klp_patch *patch)
 		klp_for_each_func(obj, func) {
 			func->func_node = func_node_alloc(func);
 			if (func->func_node == NULL) {
+				klp_mem_recycle(patch);
 				pr_err("alloc func_node failed\n");
 				return -ENOMEM;
 			}
@@ -1425,16 +1438,25 @@ static int klp_mem_prepare(struct klp_patch *patch)
 	return 0;
 }
 
-static void klp_mem_recycle(struct klp_patch *patch)
+static int klp_stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 {
-	struct klp_object *obj;
-	struct klp_func *func;
+	int ret;
 
-	klp_for_each_object(patch, obj) {
-		klp_for_each_func(obj, func) {
-			func_node_free(func);
-		}
-	}
+	/*
+	 * Cpu hotplug locking is a "percpu" rw semaphore, however write
+	 * lock and read lock on it are globally mutual exclusive, that is
+	 * cpus_write_lock() on one cpu can block all cpus_read_lock()
+	 * on other cpus, vice versa.
+	 *
+	 * Since cpu hotplug take the cpus_write_lock() before text_mutex,
+	 * here take cpus_read_lock() before text_mutex to avoid deadlock.
+	 */
+	cpus_read_lock();
+	arch_klp_code_modify_prepare();
+	ret = stop_machine_cpuslocked(fn, data, cpus);
+	arch_klp_code_modify_post_process();
+	cpus_read_unlock();
+
+	return ret;
 }
 
 static int __klp_disable_patch(struct klp_patch *patch)
@@ -1457,9 +1479,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
 	}
 #endif
 
-	arch_klp_code_modify_prepare();
-	ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
-	arch_klp_code_modify_post_process();
+	ret = klp_stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
 	if (ret)
 		return ret;
@@ -1695,11 +1715,10 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	}
 #endif
 
-	arch_klp_code_modify_prepare();
 	ret = klp_mem_prepare(patch);
-	if (ret == 0)
-		ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
-	arch_klp_code_modify_post_process();
+	if (ret)
+		return ret;
+
+	ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
 	if (ret) {
 		klp_mem_recycle(patch);
 		return ret;
......
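The new klp_stop_machine() helper always takes the CPU-hotplug lock before the text-modification lock, the same order the hotplug path uses, which removes an AB-BA deadlock. A minimal user-space illustration of consistent lock ordering, with pthread mutexes standing in for the two kernel locks:

#include <pthread.h>

/* lock_a stands in for the CPU-hotplug lock, lock_b for text_mutex. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Every path takes A before B; mixing A->B on one path with B->A on
 * another is the AB-BA pattern the original code could hit. */
static void patch_code(void (*apply)(void *), void *data)
{
        pthread_mutex_lock(&lock_a);   /* cpus_read_lock()                */
        pthread_mutex_lock(&lock_b);   /* arch_klp_code_modify_prepare()  */
        apply(data);                   /* stop_machine_cpuslocked(...)    */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
}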
@@ -1538,7 +1538,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 	key.flags = end;	/* overload flags, as it is unsigned long */
 
 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
-		if (end < pg->records[0].ip ||
+		if (pg->index == 0 ||
+		    end < pg->records[0].ip ||
 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
 			continue;
 		rec = bsearch(&key, pg->records, pg->index,
......
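The ftrace change skips pages with no records before touching records[0] or records[pg->index - 1]. The same guard in a generic, self-contained form:

#include <stddef.h>
#include <stdint.h>

struct rec {
        uintptr_t ip;
};

/* Return nonzero if [start, end] may overlap a page holding 'count'
 * records; an empty page must be skipped before recs[0] or
 * recs[count - 1] is touched. */
static int page_may_contain(const struct rec *recs, size_t count,
                            uintptr_t start, uintptr_t end,
                            size_t insn_size)
{
        if (count == 0)
                return 0;
        if (end < recs[0].ip)
                return 0;
        if (start >= recs[count - 1].ip + insn_size)
                return 0;
        return 1;
}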
@@ -3179,19 +3179,35 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	if (PageAnon(vmf->page)) {
 		struct page *page = vmf->page;
 
-		/* PageKsm() doesn't necessarily raise the page refcount */
-		if (PageKsm(page) || page_count(page) != 1)
+		/*
+		 * We have to verify under page lock: these early checks are
+		 * just an optimization to avoid locking the page and freeing
+		 * the swapcache if there is little hope that we can reuse.
+		 *
+		 * PageKsm() doesn't necessarily raise the page refcount.
+		 */
+		if (PageKsm(page) || page_count(page) > 3)
+			goto copy;
+		if (!PageLRU(page))
+			/*
+			 * Note: We cannot easily detect+handle references from
+			 * remote LRU pagevecs or references to PageLRU() pages.
+			 */
+			lru_add_drain();
+		if (page_count(page) > 1 + PageSwapCache(page))
 			goto copy;
 		if (!trylock_page(page))
 			goto copy;
-		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
+		if (PageSwapCache(page))
+			try_to_free_swap(page);
+		if (PageKsm(page) || page_count(page) != 1) {
 			unlock_page(page);
 			goto copy;
 		}
 		/*
-		 * Ok, we've got the only map reference, and the only
-		 * page count reference, and the page is locked,
-		 * it's dark out, and we're wearing sunglasses. Hit it.
+		 * Ok, we've got the only page reference from our mapping
+		 * and the page is locked, it's dark out, and we're wearing
+		 * sunglasses. Hit it.
 		 */
 		unlock_page(page);
 		wp_page_reuse(vmf);
......
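The do_wp_page() hunk follows a common shape: cheap unlocked checks only filter out hopeless cases, and the decision to reuse the page is re-verified with the page lock held. A rough user-space sketch of that optimistic-check-then-recheck structure, not the kernel's actual reuse rules:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct shared_page {
        pthread_mutex_t lock;          /* stands in for the page lock */
        atomic_int refcount;
};

/* Unlocked checks only filter out hopeless cases cheaply; the decision
 * to reuse is re-verified with the lock held (in the kernel, the page
 * lock plus the swapcache handling make that check authoritative). */
static bool try_reuse(struct shared_page *p)
{
        if (atomic_load(&p->refcount) > 3)      /* optimistic, may be stale */
                return false;
        if (pthread_mutex_trylock(&p->lock) != 0)
                return false;
        if (atomic_load(&p->refcount) != 1) {   /* decisive re-check */
                pthread_mutex_unlock(&p->lock);
                return false;
        }
        pthread_mutex_unlock(&p->lock);
        return true;                            /* safe to reuse exclusively */
}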
@@ -2637,14 +2637,17 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	vm_remove_mappings(area, deallocate_pages);
 
 	if (deallocate_pages) {
-		unsigned int page_order = vm_area_page_order(area);
 		int i;
 
-		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
+		for (i = 0; i < area->nr_pages; i++) {
 			struct page *page = area->pages[i];
 
 			BUG_ON(!page);
-			__free_pages(page, page_order);
+			/*
+			 * High-order allocs for huge vmallocs are split, so
+			 * can be freed as an array of order-0 allocations
+			 */
+			__free_pages(page, 0);
 		}
 		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
@@ -2924,8 +2927,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			struct page *page;
 			int p;
 
-			/* Compound pages required for remap_vmalloc_page */
-			page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
+			page = alloc_pages_node(node, gfp_mask, page_order);
 			if (unlikely(!page)) {
 				/* Successfully allocated i pages, free them in __vfree() */
 				area->nr_pages = i;
@@ -2937,6 +2939,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				goto fail;
 			}
 
+			/*
+			 * Higher order allocations must be able to be treated as
+			 * indepdenent small pages by callers (as they can with
+			 * small-page vmallocs). Some drivers do their own refcounting
+			 * on vmalloc_to_page() pages, some use page->mapping,
+			 * page->lru, etc.
+			 */
+			if (page_order)
+				split_page(page, page_order);
+
 			for (p = 0; p < (1U << page_order); p++)
 				area->pages[i + p] = page + p;
......
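The vmalloc hunks stop using compound (__GFP_COMP) pages for huge-vmalloc backing and instead split each high-order allocation into independent order-0 pages, so callers that refcount vmalloc_to_page() pages keep working. A kernel-context sketch of that allocate-then-split step; alloc_backing_pages is a hypothetical helper, not an existing API:

/* Hypothetical helper mirroring the change: back a mapping with one
 * high-order allocation but expose it as independent order-0 pages. */
static struct page *alloc_backing_pages(gfp_t gfp, unsigned int order)
{
        /* No __GFP_COMP: split_page() below requires a non-compound page. */
        struct page *page = alloc_pages(gfp, order);

        if (!page)
                return NULL;
        /* After split_page() each of the 1 << order pages has its own
         * refcount and can be freed with __free_pages(page + i, 0). */
        if (order)
                split_page(page, order);
        return page;
}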
@@ -176,7 +176,7 @@ static void tipc_conn_close(struct tipc_conn *con)
 	conn_put(con);
 }
 
-static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
 {
 	struct tipc_conn *con;
 	int ret;
@@ -202,10 +202,12 @@ static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s)
 	}
 	con->conid = ret;
 	s->idr_in_use++;
-	spin_unlock_bh(&s->idr_lock);
 
 	set_bit(CF_CONNECTED, &con->flags);
 	con->server = s;
+	con->sock = sock;
+	conn_get(con);
+	spin_unlock_bh(&s->idr_lock);
 
 	return con;
 }
@@ -460,7 +462,7 @@ static void tipc_topsrv_accept(struct work_struct *work)
 		ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
 		if (ret < 0)
 			return;
-		con = tipc_conn_alloc(srv);
+		con = tipc_conn_alloc(srv, newsock);
 		if (IS_ERR(con)) {
 			ret = PTR_ERR(con);
 			sock_release(newsock);
@@ -472,11 +474,11 @@ static void tipc_topsrv_accept(struct work_struct *work)
 		newsk->sk_data_ready = tipc_conn_data_ready;
 		newsk->sk_write_space = tipc_conn_write_space;
 		newsk->sk_user_data = con;
-		con->sock = newsock;
 		write_unlock_bh(&newsk->sk_callback_lock);
 
 		/* Wake up receive process in case of 'SYN+' message */
 		newsk->sk_data_ready(newsk);
+		conn_put(con);
 	}
 }
@@ -570,17 +572,17 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
 	sub.filter = filter;
 	*(u32 *)&sub.usr_handle = port;
 
-	con = tipc_conn_alloc(tipc_topsrv(net));
+	con = tipc_conn_alloc(tipc_topsrv(net), NULL);
 	if (IS_ERR(con))
 		return false;
 
 	*conid = con->conid;
-	con->sock = NULL;
 	rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
-	if (rc >= 0)
-		return true;
+	if (rc)
+		conn_put(con);
+
 	conn_put(con);
-	return false;
+	return !rc;
 }
 
 void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
......
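The tipc hunks (the CVE-2023-1382 fix) finish initializing the connection, with the socket pointer set and an extra reference taken, before it becomes reachable through the IDR, so a concurrent close cannot free or observe a half-built object. A user-space sketch of "initialize and take a reference before publishing", with C11 atomics and hypothetical names; in the real code the publish step happens while idr_lock is still held:

#include <stdatomic.h>
#include <stdlib.h>

struct conn {
        atomic_int refcnt;
        void *sock;
};

static struct conn *registry;           /* stands in for the idr slot */

static void conn_put(struct conn *c)
{
        if (atomic_fetch_sub(&c->refcnt, 1) == 1)
                free(c);
}

/* Finish initialization and take the caller's reference before the
 * object becomes visible to other threads. */
static struct conn *conn_alloc(void *sock)
{
        struct conn *c = malloc(sizeof(*c));

        if (!c)
                return NULL;
        atomic_init(&c->refcnt, 1);      /* registry's reference            */
        c->sock = sock;
        atomic_fetch_add(&c->refcnt, 1); /* caller's reference (conn_get)   */
        registry = c;                    /* publish only now                */
        return c;                        /* caller drops it with conn_put() */
}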