Unverified commit 0d3cf6e0, authored by openeuler-ci-bot, committed by Gitee

!410 Backport CVEs and bugfixes

Merge Pull Request from: @zhangjialin11 
 
Pull new CVEs:
CVE-2023-0597
CVE-2023-0615

Huawei BMA bugfix from Huajingjing
mm bugfixes from Lu Jialin and Zhang Peng
net bugfixes from Baisong Zhong, Liu Jian and Zhengchao Shao
arm32 kaslr bugfix from Cui GaoSheng
fs bugfix from ZhaoLong Wang 
 
Link: https://gitee.com/openeuler/kernel/pulls/410

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
......@@ -49,7 +49,8 @@ KBUILD_LDFLAGS += -EL
endif
ifeq ($(CONFIG_RELOCATABLE),y)
KBUILD_CFLAGS += -fpic -include $(srctree)/include/linux/hidden.h
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
CFLAGS_KERNEL += -fpic
CFLAGS_MODULE += -fno-pic
LDFLAGS_vmlinux += -pie -shared -Bsymbolic
endif
......
......@@ -130,10 +130,6 @@ struct cpu_entry_area {
};
#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_ARRAY_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
/* Total size includes the readonly IDT mapping page as well: */
#define CPU_ENTRY_AREA_TOTAL_SIZE (CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
......
......@@ -28,9 +28,12 @@
#ifdef CONFIG_KASAN
void __init kasan_early_init(void);
void __init kasan_init(void);
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
#else
static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
int nid) { }
#endif
#endif
......
......@@ -11,6 +11,12 @@
#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
#ifdef CONFIG_X86_32
#define CPU_ENTRY_AREA_MAP_SIZE (CPU_ENTRY_AREA_PER_CPU + \
(CPU_ENTRY_AREA_SIZE * NR_CPUS) - \
CPU_ENTRY_AREA_BASE)
#else
#define CPU_ENTRY_AREA_MAP_SIZE P4D_SIZE
#endif
#endif /* _ASM_X86_PGTABLE_AREAS_H */
......@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
/* CPU entry erea is always used for CPU entry */
if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
CPU_ENTRY_AREA_TOTAL_SIZE))
CPU_ENTRY_AREA_MAP_SIZE))
return true;
/*
......
......@@ -5,26 +5,69 @@
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif
#ifdef CONFIG_X86_32
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
static __always_inline unsigned int cea_offset(unsigned int cpu)
{
return per_cpu(_cea_offset, cpu);
}
static __init void init_cea_offsets(void)
{
unsigned int max_cea;
unsigned int i, j;
max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
/* O(sodding terrible) */
for_each_possible_cpu(i) {
unsigned int cea;
again:
/*
* Use get_random_u32() directly instead of prandom_u32_max() to
* avoid the case where no seed can be generated when
* CONFIG_RANDOMIZE_BASE=n.
*/
cea = (u32)(((u64) get_random_u32() * max_cea) >> 32);
for_each_possible_cpu(j) {
if (cea_offset(j) == cea)
goto again;
if (i == j)
break;
}
per_cpu(_cea_offset, i) = cea;
}
}
#else /* !X86_64 */
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
static __always_inline unsigned int cea_offset(unsigned int cpu)
{
return cpu;
}
static inline void init_cea_offsets(void) { }
#endif
/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
return (struct cpu_entry_area *) va;
......@@ -152,6 +195,9 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
pgprot_t tss_prot = PAGE_KERNEL;
#endif
kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
early_cpu_to_node(cpu));
cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
cea_map_percpu_pages(&cea->entry_stack_page,
......@@ -205,7 +251,6 @@ static __init void setup_cpu_entry_area_ptes(void)
/* The +1 is for the readonly IDT: */
BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
start = CPU_ENTRY_AREA_BASE;
......@@ -221,6 +266,8 @@ void __init setup_cpu_entry_areas(void)
{
unsigned int cpu;
init_cea_offsets();
setup_cpu_entry_area_ptes();
for_each_possible_cpu(cpu)
......
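For reference, the randomized offset picked in init_cea_offsets() above reduces a uniform 32-bit random value into the range [0, max_cea) with a 64-bit multiply followed by a 32-bit shift, the same reduction prandom_u32_max() performs internally; only the entropy source changes. A minimal user-space sketch of that reduction, where rand32() and the bound value are hypothetical stand-ins for get_random_u32() and max_cea:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* rand32() is a placeholder entropy source standing in for the kernel's
 * get_random_u32(); it is not cryptographically meaningful. */
static uint32_t rand32(void)
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

/* Reduce a uniform 32-bit value into [0, bound) with a 64-bit multiply
 * and shift, mirroring the expression used in init_cea_offsets(). */
static uint32_t bounded_random(uint32_t bound)
{
	return (uint32_t)(((uint64_t)rand32() * bound) >> 32);
}

int main(void)
{
	uint32_t max_cea = 1000;	/* hypothetical bound */
	int i;

	for (i = 0; i < 5; i++)
		printf("cea offset candidate: %u\n", bounded_random(max_cea));
	return 0;
}

The reduction itself does not guarantee uniqueness across CPUs; that is what the goto-again retry loop in the patch is for.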
......@@ -318,10 +318,33 @@ void __init kasan_early_init(void)
kasan_map_early_shadow(init_top_pgt);
}
static unsigned long kasan_mem_to_shadow_align_down(unsigned long va)
{
unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);
return round_down(shadow, PAGE_SIZE);
}
static unsigned long kasan_mem_to_shadow_align_up(unsigned long va)
{
unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);
return round_up(shadow, PAGE_SIZE);
}
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
{
unsigned long shadow_start, shadow_end;
shadow_start = kasan_mem_to_shadow_align_down((unsigned long)va);
shadow_end = kasan_mem_to_shadow_align_up((unsigned long)va + size);
kasan_populate_shadow(shadow_start, shadow_end, nid);
}
void __init kasan_init(void)
{
unsigned long shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end;
int i;
void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
......@@ -362,16 +385,10 @@ void __init kasan_init(void)
map_range(&pfn_mapped[i]);
}
shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
shadow_cpu_entry_begin = (void *)round_down(
(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
CPU_ENTRY_AREA_MAP_SIZE);
shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
shadow_cpu_entry_end = (void *)round_up(
(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE);
shadow_cea_per_cpu_begin = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_PER_CPU);
shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE +
CPU_ENTRY_AREA_MAP_SIZE);
kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
......@@ -393,12 +410,18 @@ void __init kasan_init(void)
kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)VMALLOC_END + 1),
shadow_cpu_entry_begin);
(void *)shadow_cea_begin);
kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
(unsigned long)shadow_cpu_entry_end, 0);
/*
* Populate the shadow for the shared portion of the CPU entry area.
* Shadows for the per-CPU areas are mapped on-demand, as each CPU's
* area is randomly placed somewhere in the 512GiB range and mapping
* the entire 512GiB range is prohibitively expensive.
*/
kasan_populate_shadow(shadow_cea_begin,
shadow_cea_per_cpu_begin, 0);
kasan_populate_early_shadow(shadow_cpu_entry_end,
kasan_populate_early_shadow((void *)shadow_cea_end,
kasan_mem_to_shadow((void *)__START_KERNEL_map));
kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
......
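The kasan_populate_shadow_for_vaddr() helper introduced above relies on the usual generic-KASAN address arithmetic: each shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT bytes of memory, and the resulting shadow range is widened to whole pages before population. A self-contained sketch of that computation, using an illustrative shadow offset and made-up addresses rather than the kernel's actual layout:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3		/* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL /* illustrative value only */
#define PAGE_SIZE 4096UL

/* Same mapping as the kernel's kasan_mem_to_shadow(). */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	/* Hypothetical per-CPU entry area placed somewhere in the CEA region. */
	unsigned long va = 0xfffffe0000001000UL;
	unsigned long size = 0x3b000UL;

	/* Round down/up to whole shadow pages, as the new helpers do. */
	unsigned long shadow_start = mem_to_shadow(va) & ~(PAGE_SIZE - 1);
	unsigned long shadow_end = (mem_to_shadow(va + size) + PAGE_SIZE - 1)
				   & ~(PAGE_SIZE - 1);

	printf("shadow range: [%#lx, %#lx)\n", shadow_start, shadow_end);
	return 0;
}

Rounding the start down and the end up guarantees the populated shadow fully covers the requested range even when it does not fall on page boundaries in shadow space.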
......@@ -953,6 +953,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
if (dev->has_compose_cap) {
v4l2_rect_set_min_size(compose, &min_rect);
v4l2_rect_set_max_size(compose, &max_rect);
v4l2_rect_map_inside(compose, &fmt);
}
dev->fmt_cap_rect = fmt;
tpg_s_buf_height(&dev->tpg, fmt.height);
......
......@@ -28,7 +28,7 @@
#ifdef DRV_VERSION
#define CDEV_VERSION MICRO_TO_STR(DRV_VERSION)
#else
#define CDEV_VERSION "0.3.4"
#define CDEV_VERSION "0.3.5"
#endif
#define CDEV_DEFAULT_NUM 4
......
......@@ -419,8 +419,10 @@ EXPORT_SYMBOL(bma_intf_int_to_bmc);
int bma_intf_is_link_ok(void)
{
return (g_bma_dev->edma_host.statistics.remote_status ==
REGISTERED) ? 1 : 0;
if ((&g_bma_dev->edma_host != NULL) &&
(g_bma_dev->edma_host.statistics.remote_status == REGISTERED))
return 1;
return 0;
}
EXPORT_SYMBOL(bma_intf_is_link_ok);
......@@ -460,14 +462,10 @@ int bma_cdev_recv_msg(void *handle, char __user *data, size_t count)
}
EXPORT_SYMBOL_GPL(bma_cdev_recv_msg);
int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len)
static int check_cdev_add_msg_param(struct bma_priv_data_s *handle,
const char __user *msg, size_t msg_len)
{
struct bma_priv_data_s *priv = NULL;
struct edma_msg_hdr_s *hdr = NULL;
unsigned long flags = 0;
int total_len = 0;
int ret = 0;
struct edma_host_s *phost = &g_bma_dev->edma_host;
if (!handle || !msg || msg_len == 0) {
BMA_LOG(DLOG_DEBUG, "input NULL point!\n");
......@@ -479,54 +477,80 @@ int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len)
return -EINVAL;
}
priv = (struct bma_priv_data_s *)handle;
priv = handle;
if (priv->user.type >= TYPE_MAX) {
BMA_LOG(DLOG_DEBUG, "error type = %d\n", priv->user.type);
return -EFAULT;
}
total_len = SIZE_OF_MSG_HDR + msg_len;
return 0;
}
static void edma_msg_hdr_init(struct edma_msg_hdr_s *hdr,
struct bma_priv_data_s *private_data,
char *msg_buf, size_t msg_len)
{
hdr->type = private_data->user.type;
hdr->sub_type = private_data->user.sub_type;
hdr->user_id = private_data->user.user_id;
hdr->datalen = msg_len;
BMA_LOG(DLOG_DEBUG, "msg_len is %zu\n", msg_len);
memcpy(hdr->data, msg_buf, msg_len);
}
int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len)
{
struct bma_priv_data_s *priv = NULL;
struct edma_msg_hdr_s *hdr = NULL;
unsigned long flags = 0;
unsigned int total_len = 0;
int ret = 0;
struct edma_host_s *phost = &g_bma_dev->edma_host;
char *msg_buf = NULL;
ret = check_cdev_add_msg_param(handle, msg, msg_len);
if (ret != 0)
return ret;
priv = (struct bma_priv_data_s *)handle;
total_len = (unsigned int)(SIZE_OF_MSG_HDR + msg_len);
if (phost->msg_send_write + total_len > HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR) {
BMA_LOG(DLOG_DEBUG, "msg lost,msg_send_write: %u,msg_len:%u,max_len: %d\n",
phost->msg_send_write, total_len, HOST_MAX_SEND_MBX_LEN);
return -ENOSPC;
}
msg_buf = (char *)kmalloc(msg_len, GFP_KERNEL);
if (!msg_buf) {
BMA_LOG(DLOG_ERROR, "malloc msg_buf failed\n");
return -ENOMEM;
}
if (copy_from_user(msg_buf, msg, msg_len)) {
BMA_LOG(DLOG_ERROR, "copy_from_user error\n");
kfree(msg_buf);
return -EFAULT;
}
spin_lock_irqsave(&phost->send_msg_lock, flags);
if (phost->msg_send_write + total_len <=
HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR) {
hdr = (struct edma_msg_hdr_s *)(phost->msg_send_buf +
phost->msg_send_write);
hdr->type = priv->user.type;
hdr->sub_type = priv->user.sub_type;
hdr->user_id = priv->user.user_id;
hdr->datalen = msg_len;
BMA_LOG(DLOG_DEBUG, "msg_len is %zu\n", msg_len);
if (copy_from_user(hdr->data, msg, msg_len)) {
BMA_LOG(DLOG_ERROR, "copy_from_user error\n");
ret = -EFAULT;
goto end;
}
hdr = (struct edma_msg_hdr_s *)(phost->msg_send_buf + phost->msg_send_write);
edma_msg_hdr_init(hdr, priv, msg_buf, msg_len);
phost->msg_send_write += total_len;
phost->statistics.send_bytes += total_len;
phost->statistics.send_pkgs++;
phost->msg_send_write += total_len;
phost->statistics.send_bytes += total_len;
phost->statistics.send_pkgs++;
#ifdef EDMA_TIMER
(void)mod_timer(&phost->timer, jiffies_64);
(void)mod_timer(&phost->timer, jiffies_64);
#endif
BMA_LOG(DLOG_DEBUG, "msg_send_write = %d\n",
phost->msg_send_write);
ret = msg_len;
goto end;
} else {
BMA_LOG(DLOG_DEBUG,
"msg lost,msg_send_write: %d,msg_len:%d,max_len: %d\n",
phost->msg_send_write, total_len,
HOST_MAX_SEND_MBX_LEN);
ret = -ENOSPC;
goto end;
}
BMA_LOG(DLOG_DEBUG, "msg_send_write = %d\n", phost->msg_send_write);
end:
ret = msg_len;
spin_unlock_irqrestore(&g_bma_dev->edma_host.send_msg_lock, flags);
kfree(msg_buf);
return ret;
}
EXPORT_SYMBOL_GPL(bma_cdev_add_msg);
......
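The reworked bma_cdev_add_msg() above also changes the locking pattern: the user data is now copied into a temporary kernel buffer before the send-queue spinlock is taken, so copy_from_user(), which can fault and sleep, is never called with the lock held and interrupts disabled. A condensed sketch of that bounce-buffer pattern, with hypothetical names (my_ring, queue_user_msg) rather than the driver's own types:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct my_ring {
	spinlock_t lock;
	char *data;
	unsigned int write;
	unsigned int size;
};

/* Stage user data in a bounce buffer first; only memcpy() under the lock. */
static int queue_user_msg(struct my_ring *ring, const char __user *msg,
			  size_t len)
{
	unsigned long flags;
	char *buf;
	int ret = (int)len;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, msg, len)) {
		kfree(buf);
		return -EFAULT;
	}

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->write + len > ring->size) {
		ret = -ENOSPC;
	} else {
		memcpy(ring->data + ring->write, buf, len);
		ring->write += len;
	}
	spin_unlock_irqrestore(&ring->lock, flags);

	kfree(buf);
	return ret;
}

The trade-off is one extra allocation and copy per message, which is generally acceptable on a slow management path such as this one.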
......@@ -71,7 +71,7 @@ struct bma_pci_dev_s {
#ifdef DRV_VERSION
#define BMA_VERSION MICRO_TO_STR(DRV_VERSION)
#else
#define BMA_VERSION "0.3.4"
#define BMA_VERSION "0.3.5"
#endif
#ifdef CONFIG_ARM64
......
......@@ -23,7 +23,7 @@
#ifdef DRV_VERSION
#define KBOX_VERSION MICRO_TO_STR(DRV_VERSION)
#else
#define KBOX_VERSION "0.3.4"
#define KBOX_VERSION "0.3.5"
#endif
#define UNUSED(x) (x = x)
......
......@@ -135,7 +135,7 @@ int kbox_panic_init(void)
int ret = KBOX_TRUE;
g_panic_info_buf = kmalloc(SLOT_LENGTH, GFP_KERNEL);
if (IS_ERR(g_panic_info_buf) || !g_panic_info_buf) {
if (!g_panic_info_buf) {
KBOX_MSG("kmalloc g_panic_info_buf fail!\n");
ret = -ENOMEM;
goto fail;
......@@ -144,7 +144,7 @@ int kbox_panic_init(void)
memset(g_panic_info_buf, 0, SLOT_LENGTH);
g_panic_info_buf_tmp = kmalloc(SLOT_LENGTH, GFP_KERNEL);
if (IS_ERR(g_panic_info_buf_tmp) || !g_panic_info_buf_tmp) {
if (!g_panic_info_buf_tmp) {
KBOX_MSG("kmalloc g_panic_info_buf_tmp fail!\n");
ret = -ENOMEM;
goto fail;
......
......@@ -304,7 +304,7 @@ int kbox_printk_init(int kbox_proc_exist)
g_printk_info_buf = kmalloc(SECTION_PRINTK_LEN,
GFP_KERNEL);
if (IS_ERR(g_printk_info_buf) || !g_printk_info_buf) {
if (!g_printk_info_buf) {
KBOX_MSG("kmalloc g_printk_info_buf fail!\n");
ret = -ENOMEM;
goto fail;
......@@ -314,7 +314,7 @@ int kbox_printk_init(int kbox_proc_exist)
g_printk_info_buf_tmp = kmalloc(SECTION_PRINTK_LEN,
GFP_KERNEL);
if (IS_ERR(g_printk_info_buf_tmp) || !g_printk_info_buf_tmp) {
if (!g_printk_info_buf_tmp) {
KBOX_MSG("kmalloc g_printk_info_buf_tmp fail!\n");
ret = -ENOMEM;
goto fail;
......
......@@ -432,7 +432,7 @@ int kbox_write_op(long long offset, unsigned int count,
return KBOX_FALSE;
temp_buf_char = kmalloc(TEMP_BUF_DATA_SIZE, GFP_KERNEL);
if (!temp_buf_char || IS_ERR(temp_buf_char)) {
if (!temp_buf_char) {
KBOX_MSG("kmalloc temp_buf_char fail!\n");
up(&user_sem);
return -ENOMEM;
......
......@@ -638,6 +638,7 @@ s32 veth_refill_rxskb(struct bspveth_rxtx_q *prx_queue, int queue)
next_to_fill = (next_to_fill + 1) & BSPVETH_POINT_MASK;
}
mb();/* memory barriers. */
prx_queue->next_to_fill = next_to_fill;
tail = prx_queue->tail;
......@@ -672,6 +673,7 @@ s32 bspveth_setup_rx_skb(struct bspveth_device *pvethdev,
if (!idx) /* Can't alloc even one packets */
return -EFAULT;
mb();/* memory barriers. */
prx_queue->next_to_fill = idx;
VETH_LOG(DLOG_DEBUG, "prx_queue->next_to_fill=%d\n",
......@@ -886,8 +888,6 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
err = bspveth_setup_rx_resources(pvethdev,
pvethdev->prx_queue[qid]);
if (err) {
kfree(pvethdev->prx_queue[qid]);
pvethdev->prx_queue[qid] = NULL;
VETH_LOG(DLOG_ERROR,
"Allocation for Rx Queue %u failed\n", qid);
......@@ -1328,6 +1328,7 @@ s32 veth_send_one_pkt(struct sk_buff *skb, int queue)
pbd_v->off = off;
pbd_v->len = skb->len;
mb();/* memory barriers. */
head = (head + 1) & BSPVETH_POINT_MASK;
ptx_queue->head = head;
......@@ -1424,6 +1425,7 @@ s32 veth_free_txskb(struct bspveth_rxtx_q *ptx_queue, int queue)
next_to_free = (next_to_free + 1) & BSPVETH_POINT_MASK;
}
mb(); /* memory barriers. */
ptx_queue->next_to_free = next_to_free;
tail = ptx_queue->tail;
......@@ -1522,6 +1524,7 @@ s32 veth_recv_pkt(struct bspveth_rxtx_q *prx_queue, int queue)
}
}
mb();/* memory barriers. */
prx_queue->tail = tail;
head = prx_queue->head;
......
......@@ -31,7 +31,7 @@ extern "C" {
#ifdef DRV_VERSION
#define VETH_VERSION MICRO_TO_STR(DRV_VERSION)
#else
#define VETH_VERSION "0.3.4"
#define VETH_VERSION "0.3.5"
#endif
#define MODULE_NAME "veth"
......
......@@ -1610,6 +1610,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
int len;
unsigned char *cp;
skb->dev = ppp->dev;
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
/* check if we should pass this packet */
......
......@@ -3533,7 +3533,7 @@ uncached_fill_pages(struct TCP_Server_Info *server,
rdata->got_bytes += result;
}
return rdata->got_bytes > 0 && result != -ECONNABORTED ?
return result != -ECONNABORTED && rdata->got_bytes > 0 ?
rdata->got_bytes : result;
}
......@@ -4287,7 +4287,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
rdata->got_bytes += result;
}
return rdata->got_bytes > 0 && result != -ECONNABORTED ?
return result != -ECONNABORTED && rdata->got_bytes > 0 ?
rdata->got_bytes : result;
}
......
......@@ -1860,10 +1860,13 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
static inline void mem_cgroup_track_foreign_dirty(struct page *page,
struct bdi_writeback *wb)
{
struct mem_cgroup *memcg;
if (mem_cgroup_disabled())
return;
if (unlikely(&page_memcg(page)->css != wb->memcg_css))
memcg = page_memcg(page);
if (unlikely(memcg && &memcg->css != wb->memcg_css))
mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}
......
......@@ -2572,11 +2572,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
if (!page)
goto out_uncharge_cgroup;
spin_lock_irq(&hugetlb_lock);
if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
SetHPageRestoreReserve(page);
h->resv_huge_pages--;
}
spin_lock_irq(&hugetlb_lock);
list_add(&page->lru, &h->hugepage_activelist);
/* Fall through */
}
......
......@@ -3609,6 +3609,38 @@ int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
}
#endif
void br_multicast_set_startup_query_intvl(struct net_bridge *br,
unsigned long val)
{
unsigned long intvl_jiffies = clock_t_to_jiffies(val);
if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
br_info(br,
"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
}
br->multicast_startup_query_interval = intvl_jiffies;
}
void br_multicast_set_query_intvl(struct net_bridge *br,
unsigned long val)
{
unsigned long intvl_jiffies = clock_t_to_jiffies(val);
if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
br_info(br,
"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
}
br->multicast_query_interval = intvl_jiffies;
}
/**
* br_multicast_list_adjacent - Returns snooped multicast addresses
* @dev: The bridge port adjacent to which to retrieve addresses
......
......@@ -1286,7 +1286,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
br->multicast_query_interval = clock_t_to_jiffies(val);
br_multicast_set_query_intvl(br, val);
}
if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
......@@ -1298,7 +1298,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
br->multicast_startup_query_interval = clock_t_to_jiffies(val);
br_multicast_set_startup_query_intvl(br, val);
}
if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
......
......@@ -28,6 +28,8 @@
#define BR_MAX_PORTS (1<<BR_PORT_BITS)
#define BR_MULTICAST_DEFAULT_HASH_MAX 4096
#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
#define BR_VERSION "2.3"
......@@ -1568,4 +1570,8 @@ void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br,
void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
void br_multicast_set_startup_query_intvl(struct net_bridge *br,
unsigned long val);
void br_multicast_set_query_intvl(struct net_bridge *br,
unsigned long val);
#endif
......@@ -594,7 +594,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
static int set_query_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_query_interval = clock_t_to_jiffies(val);
br_multicast_set_query_intvl(br, val);
return 0;
}
......@@ -640,7 +640,7 @@ static ssize_t multicast_startup_query_interval_show(
static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
{
br->multicast_startup_query_interval = clock_t_to_jiffies(val);
br_multicast_set_startup_query_intvl(br, val);
return 0;
}
......
......@@ -2127,6 +2127,10 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
if (mlen) {
__skb_pull(skb, mlen);
if (unlikely(!skb->len)) {
kfree_skb(skb);
return -ERANGE;
}
/* At ingress, the mac header has already been pulled once.
* At egress, skb_pospull_rcsum has to be done in case that
......
......@@ -358,11 +358,13 @@ static void sock_map_free(struct bpf_map *map)
sk = xchg(psk, NULL);
if (sk) {
sock_hold(sk);
lock_sock(sk);
rcu_read_lock();
sock_map_unref(sk, psk);
rcu_read_unlock();
release_sock(sk);
sock_put(sk);
}
}
......