diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 8300530757aea204e4102736b757252d4717c701..d6817de42f2486ee06be4881eff8f9edeec781e4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -49,7 +49,8 @@ KBUILD_LDFLAGS	+= -EL
 endif
 
 ifeq ($(CONFIG_RELOCATABLE),y)
-KBUILD_CFLAGS	+= -fpic -include $(srctree)/include/linux/hidden.h
+KBUILD_CFLAGS	+= -include $(srctree)/include/linux/hidden.h
+CFLAGS_KERNEL	+= -fpic
 CFLAGS_MODULE	+= -fno-pic
 LDFLAGS_vmlinux	+= -pie -shared -Bsymbolic
 endif
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index dd5ea1bdf04c5fae8c4fc9af3aa592533055bca5..e2c04a5015b029912d1e1b1f775e1e37adb40ee9 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -130,10 +130,6 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
-
-/* Total size includes the readonly IDT mapping page as well: */
-#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 13e70da38bedaaba39ad1624d78d57a0de947a35..de75306b932efd25d8ed4183f4b475fffe4d3bd1 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -28,9 +28,12 @@
 #ifdef CONFIG_KASAN
 void __init kasan_early_init(void);
 void __init kasan_init(void);
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid);
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_init(void) { }
+static inline void kasan_populate_shadow_for_vaddr(void *va, size_t size,
+						   int nid) { }
 #endif
 
 #endif
diff --git a/arch/x86/include/asm/pgtable_areas.h b/arch/x86/include/asm/pgtable_areas.h
index d34cce1b995cf133b81058c4f53808880f0efe0a..4f056fb88174bb20036a53e809a9cb4759bd4dc3 100644
--- a/arch/x86/include/asm/pgtable_areas.h
+++ b/arch/x86/include/asm/pgtable_areas.h
@@ -11,6 +11,12 @@
 
 #define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
 
-#define CPU_ENTRY_AREA_MAP_SIZE	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+#ifdef CONFIG_X86_32
+#define CPU_ENTRY_AREA_MAP_SIZE	(CPU_ENTRY_AREA_PER_CPU +		\
+				 (CPU_ENTRY_AREA_SIZE * NR_CPUS) -	\
+				 CPU_ENTRY_AREA_BASE)
+#else
+#define CPU_ENTRY_AREA_MAP_SIZE	P4D_SIZE
+#endif
 
 #endif /* _ASM_X86_PGTABLE_AREAS_H */
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 668a4a6533d923c7eddef457f3f2ebac0040a00b..bbb0f737aab1904e82a54f08e4ad41b90d435ef5 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
 
 	/* CPU entry erea is always used for CPU entry */
 	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
-			CPU_ENTRY_AREA_TOTAL_SIZE))
+			CPU_ENTRY_AREA_MAP_SIZE))
 		return true;
 
 	/*
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 6c2f1b76a0b61028548e33f215b1eb548ad8bf01..88e2cc4d4e7539cb6e9299116ed69eec520bea39 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -5,26 +5,69 @@
 #include <linux/percpu.h>
 #include <linux/kallsyms.h>
 #include <linux/kcore.h>
+#include <linux/random.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/fixmap.h>
 #include <asm/desc.h>
+#include <asm/kasan.h>
 
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks,
	exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
-#endif
 
-#ifdef CONFIG_X86_32
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+	unsigned int max_cea;
+	unsigned int i, j;
+
+	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+	/* O(sodding terrible) */
+	for_each_possible_cpu(i) {
+		unsigned int cea;
+
+again:
+		/*
+		 * Use get_random_u32() directly instead of prandom_u32_max(),
+		 * whose seed may not be available when CONFIG_RANDOMIZE_BASE=n.
+		 */
+		cea = (u32)(((u64) get_random_u32() * max_cea) >> 32);
+
+		for_each_possible_cpu(j) {
+			if (cea_offset(j) == cea)
+				goto again;
+
+			if (i == j)
+				break;
+		}
+
+		per_cpu(_cea_offset, i) = cea;
+	}
+}
+#else /* !X86_64 */
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif
 
 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 
 	return (struct cpu_entry_area *) va;
@@ -152,6 +195,9 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
 	pgprot_t tss_prot = PAGE_KERNEL;
 #endif
 
+	kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
+					early_cpu_to_node(cpu));
+
 	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);
 
 	cea_map_percpu_pages(&cea->entry_stack_page,
@@ -205,7 +251,6 @@ static __init void setup_cpu_entry_area_ptes(void)
 
 	/* The +1 is for the readonly IDT: */
 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
 
 	start = CPU_ENTRY_AREA_BASE;
@@ -221,6 +266,8 @@ void __init setup_cpu_entry_areas(void)
 {
 	unsigned int cpu;
 
+	init_cea_offsets();
+
 	setup_cpu_entry_area_ptes();
 
 	for_each_possible_cpu(cpu)
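The interesting piece of the cpu_entry_area changes above is init_cea_offsets(): instead of placing CPU N's entry area at the fixed offset N * CPU_ENTRY_AREA_SIZE, each CPU draws a random slot out of max_cea candidates, and the inner for_each_possible_cpu(j) loop simply retries on collision, which is what the "O(sodding terrible)" comment owns up to. The multiply-shift expression maps a uniform 32-bit random value into [0, max_cea) with no divide instruction and far less bias than a plain modulo. A minimal userspace sketch of just that trick, with rand() standing in for get_random_u32() (rand() is not uniform over the full 32 bits, so this is illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Map a 32-bit random value into [0, bound) via multiply-shift. */
static uint32_t bounded_random(uint32_t bound)
{
	uint32_t r = (uint32_t)rand();	/* stand-in for get_random_u32() */

	return (uint32_t)(((uint64_t)r * bound) >> 32);
}

int main(void)
{
	uint32_t max_cea = 1000;	/* pretend number of free slots */

	srand((unsigned int)time(NULL));
	for (int i = 0; i < 5; i++)
		printf("slot %u\n", bounded_random(max_cea));
	return 0;
}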
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 1a50434c8a4dab44d6d23742d3bcd2dbbc1e2d99..b4b187960dd0ff4e29cd910559c5c460b6c681fc 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -318,10 +318,33 @@ void __init kasan_early_init(void)
 	kasan_map_early_shadow(init_top_pgt);
 }
 
+static unsigned long kasan_mem_to_shadow_align_down(unsigned long va)
+{
+	unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);
+
+	return round_down(shadow, PAGE_SIZE);
+}
+
+static unsigned long kasan_mem_to_shadow_align_up(unsigned long va)
+{
+	unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);
+
+	return round_up(shadow, PAGE_SIZE);
+}
+
+void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
+{
+	unsigned long shadow_start, shadow_end;
+
+	shadow_start = kasan_mem_to_shadow_align_down((unsigned long)va);
+	shadow_end = kasan_mem_to_shadow_align_up((unsigned long)va + size);
+	kasan_populate_shadow(shadow_start, shadow_end, nid);
+}
+
 void __init kasan_init(void)
 {
+	unsigned long shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end;
 	int i;
-	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;
 
 	memcpy(early_top_pgt, init_top_pgt,
	       sizeof(early_top_pgt));
@@ -362,16 +385,10 @@ void __init kasan_init(void)
 		map_range(&pfn_mapped[i]);
 	}
 
-	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
-	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
-	shadow_cpu_entry_begin = (void *)round_down(
-			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
-
-	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
-					CPU_ENTRY_AREA_MAP_SIZE);
-	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
-	shadow_cpu_entry_end = (void *)round_up(
-			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
+	shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE);
+	shadow_cea_per_cpu_begin = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_PER_CPU);
+	shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE +
+						      CPU_ENTRY_AREA_MAP_SIZE);
 
 	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
@@ -393,12 +410,18 @@ void __init kasan_init(void)
 
 	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
-		shadow_cpu_entry_begin);
+		(void *)shadow_cea_begin);
 
-	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
-			      (unsigned long)shadow_cpu_entry_end, 0);
+	/*
+	 * Populate the shadow for the shared portion of the CPU entry area.
+	 * Shadows for the per-CPU areas are mapped on-demand, as each CPU's
+	 * area is randomly placed somewhere in the 512GiB range and mapping
+	 * the entire 512GiB range is prohibitively expensive.
+	 */
+	kasan_populate_shadow(shadow_cea_begin,
+			      shadow_cea_per_cpu_begin, 0);
 
-	kasan_populate_early_shadow(shadow_cpu_entry_end,
+	kasan_populate_early_shadow((void *)shadow_cea_end,
 			kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
 	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
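For reference, the helpers added above wrap a fixed address transformation: generic KASAN dedicates one shadow byte to every 8 bytes of address space, so the shadow of an address is (addr >> 3) + KASAN_SHADOW_OFFSET, and because kasan_populate_shadow() works on whole pages the start is rounded down and the end rounded up. A standalone sketch of the arithmetic, using the usual scale shift of 3 and a shadow offset that is assumed here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3		  /* 8 bytes per shadow byte */
#define KASAN_SHADOW_OFFSET 0xdffffc0000000000UL  /* assumed for this sketch */
#define PAGE_SIZE 4096UL

static uint64_t kasan_mem_to_shadow(uint64_t va)
{
	return (va >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

static uint64_t shadow_align_down(uint64_t va)
{
	return kasan_mem_to_shadow(va) & ~(PAGE_SIZE - 1);
}

static uint64_t shadow_align_up(uint64_t va)
{
	return (kasan_mem_to_shadow(va) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	uint64_t va = 0xfffffe0000000000UL;	/* CPU entry area base on x86-64 */
	uint64_t size = 0x39000;		/* roughly one cpu_entry_area */

	printf("shadow: [%#llx, %#llx)\n",
	       (unsigned long long)shadow_align_down(va),
	       (unsigned long long)shadow_align_up(va + size));
	return 0;
}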
diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
index eadf28ab1e393d77da83684a637fd460c93adce2..eeb0aeb62f791860a249a69b6cd4228bcec72f12 100644
--- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c
+++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c
@@ -953,6 +953,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
 		if (dev->has_compose_cap) {
 			v4l2_rect_set_min_size(compose, &min_rect);
 			v4l2_rect_set_max_size(compose, &max_rect);
+			v4l2_rect_map_inside(compose, &fmt);
 		}
 		dev->fmt_cap_rect = fmt;
 		tpg_s_buf_height(&dev->tpg, fmt.height);
diff --git a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
index 0348a83005d61525e22a15e02ff24bcca55c6c77..9468a5a0c768d8adafcc3bed3286f82f6b5169c8 100644
--- a/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
+++ b/drivers/net/ethernet/huawei/bma/cdev_drv/bma_cdev.c
@@ -28,7 +28,7 @@
 #ifdef DRV_VERSION
 #define CDEV_VERSION MICRO_TO_STR(DRV_VERSION)
 #else
-#define CDEV_VERSION "0.3.4"
+#define CDEV_VERSION "0.3.5"
 #endif
 
 #define CDEV_DEFAULT_NUM 4
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
index acf6bbfc50ff6827168bb5f6718f3aed92eed7fa..149c393e8b0d4d4bdb8c5307fa1973551751a8f3 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_devintf.c
@@ -419,8 +419,10 @@ EXPORT_SYMBOL(bma_intf_int_to_bmc);
 
 int bma_intf_is_link_ok(void)
 {
-	return (g_bma_dev->edma_host.statistics.remote_status == REGISTERED) ?
-	       1 : 0;
+	if (g_bma_dev &&
+	    g_bma_dev->edma_host.statistics.remote_status == REGISTERED)
+		return 1;
+	return 0;
 }
 EXPORT_SYMBOL(bma_intf_is_link_ok);
 
@@ -460,14 +462,10 @@ int bma_cdev_recv_msg(void *handle, char __user *data, size_t count)
 }
 EXPORT_SYMBOL_GPL(bma_cdev_recv_msg);
 
-int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len)
+static int check_cdev_add_msg_param(struct bma_priv_data_s *handle,
+				    const char __user *msg, size_t msg_len)
 {
 	struct bma_priv_data_s *priv = NULL;
-	struct edma_msg_hdr_s *hdr = NULL;
-	unsigned long flags = 0;
-	int total_len = 0;
-	int ret = 0;
-	struct edma_host_s *phost = &g_bma_dev->edma_host;
 
 	if (!handle || !msg || msg_len == 0) {
 		BMA_LOG(DLOG_DEBUG, "input NULL point!\n");
@@ -479,54 +477,80 @@ int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len)
 		return -EINVAL;
 	}
 
-	priv = (struct bma_priv_data_s *)handle;
+	priv = handle;
 
 	if (priv->user.type >= TYPE_MAX) {
 		BMA_LOG(DLOG_DEBUG, "error type = %d\n", priv->user.type);
 		return -EFAULT;
 	}
-	total_len = SIZE_OF_MSG_HDR + msg_len;
+
+	return 0;
+}
+
+static void edma_msg_hdr_init(struct edma_msg_hdr_s *hdr,
+			      struct bma_priv_data_s *private_data,
+			      char *msg_buf, size_t msg_len)
+{
+	hdr->type = private_data->user.type;
+	hdr->sub_type = private_data->user.sub_type;
+	hdr->user_id = private_data->user.user_id;
+	hdr->datalen = msg_len;
+	BMA_LOG(DLOG_DEBUG, "msg_len is %zu\n", msg_len);
+
+	memcpy(hdr->data, msg_buf, msg_len);
+}
+
+int bma_cdev_add_msg(void *handle, const char __user *msg, size_t msg_len)
+{
+	struct bma_priv_data_s *priv = NULL;
+	struct edma_msg_hdr_s *hdr = NULL;
+	unsigned long flags = 0;
+	unsigned int total_len = 0;
+	int ret = 0;
+	struct edma_host_s *phost = &g_bma_dev->edma_host;
+	char *msg_buf = NULL;
+
+	ret = check_cdev_add_msg_param(handle, msg, msg_len);
+	if (ret != 0)
+		return ret;
+
+	priv = (struct bma_priv_data_s *)handle;
+
+	total_len = (unsigned int)(SIZE_OF_MSG_HDR + msg_len);
+	if (phost->msg_send_write + total_len > HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR) {
+		BMA_LOG(DLOG_DEBUG, "msg lost,msg_send_write: %u,msg_len:%u,max_len: %d\n",
+			phost->msg_send_write, total_len, HOST_MAX_SEND_MBX_LEN);
+		return -ENOSPC;
+	}
+
+	msg_buf = (char *)kmalloc(msg_len, GFP_KERNEL);
+	if (!msg_buf) {
+		BMA_LOG(DLOG_ERROR, "malloc msg_buf failed\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(msg_buf, msg, msg_len)) {
+		BMA_LOG(DLOG_ERROR, "copy_from_user error\n");
+		kfree(msg_buf);
+		return -EFAULT;
+	}
 
 	spin_lock_irqsave(&phost->send_msg_lock, flags);
 
-	if (phost->msg_send_write + total_len <=
-	    HOST_MAX_SEND_MBX_LEN - SIZE_OF_MBX_HDR) {
-		hdr = (struct edma_msg_hdr_s *)(phost->msg_send_buf +
-						phost->msg_send_write);
-		hdr->type = priv->user.type;
-		hdr->sub_type = priv->user.sub_type;
-		hdr->user_id = priv->user.user_id;
-		hdr->datalen = msg_len;
-		BMA_LOG(DLOG_DEBUG, "msg_len is %zu\n", msg_len);
-
-		if (copy_from_user(hdr->data, msg, msg_len)) {
-			BMA_LOG(DLOG_ERROR, "copy_from_user error\n");
-			ret = -EFAULT;
-			goto end;
-		}
+	hdr = (struct edma_msg_hdr_s *)(phost->msg_send_buf + phost->msg_send_write);
+	edma_msg_hdr_init(hdr, priv, msg_buf, msg_len);
 
-		phost->msg_send_write += total_len;
-		phost->statistics.send_bytes += total_len;
-		phost->statistics.send_pkgs++;
+	phost->msg_send_write += total_len;
+	phost->statistics.send_bytes += total_len;
+	phost->statistics.send_pkgs++;
 #ifdef EDMA_TIMER
-		(void)mod_timer(&phost->timer, jiffies_64);
+	(void)mod_timer(&phost->timer, jiffies_64);
 #endif
-		BMA_LOG(DLOG_DEBUG, "msg_send_write = %d\n",
-			phost->msg_send_write);
-
-		ret = msg_len;
-		goto end;
-	} else {
-		BMA_LOG(DLOG_DEBUG,
-			"msg lost,msg_send_write: %d,msg_len:%d,max_len: %d\n",
-			phost->msg_send_write, total_len,
-			HOST_MAX_SEND_MBX_LEN);
-		ret = -ENOSPC;
-		goto end;
-	}
+	BMA_LOG(DLOG_DEBUG, "msg_send_write = %d\n", phost->msg_send_write);
 
-end:
+	ret = msg_len;
 	spin_unlock_irqrestore(&g_bma_dev->edma_host.send_msg_lock, flags);
+	kfree(msg_buf);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(bma_cdev_add_msg);
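The bma_cdev_add_msg() rework above is more than code motion: the old version called copy_from_user() while holding send_msg_lock with interrupts disabled, and copy_from_user() can fault and sleep. The new shape validates the arguments, copies into a kernel bounce buffer without the lock, and only then takes the lock for the memcpy() into the send ring. A condensed kernel-style sketch of the pattern (struct ring_host and its fields are hypothetical):

static int queue_user_msg(struct ring_host *host,
			  const char __user *msg, size_t len)
{
	unsigned long flags;
	char *buf;

	buf = kmalloc(len, GFP_KERNEL);		/* may sleep: do it unlocked */
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, msg, len)) {	/* may fault: still unlocked */
		kfree(buf);
		return -EFAULT;
	}

	spin_lock_irqsave(&host->lock, flags);
	memcpy(host->buf + host->write, buf, len);	/* cannot fault */
	host->write += len;
	spin_unlock_irqrestore(&host->lock, flags);

	kfree(buf);
	return len;
}

One nuance worth noting: the patch checks the ring's free space before taking the lock, which is only safe if bma_cdev_add_msg() is the sole writer of msg_send_write; with concurrent writers the check would have to move back under the lock.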
diff --git a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
index 92893b6bfd081f1e08aeb53c7e9fa4179b1ec549..639ed4e58a8b838d815f5f8ecd1e66e494e5d0bf 100644
--- a/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
+++ b/drivers/net/ethernet/huawei/bma/edma_drv/bma_pci.h
@@ -71,7 +71,7 @@ struct bma_pci_dev_s {
 #ifdef DRV_VERSION
 #define BMA_VERSION MICRO_TO_STR(DRV_VERSION)
 #else
-#define BMA_VERSION "0.3.4"
+#define BMA_VERSION "0.3.5"
 #endif
 
 #ifdef CONFIG_ARM64
diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
index ffadf3727734a29b0479a3a46afcdf8c449947e9..b027306e52c1ad454a1830521c924cfd5f9da491 100644
--- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
+++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_include.h
@@ -23,7 +23,7 @@
 #ifdef DRV_VERSION
 #define KBOX_VERSION MICRO_TO_STR(DRV_VERSION)
 #else
-#define KBOX_VERSION "0.3.4"
+#define KBOX_VERSION "0.3.5"
 #endif
 
 #define UNUSED(x) (x = x)
diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.c
index 0c17cd2bae497b4eb2667b99c8624917f4dd0a4f..2b142ae9bff6cada37863093a846ea603e3faf14 100644
--- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.c
+++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_panic.c
@@ -135,7 +135,7 @@ int kbox_panic_init(void)
 	int ret = KBOX_TRUE;
 
 	g_panic_info_buf = kmalloc(SLOT_LENGTH, GFP_KERNEL);
-	if (IS_ERR(g_panic_info_buf) || !g_panic_info_buf) {
+	if (!g_panic_info_buf) {
 		KBOX_MSG("kmalloc g_panic_info_buf fail!\n");
 		ret = -ENOMEM;
 		goto fail;
@@ -144,7 +144,7 @@ int kbox_panic_init(void)
 	memset(g_panic_info_buf, 0, SLOT_LENGTH);
 
 	g_panic_info_buf_tmp = kmalloc(SLOT_LENGTH, GFP_KERNEL);
-	if (IS_ERR(g_panic_info_buf_tmp) || !g_panic_info_buf_tmp) {
+	if (!g_panic_info_buf_tmp) {
 		KBOX_MSG("kmalloc g_panic_info_buf_tmp fail!\n");
 		ret = -ENOMEM;
 		goto fail;
diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.c
index 3b04ba2061082e4e327a3bd4f1cf94ca9196e431..630a1e16ea24b72f5fe9ded6a1d9260331ee90aa 100644
--- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.c
+++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_printk.c
@@ -304,7 +304,7 @@ int kbox_printk_init(int kbox_proc_exist)
 
 	g_printk_info_buf = kmalloc(SECTION_PRINTK_LEN,
 				    GFP_KERNEL);
-	if (IS_ERR(g_printk_info_buf) || !g_printk_info_buf) {
+	if (!g_printk_info_buf) {
 		KBOX_MSG("kmalloc g_printk_info_buf fail!\n");
 		ret = -ENOMEM;
 		goto fail;
@@ -314,7 +314,7 @@ int kbox_printk_init(int kbox_proc_exist)
 
 	g_printk_info_buf_tmp = kmalloc(SECTION_PRINTK_LEN,
 					GFP_KERNEL);
-	if (IS_ERR(g_printk_info_buf_tmp) || !g_printk_info_buf_tmp) {
+	if (!g_printk_info_buf_tmp) {
 		KBOX_MSG("kmalloc g_printk_info_buf_tmp fail!\n");
 		ret = -ENOMEM;
 		goto fail;
diff --git a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.c b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.c
index 49690bab1cefeeae590b055af6248d799acdbb3e..9f6dfe55e3fb18162217b3d65e14f5a73f7ddc94 100644
--- a/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.c
+++ b/drivers/net/ethernet/huawei/bma/kbox_drv/kbox_ram_op.c
@@ -432,7 +432,7 @@ int kbox_write_op(long long offset, unsigned int count,
 		return KBOX_FALSE;
 
 	temp_buf_char = kmalloc(TEMP_BUF_DATA_SIZE, GFP_KERNEL);
-	if (!temp_buf_char || IS_ERR(temp_buf_char)) {
+	if (!temp_buf_char) {
 		KBOX_MSG("kmalloc temp_buf_char fail!\n");
 		up(&user_sem);
 		return -ENOMEM;
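Background for the IS_ERR() removals above: kmalloc() returns either a valid pointer or NULL, never an ERR_PTR()-encoded errno, so IS_ERR() on its result is always false and the old `IS_ERR(p) || !p` only ever worked through its second clause. Illustrative fragments contrasting the two conventions (filp_open() is just an example of a genuine ERR_PTR-style API):

	char *buf;
	struct file *filp;

	/* kmalloc()-style API: NULL is the only failure value */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ERR_PTR()-style API: check with IS_ERR()/PTR_ERR() */
	filp = filp_open(path, O_RDONLY, 0);
	if (IS_ERR(filp)) {
		kfree(buf);
		return PTR_ERR(filp);
	}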
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
index 240db31d7178fd2a78a03b7bda5aa4167bbdd705..7705bb919ead63fb46c71ebb84564d37cb889889 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.c
@@ -638,6 +638,7 @@ s32 veth_refill_rxskb(struct bspveth_rxtx_q *prx_queue, int queue)
 		next_to_fill = (next_to_fill + 1) & BSPVETH_POINT_MASK;
 	}
 
+	mb(); /* memory barriers. */
 	prx_queue->next_to_fill = next_to_fill;
 
 	tail = prx_queue->tail;
@@ -672,6 +673,7 @@ s32 bspveth_setup_rx_skb(struct bspveth_device *pvethdev,
 	if (!idx)	/* Can't alloc even one packets */
 		return -EFAULT;
 
+	mb(); /* memory barriers. */
 	prx_queue->next_to_fill = idx;
 
 	VETH_LOG(DLOG_DEBUG, "prx_queue->next_to_fill=%d\n",
@@ -886,8 +888,6 @@ s32 bspveth_setup_all_rx_resources(struct bspveth_device *pvethdev)
 		err = bspveth_setup_rx_resources(pvethdev,
 						 pvethdev->prx_queue[qid]);
 		if (err) {
-			kfree(pvethdev->prx_queue[qid]);
-			pvethdev->prx_queue[qid] = NULL;
 			VETH_LOG(DLOG_ERROR,
 				 "Allocation for Rx Queue %u failed\n", qid);
 
@@ -1328,6 +1328,7 @@ s32 veth_send_one_pkt(struct sk_buff *skb, int queue)
 	pbd_v->off = off;
 	pbd_v->len = skb->len;
 
+	mb(); /* memory barriers. */
 	head = (head + 1) & BSPVETH_POINT_MASK;
 	ptx_queue->head = head;
 
@@ -1424,6 +1425,7 @@ s32 veth_free_txskb(struct bspveth_rxtx_q *ptx_queue, int queue)
 		next_to_free = (next_to_free + 1) & BSPVETH_POINT_MASK;
 	}
 
+	mb(); /* memory barriers. */
 	ptx_queue->next_to_free = next_to_free;
 
 	tail = ptx_queue->tail;
@@ -1522,6 +1524,7 @@ s32 veth_recv_pkt(struct bspveth_rxtx_q *prx_queue, int queue)
 		}
 	}
 
+	mb(); /* memory barriers. */
 	prx_queue->tail = tail;
 
 	head = prx_queue->head;
diff --git a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
index 9a4d699e6421df30ebee29b9ba3ab0cb812aad3d..5036365493209481b42339d7901b06f5cc3638a1 100644
--- a/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
+++ b/drivers/net/ethernet/huawei/bma/veth_drv/veth_hb.h
@@ -31,7 +31,7 @@ extern "C" {
 #ifdef DRV_VERSION
 #define VETH_VERSION MICRO_TO_STR(DRV_VERSION)
 #else
-#define VETH_VERSION "0.3.4"
+#define VETH_VERSION "0.3.5"
 #endif
 
 #define MODULE_NAME "veth"
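All five mb() calls added above sit at the same point in a pattern: between writing ring descriptors or buffers and publishing the index that tells the peer (here, the remote BMC side of the shared queue) to go read them. Without the barrier, the index store may become visible before the data it guards. A generic sketch of the producer/consumer pairing (names invented; the driver uses full mb() on both sides rather than the lighter smp_wmb()/smp_rmb()):

	/* producer side: fill entries, then publish the new index */
	ring[head].off = off;
	ring[head].len = len;
	mb();				/* data visible before index update */
	queue->head = (head + 1) & MASK;

	/* consumer side: read the index, then the data it covers */
	head = queue->head;
	mb();				/* order index read before data reads */
	while (tail != head) {
		process(&ring[tail]);
		tail = (tail + 1) & MASK;
	}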
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 2b9815ec4a622ff9b03805ae06f5daec40fa151e..b825c6a9b6dded5fe45da78a47c17520536a629c 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1610,6 +1610,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 	int len;
 	unsigned char *cp;
 
+	skb->dev = ppp->dev;
+
 	if (proto < 0x8000) {
 #ifdef CONFIG_PPP_FILTER
 	/* check if we should pass this packet */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 6c06870f90184e03861edac28a74e4e506b03b35..6a86a6533ddeb53cce1cc1defd0e95dfd0ba734c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3533,7 +3533,7 @@ uncached_fill_pages(struct TCP_Server_Info *server,
 		rdata->got_bytes += result;
 	}
 
-	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
 						rdata->got_bytes : result;
 }
 
@@ -4287,7 +4287,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
 		rdata->got_bytes += result;
 	}
 
-	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
+	return result != -ECONNABORTED && rdata->got_bytes > 0 ?
 						rdata->got_bytes : result;
 }
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6b681ee988b3f90085c88a94351bb3aaa5248650..7eb72defec9e454fdcd0220dca18d9f186380261 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1860,10 +1860,13 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
 						  struct bdi_writeback *wb)
 {
+	struct mem_cgroup *memcg;
+
 	if (mem_cgroup_disabled())
 		return;
 
-	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
+	memcg = page_memcg(page);
+	if (unlikely(memcg && &memcg->css != wb->memcg_css))
 		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5b0d2264b99ba667f5ed8ae7840a5134fd726534..72e16b7584716f39ff25e7d411331c283949bbd0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2572,11 +2572,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
 		if (!page)
 			goto out_uncharge_cgroup;
+		spin_lock_irq(&hugetlb_lock);
 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
 			SetHPageRestoreReserve(page);
 			h->resv_huge_pages--;
 		}
-		spin_lock_irq(&hugetlb_lock);
 		list_add(&page->lru, &h->hugepage_activelist);
 		/* Fall through */
 	}
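The mm/hugetlb.c hunk is a lock-scope fix: h->resv_huge_pages is a per-hstate counter protected by hugetlb_lock, but the old code decremented it (and set the restore-reserve flag) before acquiring the lock, so a concurrent path could observe a stale reservation count. Annotated shape of the fixed section as it reads after the patch (the matching unlock lies beyond the quoted context):

	spin_lock_irq(&hugetlb_lock);
	if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;	/* shared counter: only touch under hugetlb_lock */
	}
	list_add(&page->lru, &h->hugepage_activelist);
	/* ... falls through; hugetlb_lock is released later */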
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index e5328a2777ecd038d1b0474166e89cce4b956999..de79fbfe16117228616666d77613376cf8dabf87 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -3609,6 +3609,38 @@ int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
 }
 #endif
 
+void br_multicast_set_startup_query_intvl(struct net_bridge *br,
+					  unsigned long val)
+{
+	unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
+		br_info(br,
+			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
+			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
+			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
+		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+	}
+
+	br->multicast_startup_query_interval = intvl_jiffies;
+}
+
+void br_multicast_set_query_intvl(struct net_bridge *br,
+				  unsigned long val)
+{
+	unsigned long intvl_jiffies = clock_t_to_jiffies(val);
+
+	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
+		br_info(br,
+			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
+			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
+			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
+		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+	}
+
+	br->multicast_query_interval = intvl_jiffies;
+}
+
 /**
  * br_multicast_list_adjacent - Returns snooped multicast addresses
  * @dev:	The bridge port adjacent to which to retrieve addresses
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 31b00ba5dcc84d402970d13f6fe5416879c6de69..bed6c798fea9fe8e42232df9978fbbdd08ba5f24 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1286,7 +1286,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
 
-		br->multicast_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_query_intvl(br, val);
 	}
 
 	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
@@ -1298,7 +1298,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
 	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
 		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
 
-		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+		br_multicast_set_startup_query_intvl(br, val);
 	}
 
 	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2b88b17cc8b25bd3929309fd6223429c1650e782..bfd0e995b078a4f7d3d8b770cb12733bb6e5fbaa 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -28,6 +28,8 @@
 #define BR_MAX_PORTS	(1<<BR_PORT_BITS)
 
 #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+#define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+#define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN msecs_to_jiffies(1000)
 
 #define BR_VERSION	"2.3"
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -631,7 +631,7 @@ static ssize_t multicast_query_interval_show(struct device *d,
 
 static int set_query_interval(struct net_bridge *br, unsigned long val)
 {
-	br->multicast_query_interval = clock_t_to_jiffies(val);
+	br_multicast_set_query_intvl(br, val);
 	return 0;
 }
 
@@ -640,7 +640,7 @@ static ssize_t multicast_startup_query_interval_show(
 
 static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
 {
-	br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+	br_multicast_set_startup_query_intvl(br, val);
 	return 0;
 }
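The bridge helpers above close a footgun: userspace could set the multicast query intervals to arbitrarily small values (including 0) via netlink or sysfs, making the bridge flood queries. The setters clamp to the 1-second minimums from the br_private.h hunk and log instead of failing, so existing configurations keep applying. A runnable sketch of that clamp-and-warn shape (constants made up for illustration):

#include <stdio.h>

#define QUERY_INTVL_MIN 100	/* stand-in for msecs_to_jiffies(1000) */

static unsigned long clamp_query_interval(unsigned long val)
{
	if (val < QUERY_INTVL_MIN) {
		fprintf(stderr,
			"query interval below minimum, raising to %d\n",
			QUERY_INTVL_MIN);
		return QUERY_INTVL_MIN;
	}
	return val;
}

int main(void)
{
	printf("%lu\n", clamp_query_interval(0));	/* clamped to 100 */
	printf("%lu\n", clamp_query_interval(12500));	/* kept as-is */
	return 0;
}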
diff --git a/net/core/filter.c b/net/core/filter.c
index 8102f8ddc97495aab2ad4efc2d96d173dd259b30..8c7fa0025341327d6b702ea4986c5041a971d2d0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2127,6 +2127,10 @@ static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
 
 	if (mlen) {
 		__skb_pull(skb, mlen);
+		if (unlikely(!skb->len)) {
+			kfree_skb(skb);
+			return -ERANGE;
+		}
 
 		/* At ingress, the mac header has already been pulled once.
 		 * At egress, skb_pospull_rcsum has to be done in case that
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 98b5a7bb2226936de8f319ebcdcc7ebca14755fc..f8c287788beacb5212c3cab6d0a861ea3be502ad 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -358,11 +358,13 @@ static void sock_map_free(struct bpf_map *map)
 
 		sk = xchg(psk, NULL);
 		if (sk) {
+			sock_hold(sk);
 			lock_sock(sk);
 			rcu_read_lock();
 			sock_map_unref(sk, psk);
 			rcu_read_unlock();
 			release_sock(sk);
+			sock_put(sk);
 		}
 	}
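Two small but real fixes close the series. The __bpf_redirect_no_mac() hunk rejects packets that become empty after __skb_pull(), returning -ERANGE instead of handing a zero-length skb down the stack. The sock_map_free() hunk pins each socket before sleeping on its lock: xchg() detaches the entry from the map, but without an extra reference the socket's last sock_put() elsewhere could free sk while this CPU is blocked in lock_sock(). Annotated version of the same lines (comments added here for clarity):

		sk = xchg(psk, NULL);
		if (sk) {
			sock_hold(sk);	/* keep sk alive: lock_sock() may sleep */
			lock_sock(sk);
			rcu_read_lock();
			sock_map_unref(sk, psk);
			rcu_read_unlock();
			release_sock(sk);
			sock_put(sk);	/* drop our temporary reference */
		}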