Unverified commit 95b70300, authored by openeuler-ci-bot and committed by Gitee

!372 Backport 5.10.141 LTS

Merge Pull Request from: @zhangjialin11 
 
Backport 5.10.141 LTS patches from upstream.

Conflicts:

Already merged(12):
895428ee124a mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
38267d266336 Bluetooth: L2CAP: Fix build errors in some archs
bacb37bdc2a2 media: pvrusb2: fix memory leak in pvr_probe
6204bf78b2a9 bpf: Don't redirect packets with invalid pkt_len
98f401d36396 mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
744b0d308070 kprobes: don't call disarm_kprobe() for disabled kprobes
28d8d2737e82 io_uring: disable polling pollfree files
cb41f22df3ec xfs: remove infinite loop when reserving free block pool
72a259bdd50d xfs: always succeed at setting the reserve pool size
f168801da95f xfs: fix overfilling of reserve pool
d34798d846d7 xfs: fix soft lockup via spinning in filestream ag selection loop
64f6da455b66 xfs: revert "xfs: actually bump warning counts when we send warnings"

Total patches: 37 - 12 = 25 
 
Link: https://gitee.com/openeuler/kernel/pulls/372

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
 	int rc;
 
 	if (diag204_probe()) {
-		pr_err("The hardware system does not support hypfs\n");
+		pr_info("The hardware system does not support hypfs\n");
 		return -ENODATA;
 	}
......
@@ -496,9 +496,9 @@ static int __init hypfs_init(void)
 	hypfs_vm_exit();
 fail_hypfs_diag_exit:
 	hypfs_diag_exit();
+	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 fail_dbfs_exit:
 	hypfs_dbfs_exit();
-	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
 device_initcall(hypfs_init)
@@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+	if ((trans_exc_code & store_indication) == 0x400)
+		access = VM_WRITE;
+	if (access == VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
 	mmap_read_lock(mm);
......
@@ -35,33 +35,56 @@
 #define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
 
 /*
+ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
+ */
+#define __FILL_RETURN_SLOT			\
+	ANNOTATE_INTRA_FUNCTION_CALL;		\
+	call	772f;				\
+	int3;					\
+772:
+
+/*
+ * Stuff the entire RSB.
+ *
  * Google experimented with loop-unrolling and this turned out to be
  * the optimal version - two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
-#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
-	mov	$(nr/2), reg;			\
-771:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	772f;				\
-773:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	773b;				\
-772:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	774f;				\
-775:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	775b;				\
-774:						\
-	add	$(BITS_PER_LONG/8) * 2, sp;	\
-	dec	reg;				\
-	jnz	771b;				\
-	/* barrier for jnz misprediction */	\
+#ifdef CONFIG_X86_64
+#define __FILL_RETURN_BUFFER(reg, nr)			\
+	mov	$(nr/2), reg;				\
+771:							\
+	__FILL_RETURN_SLOT				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
+	dec	reg;					\
+	jnz	771b;					\
+	/* barrier for jnz misprediction */		\
+	lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, as such it can't
+ * do a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr)			\
+	.rept nr;					\
+	__FILL_RETURN_SLOT;				\
+	.endr;						\
+	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
+#endif
+
+/*
+ * Stuff a single RSB slot.
+ *
+ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
+ * forced to retire before letting a RET instruction execute.
+ *
+ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
+ * before this point.
+ */
+#define __FILL_ONE_RETURN				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
 	lfence;
 
 #ifdef __ASSEMBLY__
@@ -120,28 +143,15 @@
 #endif
 .endm
 
-.macro ISSUE_UNBALANCED_RET_GUARD
-	ANNOTATE_INTRA_FUNCTION_CALL
-	call .Lunbalanced_ret_guard_\@
-	int3
-.Lunbalanced_ret_guard_\@:
-	add $(BITS_PER_LONG/8), %_ASM_SP
-	lfence
-.endm
-
 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
-.ifb \ftr2
-	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
-.else
-	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
-.endif
-	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
-.Lunbalanced_\@:
-	ISSUE_UNBALANCED_RET_GUARD
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
+	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr,	\
+		__stringify(__FILL_ONE_RETURN), \ftr2
 .Lskip_rsb_\@:
 .endm
......
@@ -327,7 +327,23 @@ static struct miscdevice udmabuf_misc = {
 
 static int __init udmabuf_dev_init(void)
 {
-	return misc_register(&udmabuf_misc);
+	int ret;
+
+	ret = misc_register(&udmabuf_misc);
+	if (ret < 0) {
+		pr_err("Could not initialize udmabuf device\n");
+		return ret;
+	}
+
+	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
+					   DMA_BIT_MASK(64));
+	if (ret < 0) {
+		pr_err("Could not setup DMA mask for udmabuf device\n");
+		misc_deregister(&udmabuf_misc);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void __exit udmabuf_dev_exit(void)
......
@@ -283,7 +283,7 @@ enum amdgpu_kiq_irq {
 	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
 	AMDGPU_CP_KIQ_IRQ_LAST
 };
+#define SRIOV_USEC_TIMEOUT  1200000 /* wait 12 * 100ms for SRIOV */
 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
 #define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
......
@@ -371,6 +371,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	uint32_t seq;
 	uint16_t queried_pasid;
 	bool ret;
+	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
@@ -389,7 +390,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		amdgpu_ring_commit(ring);
 		spin_unlock(&adev->gfx.kiq.ring_lock);
-		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
 		if (r < 1) {
 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 			return -ETIME;
......
@@ -839,6 +839,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	uint32_t seq;
 	uint16_t queried_pasid;
 	bool ret;
+	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
@@ -878,7 +879,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		amdgpu_ring_commit(ring);
 		spin_unlock(&adev->gfx.kiq.ring_lock);
-		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
 		if (r < 1) {
 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 			up_read(&adev->reset_sem);
......
@@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
 	switch (pix_clk_params->color_depth) {
 	case COLOR_DEPTH_101010:
 		actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+		actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
 		break;
 	case COLOR_DEPTH_121212:
 		actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+		actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
 		break;
 	case COLOR_DEPTH_161616:
 		actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
......
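Review note: the added "-= % 10" keeps the scaled deep-color clock on a whole-kHz boundary (the variable is in units of 100 Hz). A quick worked example with a made-up input value, not taken from the patch:

    /* hypothetical 10-bpc request, value in units of 100 Hz */
    unsigned int clk_100hz = 1485002;

    clk_100hz = (clk_100hz * 5) >> 2;   /* scale by 5/4 -> 1856252 */
    clk_100hz -= clk_100hz % 10;        /* drop sub-kHz remainder -> 1856250 */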
@@ -125,6 +125,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
 	while (tmp_mpcc != NULL) {
 		if (tmp_mpcc->dpp_id == dpp_id)
 			return tmp_mpcc;
+
+		/* avoid circular linked list */
+		ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+		if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+			break;
+
 		tmp_mpcc = tmp_mpcc->mpcc_bot;
 	}
 	return NULL;
......
@@ -464,6 +464,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
 				OTG_CLOCK_ON, 1,
 				1, 1000);
 	} else  {
+
+		//last chance to clear underflow, otherwise, it will always there due to clock is off.
+		if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+			optc->funcs->clear_optc_underflow(optc);
+
 		REG_UPDATE_2(OTG_CLOCK_CONTROL,
 				OTG_CLOCK_GATE_DIS, 0,
 				OTG_CLOCK_EN, 0);
......
@@ -533,6 +533,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
 	while (tmp_mpcc != NULL) {
 		if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
 			return tmp_mpcc;
+
+		/* avoid circular linked list */
+		ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+		if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+			break;
+
 		tmp_mpcc = tmp_mpcc->mpcc_bot;
 	}
 	return NULL;
......
@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
 			VMID, address->vmid);
 
 	if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
-		REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+		REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
 		REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
 	} else {
......
@@ -2759,6 +2759,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.dump_pptable = sienna_cichlid_dump_pptable,
 	.init_microcode = smu_v11_0_init_microcode,
 	.load_microcode = smu_v11_0_load_microcode,
+	.fini_microcode = smu_v11_0_fini_microcode,
 	.init_smc_tables = sienna_cichlid_init_smc_tables,
 	.fini_smc_tables = smu_v11_0_fini_smc_tables,
 	.init_power = smu_v11_0_init_power,
......
@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
 	int ret;
 
 	r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+	if (!r) {
+		hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+		return -EINVAL;
+	}
+
 	if (hid_report_len(r) < 64)
 		return -EINVAL;
 
@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
 	int ret;
 
 	r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+	if (!r) {
+		hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+		return -EINVAL;
+	}
+
 	if (hid_report_len(r) < 64)
 		return -EINVAL;
......
@@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
 	unsigned int minor = iminor(inode);
 	struct hidraw_list *list = file->private_data;
 	unsigned long flags;
+	int i;
 
 	mutex_lock(&minors_lock);
 
 	spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+	for (i = list->tail; i < list->head; i++)
+		kfree(list->buffer[i].value);
 	list_del(&list->node);
 	spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
 	kfree(list);
......
@@ -2293,6 +2293,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
 	/* disable busy check */
 	sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
 
+	val = readl(host->base + MSDC_INT);
+	writel(val, host->base + MSDC_INT);
+
 	if (recovery) {
 		sdr_set_field(host->base + MSDC_DMA_CTRL,
 			      MSDC_DMA_CTRL_STOP, 1);
@@ -2693,11 +2696,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
 {
 	struct mmc_host *mmc = dev_get_drvdata(dev);
 	int ret;
+	u32 val;
 
 	if (mmc->caps2 & MMC_CAP2_CQE) {
 		ret = cqhci_suspend(mmc);
 		if (ret)
 			return ret;
+		val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
+		writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
 	}
 
 	return pm_runtime_force_suspend(dev);
......
@@ -222,8 +222,15 @@ static int get_port_device_capability(struct pci_dev *dev)
 
 #ifdef CONFIG_PCIEAER
 	if (dev->aer_cap && pci_aer_available() &&
-	    (pcie_ports_native || host->native_aer))
+	    (pcie_ports_native || host->native_aer)) {
 		services |= PCIE_PORT_SERVICE_AER;
+
+		/*
+		 * Disable AER on this port in case it's been enabled by the
+		 * BIOS (the AER service driver will enable it when necessary).
+		 */
+		pci_disable_pcie_error_reporting(dev);
+	}
 #endif
 
 	/* Root Ports and Root Complex Event Collectors may generate PMEs */
......
@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 		return -EINVAL;
 	}
 
+	if (!var->pixclock) {
+		DPRINTK("pixclock is zero\n");
+		return -EINVAL;
+	}
+
 	if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
 		DPRINTK("pixclock too high (%ldKHz)\n",
 			PICOS2KHZ(var->pixclock));
......
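Review note: the zero check matters because PICOS2KHZ() divides by its argument. As defined in include/uapi/linux/fb.h:

    #define PICOS2KHZ(a)  (1000000000UL / (a))

so a userspace-supplied var->pixclock of 0 would hit a divide-by-zero in the very next comparison; failing early with -EINVAL avoids that.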
@@ -294,7 +294,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
 
 static inline struct sk_psock *sk_psock(const struct sock *sk)
 {
-	return rcu_dereference_sk_user_data(sk);
+	return __rcu_dereference_sk_user_data_with_flags(sk,
+							 SK_USER_DATA_PSOCK);
 }
 
 static inline void sk_psock_set_state(struct sk_psock *psock,
......
@@ -548,14 +548,26 @@ enum sk_pacing {
 	SK_PACING_FQ		= 2,
 };
 
-/* Pointer stored in sk_user_data might not be suitable for copying
- * when cloning the socket. For instance, it can point to a reference
- * counted object. sk_user_data bottom bit is set if pointer must not
- * be copied.
+/* flag bits in sk_user_data
+ *
+ * - SK_USER_DATA_NOCOPY:      Pointer stored in sk_user_data might
+ *   not be suitable for copying when cloning the socket. For instance,
+ *   it can point to a reference counted object. sk_user_data bottom
+ *   bit is set if pointer must not be copied.
+ *
+ * - SK_USER_DATA_BPF:         Mark whether sk_user_data field is
+ *   managed/owned by a BPF reuseport array. This bit should be set
+ *   when sk_user_data's sk is added to the bpf's reuseport_array.
+ *
+ * - SK_USER_DATA_PSOCK:       Mark whether pointer stored in
+ *   sk_user_data points to psock type. This bit should be set
+ *   when sk_user_data is assigned to a psock object.
  */
 #define SK_USER_DATA_NOCOPY	1UL
-#define SK_USER_DATA_BPF	2UL	/* Managed by BPF */
-#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
+#define SK_USER_DATA_BPF	2UL
+#define SK_USER_DATA_PSOCK	4UL
+#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
+				  SK_USER_DATA_PSOCK)
 
 /**
  * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
@@ -568,24 +580,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
 
 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
 
+/**
+ * __rcu_dereference_sk_user_data_with_flags - return the pointer
+ * only if argument flags all has been set in sk_user_data. Otherwise
+ * return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ */
+static inline void *
+__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
+					  uintptr_t flags)
+{
+	uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
+
+	WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+	if ((sk_user_data & flags) == flags)
+		return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+	return NULL;
+}
+
 #define rcu_dereference_sk_user_data(sk)				\
+	__rcu_dereference_sk_user_data_with_flags(sk, 0)
+#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags)		\
 ({									\
-	void *__tmp = rcu_dereference(__sk_user_data((sk)));		\
-	(void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK);		\
-})
-#define rcu_assign_sk_user_data(sk, ptr)				\
-({									\
-	uintptr_t __tmp = (uintptr_t)(ptr);				\
-	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
-	rcu_assign_pointer(__sk_user_data((sk)), __tmp);		\
-})
-#define rcu_assign_sk_user_data_nocopy(sk, ptr)				\
-({									\
-	uintptr_t __tmp = (uintptr_t)(ptr);				\
-	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);			\
+	uintptr_t __tmp1 = (uintptr_t)(ptr),				\
+		  __tmp2 = (uintptr_t)(flags);				\
+	WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK);			\
+	WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK);			\
 	rcu_assign_pointer(__sk_user_data((sk)),			\
-			   __tmp | SK_USER_DATA_NOCOPY);		\
+			   __tmp1 | __tmp2);				\
 })
+#define rcu_assign_sk_user_data(sk, ptr)				\
+	__rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
 
 /*
  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
......
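Review note: for anyone new to the tagged-pointer scheme above, here is a minimal userspace sketch of the same arithmetic (names are invented for illustration; this is not kernel API). Because the stored object is at least 8-byte aligned, the three low pointer bits are free to carry the NOCOPY/BPF/PSOCK tags:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DATA_NOCOPY  1UL
    #define DATA_BPF     2UL
    #define DATA_PSOCK   4UL
    #define DATA_PTRMASK (~(DATA_NOCOPY | DATA_BPF | DATA_PSOCK))

    int main(void)
    {
            void *obj = malloc(64);              /* stands in for a psock */
            uintptr_t word = (uintptr_t)obj;

            assert((word & ~DATA_PTRMASK) == 0); /* alignment frees the low bits */
            word |= DATA_NOCOPY | DATA_PSOCK;    /* tag it, as sk_psock_init() does */

            /* lookup succeeds only when all requested tag bits are present */
            uintptr_t want = DATA_PSOCK;
            void *hit = ((word & want) == want) ?
                        (void *)(word & DATA_PTRMASK) : NULL;
            printf("psock lookup: %p\n", hit);   /* == obj */

            want = DATA_BPF;                     /* wrong owner, e.g. reuseport array */
            hit = ((word & want) == want) ?
                  (void *)(word & DATA_PTRMASK) : NULL;
            printf("bpf lookup:   %p\n", hit);   /* NULL */

            free(obj);
            return 0;
    }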
@@ -2900,6 +2900,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
 
 	ftrace_startup_enable(command);
 
+	/*
+	 * If ftrace is in an undefined state, we just remove ops from list
+	 * to prevent the NULL pointer, instead of totally rolling it back and
+	 * free trampoline, because those actions could cause further damage.
+	 */
+	if (unlikely(ftrace_disabled)) {
+		__unregister_ftrace_function(ops);
+		return -ENODEV;
+	}
+
 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
 
 	return 0;
......
@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
 
 config CRYPTO_LIB_CHACHA_GENERIC
 	tristate
-	select XOR_BLOCKS
 	help
 	  This symbol can be depended upon by arch implementations of the
 	  ChaCha library interface that require the generic code as a
......
@@ -46,8 +46,8 @@ static inline bool vdso_cycles_ok(u64 cycles)
 #endif
 
 #ifdef CONFIG_TIME_NS
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
-			  struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+					  struct __kernel_timespec *ts)
 {
 	const struct vdso_data *vd = __arch_get_timens_vdso_data();
 	const struct timens_offset *offs = &vdns->offset[clk];
@@ -97,8 +97,8 @@ static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
 	return NULL;
 }
 
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
-			  struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+					  struct __kernel_timespec *ts)
 {
 	return -EINVAL;
 }
@@ -159,8 +159,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
 }
 
 #ifdef CONFIG_TIME_NS
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
-			    struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+					    struct __kernel_timespec *ts)
 {
 	const struct vdso_data *vd = __arch_get_timens_vdso_data();
 	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
@@ -188,8 +188,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
 	return 0;
 }
 #else
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
-			    struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+					    struct __kernel_timespec *ts)
 {
 	return -1;
 }
......
@@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n)
 	return 0;
 }
 
-static void pneigh_queue_purge(struct sk_buff_head *list)
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
 {
+	struct sk_buff_head tmp;
+	unsigned long flags;
 	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(list)) != NULL) {
+	skb_queue_head_init(&tmp);
+	spin_lock_irqsave(&list->lock, flags);
+	skb = skb_peek(list);
+	while (skb != NULL) {
+		struct sk_buff *skb_next = skb_peek_next(skb, list);
+
+		if (net == NULL || net_eq(dev_net(skb->dev), net)) {
+			__skb_unlink(skb, list);
+			__skb_queue_tail(&tmp, skb);
+		}
+		skb = skb_next;
+	}
+	spin_unlock_irqrestore(&list->lock, flags);
+
+	while ((skb = __skb_dequeue(&tmp))) {
 		dev_put(skb->dev);
 		kfree_skb(skb);
 	}
@@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 	write_lock_bh(&tbl->lock);
 	neigh_flush_dev(tbl, dev, skip_perm);
 	pneigh_ifdown_and_unlock(tbl, dev);
-
-	del_timer_sync(&tbl->proxy_timer);
-	pneigh_queue_purge(&tbl->proxy_queue);
+	pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
+	if (skb_queue_empty_lockless(&tbl->proxy_queue))
+		del_timer_sync(&tbl->proxy_timer);
 	return 0;
 }
 
@@ -1743,7 +1758,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
 	/* It is not clean... Fix it to unload IPv6 module safely */
 	cancel_delayed_work_sync(&tbl->gc_work);
 	del_timer_sync(&tbl->proxy_timer);
-	pneigh_queue_purge(&tbl->proxy_queue);
+	pneigh_queue_purge(&tbl->proxy_queue, NULL);
 	neigh_ifdown(tbl, NULL);
 	if (atomic_read(&tbl->entries))
 		pr_crit("neighbour leakage\n");
......
@@ -631,7 +631,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
 	refcount_set(&psock->refcnt, 1);
 
-	rcu_assign_sk_user_data_nocopy(sk, psock);
+	__rcu_assign_sk_user_data_with_flags(sk, psock,
+					     SK_USER_DATA_NOCOPY |
+					     SK_USER_DATA_PSOCK);
 	sock_hold(sk);
 
 out:
......
@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
 
 config NF_CONNTRACK_PROCFS
 	bool "Supply CT list in procfs (OBSOLETE)"
-	default y
 	depends on PROC_FS
 	help
 	  This option enables for the list of known conntrack entries
......
@@ -2996,8 +2996,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	if (err)
 		goto out_free;
 
-	if (sock->type == SOCK_RAW &&
-	    !dev_validate_header(dev, skb->data, len)) {
+	if ((sock->type == SOCK_RAW &&
+	     !dev_validate_header(dev, skb->data, len)) || !skb->len) {
 		err = -EINVAL;
 		goto out_free;
 	}
......
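Review note: the new "|| !skb->len" clause makes packet_snd() refuse zero-length sends instead of handing an empty skb to the driver. A hedged userspace sketch of the call shape now rejected (the device name and the assumption that header validation passes, e.g. with CAP_SYS_RAWIO, are mine, not part of the patch; on a fixed kernel the send should fail with EINVAL):

    #include <net/if.h>
    #include <netpacket/packet.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
            int fd = socket(AF_PACKET, SOCK_RAW, 0); /* needs CAP_NET_RAW */
            if (fd < 0) { perror("socket"); return 1; }

            struct sockaddr_ll sll;
            memset(&sll, 0, sizeof(sll));
            sll.sll_family  = AF_PACKET;
            sll.sll_ifindex = if_nametoindex("lo");  /* arbitrary device */

            /* zero-byte payload: with this fix, sendto() returns -EINVAL */
            if (sendto(fd, "", 0, 0, (struct sockaddr *)&sll, sizeof(sll)) < 0)
                    perror("sendto");
            return 0;
    }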
@@ -87,8 +87,7 @@ obj := $(KBUILD_EXTMOD)
 src := $(obj)
 
 # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
-             $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
 
 # modpost option for external modules
 MODPOST += -e
......