diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72624a16b79284c0f2484741144e6b90ac084064..c94b4675d021ffd374de22d7d83df61dbb6c34dd 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                              T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and file system UIDs
  Gid                         Real, effective, saved set, and file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
diff --git a/MAINTAINERS b/MAINTAINERS
index 795942555b4d9cb0cbba640773297f3b7f43fc59..50e6f7c561d8bd540c203c49db8cd4aaecbdea9b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13625,6 +13625,7 @@ F:	drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:	Seth Jennings <sjenning@redhat.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zbud.c
@@ -13680,6 +13681,7 @@ F:	Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:	Seth Jennings <sjenning@redhat.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zswap.c
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e264fc71cfd52f2acb0b24ddb1aff792be7..e93c9494503ac8fc3cfaa8167ea3523abb3e2925 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 	return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+	long long c, old;
+
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == u))
+			break;
+		old = atomic64_cmpxchg(v, c, c + i);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long long c, old, dec;
+
+	c = atomic64_read(v);
+	for (;;) {
+		dec = c - 1;
+		if (unlikely(dec < 0))
+			break;
+		old = atomic64_cmpxchg((v), c, dec);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return dec;
+}
+
 #define ATOMIC_OP(op)							\
 static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311735c8b573bd5702eac1dcaaac1103600e..67e333aa7629c406745564cb24acc5903733ec41 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)						\
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1253e62310a68d9e6bf039d8d866ee4019..dacb6a8418aa927e8d75a86470b35b414bf48598 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
 	sprintf(buf, "%s", zone->name);
 
 	/* MMOP_ONLINE_KERNEL */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+	zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
 	}
 
 	/* MMOP_ONLINE_MOVABLE */
-	zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+	zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
 	if (zone_shift) {
 		strcat(buf, " ");
 		strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547dbf980645104d862fdc39e2ad740107ad15..76382c858c35435b98e061a7dda49d5dacad9585 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
 	struct ms_id_register id_reg;
 
 	if (!(*mrq)) {
-		memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+		memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
 				  sizeof(struct ms_id_register));
 		*mrq = &card->current_mrq;
 		return 0;
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b8ba8e9a28483c4ff5edb03b80a1a9b2e3..68a113594808f220aa818424cd6e342897806a74 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
 
 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-	int tooff = 0, fromoff = 0;
-	int size;
+	unsigned int tooff = 0, fromoff = 0;
+	size_t size;
 
 	if (to->start > from->start)
 		fromoff = to->start - from->start;
 	else
 		tooff = from->start - to->start;
-	size = to->len - tooff;
-	if (size > (int) (from->len - fromoff))
-		size = from->len - fromoff;
-	if (size <= 0)
+	if (fromoff >= from->len || tooff >= to->len)
+		return -EINVAL;
+
+	size = min_t(size_t, to->len - tooff, from->len - fromoff);
+	if (size == 0)
 		return -EINVAL;
 	size *= sizeof(u16);
 
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 
 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-	int tooff = 0, fromoff = 0;
-	int size;
+	unsigned int tooff = 0, fromoff = 0;
+	size_t size;
 
 	if (to->start > from->start)
 		fromoff = to->start - from->start;
 	else
 		tooff = from->start - to->start;
-	size = to->len - tooff;
-	if (size > (int) (from->len - fromoff))
-		size = from->len - fromoff;
-	if (size <= 0)
+	if (fromoff >= from->len || tooff >= to->len)
+		return -EINVAL;
+
+	size = min_t(size_t, to->len - tooff, from->len - fromoff);
+	if (size == 0)
 		return -EINVAL;
 	size *= sizeof(u16);
 
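The fbcmap change above replaces signed offset arithmetic with up-front range checks. The following stand-alone sketch (user-space C, not kernel code; struct cmap is a stripped-down stand-in for struct fb_cmap and the colormap values are hypothetical attacker-controlled inputs) shows why: with a large user-supplied start offset, the old int-based code lets an out-of-bounds offset slip through, while the new unsigned comparison rejects it before any copy size is computed. Note that the unsigned-to-int assignment in old_check() is implementation-defined; on typical two's-complement targets it wraps negative, which is exactly the failure mode being demonstrated.

#include <stdio.h>

struct cmap { unsigned int start; unsigned int len; };

/* Old logic (the removed lines above): signed offsets, checked only at the end. */
static void old_check(const struct cmap *from, const struct cmap *to)
{
        int tooff = 0, fromoff = 0;
        int size;

        if (to->start > from->start)
                fromoff = to->start - from->start;   /* may wrap negative */
        else
                tooff = from->start - to->start;
        size = to->len - tooff;
        if (size > (int)(from->len - fromoff))       /* wraps to huge positive */
                size = from->len - fromoff;
        if (size <= 0)
                printf("old: rejected\n");
        else
                printf("old: would copy %d entries at offset %d (out of bounds!)\n",
                       size, fromoff);
}

/* New logic (the added lines above): offsets validated before any arithmetic. */
static void new_check(const struct cmap *from, const struct cmap *to)
{
        unsigned int tooff = 0, fromoff = 0;
        size_t size;

        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
        if (fromoff >= from->len || tooff >= to->len) {
                printf("new: rejected\n");           /* -EINVAL in the kernel */
                return;
        }
        size = to->len - tooff;
        if (size > from->len - fromoff)
                size = from->len - fromoff;
        printf("new: copy %zu entries\n", size);
}

int main(void)
{
        struct cmap from = { .start = 0, .len = 16 };
        struct cmap to = { .start = 0x80000011u, .len = 32 };

        old_check(&from, &to);
        new_check(&from, &to);
        return 0;
}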
diff --git a/fs/Kconfig b/fs/Kconfig
index c2a377cdda2b03d6efe8768e4ef7894a06ebe853..83eab52fb3f69a75aa06a9f2a31760a384508f41 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
 	bool "Direct Access (DAX) support"
 	depends on MMU
 	depends on !(ARM || MIPS || SPARC)
+	select FS_IOMAP
 	help
 	  Direct Access (DAX) can be used on memory-backed block devices.
 	  If the block device supports DAX and the filesystem supports DAX,
diff --git a/fs/dax.c b/fs/dax.c
index ddcddfeaa03bd942e83738d34c4abaed06fa2709..3af2da5e64ce77fa8ae4b3f294c82882d350120f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
-#ifdef CONFIG_FS_IOMAP
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
 	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1428,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
 #endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 36bea5adcabaa735056b20290ce23cdc7dfc0851..c634874e12d969fbd0b00ad8da745553168876e6 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,6 +1,5 @@
 config EXT2_FS
 	tristate "Second extended fs support"
-	select FS_IOMAP if FS_DAX
 	help
 	  Ext2 is a standard Linux file system for hard disks.
 
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 7b90691e98c4f5fdd3b2d162285b00e71ac51b63..e38039fd96ff59ab59ce17407abcf26de4c5a950 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,7 +37,6 @@ config EXT4_FS
 	select CRC16
 	select CRYPTO
 	select CRYPTO_CRC32C
-	select FS_IOMAP if FS_DAX
 	help
 	  This is the next generation of the ext3 filesystem.
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e7e61b28f31c037961c081d09a7be5f818013ef..87c9a9aacda3601e2686e239243f447728137943 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
 		char name[PROC_NUMBUF];
 		int len;
+
+		cond_resched();
 		if (!has_pid_permissions(ns, iter.task, 2))
 			continue;
 
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38dfafacd8f3d524d1ff69ae8f621eea278..0186fe6d39f3b4d2e77497d4d34a7691204ae9fa 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
-	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+	u64 id = 0;
+
+	/* When calling huge_encode_dev(),
+	 * use sb->s_bdev->bd_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK defined
+	 * use sb->s_dev when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD defined
+	 * leave id as 0 when,
+	 *   - CONFIG_ROMFS_ON_BLOCK undefined and
+	 *   - CONFIG_ROMFS_ON_MTD undefined
+	 */
+	if (sb->s_bdev)
+		id = huge_encode_dev(sb->s_bdev->bd_dev);
+	else if (sb->s_dev)
+		id = huge_encode_dev(sb->s_dev);
 
 	buf->f_type = ROMFS_MAGIC;
 	buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_flags |= MS_RDONLY | MS_NOATIME;
 	sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+	/* Use same dev ID from the underlying mtdblock device */
+	if (sb->s_mtd)
+		sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
 	/* read the image superblock and check it */
 	rsb = kmalloc(512, GFP_KERNEL);
 	if (!rsb)
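The romfs change above synthesizes a device ID for MTD-backed mounts and then encodes it for statfs. As a reference, here is a minimal user-space re-implementation (not the kernel's headers; the macro bodies below mirror the kernel's MKDEV/new_encode_dev packing as I understand it, and MTD_BLOCK_MAJOR is the value from <linux/major.h>) showing what huge_encode_dev() produces for an mtdblock device:

#include <stdio.h>
#include <stdint.h>

#define MINORBITS       20
#define MINORMASK       ((1U << MINORBITS) - 1)
#define MKDEV(ma, mi)   (((ma) << MINORBITS) | (mi))
#define MAJOR(dev)      ((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)      ((unsigned int)((dev) & MINORMASK))
#define MTD_BLOCK_MAJOR 31      /* from <linux/major.h> */

/* Simplified huge_encode_dev()/new_encode_dev(): pack dev_t for the user ABI. */
static uint64_t huge_encode_dev(uint32_t dev)
{
        unsigned int major = MAJOR(dev), minor = MINOR(dev);

        return (minor & 0xff) | (major << 8) |
               ((uint64_t)(minor & ~0xffU) << 12);
}

int main(void)
{
        /* What the patched romfs_fill_super() would store for mtd index 0. */
        uint32_t s_dev = MKDEV(MTD_BLOCK_MAJOR, 0);

        printf("s_dev=%#x fsid=%#llx\n", s_dev,
               (unsigned long long)huge_encode_dev(s_dev));
        return 0;
}

The point of the fix is simply that an MTD-backed romfs has no s_bdev, so statfs previously dereferenced a NULL pointer; deriving s_dev from the mtd index gives every mount a stable, non-zero fsid.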
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index d96e2f30084bcfab552ffe3005af090abfe319c9..43953e03c35682723c6658dfe9b8cceed9de22ef 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
 	struct uffd_msg msg;
 	wait_queue_t wq;
 	struct userfaultfd_ctx *ctx;
+	bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
 	if (len && (start > uwq->msg.arg.pagefault.address ||
 		    start + len <= uwq->msg.arg.pagefault.address))
 		goto out;
+	WRITE_ONCE(uwq->waken, true);
+	/*
+	 * The implicit smp_mb__before_spinlock in try_to_wake_up()
+	 * renders uwq->waken visible to other CPUs before the task is
+	 * woken.
+	 */
 	ret = wake_up_state(wq->private, mode);
 	if (ret)
 		/*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	struct userfaultfd_wait_queue uwq;
 	int ret;
 	bool must_wait, return_to_userland;
+	long blocking_state;
 
 	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	uwq.wq.private = current;
 	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
 	uwq.ctx = ctx;
+	uwq.waken = false;
 
 	return_to_userland =
 		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
 		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+			 TASK_KILLABLE;
 
 	spin_lock(&ctx->fault_pending_wqh.lock);
 	/*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * following the spin_unlock to happen before the list_add in
 	 * __add_wait_queue.
 	 */
-	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-			  TASK_KILLABLE);
+	set_current_state(blocking_state);
 	spin_unlock(&ctx->fault_pending_wqh.lock);
 
 	must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
 		wake_up_poll(&ctx->fd_wqh, POLLIN);
 		schedule();
 		ret |= VM_FAULT_MAJOR;
+
+		/*
+		 * False wakeups can originate even from rwsem before
+		 * up_read() however userfaults will wait either for a
+		 * targeted wakeup on the specific uwq waitqueue from
+		 * wake_userfault() or for signals or for uffd
+		 * release.
+		 */
+		while (!READ_ONCE(uwq.waken)) {
+			/*
+			 * This needs the full smp_store_mb()
+			 * guarantee as the state write must be
+			 * visible to other CPUs before reading
+			 * uwq.waken from other CPUs.
+			 */
+			set_current_state(blocking_state);
+			if (READ_ONCE(uwq.waken) ||
+			    READ_ONCE(ctx->released) ||
+			    (return_to_userland ? signal_pending(current) :
+			     fatal_signal_pending(current)))
+				break;
+			schedule();
+		}
 	}
 
 	__set_current_state(TASK_RUNNING);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fadea4766d5e6efddc78ca595d68c021464..c1784c0b4f3585e0d20ca8253813c31d47f11c04 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone,
 		struct mem_section *ms, unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 					  unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-			  enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+			   enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896fbc1eb0d12e60682f15e96648c13ebf98..f4aac87adcc3555014f6215d6599b604c70388e6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to concurrent nodemask
+ * update due to cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 					enum zone_type highest_zoneidx,
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca824a6aef4fcc4d2480aa9eeefd2fe82d6f9..0a3fadc32693a9cf869693f4c406eee5d168e36b 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
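The userfaultfd fix above guards against spurious wakeups by rechecking a per-waiter flag (uwq.waken) in a loop before going back to sleep. The same discipline exists in user space as the condition-variable idiom: publish the predicate, then wake; the sleeper loops on the predicate rather than trusting a single wakeup. A minimal pthreads analogy (illustrative only; the kernel code uses waitqueues and memory barriers, not mutexes):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool waken;                      /* analogous to uwq.waken */

static void *waker(void *arg)
{
        pthread_mutex_lock(&lock);
        waken = true;                   /* publish the flag first... */
        pthread_cond_signal(&cond);     /* ...then wake the sleeper */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waker, NULL);
        pthread_mutex_lock(&lock);
        while (!waken)                  /* tolerate spurious wakeups */
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        puts("woken for the right reason");
        return 0;
}

In the kernel version the mutex is replaced by ordering guarantees: the waker's WRITE_ONCE(uwq->waken) is made visible before the wakeup by the barrier in try_to_wake_up(), and the sleeper's set_current_state() provides the matching full barrier before it rereads the flag.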
diff --git a/kernel/panic.c b/kernel/panic.c
index 901c4fb46002e38c98394110c49aa50230d58180..08aa88dde7de806d4cb2b14fd93e87be8dd94501 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
 		 * Delay timeout seconds before rebooting the machine.
 		 * We can't use the "normal" timers since we just panicked.
 		 */
-		pr_emerg("Rebooting in %d seconds..", panic_timeout);
+		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
 		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
 			touch_nmi_watchdog();
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d4b0fa01cae39cd720661d7a62f50a7926f9db69..63177be0159e9493f6d6ade90efae743aaf117b7 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
 	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	int duration;
 	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return HRTIMER_NORESTART;
+
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
 	int cpu, ret = 0;
 
+	atomic_set(&watchdog_park_in_progress, 1);
+
 	for_each_watchdog_cpu(cpu) {
 		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
 		if (ret)
 			break;
 	}
 
+	atomic_set(&watchdog_park_in_progress, 0);
+
 	return ret;
 }
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 84016c8aee6b5d2769495a8c6eee0b4ac559b1a6..12b8dd64078655dd9004d03caa8167da16b57cf5 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
+	if (atomic_read(&watchdog_park_in_progress) != 0)
+		return;
+
 	if (__this_cpu_read(watchdog_nmi_touch) == true) {
 		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911b0e3a6fff02b9e52faa11816cfe508362..a3e14ce92a5684a662c2c8f80f97e6fef95943b7 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0b92d605fb69cc805a96c8333dab36174f755e22..84812a9fb16fbbd1409315ea3752fb9a1e3e39ef 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
 			struct radix_tree_node *old = child;
 			offset = child->offset + 1;
 			child = child->parent;
-			WARN_ON_ONCE(!list_empty(&node->private_list));
+			WARN_ON_ONCE(!list_empty(&old->private_list));
 			radix_tree_node_free(old);
 			if (old == entry_to_node(node))
 				return;
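The watchdog changes above introduce a simple "parking in progress" gate: the park path raises an atomic flag, and both the hrtimer callback and the NMI overflow callback bail out while it is set, so a slow kthread_park() cannot be misread as a lockup. A compact C11 model of the same pattern (stand-alone sketch with placeholder function names, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int park_in_progress;     /* models watchdog_park_in_progress */

/* Periodic callback: skip all checks while parking is underway. */
static int timer_fn(void)
{
        if (atomic_load(&park_in_progress) != 0)
                return 0;               /* HRTIMER_NORESTART in the kernel */
        /* ... normal softlockup/hardlockup checks would run here ... */
        return 1;
}

/* Park path: raise the flag, do the slow work, drop the flag. */
static void park_threads(void)
{
        atomic_store(&park_in_progress, 1);
        /* ... kthread_park() on every watchdog thread ... */
        atomic_store(&park_in_progress, 0);
}

int main(void)
{
        printf("before park: timer ran=%d\n", timer_fn());
        atomic_store(&park_in_progress, 1);
        printf("during park: timer ran=%d\n", timer_fn());
        atomic_store(&park_in_progress, 0);
        park_threads();
        printf("after park: timer ran=%d\n", timer_fn());
        return 0;
}

A plain atomic flag suffices here because the callbacks only need a racy snapshot: a stale read at worst delays one check, which is harmless compared to a false lockup report.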
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9a6bd6c8d55a6691047e516a46c2cf6b931b912d..5f3ad65c85de01fa6e4c8a07ef9494410bf2b133 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
+	/*
+	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
+	 * not be in this function with `flags & FOLL_COW` set.
+	 */
+	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
 		return NULL;
 
@@ -1128,6 +1134,16 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 	return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+	return pmd_write(pmd) ||
+	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 				   unsigned long addr,
 				   pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
-	if (flags & FOLL_WRITE && !pmd_write(*pmd))
+	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
 		goto out;
 
 	/* Avoid dumping huge zero page */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a63a8f8326647b92bdc63810c8a93be96047f748..b822e158b319e8f2f02ecbfe76c31b6466be51f1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
 		return ret;
 	}
 
-	/* Try charges one by one with reclaim */
+	/* Try charges one by one with reclaim, but do not retry */
 	while (count--) {
-		ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+		ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
 		if (ret)
 			return ret;
 		mc.precharge++;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e43142c15631fefdf5a605ced247a6429825252f..ca2723d4733849eab01b323a50e6b1bc609e308c 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
 	node_set_state(node, N_MEMORY);
 }
 
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-		   enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+		    enum zone_type target, int *zone_shift)
 {
 	struct zone *zone = page_zone(pfn_to_page(pfn));
 	enum zone_type idx = zone_idx(zone);
 	int i;
 
+	*zone_shift = 0;
+
 	if (idx < target) {
 		/* pages must be at end of current zone */
 		if (pfn + nr_pages != zone_end_pfn(zone))
-			return 0;
+			return false;
 
 		/* no zones in use between current zone and target */
 		for (i = idx + 1; i < target; i++)
 			if (zone_is_initialized(zone - idx + i))
-				return 0;
+				return false;
 	}
 
 	if (target < idx) {
 		/* pages must be at beginning of current zone */
 		if (pfn != zone->zone_start_pfn)
-			return 0;
+			return false;
 
 		/* no zones in use between current zone and target */
 		for (i = target + 1; i < idx; i++)
 			if (zone_is_initialized(zone - idx + i))
-				return 0;
+				return false;
 	}
 
-	return target - idx;
+	*zone_shift = target - idx;
+	return true;
 }
 
 /* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	    !can_online_high_movable(zone))
 		return -EINVAL;
 
-	if (online_type == MMOP_ONLINE_KERNEL)
-		zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
-	else if (online_type == MMOP_ONLINE_MOVABLE)
-		zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+	if (online_type == MMOP_ONLINE_KERNEL) {
+		if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+			return -EINVAL;
+	} else if (online_type == MMOP_ONLINE_MOVABLE) {
+		if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+			return -EINVAL;
+	}
 
 	zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
 	if (!zone)
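The zone_can_shift() rework above is a classic interface fix: the old signature returned the shift itself, so a return of 0 was ambiguous between "shift by zero is valid" and "no shift is possible", and callers treated legitimate zero shifts as failures. The new signature separates validity (the bool return) from the value (the out-parameter). A tiny stand-alone illustration of the two interface styles (hypothetical function names, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Old-style interface: returns the shift, but 0 means both "shift by
 * zero is fine" and "shift is not possible" -- the caller cannot tell.
 */
static int shift_old(int idx, int target, bool possible)
{
        return possible ? target - idx : 0;
}

/* New-style interface: validity and value travel separately. */
static bool shift_new(int idx, int target, bool possible, int *shift)
{
        *shift = 0;
        if (!possible)
                return false;
        *shift = target - idx;
        return true;
}

int main(void)
{
        int shift;

        /* idx == target: a legitimately zero shift looks like failure... */
        printf("old: %d (failure or success?)\n", shift_old(2, 2, true));
        /* ...while the new API reports success and a zero shift distinctly. */
        if (shift_new(2, 2, true, &shift))
                printf("new: valid, shift=%d\n", shift);
        return 0;
}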
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2e346645eb80d6bb8f97761c30aed6a512017e59..1e7873e40c9a16e922d4800e6dc41486eee23540 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2017,8 +2017,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
 
 	nmask = policy_nodemask(gfp, pol);
 	zl = policy_zonelist(gfp, pol, node);
-	mpol_cond_put(pol);
 	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+	mpol_cond_put(pol);
 out:
 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
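The mempolicy change above is a pure ordering fix: the policy reference was dropped before the allocation that still consumed the nodemask and zonelist derived from it, a potential use-after-free. The rule it restores is the standard refcounting discipline of releasing the reference only after the last use. A minimal sketch of that discipline (toy types, not the kernel's struct mempolicy):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for struct mempolicy. */
struct policy {
        int refcnt;
        int preferred_node;
};

static void policy_put(struct policy *pol)
{
        if (--pol->refcnt == 0) {
                printf("policy freed\n");
                free(pol);
        }
}

/* The allocation consults the policy, so it must still be alive here. */
static void alloc_pages_with(struct policy *pol)
{
        assert(pol->refcnt > 0);        /* would be use-after-free otherwise */
        printf("allocating on node %d\n", pol->preferred_node);
}

int main(void)
{
        struct policy *pol = malloc(sizeof(*pol));

        pol->refcnt = 1;
        pol->preferred_node = 0;
        /* Fixed ordering: use the object first, drop the reference last. */
        alloc_pages_with(pol);
        policy_put(pol);
        return 0;
}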
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d604d2596b7bed41b9748ee3242571b771db4d5e..f3e0c69a97b76997d9fa65cda0b7e1b1fb8fa29a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	unsigned int alloc_flags;
 	unsigned long did_some_progress;
-	enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+	enum compact_priority compact_priority;
 	enum compact_result compact_result;
-	int compaction_retries = 0;
-	int no_progress_loops = 0;
+	int compaction_retries;
+	int no_progress_loops;
 	unsigned long alloc_start = jiffies;
 	unsigned int stall_timeout = 10 * HZ;
+	unsigned int cpuset_mems_cookie;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
 		gfp_mask &= ~__GFP_ATOMIC;
 
+retry_cpuset:
+	compaction_retries = 0;
+	no_progress_loops = 0;
+	compact_priority = DEF_COMPACT_PRIORITY;
+	cpuset_mems_cookie = read_mems_allowed_begin();
+	/*
+	 * We need to recalculate the starting point for the zonelist iterator
+	 * because we might have used different nodemask in the fast path, or
+	 * there was a cpuset modification and we are retrying - otherwise we
+	 * could end up iterating over non-eligible zones endlessly.
+	 */
+	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+					ac->high_zoneidx, ac->nodemask);
+	if (!ac->preferred_zoneref->zone)
+		goto nopage;
+
+
 	/*
 	 * The fast path uses conservative alloc_flags to succeed only until
 	 * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3708,6 +3726,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 				 &compaction_retries))
 		goto retry;
 
+	/*
+	 * It's possible we raced with cpuset update so the OOM would be
+	 * premature (see below the nopage: label for full explanation).
+	 */
+	if (read_mems_allowed_retry(cpuset_mems_cookie))
+		goto retry_cpuset;
+
 	/* Reclaim has failed us, start killing things */
 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
 	if (page)
@@ -3720,6 +3745,16 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	}
 
 nopage:
+	/*
+	 * When updating a task's mems_allowed or mempolicy nodemask, it is
+	 * possible to race with parallel threads in such a way that our
+	 * allocation can fail while the mask is being updated. If we are about
+	 * to fail, check if the cpuset changed during allocation and if so,
+	 * retry.
+	 */
+	if (read_mems_allowed_retry(cpuset_mems_cookie))
+		goto retry_cpuset;
+
 	warn_alloc(gfp_mask,
 			"page allocation failure: order:%u", order);
 got_pg:
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 			struct zonelist *zonelist, nodemask_t *nodemask)
 {
 	struct page *page;
-	unsigned int cpuset_mems_cookie;
 	unsigned int alloc_flags = ALLOC_WMARK_LOW;
 	gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = {
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
 
-retry_cpuset:
-	cpuset_mems_cookie = read_mems_allowed_begin();
-
 	/* Dirty zone balancing only done in the fast path */
 	ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
@@ -3784,8 +3815,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	 */
 	ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
 					ac.high_zoneidx, ac.nodemask);
-	if (!ac.preferred_zoneref) {
+	if (!ac.preferred_zoneref->zone) {
 		page = NULL;
+		/*
+		 * This might be due to race with cpuset_current_mems_allowed
+		 * update, so make sure we retry with original nodemask in the
+		 * slow path.
+		 */
 		goto no_zone;
 	}
 
@@ -3794,6 +3830,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	if (likely(page))
 		goto out;
 
+no_zone:
 	/*
 	 * Runtime PM, block IO and its error handling path can deadlock
 	 * because I/O on the device might not complete.
@@ -3805,21 +3842,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	 * Restore the original nodemask if it was potentially replaced with
 	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
 	 */
-	if (cpusets_enabled())
+	if (unlikely(ac.nodemask != nodemask))
 		ac.nodemask = nodemask;
-	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-no_zone:
-	/*
-	 * When updating a task's mems_allowed, it is possible to race with
-	 * parallel threads in such a way that an allocation can fail while
-	 * the mask is being updated. If a page allocation is about to fail,
-	 * check if the cpuset changed during allocation and if so, retry.
-	 */
-	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
-		alloc_mask = gfp_mask;
-		goto retry_cpuset;
-	}
+	page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 		.zone = page_zone(pfn_to_page(start)),
 		.mode = MIGRATE_SYNC,
 		.ignore_skip_hint = true,
+		.gfp_mask = GFP_KERNEL,
 	};
 	INIT_LIST_HEAD(&cc.migratepages);
 
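The page_alloc rework above moves the read_mems_allowed_begin()/read_mems_allowed_retry() pair into the slowpath: take a sequence cookie before the attempt, and if the cpuset's nodemask changed underneath, restart instead of declaring a premature OOM. The underlying mechanism is a seqcount-style optimistic read. A simplified single-file model of that protocol (the kernel's versions read a cpuset seqcount and carry extra barriers; the names below mirror the kernel API but the bodies are a toy re-implementation):

#include <stdatomic.h>
#include <stdio.h>

/* Toy seqcount protecting a nodemask-like value. Odd seq = writer active. */
static atomic_uint seq;
static int mems_allowed = 1;

/* Writer: bump to odd, update, bump back to even. */
static void update_mems_allowed(int val)
{
        atomic_fetch_add(&seq, 1);
        mems_allowed = val;
        atomic_fetch_add(&seq, 1);
}

static unsigned int read_mems_allowed_begin(void)
{
        unsigned int s;

        while ((s = atomic_load(&seq)) & 1)
                ;       /* writer in progress, wait for an even value */
        return s;
}

static int read_mems_allowed_retry(unsigned int cookie)
{
        return atomic_load(&seq) != cookie;
}

int main(void)
{
        unsigned int cookie;
        int snapshot;

        do {
                cookie = read_mems_allowed_begin();
                snapshot = mems_allowed;        /* the "allocation attempt" */
        } while (read_mems_allowed_retry(cookie));
        printf("stable snapshot: %d\n", snapshot);
        update_mems_allowed(2);
        return 0;
}

The patch's key insight is that the retry must also recompute the zonelist starting point (ac->preferred_zoneref), because a stale cursor taken under the old nodemask could otherwise iterate only non-eligible zones forever.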
diff --git a/mm/slub.c b/mm/slub.c
index 067598a008493fabb68d48120a904943fff4e08c..7aa6f433f4de554d308e774d9e9b40507c6ab48a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 	return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
 			length, 1);
 	metadata_access_disable();
 }
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	       p, p - addr, get_freepointer(s, p));
 
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+			      s->red_left_pad);
 	else if (p > addr + 16)
-		print_section("Bytes b4 ", p - 16, 16);
+		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-	print_section("Object ", p, min_t(unsigned long, s->object_size,
-				PAGE_SIZE));
+	print_section(KERN_ERR, "Object ", p,
+		      min_t(unsigned long, s->object_size, PAGE_SIZE));
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p + s->object_size,
+		print_section(KERN_ERR, "Redzone ", p + s->object_size,
 			s->inuse - s->object_size);
 
 	if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
 	if (off != size_from_object(s))
 		/* Beginning of the filler is the free pointer */
-		print_section("Padding ", p + off, size_from_object(s) - off);
+		print_section(KERN_ERR, "Padding ", p + off,
+			      size_from_object(s) - off);
 
 	dump_stack();
 }
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		end--;
 
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-	print_section("Padding ", end - remainder, remainder);
+	print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
 	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section(KERN_INFO, "Object ", (void *)object,
 					s->object_size);
 
 		dump_stack();
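The slub.c refactor above threads a caller-chosen log level through print_section() instead of hard-coding KERN_ERR, so informational trace dumps no longer masquerade as errors. A stand-alone sketch of the same parameterization (toy hex dumper, not the kernel's print_hex_dump(); the "<3>"/"<6>" prefixes are the printk encodings of KERN_ERR and KERN_INFO):

#include <stdio.h>

/* Toy print_section(): the caller picks the severity prefix. */
static void print_section(const char *level, const char *text,
                          const unsigned char *addr, unsigned int length)
{
        unsigned int i;

        for (i = 0; i < length; i += 16) {
                unsigned int j, n = length - i < 16 ? length - i : 16;

                printf("%s%s%p: ", level, text, (const void *)(addr + i));
                for (j = 0; j < n; j++)
                        printf("%02x ", addr[i + j]);
                putchar('\n');
        }
}

int main(void)
{
        unsigned char obj[24] = { 0x6b, 0x6b, 0x6b, 0xa5 };

        print_section("<3>", "Object ", obj, sizeof(obj));      /* KERN_ERR */
        print_section("<6>", "Object ", obj, sizeof(obj));      /* KERN_INFO */
        return 0;
}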