diff --git a/.mailmap b/.mailmap
index df1baba43a64c7e3bab28b01ca66cbbe3e831f50..1ad68731fb47a9658733520c1982038cf6aaaa00 100644
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik
 Jens Axboe
 Jens Osterkamp
 John Stultz
+
+
+
+
+
 Juha Yrjola
 Juha Yrjola
 Juha Yrjola
diff --git a/CREDITS b/CREDITS
index 28ee1514b9deec0d19bca725b74ca6934d719f24..a80b66718f66550abee52ea2e13e1058917325c4 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3511,10 +3511,11 @@ S: MacGregor A.C.T 2615
 S: Australia
 
 N: Josh Triplett
-E: josh@freedesktop.org
-P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB
-D: rcutorture maintainer
+E: josh@joshtriplett.org
+P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C 1E67 0ED9 A3DF 8AFF 873D
+D: RCU and rcutorture
 D: lock annotations, finding and fixing lock bugs
+D: kernel tinification
 
 N: Winfried Trümper
 E: winni@xpilot.org
diff --git a/MAINTAINERS b/MAINTAINERS
index 86efa7e213c257e59f4285d30bad7555b259c566..95990dd2678c877bc2e0fba9405f435b15ac9f0a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7424,7 +7424,7 @@ S: Orphan
 F: drivers/net/wireless/ray*
 
 RCUTORTURE MODULE
-M: Josh Triplett <josh@freedesktop.org>
+M: Josh Triplett <josh@joshtriplett.org>
 M: "Paul E. McKenney"
 L: linux-kernel@vger.kernel.org
 S: Supported
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f3261c1182e2c799e2affaf7e49605015c..44341dc5b148301b6e185d2bbc4e5d0248b0a3c5 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
                         "desc %p not ACKed\n", tx_desc);
         }
 
+        if (ret == NULL) {
+                dev_dbg(bdma_chan->dchan.device->dev,
+                        "%s: unable to obtain tx descriptor\n", __func__);
+                goto err_out;
+        }
+
         i = bdma_chan->wr_count_next % bdma_chan->bd_num;
         if (i == bdma_chan->bd_num - 1) {
                 i = 0;
@@ -297,7 +303,7 @@
         tx_desc->txd.phys = bdma_chan->bd_phys +
                                 i * sizeof(struct tsi721_dma_desc);
         tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
         spin_unlock_bh(&bdma_chan->lock);
 
         return ret;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 255cd5cc07541034fd0f80e1d944ca775b6c78df..a23c096b30807c20db8d967dbb130f4842fa44e2 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 369f41a9412481029354034d4c6e0193fe132f56..23a088fec3c0a62e10f6ad149cb684f1b5de8364 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include <linux/hugetlb.h>
 
 #include
 #include
@@ -1619,6 +1620,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 #endif
         VMCOREINFO_NUMBER(PG_head_mask);
         VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+        VMCOREINFO_SYMBOL(free_huge_page);
         arch_crash_save_vmcoreinfo();
         update_vmcoreinfo_note();
 
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 7fa34f86e5bab4a0c99df15233e799b9aa8663de..948a7693748ed3d0be04fb022d5ac6a195903e3c 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -18,7 +18,7 @@
  * Copyright (C) IBM Corporation, 2005, 2006
  *
  * Authors: Paul E. McKenney
- *          Josh Triplett <josh@freedesktop.org>
+ *          Josh Triplett <josh@joshtriplett.org>
  *
  * See also: Documentation/RCU/torture.txt
  */
@@ -51,7 +51,7 @@
 #include
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney and Josh Triplett <josh@freedesktop.org>");
+MODULE_AUTHOR("Paul E. McKenney and Josh Triplett <josh@joshtriplett.org>");
 
 torture_param(int, fqs_duration, 0,
diff --git a/mm/filemap.c b/mm/filemap.c
index dafb06f70a09dd97b1fa690969a638f596714091..900edfaf6df5735a1979c866cd288c46c81e50ca 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1031,18 +1031,21 @@
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
+ * @radix_gfp_mask: gfp mask to use for radix tree node allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
- * PCG flags modify how the page is returned
+ * PCG flags modify how the page is returned.
  *
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is return locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *      @gfp_mask and added to the page cache and the VM's LRU
- *      list. The page is returned locked and with an increased
- *      refcount. Otherwise, %NULL is returned.
+ *      @cache_gfp_mask and added to the page cache and the VM's LRU
+ *      list. If radix tree nodes are allocated during page cache
+ *      insertion then @radix_gfp_mask is used. The page is returned
+ *      locked and with an increased refcount. Otherwise, %NULL is
+ *      returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9221c02ed9e2b8ab495e0b39482e2bcfa5ba9957..7a0a73d2fcff128850b32af9910a873d6fb384f5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
         return NULL;
 }
 
-static void free_huge_page(struct page *page)
+void free_huge_page(struct page *page)
 {
         /*
          * Can't pass hstate in here because it is called from the
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a2c7bcb0e6ebf10672f91b8117aef69d08b41ade..1f14a430c65691b5135c0752a5c9c151cbd2f0f9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
 {
         struct mem_cgroup_eventfd_list *ev;
 
+        spin_lock(&memcg_oom_lock);
+
         list_for_each_entry(ev, &memcg->oom_notify, list)
                 eventfd_signal(ev->eventfd, 1);
+
+        spin_unlock(&memcg_oom_lock);
         return 0;
 }
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 7211a73ba14d16155c1514c078b1e3c9d25e0b22..a013bc94ebbed4af3764e396b087475310ed5185 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -895,7 +895,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         struct page *hpage = *hpagep;
         struct page *ppage;
 
-        if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
+        /*
+         * Here we are interested only in user-mapped pages, so skip any
+         * other types of pages.
+         */
+        if (PageReserved(p) || PageSlab(p))
+                return SWAP_SUCCESS;
+        if (!(PageLRU(hpage) || PageHuge(p)))
                 return SWAP_SUCCESS;
 
         /*
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         if (!page_mapped(hpage))
                 return SWAP_SUCCESS;
 
-        if (PageKsm(p))
+        if (PageKsm(p)) {
+                pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
                 return SWAP_FAIL;
+        }
 
         if (PageSwapCache(p)) {
                 printk(KERN_ERR
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
          */
         if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
             != SWAP_SUCCESS) {
-                printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+                action_result(pfn, "unmapping failed", IGNORED);
                 res = -EBUSY;
                 goto out;
         }
diff --git a/mm/memory.c b/mm/memory.c
index 7e8d8205b6108fcf5b0a97aad3da36c9455b230f..8b44f765b64584a9a2e7c6f8873d6fcb6acb7726 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
         update_mmu_cache(vma, address, pte);
 }
 
-static unsigned long fault_around_bytes = 65536;
+static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
 
-/*
- * fault_around_pages() and fault_around_mask() round down fault_around_bytes
- * to nearest page order. It's what do_fault_around() expects to see.
- */
 static inline unsigned long fault_around_pages(void)
 {
-        return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+        return fault_around_bytes >> PAGE_SHIFT;
 }
 
 static inline unsigned long fault_around_mask(void)
 {
-        return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+        return ~(fault_around_bytes - 1) & PAGE_MASK;
 }
-
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
 {
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
         return 0;
 }
 
+/*
+ * fault_around_pages() and fault_around_mask() expects fault_around_bytes
+ * rounded down to nearest page order. It's what do_fault_around() expects to
+ * see.
+ */
 static int fault_around_bytes_set(void *data, u64 val)
 {
         if (val / PAGE_SIZE > PTRS_PER_PTE)
                 return -EINVAL;
-        fault_around_bytes = val;
+        if (val > PAGE_SIZE)
+                fault_around_bytes = rounddown_pow_of_two(val);
+        else
+                fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
         return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 518e2c3f4c75f0014a03817b4910cf4050a2f480..e0c943014eb74ce3d4cb4c021d6f896740386d6d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
         *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
         if (bdi_bg_thresh)
-                *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
-                                         background_thresh,
-                                         dirty_thresh);
+                *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+                                                        background_thresh,
+                                                        dirty_thresh) : 0;
 
         /*
          * In order to avoid the stacked BDI deadlock we need
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8bcfe3ae20cb85c7dd1e02a69a489e71d382521f..ef44ad736ca17f79606021439b89a9fba4455564 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2447,7 +2447,7 @@ static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
         int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-        const gfp_t wait = gfp_mask & __GFP_WAIT;
+        const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
         /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2456,20 +2456,20 @@
          * The caller may dip into page reserves a bit more if the caller
          * cannot run direct reclaim, or if the caller has realtime scheduling
          * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
-         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+         * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
          */
         alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-        if (!wait) {
+        if (atomic) {
                 /*
-                 * Not worth trying to allocate harder for
-                 * __GFP_NOMEMALLOC even if it can't schedule.
+                 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+                 * if it can't schedule.
                  */
-                if (!(gfp_mask & __GFP_NOMEMALLOC))
+                if (!(gfp_mask & __GFP_NOMEMALLOC))
                         alloc_flags |= ALLOC_HARDER;
                 /*
-                 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+                 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+                 * comment for __cpuset_node_allowed_softwall().
                  */
                 alloc_flags &= ~ALLOC_CPUSET;
         } else if (unlikely(rt_task(current)) && !in_interrupt())
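
Note (illustrative sketch, not part of the patch above): the mm/memory.c hunks keep fault_around_bytes stored as a power of two, so fault_around_pages() and fault_around_mask() reduce to a plain shift and mask instead of calling rounddown_pow_of_two() on every fault. A minimal userspace C model of that rounding logic follows; PAGE_SHIFT, PTRS_PER_PTE and the local rounddown_pow_of_two() helper are stand-in assumptions for a typical 4K-page configuration, not the kernel's own definitions.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PTRS_PER_PTE  512

/* stand-in for the kernel helper: round down to the nearest power of two */
static unsigned long rounddown_pow_of_two(unsigned long n)
{
        unsigned long r = 1;

        while (r <= n / 2)
                r <<= 1;
        return r;
}

static unsigned long fault_around_bytes = 65536;

/* models the new fault_around_bytes_set(): the stored value stays a power of two */
static int fault_around_bytes_set(uint64_t val)
{
        if (val / PAGE_SIZE > PTRS_PER_PTE)
                return -1;
        if (val > PAGE_SIZE)
                fault_around_bytes = rounddown_pow_of_two(val);
        else
                fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
        return 0;
}

/* with a power-of-two value these reduce to a shift and a mask */
static unsigned long fault_around_pages(void)
{
        return fault_around_bytes >> PAGE_SHIFT;
}

static unsigned long fault_around_mask(void)
{
        return ~(fault_around_bytes - 1) & PAGE_MASK;
}

int main(void)
{
        uint64_t vals[] = { 0, 4096, 10000, 65536, 100000 };
        unsigned int i;

        for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
                fault_around_bytes_set(vals[i]);
                printf("set %6llu -> bytes %6lu, pages %3lu, mask 0x%lx\n",
                       (unsigned long long)vals[i], fault_around_bytes,
                       fault_around_pages(), fault_around_mask());
        }
        return 0;
}

Running the sketch shows, for example, that a value of 10000 is stored as 8192 bytes (two pages), while 0 is clamped to PAGE_SIZE because rounddown_pow_of_two(0) is undefined, mirroring the comment in the patch.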