Commit 800f1ac4 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "17 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  MIPS: fix DMA contiguous allocation
  sh64: fix __NR_fgetxattr
  ocfs2: fix SGID not inherited issue
  mm/oom_kill.c: avoid attempting to kill init sharing same memory
  drivers/base/memory.c: prohibit offlining of memory blocks with missing sections
  tmpfs: fix shmem_evict_inode() warnings on i_blocks
  mm/hugetlb.c: fix resv map memory leak for placeholder entries
  mm: hugetlb: call huge_pte_alloc() only if ptep is null
  kernel: remove stop_machine() Kconfig dependency
  mm: kmemleak: mark kmemleak_init prototype as __init
  mm: fix kerneldoc on mem_cgroup_replace_page
  osd fs: __r4w_get_page rely on PageUptodate for uptodate
  MAINTAINERS: make Vladimir co-maintainer of the memory controller
  mm, vmstat: allow WQ concurrency to discover memory reclaim doesn't make any progress
  mm: fix swapped Movable and Reclaimable in /proc/pagetypeinfo
  memcg: fix memory.high target
  mm: hugetlb: fix hugepage memory leak caused by wrong reserve count
......@@ -2975,6 +2975,7 @@ F: kernel/cpuset.c
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
+M: Vladimir Davydov <vdavydov@virtuozzo.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
......
......@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
gfp = massage_gfp_flags(dev, gfp);
-if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
page = dma_alloc_from_contiguous(dev,
count, get_order(size));
if (!page)
......
......@@ -278,7 +278,7 @@
#define __NR_fsetxattr 256
#define __NR_getxattr 257
#define __NR_lgetxattr 258
-#define __NR_fgetxattr 269
+#define __NR_fgetxattr 259
#define __NR_listxattr 260
#define __NR_llistxattr 261
#define __NR_flistxattr 262
......
......@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
+/* Can't offline block with non-present sections */
+if (mem->section_count != sections_per_block)
+return -EINVAL;
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
......
......@@ -592,10 +592,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
}
unlock_page(page);
}
-if (PageDirty(page) || PageWriteback(page))
-*uptodate = true;
-else
-*uptodate = PageUptodate(page);
+*uptodate = PageUptodate(page);
EXOFS_DBGMSG2("index=0x%lx uptodate=%d\n", index, *uptodate);
return page;
} else {
......
......@@ -476,10 +476,7 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
}
unlock_page(page);
}
-if (PageDirty(page) || PageWriteback(page))
-*uptodate = true;
-else
-*uptodate = PageUptodate(page);
+*uptodate = PageUptodate(page);
dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate);
return page;
}
......
......@@ -367,13 +367,11 @@ static int ocfs2_mknod(struct inode *dir,
goto leave;
}
-status = posix_acl_create(dir, &mode, &default_acl, &acl);
+status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
if (status) {
mlog_errno(status);
goto leave;
}
-/* update inode->i_mode after mask with "umask". */
-inode->i_mode = mode;
handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
S_ISDIR(mode),
......
......@@ -25,7 +25,7 @@
#ifdef CONFIG_DEBUG_KMEMLEAK
-extern void kmemleak_init(void) __ref;
+extern void kmemleak_init(void) __init;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
......
......@@ -99,7 +99,7 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
* grabbing every spinlock (and more). So the "read" side to such a
* lock is anything which disables preemption.
*/
-#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
* stop_machine: freeze the machine on all CPUs and run this function
......@@ -118,7 +118,7 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus);
-#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
static inline int stop_machine(cpu_stop_fn_t fn, void *data,
const struct cpumask *cpus)
......@@ -137,5 +137,5 @@ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
return stop_machine(fn, data, cpus);
}
-#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_STOP_MACHINE */
......@@ -2030,13 +2030,6 @@ config INIT_ALL_POSSIBLE
it was better to provide this option than to break all the archs
and have several arch maintainers pursuing me down dark alleys.
-config STOP_MACHINE
-bool
-default y
-depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
-help
-Need stop_machine() primitive.
source "block/Kconfig"
config PREEMPT_NOTIFIERS
......
......@@ -531,7 +531,7 @@ static int __init cpu_stop_init(void)
}
early_initcall(cpu_stop_init);
-#ifdef CONFIG_STOP_MACHINE
+#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
......@@ -631,4 +631,4 @@ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
return ret ?: done.ret;
}
-#endif /* CONFIG_STOP_MACHINE */
+#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
......@@ -957,8 +957,9 @@ EXPORT_SYMBOL(congestion_wait);
* jiffies for either a BDI to exit congestion of the given @sync queue
* or a write to complete.
*
-* In the absence of zone congestion, cond_resched() is called to yield
-* the processor if necessary but otherwise does not sleep.
+* In the absence of zone congestion, a short sleep or a cond_resched is
+* performed to yield the processor and to allow other subsystems to make
+* a forward progress.
*
* The return value is 0 if the sleep is for the full timeout. Otherwise,
* it is the number of jiffies that were still remaining when the function
......@@ -978,7 +979,19 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
*/
if (atomic_read(&nr_wb_congested[sync]) == 0 ||
!test_bit(ZONE_CONGESTED, &zone->flags)) {
-cond_resched();
+/*
+* Memory allocation/reclaim might be called from a WQ
+* context and the current implementation of the WQ
+* concurrency control doesn't recognize that a particular
+* WQ is congested if the worker thread is looping without
+* ever sleeping. Therefore we have to do a short sleep
+* here rather than calling cond_resched().
+*/
+if (current->flags & PF_WQ_WORKER)
+schedule_timeout(1);
+else
+cond_resched();
/* In case we scheduled, work out time remaining */
ret = timeout - (jiffies - start);
......
......@@ -372,8 +372,10 @@ static long region_chg(struct resv_map *resv, long f, long t)
spin_unlock(&resv->lock);
trg = kmalloc(sizeof(*trg), GFP_KERNEL);
-if (!trg)
+if (!trg) {
+kfree(nrg);
return -ENOMEM;
+}
spin_lock(&resv->lock);
list_add(&trg->link, &resv->region_cache);
......@@ -483,8 +485,16 @@ static long region_del(struct resv_map *resv, long f, long t)
retry:
spin_lock(&resv->lock);
list_for_each_entry_safe(rg, trg, head, link) {
-if (rg->to <= f)
+/*
+* Skip regions before the range to be deleted. file_region
+* ranges are normally of the form [from, to). However, there
+* may be a "placeholder" entry in the map which is of the form
+* (from, to) with from == to. Check for placeholder entries
+* at the beginning of the range to be deleted.
+*/
+if (rg->to <= f && (rg->to != rg->from || rg->to != f))
continue;
if (rg->from >= t)
break;
......@@ -1886,7 +1896,10 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
if (!page)
goto out_uncharge_cgroup;
+if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
+SetPagePrivate(page);
+h->resv_huge_pages--;
+}
spin_lock(&hugetlb_lock);
list_move(&page->lru, &h->hugepage_activelist);
/* Fall through */
......@@ -3693,12 +3706,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
+} else {
+ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+if (!ptep)
+return VM_FAULT_OOM;
}
-ptep = huge_pte_alloc(mm, address, huge_page_size(h));
-if (!ptep)
-return VM_FAULT_OOM;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
......
......@@ -2128,7 +2128,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
*/
do {
if (page_counter_read(&memcg->memory) > memcg->high) {
-current->memcg_nr_pages_over_high += nr_pages;
+current->memcg_nr_pages_over_high += batch;
set_notify_resume(current);
break;
}
......@@ -5512,11 +5512,11 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
* mem_cgroup_replace_page - migrate a charge to another page
* @oldpage: currently charged page
* @newpage: page to transfer the charge to
-* @lrucare: either or both pages might be on the LRU already
*
* Migrate the charge from @oldpage to @newpage.
*
* Both pages must be locked, @newpage->mapping must be set up.
+* Either or both pages might be on the LRU already.
*/
void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
{
......
......@@ -608,6 +608,8 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
continue;
if (unlikely(p->flags & PF_KTHREAD))
continue;
+if (is_global_init(p))
+continue;
if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
continue;
......
......@@ -3647,8 +3647,9 @@ static void show_migration_types(unsigned char type)
{
static const char types[MIGRATE_TYPES] = {
[MIGRATE_UNMOVABLE] = 'U',
-[MIGRATE_RECLAIMABLE] = 'E',
[MIGRATE_MOVABLE] = 'M',
+[MIGRATE_RECLAIMABLE] = 'E',
[MIGRATE_HIGHATOMIC] = 'H',
#ifdef CONFIG_CMA
[MIGRATE_CMA] = 'C',
#endif
......
......@@ -843,14 +843,14 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
list_add_tail(&info->swaplist, &shmem_swaplist);
if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
-swap_shmem_alloc(swap);
-shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
spin_lock(&info->lock);
-info->swapped++;
shmem_recalc_inode(inode);
+info->swapped++;
spin_unlock(&info->lock);
+swap_shmem_alloc(swap);
+shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
......@@ -1078,7 +1078,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
-goto failed;
+goto unlock;
}
if (page && sgp == SGP_WRITE)
......@@ -1246,11 +1246,15 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
/* Perhaps the file has been truncated since we checked */
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+if (alloced) {
+ClearPageDirty(page);
+delete_from_page_cache(page);
+spin_lock(&info->lock);
+shmem_recalc_inode(inode);
+spin_unlock(&info->lock);
+}
error = -EINVAL;
-if (alloced)
-goto trunc;
-else
-goto failed;
+goto unlock;
}
*pagep = page;
return 0;
......@@ -1258,23 +1262,13 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
/*
* Error recovery.
*/
-trunc:
-info = SHMEM_I(inode);
-ClearPageDirty(page);
-delete_from_page_cache(page);
-spin_lock(&info->lock);
-info->alloced--;
-inode->i_blocks -= BLOCKS_PER_PAGE;
-spin_unlock(&info->lock);
decused:
-sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
shmem_unacct_blocks(info->flags, 1);
failed:
-if (swap.val && error != -EINVAL &&
-!shmem_confirm_swap(mapping, index, swap))
+if (swap.val && !shmem_confirm_swap(mapping, index, swap))
error = -EEXIST;
unlock:
if (page) {
......
......@@ -921,8 +921,8 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
#ifdef CONFIG_PROC_FS
static char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Reclaimable",
"Movable",
"Reclaimable",
"HighAtomic",
#ifdef CONFIG_CMA
"CMA",
......@@ -1379,6 +1379,7 @@ static const struct file_operations proc_vmstat_file_operations = {
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
+static struct workqueue_struct *vmstat_wq;
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
static cpumask_var_t cpu_stat_off;
......@@ -1391,7 +1392,7 @@ static void vmstat_update(struct work_struct *w)
* to occur in the future. Keep on running the
* update worker thread.
*/
-schedule_delayed_work_on(smp_processor_id(),
+queue_delayed_work_on(smp_processor_id(), vmstat_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
} else {
......@@ -1460,7 +1461,7 @@ static void vmstat_shepherd(struct work_struct *w)
if (need_update(cpu) &&
cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
-schedule_delayed_work_on(cpu,
+queue_delayed_work_on(cpu, vmstat_wq,
&per_cpu(vmstat_work, cpu), 0);
put_online_cpus();
......@@ -1549,6 +1550,7 @@ static int __init setup_vmstat(void)
start_shepherd_timer();
cpu_notifier_register_done();
+vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
#endif
#ifdef CONFIG_PROC_FS
proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
......