提交 1170532b 编写于 作者: J Joe Perches 提交者: Linus Torvalds

mm: convert printk(KERN_<LEVEL> to pr_<level>

Most of the mm subsystem uses pr_<level> so make it consistent.

Miscellanea:

 - Realign arguments
 - Add missing newline to format
 - kmemleak-test.c has a "kmemleak: " prefix added to the
   "Kmemleak testing" logging message via pr_fmt
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Tejun Heo <tj@kernel.org>	[percpu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 756a025f
...@@ -1026,8 +1026,8 @@ int pdflush_proc_obsolete(struct ctl_table *table, int write, ...@@ -1026,8 +1026,8 @@ int pdflush_proc_obsolete(struct ctl_table *table, int write,
if (copy_to_user(buffer, kbuf, sizeof(kbuf))) if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
return -EFAULT; return -EFAULT;
printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n", pr_warn_once("%s exported in /proc is scheduled for removal\n",
table->procname); table->procname);
*lenp = 2; *lenp = 2;
*ppos += *lenp; *ppos += *lenp;
......
...@@ -50,8 +50,7 @@ early_param("bootmem_debug", bootmem_debug_setup); ...@@ -50,8 +50,7 @@ early_param("bootmem_debug", bootmem_debug_setup);
#define bdebug(fmt, args...) ({ \ #define bdebug(fmt, args...) ({ \
if (unlikely(bootmem_debug)) \ if (unlikely(bootmem_debug)) \
printk(KERN_INFO \ pr_info("bootmem::%s " fmt, \
"bootmem::%s " fmt, \
__func__, ## args); \ __func__, ## args); \
}) })
...@@ -680,7 +679,7 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, ...@@ -680,7 +679,7 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
/* /*
* Whoops, we cannot satisfy the allocation request. * Whoops, we cannot satisfy the allocation request.
*/ */
printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); pr_alert("bootmem alloc of %lu bytes failed!\n", size);
panic("Out of memory"); panic("Out of memory");
return NULL; return NULL;
} }
...@@ -755,7 +754,7 @@ void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, ...@@ -755,7 +754,7 @@ void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
if (ptr) if (ptr)
return ptr; return ptr;
printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); pr_alert("bootmem alloc of %lu bytes failed!\n", size);
panic("Out of memory"); panic("Out of memory");
return NULL; return NULL;
} }
......
...@@ -294,8 +294,7 @@ void dma_pool_destroy(struct dma_pool *pool) ...@@ -294,8 +294,7 @@ void dma_pool_destroy(struct dma_pool *pool)
"dma_pool_destroy %s, %p busy\n", "dma_pool_destroy %s, %p busy\n",
pool->name, page->vaddr); pool->name, page->vaddr);
else else
printk(KERN_ERR pr_err("dma_pool_destroy %s, %p busy\n",
"dma_pool_destroy %s, %p busy\n",
pool->name, page->vaddr); pool->name, page->vaddr);
/* leak the still-in-use consistent memory */ /* leak the still-in-use consistent memory */
list_del(&page->page_list); list_del(&page->page_list);
...@@ -424,7 +423,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) ...@@ -424,7 +423,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
"dma_pool_free %s, %p/%lx (bad dma)\n", "dma_pool_free %s, %p/%lx (bad dma)\n",
pool->name, vaddr, (unsigned long)dma); pool->name, vaddr, (unsigned long)dma);
else else
printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n", pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
pool->name, vaddr, (unsigned long)dma); pool->name, vaddr, (unsigned long)dma);
return; return;
} }
...@@ -438,8 +437,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) ...@@ -438,8 +437,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
"dma_pool_free %s, %p (bad vaddr)/%Lx\n", "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
pool->name, vaddr, (unsigned long long)dma); pool->name, vaddr, (unsigned long long)dma);
else else
printk(KERN_ERR pr_err("dma_pool_free %s, %p (bad vaddr)/%Lx\n",
"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
pool->name, vaddr, (unsigned long long)dma); pool->name, vaddr, (unsigned long long)dma);
return; return;
} }
...@@ -455,8 +453,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) ...@@ -455,8 +453,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n", dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
pool->name, (unsigned long long)dma); pool->name, (unsigned long long)dma);
else else
printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n", pr_err("dma_pool_free %s, dma %Lx already free\n",
pool->name, (unsigned long long)dma); pool->name, (unsigned long long)dma);
return; return;
} }
} }
......
...@@ -386,7 +386,7 @@ extern int mminit_loglevel; ...@@ -386,7 +386,7 @@ extern int mminit_loglevel;
do { \ do { \
if (level < mminit_loglevel) { \ if (level < mminit_loglevel) { \
if (level <= MMINIT_WARNING) \ if (level <= MMINIT_WARNING) \
printk(KERN_WARNING "mminit::" prefix " " fmt, ##arg); \ pr_warn("mminit::" prefix " " fmt, ##arg); \
else \ else \
printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \ printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
} \ } \
......
...@@ -20,7 +20,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) ...@@ -20,7 +20,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order); shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
if (!shadow) { if (!shadow) {
if (printk_ratelimit()) if (printk_ratelimit())
printk(KERN_ERR "kmemcheck: failed to allocate shadow bitmap\n"); pr_err("kmemcheck: failed to allocate shadow bitmap\n");
return; return;
} }
......
...@@ -49,7 +49,7 @@ static int __init kmemleak_test_init(void) ...@@ -49,7 +49,7 @@ static int __init kmemleak_test_init(void)
struct test_node *elem; struct test_node *elem;
int i; int i;
printk(KERN_INFO "Kmemleak testing\n"); pr_info("Kmemleak testing\n");
/* make some orphan objects */ /* make some orphan objects */
pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); pr_info("kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL));
......
...@@ -184,9 +184,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, ...@@ -184,9 +184,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
struct siginfo si; struct siginfo si;
int ret; int ret;
printk(KERN_ERR pr_err("MCE %#lx: Killing %s:%d due to hardware memory corruption\n",
"MCE %#lx: Killing %s:%d due to hardware memory corruption\n", pfn, t->comm, t->pid);
pfn, t->comm, t->pid);
si.si_signo = SIGBUS; si.si_signo = SIGBUS;
si.si_errno = 0; si.si_errno = 0;
si.si_addr = (void *)addr; si.si_addr = (void *)addr;
...@@ -209,8 +208,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno, ...@@ -209,8 +208,8 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */
} }
if (ret < 0) if (ret < 0)
printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n", pr_info("MCE: Error sending signal to %s:%d: %d\n",
t->comm, t->pid, ret); t->comm, t->pid, ret);
return ret; return ret;
} }
...@@ -290,8 +289,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, ...@@ -290,8 +289,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
} else { } else {
tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
if (!tk) { if (!tk) {
printk(KERN_ERR pr_err("MCE: Out of memory while machine check handling\n");
"MCE: Out of memory while machine check handling\n");
return; return;
} }
} }
...@@ -336,9 +334,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, ...@@ -336,9 +334,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
* signal and then access the memory. Just kill it. * signal and then access the memory. Just kill it.
*/ */
if (fail || tk->addr_valid == 0) { if (fail || tk->addr_valid == 0) {
printk(KERN_ERR pr_err("MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
"MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", pfn, tk->tsk->comm, tk->tsk->pid);
pfn, tk->tsk->comm, tk->tsk->pid);
force_sig(SIGKILL, tk->tsk); force_sig(SIGKILL, tk->tsk);
} }
...@@ -350,9 +347,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno, ...@@ -350,9 +347,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
*/ */
else if (kill_proc(tk->tsk, tk->addr, trapno, else if (kill_proc(tk->tsk, tk->addr, trapno,
pfn, page, flags) < 0) pfn, page, flags) < 0)
printk(KERN_ERR pr_err("MCE %#lx: Cannot send advisory machine check signal to %s:%d\n",
"MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", pfn, tk->tsk->comm, tk->tsk->pid);
pfn, tk->tsk->comm, tk->tsk->pid);
} }
put_task_struct(tk->tsk); put_task_struct(tk->tsk);
kfree(tk); kfree(tk);
...@@ -563,7 +559,7 @@ static int me_kernel(struct page *p, unsigned long pfn) ...@@ -563,7 +559,7 @@ static int me_kernel(struct page *p, unsigned long pfn)
*/ */
static int me_unknown(struct page *p, unsigned long pfn) static int me_unknown(struct page *p, unsigned long pfn)
{ {
printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); pr_err("MCE %#lx: Unknown page state\n", pfn);
return MF_FAILED; return MF_FAILED;
} }
...@@ -608,8 +604,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn) ...@@ -608,8 +604,8 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
if (mapping->a_ops->error_remove_page) { if (mapping->a_ops->error_remove_page) {
err = mapping->a_ops->error_remove_page(mapping, p); err = mapping->a_ops->error_remove_page(mapping, p);
if (err != 0) { if (err != 0) {
printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", pr_info("MCE %#lx: Failed to punch page: %d\n",
pfn, err); pfn, err);
} else if (page_has_private(p) && } else if (page_has_private(p) &&
!try_to_release_page(p, GFP_NOIO)) { !try_to_release_page(p, GFP_NOIO)) {
pr_info("MCE %#lx: failed to release buffers\n", pfn); pr_info("MCE %#lx: failed to release buffers\n", pfn);
...@@ -624,8 +620,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn) ...@@ -624,8 +620,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn)
if (invalidate_inode_page(p)) if (invalidate_inode_page(p))
ret = MF_RECOVERED; ret = MF_RECOVERED;
else else
printk(KERN_INFO "MCE %#lx: Failed to invalidate\n", pr_info("MCE %#lx: Failed to invalidate\n", pfn);
pfn);
} }
return ret; return ret;
} }
...@@ -854,8 +849,7 @@ static int page_action(struct page_state *ps, struct page *p, ...@@ -854,8 +849,7 @@ static int page_action(struct page_state *ps, struct page *p,
if (ps->action == me_swapcache_dirty && result == MF_DELAYED) if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
count--; count--;
if (count != 0) { if (count != 0) {
printk(KERN_ERR pr_err("MCE %#lx: %s still referenced by %d users\n",
"MCE %#lx: %s still referenced by %d users\n",
pfn, action_page_types[ps->type], count); pfn, action_page_types[ps->type], count);
result = MF_FAILED; result = MF_FAILED;
} }
...@@ -934,8 +928,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -934,8 +928,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
} }
if (PageSwapCache(p)) { if (PageSwapCache(p)) {
printk(KERN_ERR pr_err("MCE %#lx: keeping poisoned page in swap cache\n", pfn);
"MCE %#lx: keeping poisoned page in swap cache\n", pfn);
ttu |= TTU_IGNORE_HWPOISON; ttu |= TTU_IGNORE_HWPOISON;
} }
...@@ -953,8 +946,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -953,8 +946,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
} else { } else {
kill = 0; kill = 0;
ttu |= TTU_IGNORE_HWPOISON; ttu |= TTU_IGNORE_HWPOISON;
printk(KERN_INFO pr_info("MCE %#lx: corrupted page was clean: dropped without side effects\n",
"MCE %#lx: corrupted page was clean: dropped without side effects\n",
pfn); pfn);
} }
} }
...@@ -972,8 +964,8 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, ...@@ -972,8 +964,8 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
ret = try_to_unmap(hpage, ttu); ret = try_to_unmap(hpage, ttu);
if (ret != SWAP_SUCCESS) if (ret != SWAP_SUCCESS)
printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", pr_err("MCE %#lx: failed to unmap page (mapcount=%d)\n",
pfn, page_mapcount(hpage)); pfn, page_mapcount(hpage));
/* /*
* Now that the dirty bit has been propagated to the * Now that the dirty bit has been propagated to the
...@@ -1040,16 +1032,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags) ...@@ -1040,16 +1032,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
panic("Memory failure from trap %d on page %lx", trapno, pfn); panic("Memory failure from trap %d on page %lx", trapno, pfn);
if (!pfn_valid(pfn)) { if (!pfn_valid(pfn)) {
printk(KERN_ERR pr_err("MCE %#lx: memory outside kernel control\n", pfn);
"MCE %#lx: memory outside kernel control\n",
pfn);
return -ENXIO; return -ENXIO;
} }
p = pfn_to_page(pfn); p = pfn_to_page(pfn);
orig_head = hpage = compound_head(p); orig_head = hpage = compound_head(p);
if (TestSetPageHWPoison(p)) { if (TestSetPageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn); pr_err("MCE %#lx: already hardware poisoned\n", pfn);
return 0; return 0;
} }
...@@ -1180,7 +1170,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags) ...@@ -1180,7 +1170,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* unpoison always clear PG_hwpoison inside page lock * unpoison always clear PG_hwpoison inside page lock
*/ */
if (!PageHWPoison(p)) { if (!PageHWPoison(p)) {
printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); pr_err("MCE %#lx: just unpoisoned\n", pfn);
num_poisoned_pages_sub(nr_pages); num_poisoned_pages_sub(nr_pages);
unlock_page(hpage); unlock_page(hpage);
put_hwpoison_page(hpage); put_hwpoison_page(hpage);
......
...@@ -660,9 +660,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, ...@@ -660,9 +660,8 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
return; return;
} }
if (nr_unshown) { if (nr_unshown) {
printk(KERN_ALERT pr_alert("BUG: Bad page map: %lu messages suppressed\n",
"BUG: Bad page map: %lu messages suppressed\n", nr_unshown);
nr_unshown);
nr_unshown = 0; nr_unshown = 0;
} }
nr_shown = 0; nr_shown = 0;
...@@ -673,15 +672,13 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, ...@@ -673,15 +672,13 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
index = linear_page_index(vma, addr); index = linear_page_index(vma, addr);
printk(KERN_ALERT pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
"BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", current->comm,
current->comm, (long long)pte_val(pte), (long long)pmd_val(*pmd));
(long long)pte_val(pte), (long long)pmd_val(*pmd));
if (page) if (page)
dump_page(page, "bad pte"); dump_page(page, "bad pte");
printk(KERN_ALERT pr_alert("addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n", (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
/* /*
* Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
*/ */
......
...@@ -55,13 +55,12 @@ void __init mminit_verify_zonelist(void) ...@@ -55,13 +55,12 @@ void __init mminit_verify_zonelist(void)
/* Iterate the zonelist */ /* Iterate the zonelist */
for_each_zone_zonelist(zone, z, zonelist, zoneid) { for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
printk(KERN_CONT "%d:%s ", pr_cont("%d:%s ", zone->node, zone->name);
zone->node, zone->name);
#else #else
printk(KERN_CONT "0:%s ", zone->name); pr_cont("0:%s ", zone->name);
#endif /* CONFIG_NUMA */ #endif /* CONFIG_NUMA */
} }
printk(KERN_CONT "\n"); pr_cont("\n");
} }
} }
} }
......
...@@ -288,7 +288,7 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align, ...@@ -288,7 +288,7 @@ static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
/* /*
* Whoops, we cannot satisfy the allocation request. * Whoops, we cannot satisfy the allocation request.
*/ */
printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); pr_alert("bootmem alloc of %lu bytes failed!\n", size);
panic("Out of memory"); panic("Out of memory");
return NULL; return NULL;
} }
...@@ -360,7 +360,7 @@ static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, ...@@ -360,7 +360,7 @@ static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
if (ptr) if (ptr)
return ptr; return ptr;
printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size); pr_alert("bootmem alloc of %lu bytes failed!\n", size);
panic("Out of memory"); panic("Out of memory");
return NULL; return NULL;
} }
......
...@@ -544,11 +544,11 @@ static int __init debug_guardpage_minorder_setup(char *buf) ...@@ -544,11 +544,11 @@ static int __init debug_guardpage_minorder_setup(char *buf)
unsigned long res; unsigned long res;
if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
printk(KERN_ERR "Bad debug_guardpage_minorder value\n"); pr_err("Bad debug_guardpage_minorder value\n");
return 0; return 0;
} }
_debug_guardpage_minorder = res; _debug_guardpage_minorder = res;
printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res); pr_info("Setting debug_guardpage_minorder to %lu\n", res);
return 0; return 0;
} }
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup); __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
...@@ -4073,8 +4073,7 @@ static int __parse_numa_zonelist_order(char *s) ...@@ -4073,8 +4073,7 @@ static int __parse_numa_zonelist_order(char *s)
} else if (*s == 'z' || *s == 'Z') { } else if (*s == 'z' || *s == 'Z') {
user_zonelist_order = ZONELIST_ORDER_ZONE; user_zonelist_order = ZONELIST_ORDER_ZONE;
} else { } else {
printk(KERN_WARNING pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
"Ignoring invalid numa_zonelist_order value: %s\n", s);
return -EINVAL; return -EINVAL;
} }
return 0; return 0;
...@@ -5458,8 +5457,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) ...@@ -5458,8 +5457,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
" %s zone: %lu pages used for memmap\n", " %s zone: %lu pages used for memmap\n",
zone_names[j], memmap_pages); zone_names[j], memmap_pages);
} else } else
printk(KERN_WARNING pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
" %s zone: %lu pages exceeds freesize %lu\n",
zone_names[j], memmap_pages, freesize); zone_names[j], memmap_pages, freesize);
} }
...@@ -5667,8 +5665,7 @@ static unsigned long __init find_min_pfn_for_node(int nid) ...@@ -5667,8 +5665,7 @@ static unsigned long __init find_min_pfn_for_node(int nid)
min_pfn = min(min_pfn, start_pfn); min_pfn = min(min_pfn, start_pfn);
if (min_pfn == ULONG_MAX) { if (min_pfn == ULONG_MAX) {
printk(KERN_WARNING pr_warn("Could not find start_pfn for node %d\n", nid);
"Could not find start_pfn for node %d\n", nid);
return 0; return 0;
} }
...@@ -6686,11 +6683,8 @@ void *__init alloc_large_system_hash(const char *tablename, ...@@ -6686,11 +6683,8 @@ void *__init alloc_large_system_hash(const char *tablename,
if (!table) if (!table)
panic("Failed to allocate %s hash table\n", tablename); panic("Failed to allocate %s hash table\n", tablename);
printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
tablename, tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
(1UL << log2qty),
ilog2(size) - PAGE_SHIFT,
size);
if (_hash_shift) if (_hash_shift)
*_hash_shift = log2qty; *_hash_shift = log2qty;
...@@ -7191,8 +7185,8 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) ...@@ -7191,8 +7185,8 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
BUG_ON(!PageBuddy(page)); BUG_ON(!PageBuddy(page));
order = page_order(page); order = page_order(page);
#ifdef CONFIG_DEBUG_VM #ifdef CONFIG_DEBUG_VM
printk(KERN_INFO "remove from free list %lx %d %lx\n", pr_info("remove from free list %lx %d %lx\n",
pfn, 1 << order, end_pfn); pfn, 1 << order, end_pfn);
#endif #endif
list_del(&page->lru); list_del(&page->lru);
rmv_page_order(page); rmv_page_order(page);
......
...@@ -56,10 +56,10 @@ void end_swap_bio_write(struct bio *bio) ...@@ -56,10 +56,10 @@ void end_swap_bio_write(struct bio *bio)
* Also clear PG_reclaim to avoid rotate_reclaimable_page() * Also clear PG_reclaim to avoid rotate_reclaimable_page()
*/ */
set_page_dirty(page); set_page_dirty(page);
printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n", pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
imajor(bio->bi_bdev->bd_inode), imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode), iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
ClearPageReclaim(page); ClearPageReclaim(page);
} }
end_page_writeback(page); end_page_writeback(page);
...@@ -73,10 +73,10 @@ static void end_swap_bio_read(struct bio *bio) ...@@ -73,10 +73,10 @@ static void end_swap_bio_read(struct bio *bio)
if (bio->bi_error) { if (bio->bi_error) {
SetPageError(page); SetPageError(page);
ClearPageUptodate(page); ClearPageUptodate(page);
printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
imajor(bio->bi_bdev->bd_inode), imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode), iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
goto out; goto out;
} }
...@@ -216,7 +216,7 @@ int generic_swapfile_activate(struct swap_info_struct *sis, ...@@ -216,7 +216,7 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
out: out:
return ret; return ret;
bad_bmap: bad_bmap:
printk(KERN_ERR "swapon: swapfile has holes\n"); pr_err("swapon: swapfile has holes\n");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -290,8 +290,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, ...@@ -290,8 +290,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
*/ */
set_page_dirty(page); set_page_dirty(page);
ClearPageReclaim(page); ClearPageReclaim(page);
pr_err_ratelimited("Write error on dio swapfile (%Lu)\n", pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
page_file_offset(page)); page_file_offset(page));
} }
end_page_writeback(page); end_page_writeback(page);
return ret; return ret;
......
...@@ -95,7 +95,7 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) ...@@ -95,7 +95,7 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
/* all units must be in a single group */ /* all units must be in a single group */
if (ai->nr_groups != 1) { if (ai->nr_groups != 1) {
printk(KERN_CRIT "percpu: can't handle more than one groups\n"); pr_crit("percpu: can't handle more than one groups\n");
return -EINVAL; return -EINVAL;
} }
...@@ -103,8 +103,8 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai) ...@@ -103,8 +103,8 @@ static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
alloc_pages = roundup_pow_of_two(nr_pages); alloc_pages = roundup_pow_of_two(nr_pages);
if (alloc_pages > nr_pages) if (alloc_pages > nr_pages)
printk(KERN_WARNING "percpu: wasting %zu pages per chunk\n", pr_warn("percpu: wasting %zu pages per chunk\n",
alloc_pages - nr_pages); alloc_pages - nr_pages);
return 0; return 0;
} }
...@@ -1449,20 +1449,20 @@ static void pcpu_dump_alloc_info(const char *lvl, ...@@ -1449,20 +1449,20 @@ static void pcpu_dump_alloc_info(const char *lvl,
for (alloc_end += gi->nr_units / upa; for (alloc_end += gi->nr_units / upa;
alloc < alloc_end; alloc++) { alloc < alloc_end; alloc++) {
if (!(alloc % apl)) { if (!(alloc % apl)) {
printk(KERN_CONT "\n"); pr_cont("\n");
printk("%spcpu-alloc: ", lvl); printk("%spcpu-alloc: ", lvl);
} }
printk(KERN_CONT "[%0*d] ", group_width, group); pr_cont("[%0*d] ", group_width, group);
for (unit_end += upa; unit < unit_end; unit++) for (unit_end += upa; unit < unit_end; unit++)
if (gi->cpu_map[unit] != NR_CPUS) if (gi->cpu_map[unit] != NR_CPUS)
printk(KERN_CONT "%0*d ", cpu_width, pr_cont("%0*d ",
gi->cpu_map[unit]); cpu_width, gi->cpu_map[unit]);
else else
printk(KERN_CONT "%s ", empty_str); pr_cont("%s ", empty_str);
} }
} }
printk(KERN_CONT "\n"); pr_cont("\n");
} }
/** /**
......
...@@ -2823,9 +2823,8 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, ...@@ -2823,9 +2823,8 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
if ((value = strchr(this_char,'=')) != NULL) { if ((value = strchr(this_char,'=')) != NULL) {
*value++ = 0; *value++ = 0;
} else { } else {
printk(KERN_ERR pr_err("tmpfs: No value for mount option '%s'\n",
"tmpfs: No value for mount option '%s'\n", this_char);
this_char);
goto error; goto error;
} }
...@@ -2880,8 +2879,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, ...@@ -2880,8 +2879,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
if (mpol_parse_str(value, &mpol)) if (mpol_parse_str(value, &mpol))
goto bad_val; goto bad_val;
} else { } else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n", pr_err("tmpfs: Bad mount option %s\n", this_char);
this_char);
goto error; goto error;
} }
} }
...@@ -2889,7 +2887,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, ...@@ -2889,7 +2887,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
return 0; return 0;
bad_val: bad_val:
printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
value, this_char); value, this_char);
error: error:
mpol_put(mpol); mpol_put(mpol);
...@@ -3286,14 +3284,14 @@ int __init shmem_init(void) ...@@ -3286,14 +3284,14 @@ int __init shmem_init(void)
error = register_filesystem(&shmem_fs_type); error = register_filesystem(&shmem_fs_type);
if (error) { if (error) {
printk(KERN_ERR "Could not register tmpfs\n"); pr_err("Could not register tmpfs\n");
goto out2; goto out2;
} }
shm_mnt = kern_mount(&shmem_fs_type); shm_mnt = kern_mount(&shmem_fs_type);
if (IS_ERR(shm_mnt)) { if (IS_ERR(shm_mnt)) {
error = PTR_ERR(shm_mnt); error = PTR_ERR(shm_mnt);
printk(KERN_ERR "Could not kern_mount tmpfs\n"); pr_err("Could not kern_mount tmpfs\n");
goto out1; goto out1;
} }
return 0; return 0;
......
...@@ -474,7 +474,7 @@ static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, ...@@ -474,7 +474,7 @@ static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
static void __slab_error(const char *function, struct kmem_cache *cachep, static void __slab_error(const char *function, struct kmem_cache *cachep,
char *msg) char *msg)
{ {
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", pr_err("slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg); function, cachep->name, msg);
dump_stack(); dump_stack();
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
...@@ -1553,7 +1553,7 @@ static void dump_line(char *data, int offset, int limit) ...@@ -1553,7 +1553,7 @@ static void dump_line(char *data, int offset, int limit)
unsigned char error = 0; unsigned char error = 0;
int bad_count = 0; int bad_count = 0;
printk(KERN_ERR "%03x: ", offset); pr_err("%03x: ", offset);
for (i = 0; i < limit; i++) { for (i = 0; i < limit; i++) {
if (data[offset + i] != POISON_FREE) { if (data[offset + i] != POISON_FREE) {
error = data[offset + i]; error = data[offset + i];
...@@ -1566,11 +1566,11 @@ static void dump_line(char *data, int offset, int limit) ...@@ -1566,11 +1566,11 @@ static void dump_line(char *data, int offset, int limit)
if (bad_count == 1) { if (bad_count == 1) {
error ^= POISON_FREE; error ^= POISON_FREE;
if (!(error & (error - 1))) { if (!(error & (error - 1))) {
printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n"); pr_err("Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86 #ifdef CONFIG_X86
printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n"); pr_err("Run memtest86+ or a similar memory test tool.\n");
#else #else
printk(KERN_ERR "Run a memory test tool.\n"); pr_err("Run a memory test tool.\n");
#endif #endif
} }
} }
...@@ -1585,13 +1585,13 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) ...@@ -1585,13 +1585,13 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
char *realobj; char *realobj;
if (cachep->flags & SLAB_RED_ZONE) { if (cachep->flags & SLAB_RED_ZONE) {
printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", pr_err("Redzone: 0x%llx/0x%llx\n",
*dbg_redzone1(cachep, objp), *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp)); *dbg_redzone2(cachep, objp));
} }
if (cachep->flags & SLAB_STORE_USER) { if (cachep->flags & SLAB_STORE_USER) {
printk(KERN_ERR "Last user: [<%p>](%pSR)\n", pr_err("Last user: [<%p>](%pSR)\n",
*dbg_userword(cachep, objp), *dbg_userword(cachep, objp),
*dbg_userword(cachep, objp)); *dbg_userword(cachep, objp));
} }
...@@ -1627,9 +1627,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) ...@@ -1627,9 +1627,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
/* Mismatch ! */ /* Mismatch ! */
/* Print header */ /* Print header */
if (lines == 0) { if (lines == 0) {
printk(KERN_ERR pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
"Slab corruption (%s): %s start=%p, len=%d\n", print_tainted(), cachep->name,
print_tainted(), cachep->name, realobj, size); realobj, size);
print_objinfo(cachep, objp, 0); print_objinfo(cachep, objp, 0);
} }
/* Hexdump the affected line */ /* Hexdump the affected line */
...@@ -1656,15 +1656,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) ...@@ -1656,15 +1656,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
if (objnr) { if (objnr) {
objp = index_to_obj(cachep, page, objnr - 1); objp = index_to_obj(cachep, page, objnr - 1);
realobj = (char *)objp + obj_offset(cachep); realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n", pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
realobj, size);
print_objinfo(cachep, objp, 2); print_objinfo(cachep, objp, 2);
} }
if (objnr + 1 < cachep->num) { if (objnr + 1 < cachep->num) {
objp = index_to_obj(cachep, page, objnr + 1); objp = index_to_obj(cachep, page, objnr + 1);
realobj = (char *)objp + obj_offset(cachep); realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n", pr_err("Next obj: start=%p, len=%d\n", realobj, size);
realobj, size);
print_objinfo(cachep, objp, 2); print_objinfo(cachep, objp, 2);
} }
} }
...@@ -2463,7 +2461,7 @@ static void slab_put_obj(struct kmem_cache *cachep, ...@@ -2463,7 +2461,7 @@ static void slab_put_obj(struct kmem_cache *cachep,
/* Verify double free bug */ /* Verify double free bug */
for (i = page->active; i < cachep->num; i++) { for (i = page->active; i < cachep->num; i++) {
if (get_free_obj(page, i) == objnr) { if (get_free_obj(page, i) == objnr) {
printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n", pr_err("slab: double free detected in cache '%s', objp %p\n",
cachep->name, objp); cachep->name, objp);
BUG(); BUG();
} }
...@@ -2583,7 +2581,7 @@ static int cache_grow(struct kmem_cache *cachep, ...@@ -2583,7 +2581,7 @@ static int cache_grow(struct kmem_cache *cachep,
static void kfree_debugcheck(const void *objp) static void kfree_debugcheck(const void *objp)
{ {
if (!virt_addr_valid(objp)) { if (!virt_addr_valid(objp)) {
printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", pr_err("kfree_debugcheck: out of range ptr %lxh\n",
(unsigned long)objp); (unsigned long)objp);
BUG(); BUG();
} }
...@@ -2607,8 +2605,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) ...@@ -2607,8 +2605,8 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
else else
slab_error(cache, "memory outside object was overwritten"); slab_error(cache, "memory outside object was overwritten");
printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
obj, redzone1, redzone2); obj, redzone1, redzone2);
} }
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
...@@ -2896,10 +2894,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, ...@@ -2896,10 +2894,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
*dbg_redzone2(cachep, objp) != RED_INACTIVE) { *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
slab_error(cachep, "double free, or memory outside object was overwritten"); slab_error(cachep, "double free, or memory outside object was overwritten");
printk(KERN_ERR pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
"%p: redzone 1:0x%llx, redzone 2:0x%llx\n", objp, *dbg_redzone1(cachep, objp),
objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp));
*dbg_redzone2(cachep, objp));
} }
*dbg_redzone1(cachep, objp) = RED_ACTIVE; *dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE; *dbg_redzone2(cachep, objp) = RED_ACTIVE;
...@@ -2910,7 +2907,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, ...@@ -2910,7 +2907,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
cachep->ctor(objp); cachep->ctor(objp);
if (ARCH_SLAB_MINALIGN && if (ARCH_SLAB_MINALIGN &&
((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
objp, (int)ARCH_SLAB_MINALIGN); objp, (int)ARCH_SLAB_MINALIGN);
} }
return objp; return objp;
...@@ -3837,7 +3834,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -3837,7 +3834,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
skip_setup: skip_setup:
err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
if (err) if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", pr_err("enable_cpucache failed for %s, error %d\n",
cachep->name, -err); cachep->name, -err);
return err; return err;
} }
...@@ -3993,7 +3990,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) ...@@ -3993,7 +3990,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
name = cachep->name; name = cachep->name;
if (error) if (error)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error); pr_err("slab: cache %s error: %s\n", name, error);
sinfo->active_objs = active_objs; sinfo->active_objs = active_objs;
sinfo->num_objs = num_objs; sinfo->num_objs = num_objs;
......
...@@ -442,7 +442,7 @@ kmem_cache_create(const char *name, size_t size, size_t align, ...@@ -442,7 +442,7 @@ kmem_cache_create(const char *name, size_t size, size_t align,
panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
name, err); name, err);
else { else {
printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d", pr_warn("kmem_cache_create(%s) failed with error %d\n",
name, err); name, err);
dump_stack(); dump_stack();
} }
......
...@@ -166,8 +166,8 @@ void __meminit vmemmap_verify(pte_t *pte, int node, ...@@ -166,8 +166,8 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
int actual_node = early_pfn_to_nid(pfn); int actual_node = early_pfn_to_nid(pfn);
if (node_distance(actual_node, node) > LOCAL_DISTANCE) if (node_distance(actual_node, node) > LOCAL_DISTANCE)
printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n", pr_warn("[%lx-%lx] potential offnode page_structs\n",
start, end - 1); start, end - 1);
} }
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
...@@ -292,7 +292,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, ...@@ -292,7 +292,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
if (map_map[pnum]) if (map_map[pnum])
continue; continue;
ms = __nr_to_section(pnum); ms = __nr_to_section(pnum);
printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
__func__); __func__);
ms->section_mem_map = 0; ms->section_mem_map = 0;
} }
......
...@@ -313,9 +313,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap) ...@@ -313,9 +313,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr)); usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
if (usemap_nid != nid) { if (usemap_nid != nid) {
printk(KERN_INFO pr_info("node %d must be removed before remove section %ld\n",
"node %d must be removed before remove section %ld\n", nid, usemap_snr);
nid, usemap_snr);
return; return;
} }
/* /*
...@@ -324,10 +323,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap) ...@@ -324,10 +323,8 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
* gather other removable sections for dynamic partitioning. * gather other removable sections for dynamic partitioning.
* Just notify un-removable section's number here. * Just notify un-removable section's number here.
*/ */
printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr, pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
pgdat_snr, nid); usemap_snr, pgdat_snr, nid);
printk(KERN_CONT
" have a circular dependency on usemap and pgdat allocations\n");
} }
#else #else
static unsigned long * __init static unsigned long * __init
...@@ -355,7 +352,7 @@ static void __init sparse_early_usemaps_alloc_node(void *data, ...@@ -355,7 +352,7 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid), usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
size * usemap_count); size * usemap_count);
if (!usemap) { if (!usemap) {
printk(KERN_WARNING "%s: allocation failed\n", __func__); pr_warn("%s: allocation failed\n", __func__);
return; return;
} }
...@@ -428,7 +425,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, ...@@ -428,7 +425,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
if (map_map[pnum]) if (map_map[pnum])
continue; continue;
ms = __nr_to_section(pnum); ms = __nr_to_section(pnum);
printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
__func__); __func__);
ms->section_mem_map = 0; ms->section_mem_map = 0;
} }
...@@ -456,7 +453,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) ...@@ -456,7 +453,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
if (map) if (map)
return map; return map;
printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n", pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
__func__); __func__);
ms->section_mem_map = 0; ms->section_mem_map = 0;
return NULL; return NULL;
......
...@@ -174,9 +174,8 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) ...@@ -174,9 +174,8 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
return 0; return 0;
nomem: nomem:
printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); pr_info("couldn't allocate enough memory for swap_cgroup\n");
printk(KERN_INFO pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
"swap_cgroup can be disabled by swapaccount=0 boot option\n");
return -ENOMEM; return -ENOMEM;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册