Commit 8be04b93 authored by Joe Perches, committed by Jiri Kosina

treewide: Add __GFP_NOWARN to k.alloc calls with v.alloc fallbacks

Don't emit OOM warnings when k.alloc calls fail and there is
a v.alloc fallback immediately afterwards.

Converted a kmalloc/vmalloc with memset to kzalloc/vzalloc.
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Parent 85dbe706
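Every call site touched below follows the same shape: try the slab allocator first, but with __GFP_NOWARN so a failed attempt does not dump an allocation-failure warning into the log, since the vmalloc-based fallback on the next line is expected to satisfy the request anyway. The following is a minimal sketch of that shape; the helper names alloc_big_buffer()/free_big_buffer() and the GFP_KERNEL context are illustrative assumptions, not part of this patch:

	#include <linux/mm.h>        /* is_vmalloc_addr() */
	#include <linux/slab.h>      /* kzalloc(), kfree() */
	#include <linux/vmalloc.h>   /* vzalloc(), vfree() */

	static void *alloc_big_buffer(size_t size)
	{
		/* Physically contiguous attempt; stay quiet if it fails. */
		void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

		if (!p)
			/* Fall back to zeroed, virtually contiguous memory. */
			p = vzalloc(size);
		return p;
	}

	static void free_big_buffer(void *p)
	{
		/* Release with whichever allocator actually provided the memory. */
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}

The free side checks is_vmalloc_addr() to pick the matching deallocator, mirroring how the corresponding free helpers in the drivers below distinguish the two cases.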
@@ -393,7 +393,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
 	 * we must not block on IO to ourselves.
 	 * Context is receiver thread or dmsetup. */
 	bytes = sizeof(struct page *)*want;
-	new_pages = kzalloc(bytes, GFP_NOIO);
+	new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
 	if (!new_pages) {
 		new_pages = __vmalloc(bytes,
 				GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
@@ -222,7 +222,8 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 	queue->small_page = NULL;

 	/* allocate queue page pointers */
-	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
+				     GFP_KERNEL | __GFP_NOWARN);
 	if (!queue->queue_pages) {
 		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
 		if (!queue->queue_pages) {
@@ -1157,7 +1157,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
  */
 void *cxgb_alloc_mem(unsigned long size)
 {
-	void *p = kzalloc(size, GFP_KERNEL);
+	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

 	if (!p)
 		p = vzalloc(size);
@@ -1133,7 +1133,7 @@ out:	release_firmware(fw);
  */
 void *t4_alloc_mem(size_t size)
 {
-	void *p = kzalloc(size, GFP_KERNEL);
+	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

 	if (!p)
 		p = vzalloc(size);
@@ -658,11 +658,11 @@ static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
 static inline void *cxgbi_alloc_big_mem(unsigned int size,
 					gfp_t gfp)
 {
-	void *p = kmalloc(size, gfp);
+	void *p = kzalloc(size, gfp | __GFP_NOWARN);
+
 	if (!p)
-		p = vmalloc(size);
-	if (p)
-		memset(p, 0, size);
+		p = vzalloc(size);
+
 	return p;
 }
@@ -219,7 +219,7 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
 	len = PAGE_ALIGN(len);

 	if (p->buf == p->inline_buf) {
-		tmp_buf = kmalloc(len, GFP_NOFS);
+		tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN);
 		if (!tmp_buf) {
 			tmp_buf = vmalloc(len);
 			if (!tmp_buf)
@@ -162,7 +162,7 @@ void *ext4_kvmalloc(size_t size, gfp_t flags)
 {
 	void *ret;

-	ret = kmalloc(size, flags);
+	ret = kmalloc(size, flags | __GFP_NOWARN);
 	if (!ret)
 		ret = __vmalloc(size, flags, PAGE_KERNEL);
 	return ret;
@@ -172,7 +172,7 @@ void *ext4_kvzalloc(size_t size, gfp_t flags)
 {
 	void *ret;

-	ret = kzalloc(size, flags);
+	ret = kzalloc(size, flags | __GFP_NOWARN);
 	if (!ret)
 		ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
 	return ret;
@@ -1859,7 +1859,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

-	ht = kzalloc(size, GFP_NOFS);
+	ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
 	if (ht == NULL)
 		ht = vzalloc(size);
 	if (!ht)
@@ -438,7 +438,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 	if (mask != q->tab_mask) {
 		struct sk_buff **ntab;

-		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
+			       GFP_KERNEL | __GFP_NOWARN);
 		if (!ntab)
 			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
 		if (!ntab)