Commit ec3f64fc authored by David Rientjes, committed by Linus Torvalds

mm: remove gfp mask from pcpu_get_vm_areas

pcpu_get_vm_areas() only uses GFP_KERNEL allocations, so remove the gfp_t
formal and use the mask internally.
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent e5a5623b
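
In short, callers drop the mask argument and the function always allocates with GFP_KERNEL internally. A caller-side sketch of the change, paraphrased from the pcpu_create_chunk() hunk below (surrounding context trimmed):

    /* Before this commit: the caller passed the mask explicitly. */
    vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                            pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);

    /* After this commit: no gfp_t formal; GFP_KERNEL is applied inside. */
    vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                            pcpu_nr_groups, pcpu_atom_size);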
@@ -117,7 +117,7 @@ extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 #ifdef CONFIG_SMP
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                                      const size_t *sizes, int nr_vms,
-                                     size_t align, gfp_t gfp_mask);
+                                     size_t align);
 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
 #endif
...
@@ -421,7 +421,7 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
                 return NULL;
 
         vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
-                                pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
+                                pcpu_nr_groups, pcpu_atom_size);
         if (!vms) {
                 pcpu_free_chunk(chunk);
                 return NULL;
...
@@ -2196,17 +2196,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  *          vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas.  This function allocates
- * congruent vmalloc areas for it.  These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes.  To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes.  To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
  *
  * Despite its complicated look, this allocator is rather simple.  It
  * does everything top-down and scans areas from the end looking for
@@ -2217,7 +2216,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                                      const size_t *sizes, int nr_vms,
-                                     size_t align, gfp_t gfp_mask)
+                                     size_t align)
 {
         const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
         const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2227,8 +2226,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
         unsigned long base, start, end, last_end;
         bool purged = false;
 
-        gfp_mask &= GFP_RECLAIM_MASK;
-
         /* verify parameters and allocate data structures */
         BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
         for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2261,14 +2258,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                 return NULL;
         }
 
-        vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-        vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+        vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+        vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
         if (!vas || !vms)
                 goto err_free;
 
         for (area = 0; area < nr_vms; area++) {
-                vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-                vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+                vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+                vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
                 if (!vas[area] || !vms[area])
                         goto err_free;
         }
...
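
To make the "congruent vm areas" wording in the kernel-doc above concrete: the function places all requested areas relative to one shared base near the top of the vmalloc range, so each returned area sits exactly at its requested offset from that base. Below is a minimal, hypothetical caller sketch; the two-group layout, sizes and alignment are invented for illustration and are not taken from this commit, and error handling is trimmed:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Sketch: two groups that must stay exactly 1 GiB apart, 2 MiB each. */
    static struct vm_struct **example_congruent_areas(void)
    {
            static const unsigned long offsets[] = { 0, 1UL << 30 };
            static const size_t sizes[] = { 2UL << 20, 2UL << 20 };
            struct vm_struct **vms;

            /* After this commit the mask is implicit: GFP_KERNEL is used inside. */
            vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
            if (!vms)
                    return NULL;

            /*
             * Congruence: both areas hang off one base, so
             * vms[i]->addr == base + offsets[i] for each group i.  This is
             * what lets the percpu allocator derive unit addresses from a
             * single base pointer plus fixed per-group offsets.
             */
            return vms;
    }

When the areas are no longer needed they are released together with pcpu_free_vm_areas(vms, 2).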