Commit eb804533 authored by Christoph Hellwig, committed by Dan Williams

mm: merge vmem_altmap_alloc into altmap_alloc_block_buf

There is no clear separation between the two, so merge them.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Parent a8fc357b
@@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
 }
 
 /**
- * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
- * @altmap - reserved page pool for the allocation
- * @nr_pfns - size (in pages) of the allocation
+ * altmap_alloc_block_buf - allocate pages from the device page map
+ * @altmap: device page map
+ * @size: size (in bytes) of the allocation
  *
- * Allocations are aligned to the size of the request
+ * Allocations are aligned to the size of the request.
  */
-static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
-		unsigned long nr_pfns)
-{
-	unsigned long pfn = vmem_altmap_next_pfn(altmap);
-	unsigned long nr_align;
-
-	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
-	nr_align = ALIGN(pfn, nr_align) - pfn;
-
-	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
-		return ULONG_MAX;
-	altmap->alloc += nr_pfns;
-	altmap->align += nr_align;
-	return pfn + nr_align;
-}
-
 void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
-	unsigned long pfn, nr_pfns;
-	void *ptr;
+	unsigned long pfn, nr_pfns, nr_align;
 
 	if (size & ~PAGE_MASK) {
 		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
@@ -141,16 +124,20 @@ void * __meminit altmap_alloc_block_buf(unsigned long size,
 		return NULL;
 	}
 
+	pfn = vmem_altmap_next_pfn(altmap);
 	nr_pfns = size >> PAGE_SHIFT;
-	pfn = vmem_altmap_alloc(altmap, nr_pfns);
-	if (pfn < ULONG_MAX)
-		ptr = __va(__pfn_to_phys(pfn));
-	else
-		ptr = NULL;
+	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
+	nr_align = ALIGN(pfn, nr_align) - pfn;
+	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
+		return NULL;
+
+	altmap->alloc += nr_pfns;
+	altmap->align += nr_align;
+	pfn += nr_align;
+
 	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
 			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
-
-	return ptr;
+	return __va(__pfn_to_phys(pfn));
 }
 
 void __meminit vmemmap_verify(pte_t *pte, int node,
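For readers following the hunks, below is a small self-contained sketch of the allocation flow that altmap_alloc_block_buf ends up with after the merge: take the next free pfn, derive the alignment from the request size, and charge both the pages and the alignment padding to the altmap. Everything in it (the toy_altmap struct, the helper names, the PAGE_SHIFT value, and the sample numbers) is an illustrative userspace stand-in, not the kernel's code or its struct vmem_altmap definition.

/*
 * Minimal userspace model of the merged allocation path, for illustration
 * only.  The struct layout, helper names, and constants are assumptions
 * made for this sketch, not the kernel's definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Toy stand-in for the accounting fields of struct vmem_altmap. */
struct toy_altmap {
	unsigned long base_pfn;		/* first pfn backing the map */
	unsigned long reserve;		/* pages reserved up front */
	unsigned long free;		/* pages available for allocation */
	unsigned long align;		/* pages lost to alignment padding */
	unsigned long alloc;		/* pages handed out so far */
};

static unsigned long toy_next_pfn(const struct toy_altmap *a)
{
	return a->base_pfn + a->reserve + a->alloc + a->align;
}

static unsigned long toy_nr_free(const struct toy_altmap *a)
{
	unsigned long used = a->alloc + a->align;

	return a->free > used ? a->free - used : 0;
}

/*
 * Same flow as the merged function: align the request to the largest power
 * of two dividing its size, charge pages plus padding, and hand back the
 * first usable pfn (or -1 when it does not fit).
 */
static long toy_alloc_block(struct toy_altmap *a, unsigned long size)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK)		/* must be a multiple of PAGE_SIZE */
		return -1;

	pfn = toy_next_pfn(a);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = nr_pfns & -nr_pfns;		/* lowest set bit, like 1UL << find_first_bit() */
	nr_align = ALIGN(pfn, nr_align) - pfn;	/* padding needed to reach that alignment */
	if (nr_pfns + nr_align > toy_nr_free(a))
		return -1;

	a->alloc += nr_pfns;
	a->align += nr_align;
	return (long)(pfn + nr_align);
}

int main(void)
{
	struct toy_altmap a = { .base_pfn = 0x1000, .free = 1024 };

	printf("16 pages -> pfn %#lx\n", (unsigned long)toy_alloc_block(&a, 16 * PAGE_SIZE));
	printf("64 pages -> pfn %#lx\n", (unsigned long)toy_alloc_block(&a, 64 * PAGE_SIZE));
	printf("alloc %lu, align %lu, free left %lu\n", a.alloc, a.align, toy_nr_free(&a));
	return 0;
}

In this run the second, 64-page request starts on a 64-page boundary (with the skipped pages accounted in align), which is the "Allocations are aligned to the size of the request" behaviour the comment in the first hunk keeps.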