diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c9a7e9e1414f344c9dfd515600e3e4378bf61d81..6b6985f15d02a9da0f24f06a341f43e30246e82f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -227,7 +227,7 @@ config GENERIC_CSUM
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
-config ZONE_DMA
+config ZONE_DMA32
 	def_bool y
 
 config HAVE_GENERIC_GUP
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 6840426bbe77f357722611a28a0abd7ca43ddd7c..0d641875b20e9760c203ae56f21ccd6bd6d0bfe2 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -95,9 +95,9 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			  dma_addr_t *dma_handle, gfp_t flags,
 			  unsigned long attrs)
 {
-	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		flags |= GFP_DMA;
+		flags |= GFP_DMA32;
 	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
 		struct page *page;
 		void *addr;
@@ -397,7 +397,7 @@ static int __init atomic_pool_init(void)
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
 						 pool_size_order, GFP_KERNEL);
 	else
-		page = alloc_pages(GFP_DMA, pool_size_order);
+		page = alloc_pages(GFP_DMA32, pool_size_order);
 
 	if (page) {
 		int ret;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 00e7b900ca4193e83dfa7de7dd506984afe90bce..8f03276443c91f0d0ab2b34f2ba172d6e8f20561 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 /*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */
@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
 	max_zone_pfns[ZONE_NORMAL] = max;
 
 	free_area_init_nodes(max_zone_pfns);
@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = PFN_DOWN(arm64_dma_phys_limit);
-	zone_size[ZONE_DMA] = max_dma - min;
+	zone_size[ZONE_DMA32] = max_dma - min;
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma;
 
@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		if (start >= max)
 			continue;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 		if (start < max_dma) {
 			unsigned long dma_end = min(end, max_dma);
-			zhole_size[ZONE_DMA] -= dma_end - start;
+			zhole_size[ZONE_DMA32] -= dma_end - start;
 		}
 #endif
 		if (end > max_dma) {
@@ -467,7 +467,7 @@ void __init arm64_memblock_init(void)
 	early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
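
Note: the hunks above change two separable decisions: which GFP flag __dma_alloc_coherent() ORs in for a device whose coherent mask covers at most 32 bits, and how max_zone_dma_phys() computes the ZONE_DMA32 ceiling. The following stand-alone C sketch models both rules in user space so they can be sanity-checked outside the kernel. It is a minimal mock-up, not kernel API: struct fake_device, pick_gfp() and zone_dma32_limit() are hypothetical names that only mirror the shape of the patched logic.

/* dma32_zone_sketch.c — user-space model of the two decision rules
 * this patch touches.  All names here are hypothetical mock-ups. */
#include <inttypes.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define SZ_4G		(1ULL << 32)

struct fake_device {
	const char *name;
	uint64_t coherent_dma_mask;
};

/* Mirrors the test in __dma_alloc_coherent(): a coherent mask of
 * 32 bits or less means the buffer must come from ZONE_DMA32, so
 * GFP_DMA32 is ORed into the allocation flags. */
static const char *pick_gfp(const struct fake_device *dev)
{
	return dev->coherent_dma_mask <= DMA_BIT_MASK(32)
		? "GFP_DMA32" : "GFP_KERNEL";
}

/* Mirrors max_zone_dma_phys(): the ZONE_DMA32 ceiling is 4 GiB above
 * the 4 GiB-aligned start of RAM, clamped to the end of RAM.  The
 * offset term is the "DMA offset" case from the comment in init.c:
 * when RAM itself starts above 4G, the boundary shifts up with it. */
static uint64_t zone_dma32_limit(uint64_t ram_start, uint64_t ram_end)
{
	uint64_t offset = ram_start & ~DMA_BIT_MASK(32);
	uint64_t limit = offset + SZ_4G;

	return limit < ram_end ? limit : ram_end;
}

int main(void)
{
	const struct fake_device devs[] = {
		{ "32-bit NIC",  DMA_BIT_MASK(32) },
		{ "64-bit NVMe", DMA_BIT_MASK(64) },
	};

	for (int i = 0; i < 2; i++)
		printf("%-12s -> %s\n", devs[i].name, pick_gfp(&devs[i]));

	/* RAM at 2 GiB: the zone ends at the plain 4 GiB boundary. */
	printf("limit = %#" PRIx64 "\n",
	       zone_dma32_limit(0x80000000ULL, 0x880000000ULL));
	/* RAM starting above 4 GiB: the boundary shifts up with it. */
	printf("limit = %#" PRIx64 "\n",
	       zone_dma32_limit(0x8080000000ULL, 0x8880000000ULL));
	return 0;
}

Built with a plain "cc dma32_zone_sketch.c", this prints GFP_DMA32 for the 32-bit device, GFP_KERNEL for the 64-bit one, and limits 0x100000000 and 0x8100000000 for the two memory layouts, matching what zone_sizes_init() would carve out as ZONE_DMA32 in each case.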