From 806e33fff465a3c1133173b094017962c9bdbd62 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 13 Sep 2012 19:04:01 +0100
Subject: arm64: mm: register zone holes with page allocator

Ensure that we register any holes with the page allocator so that the
corresponding memmap is freed correctly.

Signed-off-by: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/init.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 31428cadcf1..5f719ba949b 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -71,12 +71,12 @@ early_param("initrd", early_initrd);
 
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
-	unsigned long zone_size[MAX_NR_ZONES];
+	struct memblock_region *reg;
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
 	unsigned long max_dma32 = min;
 
 	memset(zone_size, 0, sizeof(zone_size));
 
-	zone_size[0] = max - min;
 #ifdef CONFIG_ZONE_DMA32
 	/* 4GB maximum for 32-bit only capable devices */
 	max_dma32 = min(max, MAX_DMA32_PFN);
@@ -84,7 +84,28 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma32;
 
-	free_area_init(zone_size);
+	memcpy(zhole_size, zone_size, sizeof(zhole_size));
+
+	for_each_memblock(memory, reg) {
+		unsigned long start = memblock_region_memory_base_pfn(reg);
+		unsigned long end = memblock_region_memory_end_pfn(reg);
+
+		if (start >= max)
+			continue;
+#ifdef CONFIG_ZONE_DMA32
+		if (start < max_dma32) {
+			unsigned long dma_end = min(end, max_dma32);
+			zhole_size[ZONE_DMA32] -= dma_end - start;
+		}
+#endif
+		if (end > max_dma32) {
+			unsigned long normal_end = min(end, max);
+			unsigned long normal_start = max(start, max_dma32);
+			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
+		}
+	}
+
+	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
-- 
cgit v1.2.3
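
The hole accounting added above can be read in isolation: each zone's hole
count starts out as the full zone span and is then reduced by every memblock
region (or the part of one) that falls inside that zone, so whatever remains
is genuinely unbacked address space. Below is a minimal, self-contained
userspace sketch of that arithmetic, not the kernel code: the region array,
the PFN values, the MIN/MAX macros and the two-zone split are all made up for
illustration.

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Hypothetical stand-in for a memblock memory region, in PFNs. */
struct region {
	unsigned long start;	/* first page frame of the region */
	unsigned long end;	/* one past the last page frame */
};

int main(void)
{
	/* Made-up layout: RAM at PFNs 0x1000-0x5000 and 0x9000-0x20000. */
	struct region mem[] = {
		{ 0x1000,  0x5000 },
		{ 0x9000, 0x20000 },
	};
	unsigned long min = 0x1000, max = 0x20000;
	unsigned long max_dma32 = 0x10000;	/* pretend 32-bit DMA limit */
	unsigned long dma32_hole, normal_hole;
	unsigned int i;

	/* Start by assuming each zone is entirely a hole... */
	dma32_hole  = max_dma32 - min;
	normal_hole = max - max_dma32;

	/* ...then subtract every piece of real memory that lands in it. */
	for (i = 0; i < sizeof(mem) / sizeof(mem[0]); i++) {
		unsigned long start = mem[i].start, end = mem[i].end;

		if (start >= max)
			continue;
		if (start < max_dma32)
			dma32_hole -= MIN(end, max_dma32) - start;
		if (end > max_dma32)
			normal_hole -= MIN(end, max) - MAX(start, max_dma32);
	}

	/* Prints 0x4000 (the 0x5000-0x9000 gap) and 0 respectively. */
	printf("DMA32 hole:  0x%lx pages\n", dma32_hole);
	printf("NORMAL hole: 0x%lx pages\n", normal_hole);
	return 0;
}

The patch does the same per-region subtraction with for_each_memblock() and
then hands the resulting zhole_size[] to free_area_init_node(), which is what
allows the memmap for those holes to be freed correctly.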