Diffstat (limited to 'arch/arm64/mm/init.c')
-rw-r--r--	arch/arm64/mm/init.c	548
1 file changed, 210 insertions, 338 deletions
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b65dffdfb201..4b4651ee47f2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -21,19 +21,22 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
+#include <linux/hugetlb.h>
+#include <linux/acpi_iort.h>
+#include <linux/kmemleak.h>
#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
@@ -41,8 +44,7 @@
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
-
-#define ARM64_ZONE_DMA_BITS 30
+#include <asm/xen/swiotlb-xen.h>
/*
* We need to be able to catch inadvertent references to memstart_addr
@@ -53,22 +55,67 @@
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
-s64 physvirt_offset __ro_after_init;
-EXPORT_SYMBOL(physvirt_offset);
-
-struct page *vmemmap __ro_after_init;
-EXPORT_SYMBOL(vmemmap);
-
/*
- * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
- * memory as some devices, namely the Raspberry Pi 4, have peripherals with
- * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
- * bit addressable memory area.
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory;
+ * otherwise it is empty.
+ *
+ * Memory reservation for the crash kernel is either done early or deferred,
+ * depending on the DMA memory zone configs (ZONE_DMA/ZONE_DMA32):
+ *
+ * In the absence of ZONE_DMA configs, arm64_dma_phys_limit is initialized
+ * here instead of in max_zone_phys(). This allows early reservation of
+ * crash kernel memory, which depends on arm64_dma_phys_limit.
+ * Reserving crash kernel memory early allows the linear map to be built
+ * from block mappings (greater than page granularity) for all the memory
+ * bank ranges. In this scheme a comparatively quicker boot is observed.
+ *
+ * If the ZONE_DMA configs are defined, crash kernel memory reservation
+ * is delayed until the DMA zone memory range sizes have been initialized
+ * in zone_sizes_init(). The deferral is necessary to steer clear of the
+ * DMA zone memory range and avoid overlapping allocations. As a result,
+ * the crash kernel boundaries are not known when the bank memory ranges
+ * are mapped, so the crash kernel range cannot be excluded from block
+ * mappings and page-granularity mappings are created for the entire
+ * memory range instead. Hence a slightly slower boot is observed.
+ *
+ * Note: Page-granularity mappings are necessary for the crash kernel memory
+ * range so that its size can be shrunk via the /sys/kernel/kexec_crash_size
+ * interface.
*/
-phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
+#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
+phys_addr_t __ro_after_init arm64_dma_phys_limit;
+#else
+phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
+#endif
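Given the policy spelled out in the comment above, the defer_reserve_crashkernel() helper used later in this file boils down to roughly the following check (a sketch reconstructed from that comment, not a quote of the asm/memory.h header):

/* Sketch: defer crashkernel reservation whenever a DMA zone may exist. */
static inline bool defer_reserve_crashkernel(void)
{
	return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
}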
+
+/* Current arm64 boot protocol requires 2MB alignment */
+#define CRASH_ALIGN SZ_2M
+
+#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+
+static int __init reserve_crashkernel_low(unsigned long long low_size)
+{
+ unsigned long long low_base;
+
+ low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
+ if (!low_base) {
+ pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
+ return -ENOMEM;
+ }
+
+ pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
+ low_base, low_base + low_size, low_size >> 20);
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+ insert_resource(&iomem_resource, &crashk_low_res);
+
+ return 0;
+}
-#ifdef CONFIG_KEXEC_CORE
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -79,206 +126,127 @@ static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
static void __init reserve_crashkernel(void)
{
unsigned long long crash_base, crash_size;
+ unsigned long long crash_low_size = 0;
+ unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
+ char *cmdline = boot_command_line;
int ret;
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
- &crash_size, &crash_base);
- /* no crashkernel= or invalid value specified */
- if (ret || !crash_size)
+ if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- crash_size = PAGE_ALIGN(crash_size);
-
- if (crash_base == 0) {
- /* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
- crash_size, SZ_2M);
- if (crash_base == 0) {
- pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
- crash_size);
- return;
- }
- } else {
- /* User specifies base address explicitly. */
- if (!memblock_is_region_memory(crash_base, crash_size)) {
- pr_warn("cannot reserve crashkernel: region is not memory\n");
+ /* crashkernel=X[@offset] */
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
+ &crash_size, &crash_base);
+ if (ret == -ENOENT) {
+ ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
+ if (ret || !crash_size)
return;
- }
- if (memblock_is_region_reserved(crash_base, crash_size)) {
- pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
+ /*
+ * crashkernel=Y,low may be specified or not, but an invalid value
+ * is not allowed.
+ */
+ ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
+ if (ret && (ret != -ENOENT))
return;
- }
- if (!IS_ALIGNED(crash_base, SZ_2M)) {
- pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
- return;
- }
+ crash_max = CRASH_ADDR_HIGH_MAX;
+ } else if (ret || !crash_size) {
+ /* The specified value is invalid */
+ return;
}
- memblock_reserve(crash_base, crash_size);
-
- pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- crash_base, crash_base + crash_size, crash_size >> 20);
-
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
-}
-#else
-static void __init reserve_crashkernel(void)
-{
-}
-#endif /* CONFIG_KEXEC_CORE */
-
-#ifdef CONFIG_CRASH_DUMP
-static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
- const char *uname, int depth, void *data)
-{
- const __be32 *reg;
- int len;
-
- if (depth != 1 || strcmp(uname, "chosen") != 0)
- return 0;
- reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
- if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
- return 1;
-
- elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
- elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
- return 1;
-}
+ crash_size = PAGE_ALIGN(crash_size);
-/*
- * reserve_elfcorehdr() - reserves memory for elf core header
- *
- * This function reserves the memory occupied by an elf core header
- * described in the device tree. This region contains all the
- * information about primary kernel's core image and is used by a dump
- * capture kernel to access the system memory on primary kernel.
- */
-static void __init reserve_elfcorehdr(void)
-{
- of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);
+ /* User specifies base address explicitly. */
+ if (crash_base)
+ crash_max = crash_base + crash_size;
- if (!elfcorehdr_size)
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+ crash_base, crash_max);
+ if (!crash_base) {
+ pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
+ crash_size);
return;
+ }
- if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
- pr_warn("elfcorehdr is overlapped\n");
+ if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
+ crash_low_size && reserve_crashkernel_low(crash_low_size)) {
+ memblock_phys_free(crash_base, crash_size);
return;
}
- memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
+ pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+ crash_base, crash_base + crash_size, crash_size >> 20);
- pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
- elfcorehdr_size >> 10, elfcorehdr_addr);
-}
-#else
-static void __init reserve_elfcorehdr(void)
-{
+ /*
+ * The crashkernel memory will be removed from the kernel linear
+ * map. Inform kmemleak so that it won't try to access it.
+ */
+ kmemleak_ignore_phys(crash_base);
+ if (crashk_low_res.end)
+ kmemleak_ignore_phys(crashk_low_res.start);
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+ insert_resource(&iomem_resource, &crashk_res);
}
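The function above handles three command-line shapes: plain crashkernel=X[@offset], the crashkernel=X,high fallback, and an optional crashkernel=Y,low companion region. As a rough standalone model of how the allocation ceiling is chosen (a hypothetical helper with illustrative names, not kernel code):

/* Hypothetical helper modelling the ceiling selection above. */
static unsigned long long pick_crash_max(bool plain_ok, bool high_ok,
					 unsigned long long base,
					 unsigned long long size,
					 unsigned long long low_max,
					 unsigned long long high_max)
{
	if (plain_ok && base)		/* crashkernel=X@offset: honour fixed placement */
		return base + size;
	if (plain_ok)			/* crashkernel=X: keep it DMA-addressable */
		return low_max;
	if (high_ok)			/* crashkernel=X,high: anywhere addressable */
		return high_max;
	return 0;			/* nothing valid was specified */
}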
-#endif /* CONFIG_CRASH_DUMP */
/*
- * Return the maximum physical address for a zone with a given address size
- * limit. It currently assumes that for memory starting above 4G, 32-bit
- * devices will use a DMA offset.
+ * Return the maximum physical address for a zone addressable with the given
+ * number of bits. If DRAM starts above 32-bit, expand the zone to all
+ * available memory; if it only starts above the zone's own bit limit, cap
+ * the zone at 32-bit instead.
*/
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
- phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
- return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
-}
+ phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
+ phys_addr_t phys_start = memblock_start_of_DRAM();
-#ifdef CONFIG_NUMA
+ if (phys_start > U32_MAX)
+ zone_mask = PHYS_ADDR_MAX;
+ else if (phys_start > zone_mask)
+ zone_mask = U32_MAX;
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
-{
- unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
-
-#ifdef CONFIG_ZONE_DMA
- max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
-#endif
-#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
-#endif
- max_zone_pfns[ZONE_NORMAL] = max;
-
- free_area_init_nodes(max_zone_pfns);
+ return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
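A user-space transcription of the same arithmetic (simplified types, assuming 64-bit physical addresses) may make the three cases easier to see:

#include <stdint.h>

/* Sketch of max_zone_phys(); dram_end is exclusive, like memblock_end_of_DRAM(). */
static uint64_t sketch_max_zone_phys(unsigned int zone_bits,
				     uint64_t dram_start, uint64_t dram_end)
{
	uint64_t zone_mask = (zone_bits >= 64) ? ~0ULL : (1ULL << zone_bits) - 1;

	if (dram_start > UINT32_MAX)		/* DRAM entirely above 4 GiB */
		zone_mask = ~0ULL;		/* expand the zone to everything */
	else if (dram_start > zone_mask)	/* DRAM above the narrow mask */
		zone_mask = UINT32_MAX;		/* cap the zone at 32-bit */

	uint64_t end = dram_end - 1;
	return (zone_mask < end ? zone_mask : end) + 1;
}

For example, zone_bits = 30 with DRAM spanning [0x80000000, 0x100000000) falls into the second case and yields a 4 GiB limit.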
-#else
-
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
+static void __init zone_sizes_init(void)
{
- struct memblock_region *reg;
- unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long __maybe_unused max_dma, max_dma32;
-
- memset(zone_size, 0, sizeof(zone_size));
-
- max_dma = max_dma32 = min;
-#ifdef CONFIG_ZONE_DMA
- max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
- zone_size[ZONE_DMA] = max_dma - min;
-#endif
-#ifdef CONFIG_ZONE_DMA32
- max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
- zone_size[ZONE_DMA32] = max_dma32 - max_dma;
-#endif
- zone_size[ZONE_NORMAL] = max - max_dma32;
-
- memcpy(zhole_size, zone_size, sizeof(zhole_size));
-
- for_each_memblock(memory, reg) {
- unsigned long start = memblock_region_memory_base_pfn(reg);
- unsigned long end = memblock_region_memory_end_pfn(reg);
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+ unsigned int __maybe_unused acpi_zone_dma_bits;
+ unsigned int __maybe_unused dt_zone_dma_bits;
+ phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
#ifdef CONFIG_ZONE_DMA
- if (start >= min && start < max_dma) {
- unsigned long dma_end = min(end, max_dma);
- zhole_size[ZONE_DMA] -= dma_end - start;
- start = dma_end;
- }
+ acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
+ dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
+ zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
+ arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+ max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
- if (start >= max_dma && start < max_dma32) {
- unsigned long dma32_end = min(end, max_dma32);
- zhole_size[ZONE_DMA32] -= dma32_end - start;
- start = dma32_end;
- }
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+ if (!arm64_dma_phys_limit)
+ arm64_dma_phys_limit = dma32_phys_limit;
#endif
- if (start >= max_dma32 && start < max) {
- unsigned long normal_end = min(end, max);
- zhole_size[ZONE_NORMAL] -= normal_end - start;
- }
- }
+ max_zone_pfns[ZONE_NORMAL] = max_pfn;
- free_area_init_node(0, zone_size, min, zhole_size);
+ free_area_init(max_zone_pfns);
}
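The ZONE_DMA sizing above takes the smallest of three limits: 32 bits, the ACPI/IORT limit and the device-tree limit, each converted to a bit count with fls64(). A minimal standalone sketch of that derivation, using an illustrative Raspberry Pi 4-style device-tree limit of roughly 1 GiB:

#include <stdint.h>

static unsigned int sketch_fls64(uint64_t x)
{
	unsigned int bits = 0;

	while (x) {		/* 1-based index of the highest set bit */
		bits++;
		x >>= 1;
	}
	return bits;
}

static unsigned int sketch_zone_dma_bits(uint64_t dt_max, uint64_t acpi_max)
{
	unsigned int bits = 32;
	unsigned int dt_bits = sketch_fls64(dt_max);
	unsigned int acpi_bits = sketch_fls64(acpi_max);

	if (dt_bits < bits)
		bits = dt_bits;
	if (acpi_bits < bits)
		bits = acpi_bits;
	return bits;		/* equivalent of min3(32U, dt, acpi) */
}

/* sketch_zone_dma_bits(0x3bffffff, ~0ULL) == 30, matching the 30-bit RPi4 case. */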
-#endif /* CONFIG_NUMA */
-
-int pfn_valid(unsigned long pfn)
+int pfn_is_map_memory(unsigned long pfn)
{
- phys_addr_t addr = pfn << PAGE_SHIFT;
-
- if ((addr >> PAGE_SHIFT) != pfn)
- return 0;
+ phys_addr_t addr = PFN_PHYS(pfn);
-#ifdef CONFIG_SPARSEMEM
- if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
+ if (PHYS_PFN(addr) != pfn)
return 0;
- if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
- return 0;
-#endif
return memblock_is_map_memory(addr);
}
-EXPORT_SYMBOL(pfn_valid);
+EXPORT_SYMBOL(pfn_is_map_memory);
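The PHYS_PFN(PFN_PHYS(pfn)) round trip above rejects PFNs whose top bits would be shifted out when converted to a physical address. A user-space illustration, assuming 4 KiB pages:

#include <stdint.h>
#include <stdbool.h>

#define SKETCH_PAGE_SHIFT 12	/* assumed 4 KiB pages */

static bool sketch_pfn_round_trips(uint64_t pfn)
{
	uint64_t addr = pfn << SKETCH_PAGE_SHIFT;	/* PFN_PHYS() */

	return (addr >> SKETCH_PAGE_SHIFT) == pfn;	/* PHYS_PFN() comparison */
}

/* e.g. pfn = 1ULL << 52 wraps around in the shift and is rejected. */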
-static phys_addr_t memory_limit = PHYS_ADDR_MAX;
+static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;
/*
* Limit the memory size that was specified via FDT.
@@ -295,44 +263,23 @@ static int __init early_mem(char *p)
}
early_param("mem", early_mem);
-static int __init early_init_dt_scan_usablemem(unsigned long node,
- const char *uname, int depth, void *data)
-{
- struct memblock_region *usablemem = data;
- const __be32 *reg;
- int len;
-
- if (depth != 1 || strcmp(uname, "chosen") != 0)
- return 0;
-
- reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
- if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
- return 1;
-
- usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
- usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);
-
- return 1;
-}
-
-static void __init fdt_enforce_memory_region(void)
-{
- struct memblock_region reg = {
- .size = 0,
- };
-
- of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);
-
- if (reg.size)
- memblock_cap_memory_range(reg.base, reg.size);
-}
-
void __init arm64_memblock_init(void)
{
- const s64 linear_region_size = BIT(vabits_actual - 1);
+ s64 linear_region_size = PAGE_END - _PAGE_OFFSET(vabits_actual);
- /* Handle linux,usable-memory-range property */
- fdt_enforce_memory_region();
+ /*
+ * Corner case: 52-bit VA capable systems running KVM in nVHE mode may
+ * be limited in their ability to support a linear map that exceeds 51
+ * bits of VA space, depending on the placement of the ID map. Given
+ * that the placement of the ID map may be randomized, let's simply
+ * limit the kernel's linear map to 51 bits as well if we detect this
+ * configuration.
+ */
+ if (IS_ENABLED(CONFIG_KVM) && vabits_actual == 52 &&
+ is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
+ pr_info("Capping linear region to 51 bits for KVM in nVHE mode on LVA capable hardware.\n");
+ linear_region_size = min_t(u64, linear_region_size, BIT(51));
+ }
/* Remove memory above our supported physical address size */
memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
@@ -343,19 +290,8 @@ void __init arm64_memblock_init(void)
memstart_addr = round_down(memblock_start_of_DRAM(),
ARM64_MEMSTART_ALIGN);
- physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
-
- vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
-
- /*
- * If we are running with a 52-bit kernel VA config on a system that
- * does not support it, we have to offset our vmemmap and physvirt_offset
- * s.t. we avoid the 52-bit portion of the direct linear map
- */
- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
- vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
- physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
- }
+ if ((memblock_end_of_DRAM() - memstart_addr) > linear_region_size)
+ pr_warn("Memory doesn't fit in the linear mapping, VA_BITS too small\n");
/*
* Remove the memory that we will not be able to cover with the
@@ -372,6 +308,16 @@ void __init arm64_memblock_init(void)
}
/*
+ * If we are running with a 52-bit kernel VA config on a system that
+ * does not support it, we have to place the available physical
+ * memory in the 48-bit addressable part of the linear region, i.e.,
+ * we have to move it upward. Since memstart_addr represents the
+ * physical address of PAGE_OFFSET, we have to *subtract* from it.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
+ memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);
+
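As a worked example of the adjustment above (assuming the usual _PAGE_OFFSET(va) = -(1 << va) definition): _PAGE_OFFSET(48) - _PAGE_OFFSET(52) = 2^52 - 2^48 = 0x000f000000000000. Subtracting that from memstart_addr means __phys_to_virt() of the old memstart_addr now evaluates to PAGE_OFFSET + 0x000f000000000000 = _PAGE_OFFSET(48), i.e. the start of RAM lands at the bottom of the 48-bit addressable part of the linear map.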
+ /*
* Apply the memory limit if it was set. Since the kernel may be loaded
* high up in memory, add back the kernel region that must be accessible
* via the linear mapping.
@@ -404,23 +350,26 @@ void __init arm64_memblock_init(void)
"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
phys_initrd_size = 0;
} else {
- memblock_remove(base, size); /* clear MEMBLOCK_ flags */
memblock_add(base, size);
+ memblock_clear_nomap(base, size);
memblock_reserve(base, size);
}
}
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
extern u16 memstart_offset_seed;
- u64 range = linear_region_size -
- (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+ int parange = cpuid_feature_extract_unsigned_field(
+ mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
+ s64 range = linear_region_size -
+ BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
/*
* If the size of the linear region exceeds, by a sufficient
- * margin, the size of the region that the available physical
- * memory spans, randomize the linear region as well.
+ * margin, the size of the region that the physical memory can
+ * span, randomize the linear region as well.
*/
- if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+ if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
range /= ARM64_MEMSTART_ALIGN;
memstart_addr -= ARM64_MEMSTART_ALIGN *
((range * memstart_offset_seed) >> 16);
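A standalone sketch of the offset computation above (illustrative names; with VA_BITS=48 the linear region spans half the kernel VA space, so a full 48-bit physical range leaves no slack and randomization is skipped):

#include <stdint.h>

static int64_t sketch_memstart_kaslr_offset(int64_t linear_region_size,
					    int64_t phys_range,
					    uint16_t seed,
					    int64_t memstart_align)
{
	int64_t range = linear_region_size - phys_range;

	if (!seed || range < memstart_align)
		return 0;			/* no slack, or KASLR disabled */

	range /= memstart_align;
	/* take a fraction seed/65536 of the slack, in memstart_align units */
	return memstart_align * ((range * seed) >> 16);
}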
@@ -431,7 +380,7 @@ void __init arm64_memblock_init(void)
* Register the kernel text, kernel data, initrd, and initial
* pagetables with memblock.
*/
- memblock_reserve(__pa_symbol(_text), _end - _text);
+ memblock_reserve(__pa_symbol(_stext), _end - _stext);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
/* the generic initrd code expects virtual addresses */
initrd_start = __phys_to_virt(phys_initrd_start);
@@ -440,23 +389,10 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- if (IS_ENABLED(CONFIG_ZONE_DMA)) {
- zone_dma_bits = ARM64_ZONE_DMA_BITS;
- arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
- }
-
- if (IS_ENABLED(CONFIG_ZONE_DMA32))
- arm64_dma32_phys_limit = max_zone_phys(32);
- else
- arm64_dma32_phys_limit = PHYS_MASK + 1;
-
- reserve_crashkernel();
-
- reserve_elfcorehdr();
+ if (!defer_reserve_crashkernel())
+ reserve_crashkernel();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
- dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -471,86 +407,42 @@ void __init bootmem_init(void)
max_pfn = max_low_pfn = max;
min_low_pfn = min;
- arm64_numa_init();
+ arch_numa_init();
+
/*
- * Sparsemem tries to allocate bootmem in memory_present(), so must be
- * done after the fixed reservations.
+ * must be done after arch_numa_init(), which calls numa_init() to
+ * initialize node_online_map; hugetlb_cma_reserve() uses it while
+ * allocating the required CMA size across online nodes.
*/
- memblocks_present();
-
- sparse_init();
- zone_sizes_init(min, max);
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+ arm64_hugetlb_cma_reserve();
+#endif
- memblock_dump_all();
-}
+ dma_pernuma_cma_reserve();
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
-static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
-{
- struct page *start_pg, *end_pg;
- unsigned long pg, pgend;
+ kvm_hyp_reserve();
/*
- * Convert start_pfn/end_pfn to a struct page pointer.
+ * sparse_init() tries to allocate memory from memblock, so must be
+ * done after the fixed reservations
*/
- start_pg = pfn_to_page(start_pfn - 1) + 1;
- end_pg = pfn_to_page(end_pfn - 1) + 1;
+ sparse_init();
+ zone_sizes_init();
/*
- * Convert to physical addresses, and round start upwards and end
- * downwards.
+ * Reserve the CMA area after arm64_dma_phys_limit was initialised.
*/
- pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
- pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+ dma_contiguous_reserve(arm64_dma_phys_limit);
/*
- * If there are free pages between these, free the section of the
- * memmap array.
+ * request_standard_resources() depends on crashkernel's memory being
+ * reserved, so do it here.
*/
- if (pg < pgend)
- memblock_free(pg, pgend - pg);
-}
-
-/*
- * The mem_map array can get very big. Free the unused area of the memory map.
- */
-static void __init free_unused_memmap(void)
-{
- unsigned long start, prev_end = 0;
- struct memblock_region *reg;
-
- for_each_memblock(memory, reg) {
- start = __phys_to_pfn(reg->base);
+ if (defer_reserve_crashkernel())
+ reserve_crashkernel();
-#ifdef CONFIG_SPARSEMEM
- /*
- * Take care not to free memmap entries that don't exist due
- * to SPARSEMEM sections which aren't present.
- */
- start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
- /*
- * If we had a previous bank, and there is a space between the
- * current bank and the previous, free it.
- */
- if (prev_end && prev_end < start)
- free_memmap(prev_end, start);
-
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
- MAX_ORDER_NR_PAGES);
- }
-
-#ifdef CONFIG_SPARSEMEM
- if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
- free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
-#endif
+ memblock_dump_all();
}
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
/*
* mem_init() marks the free areas in the mem_map and tells us how much memory
@@ -559,22 +451,11 @@ static void __init free_unused_memmap(void)
*/
void __init mem_init(void)
{
- if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
- swiotlb_init(1);
- else
- swiotlb_force = SWIOTLB_NO_FORCE;
-
- set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
+ swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);
-#ifndef CONFIG_SPARSEMEM_VMEMMAP
- free_unused_memmap();
-#endif
/* this will put all unused low memory onto the freelists */
memblock_free_all();
- mem_init_print_info(NULL);
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
@@ -583,6 +464,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
+ /*
+ * Selected page table levels should match when derived from
+ * scratch using the virtual address range and page size.
+ */
+ BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+ CONFIG_PGTABLE_LEVELS);
+
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
@@ -603,30 +491,14 @@ void free_initmem(void)
* prevents the region from being reused for kernel modules, which
* is not supported by kallsyms.
*/
- unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
+ vunmap_range((u64)__init_begin, (u64)__init_end);
}
-/*
- * Dump out memory limit information on panic.
- */
-static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+void dump_mem_limit(void)
{
if (memory_limit != PHYS_ADDR_MAX) {
pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
} else {
pr_emerg("Memory Limit: none\n");
}
- return 0;
-}
-
-static struct notifier_block mem_limit_notifier = {
- .notifier_call = dump_mem_limit,
-};
-
-static int __init register_mem_limit_dumper(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &mem_limit_notifier);
- return 0;
}
-__initcall(register_mem_limit_dumper);