Diffstat (limited to 'arch/arm64/mm')
 arch/arm64/mm/copypage.c    |  4
 arch/arm64/mm/fault.c       | 73
 arch/arm64/mm/hugetlbpage.c | 46
 arch/arm64/mm/init.c        | 71
 arch/arm64/mm/trans_pgd.c   |  2
 5 files changed, 133 insertions(+), 63 deletions(-)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index b5447e53cd73..0dea80bf6de4 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -16,8 +16,8 @@
void copy_highpage(struct page *to, struct page *from)
{
- struct page *kto = page_address(to);
- struct page *kfrom = page_address(from);
+ void *kto = page_address(to);
+ void *kfrom = page_address(from);
copy_page(kto, kfrom);
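
Note on the copypage.c hunk: page_address() returns a kernel virtual address (void *), so the old struct page * locals were a type mismatch that only compiled because copy_page() takes void * arguments anyway. A toy userspace model of the corrected helper (the struct layout and stand-in functions here are illustrative, not kernel API):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	/* Toy page descriptor: the mapped data lives elsewhere, as in highmem. */
	struct page { void *mapping; };

	static void *page_address(const struct page *pg) { return pg->mapping; }

	static void copy_highpage(struct page *to, struct page *from)
	{
		/* The fix: hold mapped addresses as void *, not struct page *. */
		void *kto = page_address(to);
		void *kfrom = page_address(from);

		memcpy(kto, kfrom, PAGE_SIZE);	/* stands in for copy_page() */
	}

	int main(void)
	{
		struct page src = { calloc(1, PAGE_SIZE) };
		struct page dst = { calloc(1, PAGE_SIZE) };

		strcpy(src.mapping, "contents");
		copy_highpage(&dst, &src);
		printf("%s\n", (char *)dst.mapping);	/* contents */
		free(src.mapping);
		free(dst.mapping);
		return 0;
	}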
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 77341b160aca..c5e11768e5c1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -43,7 +43,7 @@
#include <asm/traps.h>
struct fault_info {
- int (*fn)(unsigned long far, unsigned int esr,
+ int (*fn)(unsigned long far, unsigned long esr,
struct pt_regs *regs);
int sig;
int code;
@@ -53,17 +53,17 @@ struct fault_info {
static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];
-static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
return fault_info + (esr & ESR_ELx_FSC);
}
-static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
+static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
{
return debug_fault_info + DBG_ESR_EVT(esr);
}
-static void data_abort_decode(unsigned int esr)
+static void data_abort_decode(unsigned long esr)
{
pr_alert("Data abort info:\n");
@@ -85,11 +85,11 @@ static void data_abort_decode(unsigned int esr)
(esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}
-static void mem_abort_decode(unsigned int esr)
+static void mem_abort_decode(unsigned long esr)
{
pr_alert("Mem abort info:\n");
- pr_alert(" ESR = 0x%08x\n", esr);
+ pr_alert(" ESR = 0x%016lx\n", esr);
pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n",
ESR_ELx_EC(esr), esr_get_class_string(esr),
(esr & ESR_ELx_IL) ? 32 : 16);
@@ -99,7 +99,7 @@ static void mem_abort_decode(unsigned int esr)
pr_alert(" EA = %lu, S1PTW = %lu\n",
(esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
(esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
- pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+ pr_alert(" FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC),
esr_to_fault_info(esr)->name);
if (esr_is_data_abort(esr))
@@ -229,20 +229,20 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
return 1;
}
-static bool is_el1_instruction_abort(unsigned int esr)
+static bool is_el1_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
-static bool is_el1_data_abort(unsigned int esr)
+static bool is_el1_data_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
}
-static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
+static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
- unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
+ unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE;
if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
@@ -258,7 +258,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
}
static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
- unsigned int esr,
+ unsigned long esr,
struct pt_regs *regs)
{
unsigned long flags;
@@ -290,7 +290,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
}
static void die_kernel_fault(const char *msg, unsigned long addr,
- unsigned int esr, struct pt_regs *regs)
+ unsigned long esr, struct pt_regs *regs)
{
bust_spinlocks(1);
@@ -308,7 +308,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
}
#ifdef CONFIG_KASAN_HW_TAGS
-static void report_tag_fault(unsigned long addr, unsigned int esr,
+static void report_tag_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
/*
@@ -320,11 +320,11 @@ static void report_tag_fault(unsigned long addr, unsigned int esr,
}
#else
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
-static inline void report_tag_fault(unsigned long addr, unsigned int esr,
+static inline void report_tag_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs) { }
#endif
-static void do_tag_recovery(unsigned long addr, unsigned int esr,
+static void do_tag_recovery(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
@@ -335,13 +335,14 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
* It will be done lazily on the other CPUs when they will hit a
* tag fault.
*/
- sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
+ SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
isb();
}
-static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
+static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
{
- unsigned int fsc = esr & ESR_ELx_FSC;
+ unsigned long fsc = esr & ESR_ELx_FSC;
if (!is_el1_data_abort(esr))
return false;
@@ -352,7 +353,7 @@ static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
return false;
}
-static void __do_kernel_fault(unsigned long addr, unsigned int esr,
+static void __do_kernel_fault(unsigned long addr, unsigned long esr,
struct pt_regs *regs)
{
const char *msg;
@@ -393,7 +394,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
die_kernel_fault(msg, addr, esr, regs);
}
-static void set_thread_esr(unsigned long address, unsigned int esr)
+static void set_thread_esr(unsigned long address, unsigned long esr)
{
current->thread.fault_address = address;
@@ -441,7 +442,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
* exception level). Fail safe by not providing an ESR
* context record at all.
*/
- WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
+ WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr);
esr = 0;
break;
}
@@ -450,7 +451,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
current->thread.fault_code = esr;
}
-static void do_bad_area(unsigned long far, unsigned int esr,
+static void do_bad_area(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
unsigned long addr = untagged_addr(far);
@@ -501,7 +502,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
return handle_mm_fault(vma, addr, mm_flags, regs);
}
-static bool is_el0_instruction_abort(unsigned int esr)
+static bool is_el0_instruction_abort(unsigned long esr)
{
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}
@@ -510,12 +511,12 @@ static bool is_el0_instruction_abort(unsigned int esr)
* Note: not valid for EL1 DC IVAC, but we never use that such that it
* should fault. EL0 cannot issue DC IVAC (undef).
*/
-static bool is_write_abort(unsigned int esr)
+static bool is_write_abort(unsigned long esr)
{
return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
-static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
+static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
const struct fault_info *inf;
@@ -671,7 +672,7 @@ no_context:
}
static int __kprobes do_translation_fault(unsigned long far,
- unsigned int esr,
+ unsigned long esr,
struct pt_regs *regs)
{
unsigned long addr = untagged_addr(far);
@@ -683,19 +684,19 @@ static int __kprobes do_translation_fault(unsigned long far,
return 0;
}
-static int do_alignment_fault(unsigned long far, unsigned int esr,
+static int do_alignment_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
do_bad_area(far, esr, regs);
return 0;
}
-static int do_bad(unsigned long far, unsigned int esr, struct pt_regs *regs)
+static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
return 1; /* "fault" */
}
-static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
+static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf;
unsigned long siaddr;
@@ -725,7 +726,7 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
return 0;
}
-static int do_tag_check_fault(unsigned long far, unsigned int esr,
+static int do_tag_check_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
/*
@@ -805,7 +806,7 @@ static const struct fault_info fault_info[] = {
{ do_bad, SIGKILL, SI_KERNEL, "unknown 63" },
};
-void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
+void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_fault_info(esr);
unsigned long addr = untagged_addr(far);
@@ -825,14 +826,14 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);
-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);
-int __init early_brk64(unsigned long addr, unsigned int esr,
+int __init early_brk64(unsigned long addr, unsigned long esr,
struct pt_regs *regs);
/*
@@ -852,7 +853,7 @@ static struct fault_info __refdata debug_fault_info[] = {
};
void __init hook_debug_fault_code(int nr,
- int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int (*fn)(unsigned long, unsigned long, struct pt_regs *),
int sig, int code, const char *name)
{
BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));
@@ -885,7 +886,7 @@ static void debug_exception_exit(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(debug_exception_exit);
-void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
+void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
struct pt_regs *regs)
{
const struct fault_info *inf = esr_to_debug_fault_info(esr);
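
Every fault.c hunk is the same mechanical change: ESR values move from unsigned int to unsigned long (with matching printk specifiers), because ESR_ELx is architecturally a 64-bit register and newer ISS2 bits sit above bit 31, where a 32-bit carrier silently drops them. A standalone sketch of the truncation risk (mask values mirror the architecture, but the kernel's real definitions live in arch/arm64/include/asm/esr.h; assumes an LP64 host, as on arm64):

	#include <stdio.h>

	#define ESR_ELx_EC_SHIFT 26
	#define ESR_ELx_EC_MASK  (0x3FUL << ESR_ELx_EC_SHIFT)
	#define ESR_ELx_EC(esr)  (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

	int main(void)
	{
		/* A data abort taken at EL1 (EC = 0x25) with an ISS2 bit
		 * set above bit 31, as newer architecture features allow. */
		unsigned long esr = (1UL << 32) | (0x25UL << ESR_ELx_EC_SHIFT);

		printf("EC     = 0x%02lx\n", ESR_ELx_EC(esr));
		printf("full   = 0x%016lx\n", esr);
		printf("as u32 = 0x%08x (ISS2 bit lost)\n", (unsigned int)esr);
		return 0;
	}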
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index cbace1c9e137..64bb078e2e7b 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -158,6 +158,28 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
return contig_ptes;
}
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ int ncontig, i;
+ size_t pgsize;
+ pte_t orig_pte = ptep_get(ptep);
+
+ if (!pte_present(orig_pte) || !pte_cont(orig_pte))
+ return orig_pte;
+
+ ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
+ for (i = 0; i < ncontig; i++, ptep++) {
+ pte_t pte = ptep_get(ptep);
+
+ if (pte_dirty(pte))
+ orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
+ }
+ return orig_pte;
+}
+
/*
* Changing some bits of contiguous entries requires us to follow a
* Break-Before-Make approach, breaking the whole contiguous set
@@ -166,15 +188,14 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
*
* This helper performs the break step.
*/
-static pte_t get_clear_flush(struct mm_struct *mm,
+static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
unsigned long pgsize,
unsigned long ncontig)
{
- pte_t orig_pte = huge_ptep_get(ptep);
- bool valid = pte_valid(orig_pte);
- unsigned long i, saddr = addr;
+ pte_t orig_pte = ptep_get(ptep);
+ unsigned long i;
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
@@ -190,11 +211,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
if (pte_young(pte))
orig_pte = pte_mkyoung(orig_pte);
}
-
- if (valid) {
- struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
- flush_tlb_range(&vma, saddr, addr);
- }
return orig_pte;
}
@@ -385,14 +401,14 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
{
int ncontig;
size_t pgsize;
- pte_t orig_pte = huge_ptep_get(ptep);
+ pte_t orig_pte = ptep_get(ptep);
if (!pte_cont(orig_pte))
return ptep_get_and_clear(mm, addr, ptep);
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
- return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
/*
@@ -408,11 +424,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
int i;
- if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+ if (pte_write(pte) != pte_write(ptep_get(ptep)))
return 1;
for (i = 0; i < ncontig; i++) {
- pte_t orig_pte = huge_ptep_get(ptep + i);
+ pte_t orig_pte = ptep_get(ptep + i);
if (pte_dirty(pte) != pte_dirty(orig_pte))
return 1;
@@ -443,7 +459,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
if (!__cont_access_flags_changed(ptep, pte, ncontig))
return 0;
- orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
+ orig_pte = get_clear_contig(vma->vm_mm, addr, ptep, pgsize, ncontig);
/* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
@@ -476,7 +492,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
ncontig = find_num_contig(mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
- pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig);
+ pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
pte = pte_wrprotect(pte);
hugeprot = pte_pgprot(pte);
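
The hugetlbpage.c changes all concern contiguous-bit hugepages, which are backed by N hardware PTEs: the new huge_ptep_get() folds each entry's dirty/young bits into the value reported to generic code, and get_clear_flush() becomes get_clear_contig() now that the TLB flush is left to the callers. A toy model of the dirty/young fold (flag encodings made up for illustration):

	#include <stdio.h>

	#define PTE_DIRTY (1u << 0)
	#define PTE_YOUNG (1u << 1)

	typedef unsigned int pte_t;

	/* Start from the head entry and OR in dirty/young from the whole
	 * contiguous run: hardware may have set them on any of the N PTEs. */
	static pte_t huge_pte_fold(const pte_t *ptep, int ncontig)
	{
		pte_t orig = ptep[0];
		int i;

		for (i = 0; i < ncontig; i++)
			orig |= ptep[i] & (PTE_DIRTY | PTE_YOUNG);
		return orig;
	}

	int main(void)
	{
		pte_t run[4] = { 0, PTE_YOUNG, 0, PTE_DIRTY };
		pte_t summary = huge_pte_fold(run, 4);

		printf("dirty=%d young=%d\n",
		       !!(summary & PTE_DIRTY), !!(summary & PTE_YOUNG));
		return 0;
	}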
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 1e7b1550e2fc..a1410143ea62 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -90,6 +90,32 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif
+/* Current arm64 boot protocol requires 2MB alignment */
+#define CRASH_ALIGN SZ_2M
+
+#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+
+static int __init reserve_crashkernel_low(unsigned long long low_size)
+{
+ unsigned long long low_base;
+
+ low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
+ if (!low_base) {
+ pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
+ return -ENOMEM;
+ }
+
+ pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
+ low_base, low_base + low_size, low_size >> 20);
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+ insert_resource(&iomem_resource, &crashk_low_res);
+
+ return 0;
+}
+
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -100,17 +126,35 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
static void __init reserve_crashkernel(void)
{
unsigned long long crash_base, crash_size;
- unsigned long long crash_max = arm64_dma_phys_limit;
+ unsigned long long crash_low_size = 0;
+ unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
+ char *cmdline = boot_command_line;
int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ /* crashkernel=X[@offset] */
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
&crash_size, &crash_base);
- /* no crashkernel= or invalid value specified */
- if (ret || !crash_size)
+ if (ret == -ENOENT) {
+ ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
+ if (ret || !crash_size)
+ return;
+
+ /*
+ * crashkernel=Y,low can be specified or not, but invalid value
+ * is not allowed.
+ */
+ ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
+ if (ret && (ret != -ENOENT))
+ return;
+
+ crash_max = CRASH_ADDR_HIGH_MAX;
+ } else if (ret || !crash_size) {
+ /* The specified value is invalid */
return;
+ }
crash_size = PAGE_ALIGN(crash_size);
@@ -118,8 +162,7 @@ static void __init reserve_crashkernel(void)
if (crash_base)
crash_max = crash_base + crash_size;
- /* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
crash_base, crash_max);
if (!crash_base) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -127,6 +170,12 @@ static void __init reserve_crashkernel(void)
return;
}
+ if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
+ crash_low_size && reserve_crashkernel_low(crash_low_size)) {
+ memblock_phys_free(crash_base, crash_size);
+ return;
+ }
+
pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
@@ -135,8 +184,12 @@ static void __init reserve_crashkernel(void)
* map. Inform kmemleak so that it won't try to access it.
*/
kmemleak_ignore_phys(crash_base);
+ if (crashk_low_res.end)
+ kmemleak_ignore_phys(crashk_low_res.start);
+
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
+ insert_resource(&iomem_resource, &crashk_res);
}
/*
@@ -157,7 +210,7 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}
-static void __init zone_sizes_init(unsigned long min, unsigned long max)
+static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
unsigned int __maybe_unused acpi_zone_dma_bits;
@@ -176,7 +229,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = dma32_phys_limit;
#endif
- max_zone_pfns[ZONE_NORMAL] = max;
+ max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init(max_zone_pfns);
}
@@ -374,7 +427,7 @@ void __init bootmem_init(void)
* done after the fixed reservations
*/
sparse_init();
- zone_sizes_init(min, max);
+ zone_sizes_init();
/*
* Reserve the CMA area after arm64_dma_phys_limit was initialised.
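
The init.c hunks teach reserve_crashkernel() the crashkernel=X,high / crashkernel=Y,low syntax: plain crashkernel=X is tried first, and only on -ENOENT does the code fall back to the high/low variants, raising the allocation ceiling from the DMA limit to the full physical range and reserving a separate low block for DMA. A condensed model of that fallback (the parse_* stubs and limit values are placeholders, not kernel API):

	#include <errno.h>
	#include <stdio.h>

	#define ADDR_LOW_MAX  0x100000000ULL	/* placeholder DMA limit */
	#define ADDR_HIGH_MAX ~0ULL		/* placeholder PHYS_MASK + 1 */

	/* Stand-ins for parse_crashkernel{,_high,_low}(). */
	static int parse_plain(unsigned long long *sz) { (void)sz; return -ENOENT; }
	static int parse_high(unsigned long long *sz)  { *sz = 512 << 20; return 0; }
	static int parse_low(unsigned long long *sz)   { *sz = 128 << 20; return 0; }

	int main(void)
	{
		unsigned long long crash_size = 0, crash_low_size = 0;
		unsigned long long crash_max = ADDR_LOW_MAX;
		int ret = parse_plain(&crash_size);	/* crashkernel=X[@offset] */

		if (ret == -ENOENT) {
			if (parse_high(&crash_size) || !crash_size)
				return 0;	/* no usable ,high value either */
			ret = parse_low(&crash_low_size);
			if (ret && ret != -ENOENT)
				return 0;	/* ,low given but invalid */
			crash_max = ADDR_HIGH_MAX;	/* search all of memory */
		} else if (ret || !crash_size) {
			return 0;		/* plain value invalid */
		}

		printf("size=%lluM low=%lluM max=%#llx\n",
		       crash_size >> 20, crash_low_size >> 20, crash_max);
		return 0;
	}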
diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index d7da8ca40d2e..4ea2eefbc053 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -238,7 +238,7 @@ int trans_pgd_idmap_page(struct trans_pgd_info *info, phys_addr_t *trans_ttbr0,
int this_level, index, level_lsb, level_msb;
dst_addr &= PAGE_MASK;
- prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ prev_level_entry = pte_val(pfn_pte(pfn, PAGE_KERNEL_ROX));
for (this_level = 3; this_level >= 0; this_level--) {
levels[this_level] = trans_alloc(info);
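
Finally, the trans_pgd.c one-liner: the idmapped transition page is only executed through this mapping, never written, so PAGE_KERNEL_ROX can presumably drop the write permission that made the old PAGE_KERNEL_EXEC mapping writable-and-executable. Schematically (illustrative permission bits, not the arm64 PTE encodings):

	#define PROT_R (1u << 0)
	#define PROT_W (1u << 1)
	#define PROT_X (1u << 2)

	#define PAGE_KERNEL_EXEC (PROT_R | PROT_W | PROT_X)	/* old: writable + executable */
	#define PAGE_KERNEL_ROX  (PROT_R | PROT_X)		/* new: read-only executable */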