| author | Alexander Gordeev <agordeev@linux.ibm.com> | 2022-08-06 09:24:07 +0200 |
|---|---|---|
| committer | Alexander Gordeev <agordeev@linux.ibm.com> | 2022-08-06 09:24:07 +0200 |
| commit | 5e441f61f509617a3f57fcb156b7aa2870cc8752 (patch) | |
| tree | 267f0773e33ab237296a566040bc0993ad4ebb0c /arch/s390/mm/maccess.c | |
| parent | Revert "s390/smp,ptdump: add absolute lowcore markers" (diff) | |
Revert "s390/smp: rework absolute lowcore access"
This reverts commit 7d06fed77b7d8fc9f6cc41b4e3f2823d32532ad8.
The reverted commit introduced vmem_mutex locking in the
vmem_map_4k_page() function, which is called from smp_reinit_ipl_cpu()
with interrupts disabled. While smp_reinit_ipl_cpu() is a pre-SMP early
initcall, so no other CPUs run in parallel and no other code takes
vmem_mutex at this boot stage, taking a mutex with interrupts disabled
still needs to be fixed.
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
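
The underlying problem is that a mutex is a sleeping lock, so acquiring one with interrupts disabled is invalid and trips the kernel's atomic-sleep debugging checks. Below is a minimal sketch of that pattern, using a stand-in lock and a hypothetical function name rather than the actual vmem/smp call chain from the reverted commit:

```c
#include <linux/mutex.h>
#include <linux/irqflags.h>

static DEFINE_MUTEX(example_mutex);	/* stand-in for vmem_mutex */

/* Hypothetical illustration: taking a sleeping lock with IRQs off. */
static void broken_early_init(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* interrupts disabled ... */
	mutex_lock(&example_mutex);	/* ... but mutex_lock() may sleep:
					 * with CONFIG_DEBUG_ATOMIC_SLEEP this
					 * triggers "BUG: sleeping function
					 * called from invalid context" */
	mutex_unlock(&example_mutex);
	local_irq_restore(flags);
}
```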
Diffstat (limited to 'arch/s390/mm/maccess.c')
-rw-r--r-- | arch/s390/mm/maccess.c | 67 |
1 file changed, 37 insertions(+), 30 deletions(-)
```diff
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index b8451ddbb3d6..d6d84e02f35a 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -15,7 +15,6 @@
 #include <asm/asm-extable.h>
 #include <asm/ctl_reg.h>
 #include <asm/io.h>
-#include <asm/abs_lowcore.h>
 #include <asm/stacktrace.h>
 
 static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
@@ -149,20 +148,46 @@ int memcpy_real(void *dest, unsigned long src, size_t count)
 }
 
 /*
- * Find CPU that owns swapped prefix page
+ * Copy memory in absolute mode (kernel to kernel)
  */
-static int get_swapped_owner(phys_addr_t addr)
+void memcpy_absolute(void *dest, void *src, size_t count)
+{
+	unsigned long cr0, flags, prefix;
+
+	flags = arch_local_irq_save();
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	prefix = store_prefix();
+	if (prefix) {
+		local_mcck_disable();
+		set_prefix(0);
+		memcpy(dest, src, count);
+		set_prefix(prefix);
+		local_mcck_enable();
+	} else {
+		memcpy(dest, src, count);
+	}
+	__ctl_load(cr0, 0, 0);
+	arch_local_irq_restore(flags);
+}
+
+/*
+ * Check if physical address is within prefix or zero page
+ */
+static int is_swapped(phys_addr_t addr)
 {
 	phys_addr_t lc;
 	int cpu;
 
+	if (addr < sizeof(struct lowcore))
+		return 1;
 	for_each_online_cpu(cpu) {
 		lc = virt_to_phys(lowcore_ptr[cpu]);
 		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
 			continue;
-		return cpu;
+		return 1;
 	}
-	return -1;
+	return 0;
 }
 
 /*
@@ -175,35 +200,17 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
 {
 	void *ptr = phys_to_virt(addr);
 	void *bounce = ptr;
-	struct lowcore *abs_lc;
-	unsigned long flags;
 	unsigned long size;
-	int this_cpu, cpu;
 
 	cpus_read_lock();
-	this_cpu = get_cpu();
-	if (addr >= sizeof(struct lowcore)) {
-		cpu = get_swapped_owner(addr);
-		if (cpu < 0)
-			goto out;
-	}
-	bounce = (void *)__get_free_page(GFP_ATOMIC);
-	if (!bounce)
-		goto out;
-	size = PAGE_SIZE - (addr & ~PAGE_MASK);
-	if (addr < sizeof(struct lowcore)) {
-		abs_lc = get_abs_lowcore(&flags);
-		ptr = (void *)abs_lc + addr;
-		memcpy(bounce, ptr, size);
-		put_abs_lowcore(abs_lc, flags);
-	} else if (cpu == this_cpu) {
-		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
-		memcpy(bounce, ptr, size);
-	} else {
-		memcpy(bounce, ptr, size);
+	preempt_disable();
+	if (is_swapped(addr)) {
+		size = PAGE_SIZE - (addr & ~PAGE_MASK);
+		bounce = (void *) __get_free_page(GFP_ATOMIC);
+		if (bounce)
+			memcpy_absolute(bounce, ptr, size);
 	}
-out:
-	put_cpu();
+	preempt_enable();
 	cpus_read_unlock();
 	return bounce;
 }
```
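
The restored memcpy_absolute() disables lowcore protection, temporarily resets the prefix register to 0 (with machine checks disabled while the prefix is swapped), and then performs a plain memcpy(), so the copy operates on absolute rather than prefixed memory. A minimal sketch of how such a helper is typically invoked to update the absolute zero-page lowcore follows; the wrapper function, the chosen field, and the header assumed to declare memcpy_absolute() are illustrative assumptions, not code from this commit:

```c
#include <asm/lowcore.h>	/* struct lowcore, S390_lowcore */
#include <asm/processor.h>	/* assumed to declare memcpy_absolute() */

/*
 * Illustrative caller (hypothetical, not from this commit): write a new
 * restart stack pointer into the absolute (prefix 0) lowcore. Because
 * memcpy_absolute() switches the prefix to 0 for the duration of the
 * copy, the store reaches the real zero page instead of being forwarded
 * to this CPU's prefixed lowcore.
 */
static void example_update_restart_stack(unsigned long stack)
{
	memcpy_absolute(&S390_lowcore.restart_stack, &stack, sizeof(stack));
}
```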